Mirror of https://github.com/mudler/luet.git (synced 2025-09-02 07:45:02 +00:00)
Compare commits
125 Commits
Commits (SHA1):

29ec19a8a1, 440e07c418, acf74f5896, 1ee1894ffa, c8573f9535, 76b70ebeb4, 2efb17a06c, 64ab3711ca,
eb5d7ba35b, b6b91cfd7a, 4d8a9a544b, 654b5b48cd, 92e18d5782, 8780e4f16f, a7b4ae67c9, 68edfd58e7,
0658020c60, c3b552103f, 796967cc9d, 5cccc34f32, 5ef1d04055, 1bd4d520a4, b12c7678d4, 32a99a4a49,
56e9c6f82e, 92ea69a2b9, 838899aa83, 76695b2fc8, 5c84e5b0a7, 06fa8b1c87, ff153f367f, 459676397c,
93057fbf6d, 5e1a7c50df, 0ceaf09615, 0dc78ebe41, 27c2e3c51f, e83f600ed3, 6344e47eb3, 8b1c5558b2,
c277ac0f94, d8c8c2194f, 4494385f5b, 85a7968ecc, 1ba987b0f1, c72b5be364, 1ef18ed2c5, 4b1b711a5c,
7f047e4fc2, 356350f724, 9d2ee1b760, fd12227d53, 1e617b0c67, 77b49d9c4a, 4c3532e3c6, f2ec065a89,
7193ea03f9, beeb0dcaaa, 0de3177ddd, 45c8dfa19f, 186ac33156, bdc24b84a4, e5d6d21178, 0379855592,
958b8c32e1, b0b95d1721, f85891e362, 946524f90d, 2cbd97ff3a, a4d77f8f99, adcb459fd2, 55ae67be0f,
848215eef0, 7bfff97f57, a73f5f9b65, 0288eedbc3, b27237b7ff, c9aed37fa7, 788b889d14, ef92f23221,
562fcc2421, 54be45dcff, 413572a8e3, ecc41ce370, dd6501a642, a7b355ed2f, 9f3e7fd0b2, ac3843e342,
612477718e, 802b0b5201, c5587f9dfc, c27d4d258e, 9202bcbbbe, fadd5a10a3, d297b92483, 7ba7add2a8,
57c769b4a5, 44cae094e8, c022c75239, 3250d63072, b8961be793, 036b5c08c6, c7b79bf630, 83cb6a2804,
182afa315a, ec19b34ca8, 88307b1912, a83be204e8, b8352a81a2, ebf907fb45, f0a34f1cf0, 4f1e4c0b41,
c736c002af, 2c32af2951, ce349ca1b7, 662742851a, f8ef1c0889, 23513f2c75, 268239a561, f8989e464e,
0028dd3a92, caa1cfad5c, 39839edda9, ecd4be4ad3, 675170939d
.github/workflows/release.yml (72 changed lines, vendored)

@@ -1,8 +1,46 @@
on: push
concurrency:
group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
cancel-in-progress: true
name: Build and release on push
jobs:
release:
name: Test and Release
tests-integration:
name: Integration tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.14.x
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Login to quay with img
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Tests with Img backend
run: |
sudo -E env "PATH=$PATH" \
env "LUET_BACKEND=img" \
make test-integration
- name: Tests
run: |
sudo -E \
env "PATH=$PATH" \
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-integration

tests-unit:
name: Unit tests
runs-on: ubuntu-latest
steps:
- name: Install Go
@@ -22,13 +60,6 @@ jobs:
sudo chmod a+x "/usr/bin/img"
- name: Build test
run: sudo -E env "PATH=$PATH" make multiarch-build-small
- name: Login to quay with img
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Tests with Img backend
run: |
sudo -E env "PATH=$PATH" \
env "LUET_BACKEND=img" \
make test-integration
- name: Tests
run: |
sudo -E \
@@ -36,7 +67,28 @@ jobs:
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-integration test-coverage
make test-coverage

release:
name: Test and Release
needs: ["tests-integration","tests-unit"]
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.14.x
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Build
run: sudo -E env "PATH=$PATH" make multiarch-build-small && sudo chmod -R 777 release/
- name: Release
.github/workflows/test.yml (30 changed lines, vendored)

@@ -2,7 +2,31 @@
on: pull_request
name: Build and Test
jobs:
test:
tests-integration:
strategy:
matrix:
go-version: [1.14.x]
platform: [ubuntu-latest]
runs-on: ${{ matrix.platform }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Tests with Img backend
run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
- name: Tests
run: sudo -E env "PATH=$PATH" make test-integration
tests-unit:
strategy:
matrix:
go-version: [1.14.x]
@@ -24,7 +48,5 @@ jobs:
sudo chmod a+x "/usr/bin/img"
- name: Build
run: sudo -E env "PATH=$PATH" make multiarch-build-small
- name: Tests with Img backend
run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
- name: Tests
run: sudo -E env "PATH=$PATH" make test-integration test-coverage
run: sudo -E env "PATH=$PATH" make test-coverage
CONTRIBUTING.md (53 changed lines, new file)

@@ -0,0 +1,53 @@

We love your input! We want to make contributing to this project as easy and transparent as possible, whether it's:

- Reporting a bug
- Discussing the current state of the code
- Submitting a fix
- Proposing new features
- Becoming a maintainer

## We Develop with Github
We use github to host code, to track issues and feature requests, as well as accept pull requests.

## Stay in touch

Join us in [slack](https://luet.slack.com/join/shared_invite/enQtOTQxMjcyNDQ0MDUxLWQ5ODVlNTI1MTYzNDRkYzkyYmM1YWE5YjM0NTliNDEzNmQwMTkxNDRhNDIzM2Y5NDBlOTZjZTYxYWQyNDE4YzY#/) and hang out with the community! It will be much easier to get started and take your first steps in contributing to the project.

## All Code Changes Happen Through Pull Requests
Pull requests are the best way to propose changes to the codebase. We actively welcome your pull requests:

1. Fork the repo you want to contribute to and create your branch from `develop`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the [documentation](https://github.com/Luet-lab/docs).
4. Ensure the test suite passes.
5. Make sure your code lints.
6. Issue that pull request!

## Any contributions you make will be under the Software License of the repository
In short, when you submit code changes, your submissions are understood to be under the same License that covers the project. Feel free to contact the maintainers if that's a concern.

## Report bugs using Github's [issues](https://github.com/mudler/luet/issues)
We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/mudler/luet/issues/new); it's that easy!

## Write bug reports with detail, background, and sample code
Try to be as descriptive as possible. When opening a new issue you will be prompted to choose between a bug or a feature request, with a small template to fill in the details. Be specific!

**Great Bug Reports** tend to have:

- A quick summary and/or background
- Steps to reproduce
- Be specific!
- Give sample code if you can.
- What you expected would happen
- What actually happens
- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work)

People *love* thorough bug reports.

## License
By contributing, you agree that your contributions will be licensed under the project Licenses.

## References
This document was adapted from the open-source contribution guidelines from https://gist.github.com/briandk/3d2e8b3ec8daf5a27a62
README.md (13 changed lines)

@@ -1,19 +1,24 @@

<p align="center">
<img width=150 height=150 src="https://user-images.githubusercontent.com/2420543/119691600-0293d700-be4b-11eb-827f-49ff1174a07a.png">
</p>

# luet - Container-based Package manager

[](https://quay.io/repository/luet/base)
[](https://goreportcard.com/report/github.com/mudler/luet)
[](https://travis-ci.org/mudler/luet)
[](https://github.com/mudler/luet/actions/workflows/release.yml)
[](https://godoc.org/github.com/mudler/luet)
[](https://codecov.io/gh/mudler/luet)

[](https://asciinema.org/a/388348)

Luet is a multi-platform Package Manager based on containers - it uses Docker (and others) to build packages. It has zero dependencies and is well suited for "from scratch" environments. It can also version entire rootfs and enables delivery of OTA-like updates, making it a perfect fit for the Edge computing era and IoT embedded devices.

It offers a simple [specfile format](https://luet-lab.github.io/docs/docs/concepts/packages/specfile/) in YAML notation to define both [packages](https://luet-lab.github.io/docs/docs/concepts/packages/) and [rootfs](https://luet-lab.github.io/docs/docs/concepts/packages/#package-layers). As it is based on containers, it can also be used to build stages for Linux From Scratch installations, and it can build and track updates for those systems.

It is written entirely in Golang and, when used as a package manager, it can run in a from-scratch environment with zero dependencies.

[](https://asciinema.org/a/388348)

## In a glance

- Luet can reuse Gentoo's portage tree hierarchy, and it is heavily inspired by it.
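As a quick orientation for the diffs that follow: the commands touched in this compare (luet build, luet create-repo, luet install) make up the workflow the README describes. A rough sketch of that flow, using placeholder tree paths, output directories, and package names that are not taken from this repository:

```sh
# Build every package defined under ./tree into ./build (paths are placeholders)
luet build --tree ./tree --destination ./build --all

# Turn the build output into a repository; the flags mirror the create-repo command shown further down
luet create-repo --tree ./tree --packages ./build --output ./repo

# On a target system with that repository configured, install a package
luet install utils/busybox
```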
cmd/build.go (138 changed lines)
@@ -16,14 +16,18 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
helpers "github.com/mudler/luet/cmd/helpers"
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
"github.com/mudler/luet/pkg/installer"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
. "github.com/mudler/luet/pkg/config"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
@@ -40,17 +44,17 @@ var buildCmd = &cobra.Command{
|
||||
Long: `Builds one or more packages from a tree (current directory is implied):
|
||||
|
||||
$ luet build utils/busybox utils/yq ...
|
||||
|
||||
|
||||
Builds all packages
|
||||
|
||||
|
||||
$ luet build --all
|
||||
|
||||
|
||||
Builds only the leaf packages:
|
||||
|
||||
|
||||
$ luet build --full
|
||||
|
||||
Build package revdeps:
|
||||
|
||||
|
||||
$ luet build --revdeps utils/yq
|
||||
|
||||
Build package without dependencies (needs the images already in the host, or either need to be available online):
|
||||
@@ -65,7 +69,6 @@ Build packages specifying multiple definition trees:
|
||||
viper.BindPFlag("destination", cmd.Flags().Lookup("destination"))
|
||||
viper.BindPFlag("backend", cmd.Flags().Lookup("backend"))
|
||||
viper.BindPFlag("privileged", cmd.Flags().Lookup("privileged"))
|
||||
viper.BindPFlag("database", cmd.Flags().Lookup("database"))
|
||||
viper.BindPFlag("revdeps", cmd.Flags().Lookup("revdeps"))
|
||||
viper.BindPFlag("all", cmd.Flags().Lookup("all"))
|
||||
viper.BindPFlag("compression", cmd.Flags().Lookup("compression"))
|
||||
@@ -97,10 +100,9 @@ Build packages specifying multiple definition trees:
|
||||
privileged := viper.GetBool("privileged")
|
||||
revdeps := viper.GetBool("revdeps")
|
||||
all := viper.GetBool("all")
|
||||
databaseType := viper.GetString("database")
|
||||
compressionType := viper.GetString("compression")
|
||||
imageRepository := viper.GetString("image-repository")
|
||||
values := viper.GetString("values")
|
||||
values := viper.GetStringSlice("values")
|
||||
wait := viper.GetBool("wait")
|
||||
push := viper.GetBool("push")
|
||||
pull := viper.GetBool("pull")
|
||||
@@ -109,6 +111,8 @@ Build packages specifying multiple definition trees:
|
||||
onlydeps := viper.GetBool("onlydeps")
|
||||
onlyTarget, _ := cmd.Flags().GetBool("only-target-package")
|
||||
full, _ := cmd.Flags().GetBool("full")
|
||||
rebuild, _ := cmd.Flags().GetBool("rebuild")
|
||||
|
||||
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
|
||||
var results Results
|
||||
backendArgs := viper.GetStringSlice("backend-args")
|
||||
@@ -118,34 +122,28 @@ Build packages specifying multiple definition trees:
|
||||
LuetCfg.GetLogging().SetLogLevel("error")
|
||||
}
|
||||
pretend, _ := cmd.Flags().GetBool("pretend")
|
||||
compilerSpecs := compiler.NewLuetCompilationspecs()
|
||||
fromRepo, _ := cmd.Flags().GetBool("from-repositories")
|
||||
|
||||
compilerSpecs := compilerspec.NewLuetCompilationspecs()
|
||||
var db pkg.PackageDatabase
|
||||
|
||||
compilerBackend := backend.NewBackend(backendType)
|
||||
compilerBackend, err := compiler.NewBackend(backendType)
|
||||
helpers.CheckErr(err)
|
||||
|
||||
switch databaseType {
|
||||
case "memory":
|
||||
db = pkg.NewInMemoryDatabase(false)
|
||||
|
||||
case "boltdb":
|
||||
tmpdir, err := ioutil.TempDir("", "package")
|
||||
if err != nil {
|
||||
Fatal(err)
|
||||
}
|
||||
db = pkg.NewBoltDatabase(tmpdir)
|
||||
|
||||
}
|
||||
db = pkg.NewInMemoryDatabase(false)
|
||||
defer db.Clean()
|
||||
|
||||
generalRecipe := tree.NewCompilerRecipe(db)
|
||||
|
||||
if fromRepo {
|
||||
if err := installer.LoadBuildTree(generalRecipe, db, LuetCfg); err != nil {
|
||||
Warning("errors while loading trees from repositories", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
for _, src := range treePaths {
|
||||
Info("Loading tree", src)
|
||||
|
||||
err := generalRecipe.Load(src)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
helpers.CheckErr(generalRecipe.Load(src))
|
||||
}
|
||||
|
||||
Info("Building in", dst)
|
||||
@@ -154,38 +152,44 @@ Build packages specifying multiple definition trees:
|
||||
discount := LuetCfg.Viper.GetFloat64("solver.discount")
|
||||
rate := LuetCfg.Viper.GetFloat64("solver.rate")
|
||||
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
|
||||
|
||||
LuetCfg.GetSolverOptions().Type = stype
|
||||
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
|
||||
LuetCfg.GetSolverOptions().Discount = float32(discount)
|
||||
LuetCfg.GetSolverOptions().MaxAttempts = attempts
|
||||
pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")
|
||||
|
||||
LuetCfg.GetGeneral().ShowBuildOutput = LuetCfg.Viper.GetBool("general.show_build_output")
|
||||
|
||||
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
|
||||
|
||||
opts := compiler.NewDefaultCompilerOptions()
|
||||
opts.SolverOptions = *LuetCfg.GetSolverOptions()
|
||||
opts.ImageRepository = imageRepository
|
||||
opts.PullFirst = pull
|
||||
opts.KeepImg = keepImages
|
||||
opts.Push = push
|
||||
opts.OnlyDeps = onlydeps
|
||||
opts.NoDeps = nodeps
|
||||
opts.Wait = wait
|
||||
opts.PackageTargetOnly = onlyTarget
|
||||
opts.BuildValuesFile = values
|
||||
var solverOpts solver.Options
|
||||
if concurrent {
|
||||
solverOpts = solver.Options{Type: solver.ParallelSimple, Concurrency: concurrency}
|
||||
} else {
|
||||
solverOpts = solver.Options{Type: solver.SingleCoreSimple, Concurrency: concurrency}
|
||||
opts := &LuetSolverOptions{
|
||||
Type: stype,
|
||||
LearnRate: float32(rate),
|
||||
Discount: float32(discount),
|
||||
MaxAttempts: attempts,
|
||||
}
|
||||
|
||||
luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(), opts, solverOpts)
|
||||
luetCompiler.SetBackendArgs(backendArgs)
|
||||
luetCompiler.SetConcurrency(concurrency)
|
||||
luetCompiler.SetCompressionType(compiler.CompressionImplementation(compressionType))
|
||||
Debug("Solver", opts.CompactString())
|
||||
|
||||
if concurrent {
|
||||
opts.Options = solver.Options{Type: solver.ParallelSimple, Concurrency: concurrency}
|
||||
} else {
|
||||
opts.Options = solver.Options{Type: solver.SingleCoreSimple, Concurrency: concurrency}
|
||||
}
|
||||
|
||||
luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(),
|
||||
options.NoDeps(nodeps),
|
||||
options.WithBackendType(backendType),
|
||||
options.PushImages(push),
|
||||
options.WithBuildValues(values),
|
||||
options.WithPullRepositories(pullRepo),
|
||||
options.WithPushRepository(imageRepository),
|
||||
options.Rebuild(rebuild),
|
||||
options.WithSolverOptions(*opts),
|
||||
options.Wait(wait),
|
||||
options.OnlyTarget(onlyTarget),
|
||||
options.PullFirst(pull),
|
||||
options.KeepImg(keepImages),
|
||||
options.OnlyDeps(onlydeps),
|
||||
options.BackendArgs(backendArgs),
|
||||
options.Concurrency(concurrency),
|
||||
options.WithCompressionType(compression.Implementation(compressionType)),
|
||||
)
|
||||
|
||||
if full {
|
||||
specs, err := luetCompiler.FromDatabase(generalRecipe.GetDatabase(), true, dst)
|
||||
if err != nil {
|
||||
@@ -198,7 +202,6 @@ Build packages specifying multiple definition trees:
|
||||
}
|
||||
} else if !all {
|
||||
for _, a := range args {
|
||||
|
||||
pack, err := helpers.ParsePackageStr(a)
|
||||
if err != nil {
|
||||
Fatal("Invalid package string ", a, ": ", err.Error())
|
||||
@@ -226,13 +229,13 @@ Build packages specifying multiple definition trees:
|
||||
}
|
||||
}
|
||||
|
||||
var artifact []compiler.Artifact
|
||||
var artifact []*artifact.PackageArtifact
|
||||
var errs []error
|
||||
if revdeps {
|
||||
artifact, errs = luetCompiler.CompileWithReverseDeps(privileged, compilerSpecs)
|
||||
|
||||
} else if pretend {
|
||||
toCalculate := []compiler.CompilationSpec{}
|
||||
toCalculate := []*compilerspec.LuetCompilationSpec{}
|
||||
if full {
|
||||
var err error
|
||||
toCalculate, err = luetCompiler.ComputeMinimumCompilableSet(compilerSpecs.All()...)
|
||||
@@ -244,11 +247,12 @@ Build packages specifying multiple definition trees:
|
||||
}
|
||||
|
||||
for _, sp := range toCalculate {
|
||||
packs, err := luetCompiler.ComputeDepTree(sp)
|
||||
ht := compiler.NewHashTree(generalRecipe.GetDatabase())
|
||||
hashTree, err := ht.Query(luetCompiler, sp)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
for _, p := range packs {
|
||||
for _, p := range hashTree.Dependencies {
|
||||
results.Packages = append(results.Packages,
|
||||
PackageResult{
|
||||
Name: p.Package.GetName(),
|
||||
@@ -282,6 +286,7 @@ Build packages specifying multiple definition trees:
|
||||
}
|
||||
}
|
||||
} else {
|
||||
|
||||
artifact, errs = luetCompiler.CompileParallel(privileged, compilerSpecs)
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
@@ -291,7 +296,7 @@ Build packages specifying multiple definition trees:
|
||||
Fatal("Bailing out")
|
||||
}
|
||||
for _, a := range artifact {
|
||||
Info("Artifact generated:", a.GetPath())
|
||||
Info("Artifact generated:", a.Path)
|
||||
}
|
||||
},
|
||||
}
|
||||
@@ -304,12 +309,11 @@ func init() {
|
||||
|
||||
buildCmd.Flags().StringSliceP("tree", "t", []string{path}, "Path of the tree to use.")
|
||||
buildCmd.Flags().String("backend", "docker", "backend used (docker,img)")
|
||||
buildCmd.Flags().Bool("privileged", false, "Privileged (Keep permissions)")
|
||||
buildCmd.Flags().String("database", "memory", "database used for solving (memory,boltdb)")
|
||||
buildCmd.Flags().Bool("privileged", true, "Privileged (Keep permissions)")
|
||||
buildCmd.Flags().Bool("revdeps", false, "Build with revdeps")
|
||||
buildCmd.Flags().Bool("all", false, "Build all specfiles in the tree")
|
||||
buildCmd.Flags().Bool("full", false, "Build all packages (optimized)")
|
||||
buildCmd.Flags().String("values", "", "Build values file to interpolate with each package")
|
||||
buildCmd.Flags().StringSlice("values", []string{}, "Build values file to interpolate with each package")
|
||||
buildCmd.Flags().StringSliceP("backend-args", "a", []string{}, "Backend args")
|
||||
|
||||
buildCmd.Flags().String("destination", filepath.Join(path, "build"), "Destination folder")
|
||||
@@ -328,8 +332,10 @@ func init() {
|
||||
buildCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
|
||||
buildCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
|
||||
buildCmd.Flags().Bool("live-output", LuetCfg.GetGeneral().ShowBuildOutput, "Enable live output of the build phase.")
|
||||
|
||||
buildCmd.Flags().Bool("from-repositories", false, "Consume the user-defined repositories to pull specfiles from")
|
||||
buildCmd.Flags().Bool("rebuild", false, "To combine with --pull. Allows to rebuild the target package even if an image is available, against a local values file")
|
||||
buildCmd.Flags().Bool("pretend", false, "Just print what packages will be compiled")
|
||||
buildCmd.Flags().StringArrayP("pull-repository", "p", []string{}, "A list of repositories to pull the cache from")
|
||||
|
||||
buildCmd.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
|
||||
|
||||
|
@@ -22,7 +22,7 @@ import (
|
||||
|
||||
. "github.com/mudler/luet/pkg/config"
|
||||
config "github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -47,7 +47,7 @@ var cleanupCmd = &cobra.Command{
|
||||
LuetCfg.System.DatabasePath = dbpath
|
||||
LuetCfg.System.Rootfs = rootfs
|
||||
// Check if cache dir exists
|
||||
if helpers.Exists(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath()) {
|
||||
if fileHelper.Exists(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath()) {
|
||||
|
||||
files, err := ioutil.ReadDir(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath())
|
||||
if err != nil {
|
||||
|
@@ -18,11 +18,13 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
helpers "github.com/mudler/luet/cmd/helpers"
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
. "github.com/mudler/luet/pkg/config"
|
||||
installer "github.com/mudler/luet/pkg/installer"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
// . "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -74,7 +76,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var err error
|
||||
var repo installer.Repository
|
||||
var repo *installer.LuetSystemRepository
|
||||
|
||||
treePaths := viper.GetStringSlice("tree")
|
||||
dst := viper.GetString("output")
|
||||
@@ -90,19 +92,19 @@ Create a repository from the metadata description defined in the luet.yaml confi
|
||||
metaName := viper.GetString("meta-filename")
|
||||
source_repo := viper.GetString("repo")
|
||||
backendType := viper.GetString("backend")
|
||||
fromRepo, _ := cmd.Flags().GetBool("from-repositories")
|
||||
|
||||
treeFile := installer.NewDefaultTreeRepositoryFile()
|
||||
metaFile := installer.NewDefaultMetaRepositoryFile()
|
||||
compilerBackend := backend.NewBackend(backendType)
|
||||
compilerBackend, err := compiler.NewBackend(backendType)
|
||||
helpers.CheckErr(err)
|
||||
force := viper.GetBool("force-push")
|
||||
imagePush := viper.GetBool("push-images")
|
||||
|
||||
if source_repo != "" {
|
||||
// Search for system repository
|
||||
lrepo, err := LuetCfg.GetSystemRepository(source_repo)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
helpers.CheckErr(err)
|
||||
|
||||
if len(treePaths) <= 0 {
|
||||
treePaths = []string{lrepo.TreePath}
|
||||
@@ -118,19 +120,23 @@ Create a repository from the metadata description defined in the luet.yaml confi
|
||||
lrepo.Priority,
|
||||
packages,
|
||||
treePaths,
|
||||
pkg.NewInMemoryDatabase(false), compilerBackend, dst, imagePush, force)
|
||||
pkg.NewInMemoryDatabase(false),
|
||||
compilerBackend,
|
||||
dst,
|
||||
imagePush,
|
||||
force,
|
||||
fromRepo,
|
||||
LuetCfg)
|
||||
helpers.CheckErr(err)
|
||||
|
||||
} else {
|
||||
repo, err = installer.GenerateRepository(name, descr, t, urls, 1, packages,
|
||||
treePaths, pkg.NewInMemoryDatabase(false), compilerBackend, dst, imagePush, force)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
treePaths, pkg.NewInMemoryDatabase(false), compilerBackend, dst, imagePush, force, fromRepo, LuetCfg)
|
||||
helpers.CheckErr(err)
|
||||
}
|
||||
|
||||
if treetype != "" {
|
||||
treeFile.SetCompressionType(compiler.CompressionImplementation(treetype))
|
||||
treeFile.SetCompressionType(compression.Implementation(treetype))
|
||||
}
|
||||
|
||||
if treeName != "" {
|
||||
@@ -138,7 +144,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
|
||||
}
|
||||
|
||||
if metatype != "" {
|
||||
metaFile.SetCompressionType(compiler.CompressionImplementation(metatype))
|
||||
metaFile.SetCompressionType(compression.Implementation(metatype))
|
||||
}
|
||||
|
||||
if metaName != "" {
|
||||
@@ -149,17 +155,15 @@ Create a repository from the metadata description defined in the luet.yaml confi
|
||||
repo.SetRepositoryFile(installer.REPOFILE_META_KEY, metaFile)
|
||||
|
||||
err = repo.Write(dst, reset, true)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
helpers.CheckErr(err)
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
Fatal(err)
|
||||
}
|
||||
helpers.CheckErr(err)
|
||||
|
||||
createrepoCmd.Flags().String("packages", filepath.Join(path, "build"), "Packages folder (output from build)")
|
||||
createrepoCmd.Flags().StringSliceP("tree", "t", []string{path}, "Path of the source trees to use.")
|
||||
createrepoCmd.Flags().String("output", filepath.Join(path, "build"), "Destination for generated archives. With 'docker' repository type, it should be an image reference (e.g 'foo/bar')")
|
||||
@@ -178,6 +182,7 @@ func init() {
|
||||
createrepoCmd.Flags().String("tree-filename", installer.TREE_TARBALL, "Repository tree filename")
|
||||
createrepoCmd.Flags().String("meta-compression", "none", "Compression alg: none, gzip, zstd")
|
||||
createrepoCmd.Flags().String("meta-filename", installer.REPOSITORY_METAFILE+".tar", "Repository metadata filename")
|
||||
createrepoCmd.Flags().Bool("from-repositories", false, "Consume the user-defined repositories to pull specfiles from")
|
||||
|
||||
RootCmd.AddCommand(createrepoCmd)
|
||||
}
|
||||
|
@@ -18,7 +18,8 @@ package cmd_database
|
||||
import (
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
@@ -66,21 +67,27 @@ For reference, inspect a "metadata.yaml" file generated while running "luet buil
|
||||
if err != nil {
|
||||
Fatal("Failed reading ", a, ": ", err.Error())
|
||||
}
|
||||
art, err := compiler.NewPackageArtifactFromYaml(dat)
|
||||
art, err := artifact.NewPackageArtifactFromYaml(dat)
|
||||
if err != nil {
|
||||
Fatal("Failed reading yaml ", a, ": ", err.Error())
|
||||
}
|
||||
|
||||
files := art.GetFiles()
|
||||
files := art.Files
|
||||
|
||||
if _, err := systemDB.CreatePackage(art.GetCompileSpec().GetPackage()); err != nil {
|
||||
// Check if the package is already present
|
||||
if p, err := systemDB.FindPackage(art.CompileSpec.GetPackage()); err == nil && p.GetName() != "" {
|
||||
Fatal("Package", art.CompileSpec.GetPackage().HumanReadableString(),
|
||||
" already present.")
|
||||
}
|
||||
|
||||
if _, err := systemDB.CreatePackage(art.CompileSpec.GetPackage()); err != nil {
|
||||
Fatal("Failed to create ", a, ": ", err.Error())
|
||||
}
|
||||
if err := systemDB.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: art.GetCompileSpec().GetPackage().GetFingerPrint(), Files: files}); err != nil {
|
||||
if err := systemDB.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: art.CompileSpec.GetPackage().GetFingerPrint(), Files: files}); err != nil {
|
||||
Fatal("Failed setting package files for ", a, ": ", err.Error())
|
||||
}
|
||||
|
||||
Info(art.GetCompileSpec().GetPackage().HumanReadableString(), " created")
|
||||
Info(art.CompileSpec.GetPackage().HumanReadableString(), " created")
|
||||
}
|
||||
|
||||
},
|
||||
|
@@ -22,6 +22,8 @@ import (
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
_gentoo "github.com/Sabayon/pkgs-checker/pkg/gentoo"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
version "github.com/mudler/luet/pkg/versioner"
|
||||
@@ -56,7 +58,8 @@ func packageData(p string) (string, string) {
|
||||
}
|
||||
func ParsePackageStr(p string) (*pkg.DefaultPackage, error) {
|
||||
|
||||
if !strings.HasPrefix(p, "=") {
|
||||
if !(strings.HasPrefix(p, "=") || strings.HasPrefix(p, ">") ||
|
||||
strings.HasPrefix(p, "<")) {
|
||||
ver := ">=0"
|
||||
cat := ""
|
||||
name := ""
|
||||
@@ -111,3 +114,9 @@ func ParsePackageStr(p string) (*pkg.DefaultPackage, error) {
|
||||
|
||||
return pack, nil
|
||||
}
|
||||
|
||||
func CheckErr(err error) {
|
||||
if err != nil {
|
||||
Fatal(err)
|
||||
}
|
||||
}
|
||||
|
@@ -66,5 +66,37 @@ var _ = Describe("CLI Helpers", func() {
|
||||
Expect(pack.GetCategory()).To(Equal("cat"))
|
||||
Expect(pack.GetVersion()).To(Equal("1.2"))
|
||||
})
|
||||
|
||||
It("accept gentoo regex parsing with with condition", func() {
|
||||
pack, err := ParsePackageStr(">=cat/foo-1.2")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(pack.GetName()).To(Equal("foo"))
|
||||
Expect(pack.GetCategory()).To(Equal("cat"))
|
||||
Expect(pack.GetVersion()).To(Equal(">=1.2"))
|
||||
})
|
||||
|
||||
It("accept gentoo regex parsing with with condition2", func() {
|
||||
pack, err := ParsePackageStr("<cat/foo-1.2")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(pack.GetName()).To(Equal("foo"))
|
||||
Expect(pack.GetCategory()).To(Equal("cat"))
|
||||
Expect(pack.GetVersion()).To(Equal("<1.2"))
|
||||
})
|
||||
|
||||
It("accept gentoo regex parsing with with condition3", func() {
|
||||
pack, err := ParsePackageStr(">cat/foo-1.2")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(pack.GetName()).To(Equal("foo"))
|
||||
Expect(pack.GetCategory()).To(Equal("cat"))
|
||||
Expect(pack.GetVersion()).To(Equal(">1.2"))
|
||||
})
|
||||
|
||||
It("accept gentoo regex parsing with with condition4", func() {
|
||||
pack, err := ParsePackageStr("<=cat/foo-1.2")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(pack.GetName()).To(Equal("foo"))
|
||||
Expect(pack.GetCategory()).To(Equal("cat"))
|
||||
Expect(pack.GetVersion()).To(Equal("<=1.2"))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@@ -70,16 +70,6 @@ To force install a package:
|
||||
toInstall = append(toInstall, pack)
|
||||
}
|
||||
|
||||
// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
|
||||
repos := installer.Repositories{}
|
||||
for _, repo := range LuetCfg.SystemRepositories {
|
||||
if !repo.Enable {
|
||||
continue
|
||||
}
|
||||
r := installer.NewSystemRepository(repo)
|
||||
repos = append(repos, r)
|
||||
}
|
||||
|
||||
stype := LuetCfg.Viper.GetString("solver.type")
|
||||
discount := LuetCfg.Viper.GetFloat64("solver.discount")
|
||||
rate := LuetCfg.Viper.GetFloat64("solver.rate")
|
||||
@@ -111,6 +101,7 @@ To force install a package:
|
||||
}
|
||||
|
||||
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
|
||||
repos := installer.SystemRepositories(LuetCfg)
|
||||
|
||||
// Load config protect configs
|
||||
installer.LoadConfigProtectConfs(LuetCfg)
|
||||
|
cmd/pack.go (22 changed lines)
@@ -20,7 +20,9 @@ import (
|
||||
"time"
|
||||
|
||||
helpers "github.com/mudler/luet/cmd/helpers"
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
. "github.com/mudler/luet/pkg/config"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
@@ -64,21 +66,21 @@ Afterwards, you can use the content generated and associate it with a tree and a
|
||||
Fatal("Invalid package string ", packageName, ": ", err.Error())
|
||||
}
|
||||
|
||||
spec := &compiler.LuetCompilationSpec{Package: p}
|
||||
artifact := compiler.NewPackageArtifact(filepath.Join(dst, p.GetFingerPrint()+".package.tar"))
|
||||
artifact.SetCompressionType(compiler.CompressionImplementation(compressionType))
|
||||
err = artifact.Compress(sourcePath, concurrency)
|
||||
spec := &compilerspec.LuetCompilationSpec{Package: p}
|
||||
a := artifact.NewPackageArtifact(filepath.Join(dst, p.GetFingerPrint()+".package.tar"))
|
||||
a.CompressionType = compression.Implementation(compressionType)
|
||||
err = a.Compress(sourcePath, concurrency)
|
||||
if err != nil {
|
||||
Fatal("failed compressing ", packageName, ": ", err.Error())
|
||||
}
|
||||
artifact.SetCompileSpec(spec)
|
||||
filelist, err := artifact.FileList()
|
||||
a.CompileSpec = spec
|
||||
filelist, err := a.FileList()
|
||||
if err != nil {
|
||||
Fatal("failed generating file list for ", packageName, ": ", err.Error())
|
||||
}
|
||||
artifact.SetFiles(filelist)
|
||||
artifact.GetCompileSpec().GetPackage().SetBuildTimestamp(time.Now().String())
|
||||
err = artifact.WriteYaml(dst)
|
||||
a.Files = filelist
|
||||
a.CompileSpec.GetPackage().SetBuildTimestamp(time.Now().String())
|
||||
err = a.WriteYaml(dst)
|
||||
if err != nil {
|
||||
Fatal("failed writing metadata yaml file for ", packageName, ": ", err.Error())
|
||||
}
|
||||
|
@@ -72,8 +72,8 @@ func NewRepoListCommand() *cobra.Command {
|
||||
if repo.Cached {
|
||||
|
||||
r := installer.NewSystemRepository(repo)
|
||||
localRepo, _ := r.(*installer.LuetSystemRepository).ReadSpecFile(filepath.Join(repobasedir,
|
||||
installer.REPOSITORY_SPECFILE), false)
|
||||
localRepo, _ := r.ReadSpecFile(filepath.Join(repobasedir,
|
||||
installer.REPOSITORY_SPECFILE))
|
||||
if localRepo != nil {
|
||||
tsec, _ := strconv.ParseInt(localRepo.GetLastUpdate(), 10, 64)
|
||||
repoRevision = Bold(Red(localRepo.GetRevision())).String() +
|
||||
|
cmd/root.go (76 changed lines)
@@ -25,6 +25,7 @@ import (
|
||||
|
||||
"github.com/marcsauter/single"
|
||||
bus "github.com/mudler/luet/pkg/bus"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
extensions "github.com/mudler/cobra-extensions"
|
||||
config "github.com/mudler/luet/pkg/config"
|
||||
@@ -40,8 +41,14 @@ var Verbose bool
|
||||
var LockedCommands = []string{"install", "uninstall", "upgrade"}
|
||||
|
||||
const (
|
||||
LuetCLIVersion = "0.11.4"
|
||||
LuetCLIVersion = "0.17.0"
|
||||
LuetEnvPrefix = "LUET"
|
||||
license = `
|
||||
Luet Copyright (C) 2019-2021 Ettore Di Giacinto
|
||||
This program comes with ABSOLUTELY NO WARRANTY.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions.
|
||||
`
|
||||
)
|
||||
|
||||
// Build time and commit information.
|
||||
@@ -52,6 +59,47 @@ var (
|
||||
BuildCommit string
|
||||
)
|
||||
|
||||
func version() string {
|
||||
return fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime)
|
||||
}
|
||||
|
||||
var noBannerCommands = []string{"search", "exec", "tree", "database", "box"}
|
||||
|
||||
func displayVersionBanner() {
|
||||
display := true
|
||||
if len(os.Args) > 1 {
|
||||
for _, c := range noBannerCommands {
|
||||
if os.Args[1] == c {
|
||||
display = false
|
||||
}
|
||||
}
|
||||
}
|
||||
if display {
|
||||
Info("Luet version", version())
|
||||
Info(license)
|
||||
}
|
||||
}
|
||||
|
||||
func handleLock() {
|
||||
if os.Getenv("LUET_NOLOCK") != "true" {
|
||||
if len(os.Args) > 1 {
|
||||
for _, lockedCmd := range LockedCommands {
|
||||
if os.Args[1] == lockedCmd {
|
||||
s := single.New("luet")
|
||||
if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
|
||||
Fatal("another instance of the app is already running, exiting")
|
||||
} else if err != nil {
|
||||
// Another error occurred, might be worth handling it as well
|
||||
Fatal("failed to acquire exclusive app lock:", err.Error())
|
||||
}
|
||||
defer s.TryUnlock()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RootCmd represents the base command when called without any subcommands
|
||||
var RootCmd = &cobra.Command{
|
||||
Use: "luet",
|
||||
@@ -79,8 +127,9 @@ To build a package, from a tree definition:
|
||||
$ luet build --tree tree/path package
|
||||
|
||||
`,
|
||||
Version: fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime),
|
||||
Version: version(),
|
||||
PersistentPreRun: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
err := LoadConfig(config.LuetCfg)
|
||||
if err != nil {
|
||||
Fatal("failed to load configuration:", err.Error())
|
||||
@@ -97,7 +146,7 @@ To build a package, from a tree definition:
|
||||
|
||||
plugin := viper.GetStringSlice("plugin")
|
||||
|
||||
bus.Manager.Load(plugin...).Register()
|
||||
bus.Manager.Initialize(plugin...)
|
||||
if len(bus.Manager.Plugins) != 0 {
|
||||
Info(":lollipop:Enabled plugins:")
|
||||
for _, p := range bus.Manager.Plugins {
|
||||
@@ -154,23 +203,8 @@ func LoadConfig(c *config.LuetConfig) error {
|
||||
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
||||
func Execute() {
|
||||
|
||||
if os.Getenv("LUET_NOLOCK") != "true" {
|
||||
if len(os.Args) > 1 {
|
||||
for _, lockedCmd := range LockedCommands {
|
||||
if os.Args[1] == lockedCmd {
|
||||
s := single.New("luet")
|
||||
if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
|
||||
Fatal("another instance of the app is already running, exiting")
|
||||
} else if err != nil {
|
||||
// Another error occurred, might be worth handling it as well
|
||||
Fatal("failed to acquire exclusive app lock:", err.Error())
|
||||
}
|
||||
defer s.TryUnlock()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
handleLock()
|
||||
displayVersionBanner()
|
||||
|
||||
if err := RootCmd.Execute(); err != nil {
|
||||
fmt.Println(err)
|
||||
@@ -252,7 +286,7 @@ func initConfig() {
|
||||
}
|
||||
homeDir := helpers.GetHomeDir()
|
||||
|
||||
if helpers.Exists(filepath.Join(pwdDir, ".luet.yaml")) || (homeDir != "" && helpers.Exists(filepath.Join(homeDir, ".luet.yaml"))) {
|
||||
if fileHelper.Exists(filepath.Join(pwdDir, ".luet.yaml")) || (homeDir != "" && fileHelper.Exists(filepath.Join(homeDir, ".luet.yaml"))) {
|
||||
viper.AddConfigPath(".")
|
||||
if homeDir != "" {
|
||||
viper.AddConfigPath(homeDir)
|
||||
|
@@ -158,20 +158,23 @@ func searchOnline(term string, l list.Writer, t table.Writer, label, labelMatch,
|
||||
} else {
|
||||
matches = synced.Search(term)
|
||||
}
|
||||
|
||||
for _, m := range matches {
|
||||
if !revdeps {
|
||||
if !m.Package.IsHidden() || m.Package.IsHidden() && hidden {
|
||||
t.AppendRow(packageToRow(m.Repo.GetName(), m.Package))
|
||||
packageToList(l, m.Repo.GetName(), m.Package)
|
||||
results.Packages = append(results.Packages,
|
||||
PackageResult{
|
||||
Name: m.Package.GetName(),
|
||||
Version: m.Package.GetVersion(),
|
||||
Category: m.Package.GetCategory(),
|
||||
Repository: m.Repo.GetName(),
|
||||
Hidden: m.Package.IsHidden(),
|
||||
Files: m.Artifact.GetFiles(),
|
||||
})
|
||||
r := &PackageResult{
|
||||
Name: m.Package.GetName(),
|
||||
Version: m.Package.GetVersion(),
|
||||
Category: m.Package.GetCategory(),
|
||||
Repository: m.Repo.GetName(),
|
||||
Hidden: m.Package.IsHidden(),
|
||||
}
|
||||
if m.Artifact != nil {
|
||||
r.Files = m.Artifact.Files
|
||||
}
|
||||
results.Packages = append(results.Packages, *r)
|
||||
}
|
||||
} else {
|
||||
packs, _ := m.Repo.GetTree().GetDatabase().GetRevdeps(m.Package)
|
||||
@@ -179,15 +182,17 @@ func searchOnline(term string, l list.Writer, t table.Writer, label, labelMatch,
|
||||
if !revdep.IsHidden() || revdep.IsHidden() && hidden {
|
||||
t.AppendRow(packageToRow(m.Repo.GetName(), revdep))
|
||||
packageToList(l, m.Repo.GetName(), revdep)
|
||||
results.Packages = append(results.Packages,
|
||||
PackageResult{
|
||||
Name: revdep.GetName(),
|
||||
Version: revdep.GetVersion(),
|
||||
Category: revdep.GetCategory(),
|
||||
Repository: m.Repo.GetName(),
|
||||
Hidden: revdep.IsHidden(),
|
||||
Files: m.Artifact.GetFiles(),
|
||||
})
|
||||
r := &PackageResult{
|
||||
Name: revdep.GetName(),
|
||||
Version: revdep.GetVersion(),
|
||||
Category: revdep.GetCategory(),
|
||||
Repository: m.Repo.GetName(),
|
||||
Hidden: revdep.IsHidden(),
|
||||
}
|
||||
if m.Artifact != nil {
|
||||
r.Files = m.Artifact.Files
|
||||
}
|
||||
results.Packages = append(results.Packages, *r)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -257,7 +262,7 @@ func searchFiles(term string, l list.Writer, t table.Writer) Results {
|
||||
Category: m.Package.GetCategory(),
|
||||
Repository: m.Repo.GetName(),
|
||||
Hidden: m.Package.IsHidden(),
|
||||
Files: m.Artifact.GetFiles(),
|
||||
Files: m.Artifact.Files,
|
||||
})
|
||||
}
|
||||
return results
|
||||
|
@@ -25,6 +25,7 @@ import (
|
||||
helpers "github.com/mudler/luet/cmd/helpers"
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
. "github.com/mudler/luet/pkg/config"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
@@ -57,6 +58,7 @@ func NewTreeImageCommand() *cobra.Command {
|
||||
|
||||
treePath, _ := cmd.Flags().GetStringArray("tree")
|
||||
imageRepository := viper.GetString("image-repository")
|
||||
pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")
|
||||
|
||||
out, _ := cmd.Flags().GetString("output")
|
||||
if out != "terminal" {
|
||||
@@ -73,12 +75,15 @@ func NewTreeImageCommand() *cobra.Command {
|
||||
}
|
||||
compilerBackend := backend.NewSimpleDockerBackend()
|
||||
|
||||
opts := compiler.NewDefaultCompilerOptions()
|
||||
opts.SolverOptions = *LuetCfg.GetSolverOptions()
|
||||
opts.ImageRepository = imageRepository
|
||||
|
||||
solverOpts := solver.Options{Type: solver.SingleCoreSimple, Concurrency: 1}
|
||||
luetCompiler := compiler.NewLuetCompiler(compilerBackend, reciper.GetDatabase(), opts, solverOpts)
|
||||
opts := *LuetCfg.GetSolverOptions()
|
||||
opts.Options = solver.Options{Type: solver.SingleCoreSimple, Concurrency: 1}
|
||||
luetCompiler := compiler.NewLuetCompiler(
|
||||
compilerBackend,
|
||||
reciper.GetDatabase(),
|
||||
options.WithPushRepository(imageRepository),
|
||||
options.WithPullRepositories(pullRepo),
|
||||
options.WithSolverOptions(opts),
|
||||
)
|
||||
|
||||
a := args[0]
|
||||
|
||||
@@ -91,9 +96,14 @@ func NewTreeImageCommand() *cobra.Command {
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
asserts, err := luetCompiler.ComputeDepTree(spec)
|
||||
|
||||
for _, assertion := range asserts { //highly dependent on the order
|
||||
ht := compiler.NewHashTree(reciper.GetDatabase())
|
||||
hashtree, err := ht.Query(luetCompiler, spec)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
for _, assertion := range hashtree.Solution { //highly dependent on the order
|
||||
|
||||
//buildImageHash := imageRepository + ":" + assertion.Hash.BuildHash
|
||||
currentPackageImageHash := imageRepository + ":" + assertion.Hash.PackageHash
|
||||
@@ -135,6 +145,7 @@ func NewTreeImageCommand() *cobra.Command {
|
||||
ans.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
|
||||
ans.Flags().StringArrayP("tree", "t", []string{path}, "Path of the tree to use.")
|
||||
ans.Flags().String("image-repository", "luet/cache", "Default base image string for generated image")
|
||||
ans.Flags().StringArrayP("pull-repository", "p", []string{}, "A list of repositories to pull the cache from")
|
||||
|
||||
return ans
|
||||
}
|
||||
|
@@ -67,10 +67,12 @@ var uninstallCmd = &cobra.Command{
|
||||
dbpath := LuetCfg.Viper.GetString("system.database_path")
|
||||
rootfs := LuetCfg.Viper.GetString("system.rootfs")
|
||||
engine := LuetCfg.Viper.GetString("system.database_engine")
|
||||
keepProtected, _ := cmd.Flags().GetBool("keep-protected-files")
|
||||
|
||||
LuetCfg.System.DatabaseEngine = engine
|
||||
LuetCfg.System.DatabasePath = dbpath
|
||||
LuetCfg.System.Rootfs = rootfs
|
||||
LuetCfg.ConfigProtectSkip = !keepProtected
|
||||
|
||||
LuetCfg.GetSolverOptions().Type = stype
|
||||
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
|
||||
@@ -123,6 +125,7 @@ func init() {
|
||||
uninstallCmd.Flags().Bool("full-clean", false, "(experimental) Uninstall packages and all the other deps/revdeps of it.")
|
||||
uninstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
|
||||
uninstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
|
||||
uninstallCmd.Flags().BoolP("keep-protected-files", "k", false, "Keep package protected files around")
|
||||
|
||||
RootCmd.AddCommand(uninstallCmd)
|
||||
}
|
||||
|
cmd/util.go (36 changed lines, new file)

@@ -0,0 +1,36 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package cmd

import (
. "github.com/mudler/luet/cmd/util"

"github.com/spf13/cobra"
)

var utilGroup = &cobra.Command{
Use: "util [command] [OPTIONS]",
Short: "General luet internal utilities exposed",
}

func init() {
RootCmd.AddCommand(utilGroup)

utilGroup.AddCommand(
NewUnpackCommand(),
)
}
cmd/util/unpack.go (98 changed lines, new file)
@@ -0,0 +1,98 @@
|
||||
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers/docker"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewUnpackCommand() *cobra.Command {
|
||||
|
||||
c := &cobra.Command{
|
||||
Use: "unpack image path",
|
||||
Short: "Unpack a docker image natively",
|
||||
Long: `unpack doesn't need the docker daemon to run, and unpacks a docker image in the specified directory:
|
||||
|
||||
luet util unpack golang:alpine /alpine
|
||||
`,
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
if len(args) != 2 {
|
||||
Fatal("Expects an image and a path")
|
||||
}
|
||||
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
image := args[0]
|
||||
destination, err := filepath.Abs(args[1])
|
||||
if err != nil {
|
||||
Error("Invalid path %s", destination)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
verify, _ := cmd.Flags().GetBool("verify")
|
||||
user, _ := cmd.Flags().GetString("auth-username")
|
||||
pass, _ := cmd.Flags().GetString("auth-password")
|
||||
authType, _ := cmd.Flags().GetString("auth-type")
|
||||
server, _ := cmd.Flags().GetString("auth-server-address")
|
||||
identity, _ := cmd.Flags().GetString("auth-identity-token")
|
||||
registryToken, _ := cmd.Flags().GetString("auth-registry-token")
|
||||
|
||||
temp, err := config.LuetCfg.GetSystem().TempDir("contentstore")
|
||||
if err != nil {
|
||||
Fatal("Cannot create a tempdir", err.Error())
|
||||
}
|
||||
|
||||
Info("Downloading", image, "to", destination)
|
||||
auth := &types.AuthConfig{
|
||||
Username: user,
|
||||
Password: pass,
|
||||
ServerAddress: server,
|
||||
Auth: authType,
|
||||
IdentityToken: identity,
|
||||
RegistryToken: registryToken,
|
||||
}
|
||||
|
||||
info, err := docker.DownloadAndExtractDockerImage(temp, image, destination, auth, verify)
|
||||
if err != nil {
|
||||
Error(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
Info(fmt.Sprintf("Pulled: %s %s", info.Target.Digest, info.Name))
|
||||
Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.ContentSize))))
|
||||
},
|
||||
}
|
||||
|
||||
c.Flags().String("auth-username", "", "Username to authenticate to registry/notary")
|
||||
c.Flags().String("auth-password", "", "Password to authenticate to registry")
|
||||
c.Flags().String("auth-type", "", "Auth type")
|
||||
c.Flags().String("auth-server-address", "", "Authentication server address")
|
||||
c.Flags().String("auth-identity-token", "", "Authentication identity token")
|
||||
c.Flags().String("auth-registry-token", "", "Authentication registry token")
|
||||
c.Flags().Bool("verify", false, "Verify signed images to notary before to pull")
|
||||
return c
|
||||
}
|
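The unpack command above registers several authentication flags alongside its two positional arguments. A usage sketch, where the image reference, credentials, registry address, and target paths are placeholders rather than values from the repository:

```sh
# Unpack an image into a local directory without needing the Docker daemon
luet util unpack golang:alpine /tmp/alpine-rootfs

# Pull from a private registry using the auth flags defined above
luet util unpack --auth-username myuser --auth-password mypass \
  --auth-server-address registry.example.org \
  registry.example.org/myorg/myimage:latest /tmp/unpacked
```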
@@ -7,7 +7,7 @@ fi
set -ex
export LUET_NOLOCK=true

LUET_VERSION=$(curl -s https://api.github.com/repos/mudler/luet/releases/latest | ( grep -oP '"tag_name": "\K(.*)(?=")' || echo "0.9.24" ))
LUET_VERSION=$(curl -s https://api.github.com/repos/mudler/luet/releases/latest | grep tag_name | awk '{ print $2 }' | sed -e 's/\"//g' -e 's/,//g' || echo "0.9.24" )
LUET_ROOTFS=${LUET_ROOTFS:-/}
LUET_DATABASE_PATH=${LUET_DATABASE_PATH:-/var/luet/db}
LUET_DATABASE_ENGINE=${LUET_DATABASE_ENGINE:-boltdb}
go.mod (25 changed lines)
@@ -4,23 +4,26 @@ go 1.14
|
||||
|
||||
require (
|
||||
github.com/DataDog/zstd v1.4.4 // indirect
|
||||
github.com/Sabayon/pkgs-checker v0.7.2
|
||||
github.com/Sabayon/pkgs-checker v0.8.1
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef
|
||||
github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154
|
||||
github.com/briandowns/spinner v1.12.1-0.20201220203425-e201aaea0a31
|
||||
github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
|
||||
github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
|
||||
github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765
|
||||
github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
|
||||
github.com/docker/distribution v2.7.1+incompatible
|
||||
github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||
github.com/docker/go-units v0.4.0
|
||||
github.com/ecooper/qlearning v0.0.0-20160612200101-3075011a69fd
|
||||
github.com/fsouza/go-dockerclient v1.6.4
|
||||
github.com/genuinetools/img v0.5.11
|
||||
github.com/ghodss/yaml v1.0.0
|
||||
github.com/google/go-containerregistry v0.2.1
|
||||
github.com/google/renameio v1.0.0
|
||||
github.com/hashicorp/go-multierror v1.0.0
|
||||
github.com/hashicorp/go-version v1.2.0
|
||||
github.com/hashicorp/go-version v1.2.1
|
||||
github.com/imdario/mergo v0.3.9
|
||||
github.com/jedib0t/go-pretty v4.3.0+incompatible
|
||||
github.com/jedib0t/go-pretty/v6 v6.0.5
|
||||
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
|
||||
@@ -30,27 +33,33 @@ require (
|
||||
github.com/kyokomi/emoji v2.1.0+incompatible
|
||||
github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e
|
||||
github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.1
|
||||
github.com/moby/buildkit v0.7.2
|
||||
github.com/moby/sys/mount v0.2.0 // indirect
|
||||
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
|
||||
github.com/mudler/docker-companion v0.4.6-0.20200418093252-41846f112d87
|
||||
github.com/mudler/go-pluggable v0.0.0-20201113184918-d36448fc8f82
|
||||
github.com/mudler/go-pluggable v0.0.0-20210513155700-54c6443073af
|
||||
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290
|
||||
github.com/onsi/ginkgo v1.14.2
|
||||
github.com/onsi/gomega v1.10.3
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578
|
||||
github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/schollz/progressbar/v3 v3.7.1
|
||||
github.com/sirupsen/logrus v1.6.0
|
||||
github.com/spf13/cobra v1.0.0
|
||||
github.com/spf13/viper v1.7.0
|
||||
github.com/sirupsen/logrus v1.7.0
|
||||
github.com/spf13/cobra v1.1.1
|
||||
github.com/spf13/viper v1.7.1
|
||||
github.com/theupdateframework/notary v0.7.0
|
||||
go.etcd.io/bbolt v1.3.5
|
||||
go.uber.org/atomic v1.5.1 // indirect
|
||||
go.uber.org/multierr v1.4.0 // indirect
|
||||
go.uber.org/multierr v1.4.0
|
||||
go.uber.org/zap v1.13.0
|
||||
golang.org/x/mod v0.4.2
|
||||
google.golang.org/grpc v1.29.1
|
||||
gopkg.in/yaml.v2 v2.3.0
|
||||
gotest.tools/v3 v3.0.2 // indirect
|
||||
helm.sh/helm/v3 v3.3.4
|
||||
|
||||
)
|
||||
|
@@ -23,9 +23,9 @@ import (
"strings"
"syscall"

"github.com/pkg/errors"
fileHelper "github.com/mudler/luet/pkg/helpers/file"

helpers "github.com/mudler/luet/pkg/helpers"
"github.com/pkg/errors"
)

type Box interface {
@@ -107,7 +107,7 @@ func (b *DefaultBox) Exec() error {

func (b *DefaultBox) Run() error {

if !helpers.Exists(b.Root) {
if !fileHelper.Exists(b.Root) {
return errors.New(b.Root + " does not exist")
}

@@ -2,6 +2,7 @@ package bus
|
||||
|
||||
import (
|
||||
"github.com/mudler/go-pluggable"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -44,24 +45,59 @@ var (
|
||||
EventRepositoryPreBuild pluggable.EventType = "repository.pre.build"
|
||||
// EventRepositoryPostBuild is the event fired after a repository was built
|
||||
EventRepositoryPostBuild pluggable.EventType = "repository.post.build"
|
||||
|
||||
// Image unpack
|
||||
|
||||
// EventImagePreUnPack is the event fired before unpacking an image to a local dir
|
||||
EventImagePreUnPack pluggable.EventType = "image.pre.unpack"
|
||||
// EventImagePostUnPack is the event fired after unpacking an image to a local dir
|
||||
EventImagePostUnPack pluggable.EventType = "image.post.unpack"
|
||||
)
|
||||
|
||||
// Manager is the bus instance manager, which subscribes plugins to events emitted by Luet
|
||||
var Manager *pluggable.Manager = pluggable.NewManager(
|
||||
[]pluggable.EventType{
|
||||
EventPackageInstall,
|
||||
EventPackageUnInstall,
|
||||
EventPackagePreBuild,
|
||||
EventPackagePreBuildArtifact,
|
||||
EventPackagePostBuildArtifact,
|
||||
EventPackagePostBuild,
|
||||
EventRepositoryPreBuild,
|
||||
EventRepositoryPostBuild,
|
||||
EventImagePreBuild,
|
||||
EventImagePrePull,
|
||||
EventImagePrePush,
|
||||
EventImagePostBuild,
|
||||
EventImagePostPull,
|
||||
EventImagePostPush,
|
||||
},
|
||||
)
|
||||
var Manager *Bus = &Bus{
|
||||
Manager: pluggable.NewManager(
|
||||
[]pluggable.EventType{
|
||||
EventPackageInstall,
|
||||
EventPackageUnInstall,
|
||||
EventPackagePreBuild,
|
||||
EventPackagePreBuildArtifact,
|
||||
EventPackagePostBuildArtifact,
|
||||
EventPackagePostBuild,
|
||||
EventRepositoryPreBuild,
|
||||
EventRepositoryPostBuild,
|
||||
EventImagePreBuild,
|
||||
EventImagePrePull,
|
||||
EventImagePrePush,
|
||||
EventImagePostBuild,
|
||||
EventImagePostPull,
|
||||
EventImagePostPush,
|
||||
EventImagePreUnPack,
|
||||
EventImagePostUnPack,
|
||||
},
|
||||
),
|
||||
}
|
||||
|
||||
type Bus struct {
|
||||
*pluggable.Manager
|
||||
}
|
||||
|
||||
func (b *Bus) Initialize(plugin ...string) {
|
||||
b.Manager.Load(plugin...).Register()
|
||||
|
||||
for _, e := range b.Manager.Events {
|
||||
b.Manager.Response(e, func(p *pluggable.Plugin, r *pluggable.EventResponse) {
|
||||
if r.Errored() {
|
||||
logrus.Fatal("Plugin", p.Name, "at", p.Executable, "Error", r.Error)
|
||||
}
|
||||
logrus.Debug(
|
||||
"plugin_event",
|
||||
"received from",
|
||||
p.Name,
|
||||
"at",
|
||||
p.Executable,
|
||||
r,
|
||||
)
|
||||
})
|
||||
}
|
||||
}
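A minimal sketch, assuming a hypothetical payload type, of how the two new unpack events might be published around an image unpack, in the same style as the Publish calls the backends use for the pre/post build and pull events:

package image

import (
	bus "github.com/mudler/luet/pkg/bus"
)

// unpackPayload is a hypothetical payload; the real payload type luet uses
// for these events is not shown in this diff.
type unpackPayload struct {
	Image       string
	Destination string
}

// UnpackToDir is illustrative only: it brackets a caller-supplied unpack
// function with the pre/post unpack events registered above.
func UnpackToDir(image, dst string, unpack func(image, dst string) error) error {
	p := unpackPayload{Image: image, Destination: dst}

	bus.Manager.Publish(bus.EventImagePreUnPack, p)
	if err := unpack(image, dst); err != nil {
		return err
	}
	bus.Manager.Publish(bus.EventImagePostUnPack, p)
	return nil
}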
|
||||
|
@@ -1,33 +1,52 @@
|
||||
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package backend
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
)
|
||||
|
||||
func NewBackend(s string) (CompilerBackend, error) {
|
||||
var compilerBackend CompilerBackend
|
||||
|
||||
switch s {
|
||||
case backend.ImgBackend:
|
||||
compilerBackend = backend.NewSimpleImgBackend()
|
||||
case backend.DockerBackend:
|
||||
compilerBackend = backend.NewSimpleDockerBackend()
|
||||
default:
|
||||
return nil, errors.New("invalid backend. Unsupported")
|
||||
}
|
||||
|
||||
return compilerBackend, nil
|
||||
}
|
||||
|
||||
type CompilerBackend interface {
|
||||
BuildImage(backend.Options) error
|
||||
ExportImage(backend.Options) error
|
||||
RemoveImage(backend.Options) error
|
||||
ImageDefinitionToTar(backend.Options) error
|
||||
ExtractRootfs(opts backend.Options, keepPerms bool) error
|
||||
|
||||
CopyImage(string, string) error
|
||||
DownloadImage(opts backend.Options) error
|
||||
|
||||
Push(opts backend.Options) error
|
||||
ImageAvailable(string) bool
|
||||
|
||||
ImageExists(string) bool
|
||||
}
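A short usage sketch of the relocated constructor and the interface it now returns; the chosen backend and image name are placeholders, and error handling is kept to a minimum:

package main

import (
	"log"

	"github.com/mudler/luet/pkg/compiler"
	"github.com/mudler/luet/pkg/compiler/backend"
)

func main() {
	// Select the docker implementation; backend.ImgBackend would pick img instead.
	b, err := compiler.NewBackend(backend.DockerBackend)
	if err != nil {
		log.Fatal(err)
	}

	opts := backend.Options{ImageName: "alpine:latest"} // placeholder image
	if !b.ImageExists(opts.ImageName) {
		if err := b.DownloadImage(opts); err != nil {
			log.Fatal(err)
		}
	}
}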
|
||||
|
||||
// GenerateChanges generates changes between two images using a backend by leveraging export/extractrootfs methods
|
||||
// example of json return: [
|
||||
// {
|
||||
@@ -54,46 +73,46 @@ import (
|
||||
// }
|
||||
// }
|
||||
// ]
|
||||
func GenerateChanges(b compiler.CompilerBackend, fromImage, toImage compiler.CompilerBackendOptions) ([]compiler.ArtifactLayer, error) {
|
||||
func GenerateChanges(b CompilerBackend, fromImage, toImage backend.Options) ([]artifact.ArtifactLayer, error) {
|
||||
|
||||
res := compiler.ArtifactLayer{FromImage: fromImage.ImageName, ToImage: toImage.ImageName}
|
||||
res := artifact.ArtifactLayer{FromImage: fromImage.ImageName, ToImage: toImage.ImageName}
|
||||
|
||||
tmpdiffs, err := config.LuetCfg.GetSystem().TempDir("extraction")
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
}
|
||||
defer os.RemoveAll(tmpdiffs) // clean up
|
||||
|
||||
srcRootFS, err := ioutil.TempDir(tmpdiffs, "src")
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
}
|
||||
defer os.RemoveAll(srcRootFS) // clean up
|
||||
|
||||
dstRootFS, err := ioutil.TempDir(tmpdiffs, "dst")
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
}
|
||||
defer os.RemoveAll(dstRootFS) // clean up
|
||||
|
||||
srcImageExtract := compiler.CompilerBackendOptions{
|
||||
srcImageExtract := backend.Options{
|
||||
ImageName: fromImage.ImageName,
|
||||
Destination: srcRootFS,
|
||||
}
|
||||
Debug("Extracting source image", fromImage.ImageName)
|
||||
err = b.ExtractRootfs(srcImageExtract, false) // No need to keep permissions as we just collect file diffs
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking src image "+fromImage.ImageName)
|
||||
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking src image "+fromImage.ImageName)
|
||||
}
|
||||
|
||||
dstImageExtract := compiler.CompilerBackendOptions{
|
||||
dstImageExtract := backend.Options{
|
||||
ImageName: toImage.ImageName,
|
||||
Destination: dstRootFS,
|
||||
}
|
||||
Debug("Extracting destination image", toImage.ImageName)
|
||||
err = b.ExtractRootfs(dstImageExtract, false)
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking dst image "+toImage.ImageName)
|
||||
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking dst image "+toImage.ImageName)
|
||||
}
|
||||
|
||||
// Get Additions/Changes. dst -> src
|
||||
@@ -114,7 +133,7 @@ func GenerateChanges(b compiler.CompilerBackend, fromImage, toImage compiler.Com
|
||||
|
||||
if sizeA != sizeB {
|
||||
// fmt.Println("File changed", path, filepath.Join(srcRootFS, realpath))
|
||||
res.Diffs.Changes = append(res.Diffs.Changes, compiler.ArtifactNode{
|
||||
res.Diffs.Changes = append(res.Diffs.Changes, artifact.ArtifactNode{
|
||||
Name: filepath.Join("/", realpath),
|
||||
Size: int(sizeB),
|
||||
})
|
||||
@@ -127,7 +146,7 @@ func GenerateChanges(b compiler.CompilerBackend, fromImage, toImage compiler.Com
|
||||
if s, err := os.Lstat(filepath.Join(dstRootFS, realpath)); err == nil {
|
||||
sizeB = s.Size()
|
||||
}
|
||||
res.Diffs.Additions = append(res.Diffs.Additions, compiler.ArtifactNode{
|
||||
res.Diffs.Additions = append(res.Diffs.Additions, artifact.ArtifactNode{
|
||||
Name: filepath.Join("/", realpath),
|
||||
Size: int(sizeB),
|
||||
})
|
||||
@@ -138,7 +157,7 @@ func GenerateChanges(b compiler.CompilerBackend, fromImage, toImage compiler.Com
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image destination")
|
||||
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image destination")
|
||||
}
|
||||
|
||||
// Get deletions. src -> dst
|
||||
@@ -150,7 +169,7 @@ func GenerateChanges(b compiler.CompilerBackend, fromImage, toImage compiler.Com
|
||||
realpath := strings.Replace(path, srcRootFS, "", -1)
|
||||
if _, err = os.Lstat(filepath.Join(dstRootFS, realpath)); err != nil {
|
||||
// fmt.Println("File deleted", path, filepath.Join(srcRootFS, realpath))
|
||||
res.Diffs.Deletions = append(res.Diffs.Deletions, compiler.ArtifactNode{
|
||||
res.Diffs.Deletions = append(res.Diffs.Deletions, artifact.ArtifactNode{
|
||||
Name: filepath.Join("/", realpath),
|
||||
})
|
||||
}
|
||||
@@ -158,8 +177,71 @@ func GenerateChanges(b compiler.CompilerBackend, fromImage, toImage compiler.Com
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image source")
|
||||
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image source")
|
||||
}
|
||||
|
||||
return []compiler.ArtifactLayer{res}, nil
|
||||
diffs := []artifact.ArtifactLayer{res}
|
||||
|
||||
if config.LuetCfg.GetGeneral().Debug {
|
||||
summary := ComputeArtifactLayerSummary(diffs)
|
||||
for _, l := range summary.Layers {
|
||||
Debug(fmt.Sprintf("Diff %s -> %s: add %d (%d bytes), del %d (%d bytes), change %d (%d bytes)",
|
||||
l.FromImage, l.ToImage,
|
||||
l.AddFiles, l.AddSizes,
|
||||
l.DelFiles, l.DelSizes,
|
||||
l.ChangeFiles, l.ChangeSizes))
|
||||
}
|
||||
}
|
||||
|
||||
return diffs, nil
|
||||
}
|
||||
|
||||
type ArtifactLayerSummary struct {
|
||||
FromImage string `json:"image1"`
|
||||
ToImage string `json:"image2"`
|
||||
AddFiles int `json:"add_files"`
|
||||
AddSizes int64 `json:"add_sizes"`
|
||||
DelFiles int `json:"del_files"`
|
||||
DelSizes int64 `json:"del_sizes"`
|
||||
ChangeFiles int `json:"change_files"`
|
||||
ChangeSizes int64 `json:"change_sizes"`
|
||||
}
|
||||
|
||||
type ArtifactLayersSummary struct {
|
||||
Layers []ArtifactLayerSummary `json:"summary"`
|
||||
}
|
||||
|
||||
func ComputeArtifactLayerSummary(diffs []artifact.ArtifactLayer) ArtifactLayersSummary {
|
||||
|
||||
ans := ArtifactLayersSummary{
|
||||
Layers: make([]ArtifactLayerSummary, 0),
|
||||
}
|
||||
|
||||
for _, layer := range diffs {
|
||||
sum := ArtifactLayerSummary{
|
||||
FromImage: layer.FromImage,
|
||||
ToImage: layer.ToImage,
|
||||
AddFiles: 0,
|
||||
AddSizes: 0,
|
||||
DelFiles: 0,
|
||||
DelSizes: 0,
|
||||
ChangeFiles: 0,
|
||||
ChangeSizes: 0,
|
||||
}
|
||||
for _, a := range layer.Diffs.Additions {
|
||||
sum.AddFiles++
|
||||
sum.AddSizes += int64(a.Size)
|
||||
}
|
||||
for _, d := range layer.Diffs.Deletions {
|
||||
sum.DelFiles++
|
||||
sum.DelSizes += int64(d.Size)
|
||||
}
|
||||
for _, c := range layer.Diffs.Changes {
|
||||
sum.ChangeFiles++
|
||||
sum.ChangeSizes += int64(c.Size)
|
||||
}
|
||||
ans.Layers = append(ans.Layers, sum)
|
||||
}
|
||||
|
||||
return ans
|
||||
}
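Putting the helpers above together, a sketch of diffing two images and printing the per-layer totals; the image references are placeholders borrowed from the test fixtures further down, and error handling is abbreviated:

package main

import (
	"fmt"
	"log"

	"github.com/mudler/luet/pkg/compiler"
	"github.com/mudler/luet/pkg/compiler/backend"
)

func main() {
	b := backend.NewSimpleDockerBackend()

	from := backend.Options{ImageName: "quay.io/mocaccino/micro"}
	to := backend.Options{ImageName: "quay.io/mocaccino/extra"}

	layers, err := compiler.GenerateChanges(b, from, to)
	if err != nil {
		log.Fatal(err)
	}

	summary := compiler.ComputeArtifactLayerSummary(layers)
	for _, l := range summary.Layers {
		fmt.Printf("%s -> %s: add %d (%d bytes), del %d (%d bytes), change %d (%d bytes)\n",
			l.FromImage, l.ToImage,
			l.AddFiles, l.AddSizes,
			l.DelFiles, l.DelSizes,
			l.ChangeFiles, l.ChangeSizes)
	}
}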
|
@@ -18,7 +18,6 @@ package backend
|
||||
import (
|
||||
"os/exec"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
@@ -36,16 +35,13 @@ func imageAvailable(image string) bool {
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func NewBackend(s string) compiler.CompilerBackend {
|
||||
var compilerBackend compiler.CompilerBackend
|
||||
|
||||
switch s {
|
||||
case ImgBackend:
|
||||
compilerBackend = NewSimpleImgBackend()
|
||||
case DockerBackend:
|
||||
compilerBackend = NewSimpleDockerBackend()
|
||||
}
|
||||
return compilerBackend
|
||||
type Options struct {
|
||||
ImageName string
|
||||
SourcePath string
|
||||
DockerFileName string
|
||||
Destination string
|
||||
Context string
|
||||
BackendArgs []string
|
||||
}
|
||||
|
||||
func runCommand(cmd *exec.Cmd) error {
|
||||
@@ -75,7 +71,7 @@ func runCommand(cmd *exec.Cmd) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func genBuildCommand(opts compiler.CompilerBackendOptions) []string {
|
||||
func genBuildCommand(opts Options) []string {
|
||||
context := opts.Context
|
||||
|
||||
if context == "" {
|
||||
|
@@ -17,18 +17,17 @@ package backend
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
docker "github.com/fsouza/go-dockerclient"
|
||||
bus "github.com/mudler/luet/pkg/bus"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
capi "github.com/mudler/docker-companion/api"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
@@ -37,13 +36,14 @@ import (
|
||||
|
||||
type SimpleDocker struct{}
|
||||
|
||||
func NewSimpleDockerBackend() compiler.CompilerBackend {
|
||||
func NewSimpleDockerBackend() *SimpleDocker {
|
||||
return &SimpleDocker{}
|
||||
}
|
||||
|
||||
// TODO: Missing still: labels, and build args expansion
|
||||
func (*SimpleDocker) BuildImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleDocker) BuildImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
bus.Manager.Publish(bus.EventImagePreBuild, opts)
|
||||
|
||||
buildarg := genBuildCommand(opts)
|
||||
Info(":whale2: Building image " + name)
|
||||
@@ -56,23 +56,7 @@ func (*SimpleDocker) BuildImage(opts compiler.CompilerBackendOptions) error {
|
||||
|
||||
Info(":whale: Building image " + name + " done")
|
||||
|
||||
if os.Getenv("DOCKER_SQUASH") == "true" {
|
||||
Info(":whale: Squashing image " + name)
|
||||
var client *docker.Client
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
client, err = docker.NewClientFromEnv()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not connect to the Docker daemon")
|
||||
}
|
||||
err = capi.Squash(client, name, name)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed squashing image")
|
||||
}
|
||||
|
||||
Info(":whale: Squashing image " + name + " done")
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePostBuild, opts)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -88,8 +72,10 @@ func (*SimpleDocker) CopyImage(src, dst string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleDocker) DownloadImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleDocker) DownloadImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
bus.Manager.Publish(bus.EventImagePrePull, opts)
|
||||
|
||||
buildarg := []string{"pull", name}
|
||||
Debug(":whale: Downloading image " + name)
|
||||
|
||||
@@ -103,6 +89,8 @@ func (*SimpleDocker) DownloadImage(opts compiler.CompilerBackendOptions) error {
|
||||
}
|
||||
|
||||
Info(":whale: Downloaded image:", name)
|
||||
bus.Manager.Publish(bus.EventImagePostPull, opts)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -123,7 +111,7 @@ func (*SimpleDocker) ImageAvailable(imagename string) bool {
|
||||
return imageAvailable(imagename)
|
||||
}
|
||||
|
||||
func (*SimpleDocker) RemoveImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleDocker) RemoveImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
buildarg := []string{"rmi", name}
|
||||
out, err := exec.Command("docker", buildarg...).CombinedOutput()
|
||||
@@ -135,9 +123,10 @@ func (*SimpleDocker) RemoveImage(opts compiler.CompilerBackendOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleDocker) Push(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleDocker) Push(opts Options) error {
|
||||
name := opts.ImageName
|
||||
pusharg := []string{"push", name}
|
||||
bus.Manager.Publish(bus.EventImagePrePush, opts)
|
||||
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
@@ -147,11 +136,13 @@ func (*SimpleDocker) Push(opts compiler.CompilerBackendOptions) error {
|
||||
return errors.Wrap(err, "Failed pushing image: "+string(out))
|
||||
}
|
||||
Info(":whale: Pushed image:", name)
|
||||
bus.Manager.Publish(bus.EventImagePostPush, opts)
|
||||
|
||||
//Info(string(out))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SimpleDocker) ImageDefinitionToTar(opts compiler.CompilerBackendOptions) error {
|
||||
func (s *SimpleDocker) ImageDefinitionToTar(opts Options) error {
|
||||
if err := s.BuildImage(opts); err != nil {
|
||||
return errors.Wrap(err, "Failed building image")
|
||||
}
|
||||
@@ -164,7 +155,7 @@ func (s *SimpleDocker) ImageDefinitionToTar(opts compiler.CompilerBackendOptions
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleDocker) ExportImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleDocker) ExportImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
path := opts.Destination
|
||||
|
||||
@@ -187,10 +178,16 @@ type ManifestEntry struct {
|
||||
Layers []string `json:"Layers"`
|
||||
}
|
||||
|
||||
func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms bool) error {
|
||||
func (b *SimpleDocker) ExtractRootfs(opts Options, keepPerms bool) error {
|
||||
name := opts.ImageName
|
||||
dst := opts.Destination
|
||||
|
||||
if !b.ImageExists(name) {
|
||||
if err := b.DownloadImage(opts); err != nil {
|
||||
return errors.Wrap(err, "failed pulling image "+name+" during extraction")
|
||||
}
|
||||
}
|
||||
|
||||
tempexport, err := ioutil.TempDir(dst, "tmprootfs")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
@@ -202,7 +199,7 @@ func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepP
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
if err := b.ExportImage(compiler.CompilerBackendOptions{ImageName: name, Destination: imageExport}); err != nil {
|
||||
if err := b.ExportImage(Options{ImageName: name, Destination: imageExport}); err != nil {
|
||||
return errors.Wrap(err, "failed while extracting rootfs for "+name)
|
||||
}
|
||||
|
||||
@@ -217,7 +214,7 @@ func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepP
|
||||
}
|
||||
defer os.RemoveAll(tempUnpack) // clean up
|
||||
imageExport := filepath.Join(tempUnpack, "image.tar")
|
||||
if err := b.ExportImage(compiler.CompilerBackendOptions{ImageName: opts.ImageName, Destination: imageExport}); err != nil {
|
||||
if err := b.ExportImage(Options{ImageName: opts.ImageName, Destination: imageExport}); err != nil {
|
||||
return errors.Wrap(err, "while exporting image before extraction")
|
||||
}
|
||||
src = imageExport
|
||||
@@ -241,7 +238,7 @@ func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepP
|
||||
return errors.Wrap(err, "Error met while unpacking rootfs")
|
||||
}
|
||||
|
||||
manifest, err := helpers.Read(filepath.Join(rootfs, "manifest.json"))
|
||||
manifest, err := fileHelper.Read(filepath.Join(rootfs, "manifest.json"))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error met while reading image manifest")
|
||||
}
|
||||
@@ -269,7 +266,7 @@ func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepP
|
||||
return err
|
||||
}
|
||||
|
||||
err = export.UnPackLayers(layers_sha, dst, "")
|
||||
err = export.UnPackLayers(layers_sha, dst, "containerd")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -281,21 +278,3 @@ func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepP
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Changes retrieves changes between image layers
|
||||
func (d *SimpleDocker) Changes(fromImage, toImage compiler.CompilerBackendOptions) ([]compiler.ArtifactLayer, error) {
|
||||
diffs, err := GenerateChanges(d, fromImage, toImage)
|
||||
|
||||
if config.LuetCfg.GetGeneral().Debug {
|
||||
summary := compiler.ComputeArtifactLayerSummary(diffs)
|
||||
for _, l := range summary.Layers {
|
||||
Debug(fmt.Sprintf("Diff %s -> %s: add %d (%d bytes), del %d (%d bytes), change %d (%d bytes)",
|
||||
l.FromImage, l.ToImage,
|
||||
l.AddFiles, l.AddSizes,
|
||||
l.DelFiles, l.DelSizes,
|
||||
l.ChangeFiles, l.ChangeSizes))
|
||||
}
|
||||
}
|
||||
|
||||
return diffs, err
|
||||
}
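As a small usage note, extracting a rootfs with the docker backend now looks roughly like this; the image and destination are placeholders, and as the method above shows, the image is pulled automatically when it is not present locally:

package main

import (
	"log"

	"github.com/mudler/luet/pkg/compiler/backend"
)

func main() {
	b := backend.NewSimpleDockerBackend()

	// keepPerms=false is sufficient when only collecting file diffs,
	// which is how GenerateChanges calls ExtractRootfs.
	err := b.ExtractRootfs(backend.Options{
		ImageName:   "alpine:latest",      // placeholder image
		Destination: "/tmp/alpine-rootfs", // placeholder directory, must already exist
	}, false)
	if err != nil {
		log.Fatal(err)
	}
}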
|
||||
|
@@ -16,15 +16,17 @@
|
||||
package backend_test
|
||||
|
||||
import (
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
. "github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -41,13 +43,10 @@ var _ = Describe("Docker backend", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
cc := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
lspec, err := cc.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
lspec, ok := spec.(*LuetCompilationSpec)
|
||||
Expect(ok).To(BeTrue())
|
||||
|
||||
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
Expect(lspec.Image).To(Equal("luet/base"))
|
||||
Expect(lspec.Seed).To(Equal("alpine"))
|
||||
@@ -61,7 +60,7 @@ var _ = Describe("Docker backend", func() {
|
||||
|
||||
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM alpine
|
||||
@@ -71,7 +70,7 @@ ENV PACKAGE_NAME=enman
|
||||
ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin`))
|
||||
b := NewSimpleDockerBackend()
|
||||
opts := CompilerBackendOptions{
|
||||
opts := backend.Options{
|
||||
ImageName: "luet/base",
|
||||
SourcePath: tmpdir,
|
||||
DockerFileName: "Dockerfile",
|
||||
@@ -80,11 +79,11 @@ ENV PACKAGE_CATEGORY=app-admin`))
|
||||
|
||||
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
|
||||
Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
|
||||
|
||||
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err = helpers.Read(filepath.Join(tmpdir, "LuetDockerfile"))
|
||||
dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "LuetDockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM luet/base
|
||||
@@ -95,7 +94,7 @@ ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin
|
||||
RUN echo foo > /test
|
||||
RUN echo bar > /test2`))
|
||||
opts2 := CompilerBackendOptions{
|
||||
opts2 := backend.Options{
|
||||
ImageName: "test",
|
||||
SourcePath: tmpdir,
|
||||
DockerFileName: "LuetDockerfile",
|
||||
@@ -104,28 +103,28 @@ RUN echo bar > /test2`))
|
||||
|
||||
Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
|
||||
Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
|
||||
|
||||
artifacts := []ArtifactNode{{
|
||||
artifacts := []artifact.ArtifactNode{{
|
||||
Name: "/luetbuild/LuetDockerfile",
|
||||
Size: 175,
|
||||
}}
|
||||
if os.Getenv("DOCKER_BUILDKIT") == "1" {
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/etc/resolv.conf", Size: 0})
|
||||
artifacts = append(artifacts, artifact.ArtifactNode{Name: "/etc/resolv.conf", Size: 0})
|
||||
}
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/test", Size: 4})
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/test2", Size: 4})
|
||||
artifacts = append(artifacts, artifact.ArtifactNode{Name: "/test", Size: 4})
|
||||
artifacts = append(artifacts, artifact.ArtifactNode{Name: "/test2", Size: 4})
|
||||
|
||||
Expect(b.Changes(opts, opts2)).To(Equal(
|
||||
[]ArtifactLayer{{
|
||||
Expect(compiler.GenerateChanges(b, opts, opts2)).To(Equal(
|
||||
[]artifact.ArtifactLayer{{
|
||||
FromImage: "luet/base",
|
||||
ToImage: "test",
|
||||
Diffs: ArtifactDiffs{
|
||||
Diffs: artifact.ArtifactDiffs{
|
||||
Additions: artifacts,
|
||||
},
|
||||
}}))
|
||||
|
||||
opts2 = CompilerBackendOptions{
|
||||
opts2 = backend.Options{
|
||||
ImageName: "test",
|
||||
SourcePath: tmpdir,
|
||||
DockerFileName: "LuetDockerfile",
|
||||
@@ -133,7 +132,7 @@ RUN echo bar > /test2`))
|
||||
}
|
||||
|
||||
Expect(b.ImageDefinitionToTar(opts2)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "output3.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(tmpdir, "output3.tar"))).To(BeTrue())
|
||||
Expect(b.ImageExists(opts2.ImageName)).To(BeFalse())
|
||||
})
|
||||
|
||||
|
@@ -20,7 +20,8 @@ import (
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
bus "github.com/mudler/luet/pkg/bus"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -28,13 +29,14 @@ import (
|
||||
|
||||
type SimpleImg struct{}
|
||||
|
||||
func NewSimpleImgBackend() compiler.CompilerBackend {
|
||||
func NewSimpleImgBackend() *SimpleImg {
|
||||
return &SimpleImg{}
|
||||
}
|
||||
|
||||
// TODO: Missing still: labels, and build args expansion
|
||||
func (*SimpleImg) BuildImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleImg) BuildImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
bus.Manager.Publish(bus.EventImagePreBuild, opts)
|
||||
|
||||
buildarg := genBuildCommand(opts)
|
||||
|
||||
@@ -46,13 +48,14 @@ func (*SimpleImg) BuildImage(opts compiler.CompilerBackendOptions) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePostBuild, opts)
|
||||
|
||||
Info(":tea: Building image " + name + " done")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleImg) RemoveImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleImg) RemoveImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
buildarg := []string{"rm", name}
|
||||
Spinner(22)
|
||||
@@ -66,8 +69,10 @@ func (*SimpleImg) RemoveImage(opts compiler.CompilerBackendOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleImg) DownloadImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleImg) DownloadImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
bus.Manager.Publish(bus.EventImagePrePull, opts)
|
||||
|
||||
buildarg := []string{"pull", name}
|
||||
Debug(":tea: Downloading image " + name)
|
||||
|
||||
@@ -81,6 +86,7 @@ func (*SimpleImg) DownloadImage(opts compiler.CompilerBackendOptions) error {
|
||||
}
|
||||
|
||||
Info(":tea: Image " + name + " downloaded")
|
||||
bus.Manager.Publish(bus.EventImagePostPull, opts)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -116,7 +122,7 @@ func (*SimpleImg) ImageExists(imagename string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *SimpleImg) ImageDefinitionToTar(opts compiler.CompilerBackendOptions) error {
|
||||
func (s *SimpleImg) ImageDefinitionToTar(opts Options) error {
|
||||
if err := s.BuildImage(opts); err != nil {
|
||||
return errors.Wrap(err, "Failed building image")
|
||||
}
|
||||
@@ -129,7 +135,7 @@ func (s *SimpleImg) ImageDefinitionToTar(opts compiler.CompilerBackendOptions) e
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*SimpleImg) ExportImage(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleImg) ExportImage(opts Options) error {
|
||||
name := opts.ImageName
|
||||
path := opts.Destination
|
||||
buildarg := []string{"save", "-o", path, name}
|
||||
@@ -147,7 +153,7 @@ func (*SimpleImg) ExportImage(opts compiler.CompilerBackendOptions) error {
|
||||
}
|
||||
|
||||
// ExtractRootfs extracts the docker image content inside the destination
|
||||
func (s *SimpleImg) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms bool) error {
|
||||
func (s *SimpleImg) ExtractRootfs(opts Options, keepPerms bool) error {
|
||||
name := opts.ImageName
|
||||
path := opts.Destination
|
||||
|
||||
@@ -173,20 +179,18 @@ func (s *SimpleImg) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerm
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: Use container-diff (https://github.com/GoogleContainerTools/container-diff) for checking out layer diffs
|
||||
// Changes uses container-diff (https://github.com/GoogleContainerTools/container-diff) for retrieving out layer diffs
|
||||
func (i *SimpleImg) Changes(fromImage, toImage compiler.CompilerBackendOptions) ([]compiler.ArtifactLayer, error) {
|
||||
return GenerateChanges(i, fromImage, toImage)
|
||||
}
|
||||
|
||||
func (*SimpleImg) Push(opts compiler.CompilerBackendOptions) error {
|
||||
func (*SimpleImg) Push(opts Options) error {
|
||||
name := opts.ImageName
|
||||
bus.Manager.Publish(bus.EventImagePrePush, opts)
|
||||
|
||||
pusharg := []string{"push", name}
|
||||
out, err := exec.Command("img", pusharg...).CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed pushing image: "+string(out))
|
||||
}
|
||||
Info(":tea: Pushed image:", name)
|
||||
bus.Manager.Publish(bus.EventImagePostPush, opts)
|
||||
|
||||
//Info(string(out))
|
||||
return nil
|
||||
}
|
||||
|
@@ -13,10 +13,10 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package backend_test
|
||||
package compiler_test
|
||||
|
||||
import (
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
. "github.com/mudler/luet/pkg/compiler/backend"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
)
|
||||
|
||||
var _ = Describe("Docker image diffs", func() {
|
||||
var b compiler.CompilerBackend
|
||||
var b CompilerBackend
|
||||
|
||||
BeforeEach(func() {
|
||||
b = NewSimpleDockerBackend()
|
||||
@@ -32,7 +32,7 @@ var _ = Describe("Docker image diffs", func() {
|
||||
|
||||
Context("Generate diffs from docker images", func() {
|
||||
It("Detect no changes", func() {
|
||||
opts := compiler.CompilerBackendOptions{
|
||||
opts := Options{
|
||||
ImageName: "alpine:latest",
|
||||
}
|
||||
err := b.DownloadImage(opts)
|
||||
@@ -47,18 +47,18 @@ var _ = Describe("Docker image diffs", func() {
|
||||
})
|
||||
|
||||
It("Detects additions and changed files", func() {
|
||||
err := b.DownloadImage(compiler.CompilerBackendOptions{
|
||||
err := b.DownloadImage(Options{
|
||||
ImageName: "quay.io/mocaccino/micro",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = b.DownloadImage(compiler.CompilerBackendOptions{
|
||||
err = b.DownloadImage(Options{
|
||||
ImageName: "quay.io/mocaccino/extra",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
layers, err := GenerateChanges(b, compiler.CompilerBackendOptions{
|
||||
layers, err := GenerateChanges(b, Options{
|
||||
ImageName: "quay.io/mocaccino/micro",
|
||||
}, compiler.CompilerBackendOptions{
|
||||
}, Options{
|
||||
ImageName: "quay.io/mocaccino/extra",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
File diff suppressed because it is too large
@@ -21,9 +21,12 @@ import (
|
||||
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
sd "github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -39,7 +42,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -54,19 +57,18 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(2)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
@@ -74,6 +76,68 @@ var _ = Describe("Compiler", func() {
|
||||
})
|
||||
})
|
||||
|
||||
Context("Copy and Join", func() {
|
||||
It("Compiles it correctly with Copy", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/copy")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.2"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(fileHelper.Exists(spec.Rel("result"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("bina/busybox"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles it correctly with Join", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/join")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.2"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(spec.Rel("newc"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
})
|
||||
})
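Outside the test harness, the new options-based constructor is used in roughly the same way; the fixture path and package coordinates below are placeholders taken from the specs above, and error handling is trimmed:

package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/mudler/luet/pkg/compiler"
	"github.com/mudler/luet/pkg/compiler/backend"
	"github.com/mudler/luet/pkg/compiler/types/options"
	pkg "github.com/mudler/luet/pkg/package"
	"github.com/mudler/luet/pkg/tree"
)

func main() {
	recipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
	if err := recipe.Load("./tests/fixtures/buildable"); err != nil { // placeholder tree path
		log.Fatal(err)
	}

	c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), recipe.GetDatabase(), options.Concurrency(2))

	spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
	if err != nil {
		log.Fatal(err)
	}

	tmpdir, err := ioutil.TempDir("", "build")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)

	spec.SetOutputPath(tmpdir)

	a, err := c.Compile(false, spec)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("package artifact written to", a.Path)
}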
|
||||
|
||||
Context("Simple package build definition", func() {
|
||||
It("Compiles it in parallel", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
@@ -83,7 +147,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(1))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -98,12 +162,11 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(2)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec, spec2))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2))
|
||||
Expect(errs).To(BeNil())
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
})
|
||||
@@ -118,7 +181,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/templates")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
pkg, err := generalRecipe.GetDatabase().FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
@@ -142,7 +205,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -159,30 +222,29 @@ var _ = Describe("Compiler", func() {
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(2)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(3))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("c"))
|
||||
content1, err := fileHelper.Read(spec.Rel("c"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("cd"))
|
||||
content2, err := fileHelper.Read(spec.Rel("cd"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("c\n"))
|
||||
Expect(content2).To(Equal("c\n"))
|
||||
|
||||
content1, err = helpers.Read(spec.Rel("d"))
|
||||
content1, err = fileHelper.Read(spec.Rel("d"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err = helpers.Read(spec.Rel("dd"))
|
||||
content2, err = fileHelper.Read(spec.Rel("dd"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("s\n"))
|
||||
Expect(content2).To(Equal("dd\n"))
|
||||
@@ -199,7 +261,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(1))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "extra", Category: "layer", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -207,27 +269,26 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
artifacts2, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec2))
|
||||
artifacts2, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec2))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts2)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, artifact := range artifacts2 {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("etc/hosts"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test1"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("etc/hosts"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test1"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and includes ony wanted files", func() {
|
||||
@@ -241,7 +302,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -250,19 +311,18 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and excludes files", func() {
|
||||
@@ -276,7 +336,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -285,20 +345,19 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles includes and excludes files", func() {
|
||||
@@ -312,7 +371,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -321,20 +380,19 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and excludes ony wanted files also from unpacked packages", func() {
|
||||
@@ -348,7 +406,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -357,18 +415,17 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles includes and excludes ony wanted files also from unpacked packages", func() {
|
||||
@@ -382,7 +439,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -391,18 +448,17 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and includes ony wanted files also from unpacked packages", func() {
|
||||
@@ -416,7 +472,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -425,22 +481,21 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("var/lib/udhcpd"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test2"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("lib/firmware"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("var/lib/udhcpd"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test2"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("lib/firmware"))).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles a more complex tree", func() {
|
||||
@@ -454,7 +509,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "pkgs-checker", Category: "package", Version: "9999"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -463,25 +518,24 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("extra-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("extra-layer"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("base-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("base-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("extra-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles with provides support", func() {
|
||||
@@ -495,7 +549,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "d", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -504,27 +558,26 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("c-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("d"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("dd"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("c"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("cd"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("d"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("dd"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("c"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("cd"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles with provides and selectors support", func() {
|
||||
@@ -539,7 +592,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "d", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -548,27 +601,26 @@ var _ = Describe("Compiler", func() {
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("c-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("d"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("dd"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("c"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("cd"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("d"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("dd"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("c"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("cd"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
})
|
||||
It("Compiles revdeps", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
@@ -581,7 +633,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "extra", Category: "layer", Version: "0.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -591,21 +643,21 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifacts, errs := compiler.CompileWithReverseDeps(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileWithReverseDeps(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(2))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("extra-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("extra-layer"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles complex dependencies trees with best matches", func() {
|
||||
@@ -619,7 +671,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(10))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "vhba", Category: "sys-fs-5.4.2", Version: "20190410"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -629,22 +681,22 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(6))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(6))
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("vhba-sys-fs-5.4.2-20190410.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("sabayon-build-portage-layer-0.20191126.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("build-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("build-sabayon-overlay-layer-0.20191212.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("build-sabayon-overlays-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("linux-sabayon-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("sabayon-sources-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("vhba"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("sabayon-build-portage-layer-0.20191126.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("build-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("build-sabayon-overlay-layer-0.20191212.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("build-sabayon-overlays-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("linux-sabayon-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("sabayon-sources-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("vhba"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles revdeps with seeds", func() {
|
||||
@@ -658,42 +710,42 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifacts, errs := compiler.CompileWithReverseDeps(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileWithReverseDeps(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(4))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
// A deps on B, so A artifacts are here:
|
||||
Expect(helpers.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
|
||||
// B
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("artifact42"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("artifact42"))).To(BeTrue())
|
||||
|
||||
// C depends on B, so B is here
|
||||
content1, err := helpers.Read(spec.Rel("c"))
|
||||
content1, err := fileHelper.Read(spec.Rel("c"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("cd"))
|
||||
content2, err := fileHelper.Read(spec.Rel("cd"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("c\n"))
|
||||
Expect(content2).To(Equal("c\n"))
|
||||
|
||||
// D is here as it requires C, and C was recompiled
|
||||
content1, err = helpers.Read(spec.Rel("d"))
|
||||
content1, err = fileHelper.Read(spec.Rel("d"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err = helpers.Read(spec.Rel("dd"))
|
||||
content2, err = fileHelper.Read(spec.Rel("dd"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("s\n"))
|
||||
Expect(content2).To(Equal("dd\n"))
|
||||
@@ -710,7 +762,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -722,24 +774,23 @@ var _ = Describe("Compiler", func() {
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(2)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
for _, d := range artifact.GetDependencies() {
|
||||
Expect(helpers.Exists(d.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(d.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
for _, d := range artifact.Dependencies {
|
||||
Expect(fileHelper.Exists(d.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(d.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
}
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
})
|
||||
})
|
||||
@@ -753,7 +804,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -765,15 +816,14 @@ var _ = Describe("Compiler", func() {
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
Expect(helpers.Untar(spec.Rel("runtime-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("var"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("var"))).ToNot(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -786,7 +836,7 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{
|
||||
Name: "dironly",
|
||||
@@ -814,21 +864,19 @@ var _ = Describe("Compiler", func() {
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir2)
|
||||
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec, spec2))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(2))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(0))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(0))
|
||||
|
||||
Expect(helpers.Untar(spec.Rel("dironly-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("test1"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test2"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test1"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test2"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Untar(spec2.Rel("dironly_filter-test-1.0.package.tar"), tmpdir2, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec2.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec2.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec2.Rel("artifact42"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("artifact42"))).ToNot(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -841,11 +889,11 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler.SetCompressionType(GZip)
|
||||
compiler.Options.CompressionType = compression.GZip
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
@@ -853,18 +901,17 @@ var _ = Describe("Compiler", func() {
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.package.tar"))).To(BeFalse())
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
Expect(fileHelper.Exists(spec.Rel("runtime-layer-0.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("runtime-layer-0.1.package.tar"))).To(BeFalse())
|
||||
Expect(artifacts[0].Unpack(tmpdir, false)).ToNot(HaveOccurred())
|
||||
// Expect(helpers.Untar(spec.Rel("runtime-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("var"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("var"))).ToNot(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -877,7 +924,7 @@ var _ = Describe("Compiler", func() {
|
||||
err := generalRecipe.Load("../../tests/fixtures/includeimage")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
specs, err := compiler.FromDatabase(generalRecipe.GetDatabase(), true, "")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -896,11 +943,11 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler.SetCompressionType(GZip)
|
||||
compiler.Options.CompressionType = compression.GZip
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
@@ -908,20 +955,19 @@ var _ = Describe("Compiler", func() {
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(artifacts[0].GetFiles()).To(ContainElement("bin/busybox"))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
Expect(artifacts[0].Files).To(ContainElement("bin/busybox"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("runtime-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
art, err := LoadArtifactFromYaml(spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
files := art.GetFiles()
|
||||
files := art.Files
|
||||
Expect(files).To(ContainElement("bin/busybox"))
|
||||
})
|
||||
})
|
||||
|
161
pkg/compiler/imagehashtree.go
Normal file
161
pkg/compiler/imagehashtree.go
Normal file
@@ -0,0 +1,161 @@
|
||||
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ImageHashTree is holding the Database
|
||||
// and the options to resolve PackageImageHashTrees
|
||||
// for a given specfile
|
||||
// It is responsible of returning a concrete result
|
||||
// which identifies a Package in a HashTree
|
||||
type ImageHashTree struct {
|
||||
Database pkg.PackageDatabase
|
||||
SolverOptions config.LuetSolverOptions
|
||||
}
|
||||
|
||||
// PackageImageHashTree represent the Package into a given image hash tree
|
||||
// The hash tree is constructed by a set of images representing
|
||||
// the package during its build stage. A Hash is assigned to each image
|
||||
// from the package fingerprint, plus the SAT solver assertion result (which is hashed as well)
|
||||
// and the specfile signatures. This guarantees that each image of the build stage
|
||||
// is unique and can be identified later on.
|
||||
type PackageImageHashTree struct {
|
||||
Target *solver.PackageAssert
|
||||
Dependencies solver.PackagesAssertions
|
||||
Solution solver.PackagesAssertions
|
||||
dependencyBuilderImageHashes map[string]string
|
||||
SourceHash string
|
||||
BuilderImageHash string
|
||||
}
|
||||
|
||||
func NewHashTree(db pkg.PackageDatabase) *ImageHashTree {
|
||||
return &ImageHashTree{
|
||||
Database: db,
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *PackageImageHashTree) DependencyBuildImage(p pkg.Package) (string, error) {
|
||||
found, ok := ht.dependencyBuilderImageHashes[p.GetFingerPrint()]
|
||||
if !ok {
|
||||
return "", errors.New("package hash not found")
|
||||
}
|
||||
return found, nil
|
||||
}
|
||||
|
||||
func (ht *PackageImageHashTree) String() string {
|
||||
return fmt.Sprintf(
|
||||
"Target buildhash: %s\nTarget packagehash: %s\nBuilder Imagehash: %s\nSource Imagehash: %s\n",
|
||||
ht.Target.Hash.BuildHash,
|
||||
ht.Target.Hash.PackageHash,
|
||||
ht.BuilderImageHash,
|
||||
ht.SourceHash,
|
||||
)
|
||||
}
|
||||
|
||||
// Query takes a compiler and a compilation spec and returns a PackageImageHashTree tied to it.
|
||||
// PackageImageHashTree contains all the informations to resolve the spec build images in order to
|
||||
// reproducibly re-build images from packages
|
||||
func (ht *ImageHashTree) Query(cs *LuetCompiler, p *compilerspec.LuetCompilationSpec) (*PackageImageHashTree, error) {
|
||||
assertions, err := ht.resolve(cs, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
targetAssertion := assertions.Search(p.GetPackage().GetFingerPrint())
|
||||
|
||||
dependencies := assertions.Drop(p.GetPackage())
|
||||
var sourceHash string
|
||||
imageHashes := map[string]string{}
|
||||
for _, assertion := range dependencies {
|
||||
var depbuildImageTag string
|
||||
compileSpec, err := cs.FromPackage(assertion.Package)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error while generating compilespec for "+assertion.Package.GetName())
|
||||
}
|
||||
if compileSpec.GetImage() != "" {
|
||||
depbuildImageTag = assertion.Hash.BuildHash
|
||||
} else {
|
||||
depbuildImageTag = ht.genBuilderImageTag(compileSpec, targetAssertion.Hash.PackageHash)
|
||||
}
|
||||
imageHashes[assertion.Package.GetFingerPrint()] = depbuildImageTag
|
||||
sourceHash = assertion.Hash.PackageHash
|
||||
}
|
||||
|
||||
return &PackageImageHashTree{
|
||||
Dependencies: dependencies,
|
||||
Target: targetAssertion,
|
||||
SourceHash: sourceHash,
|
||||
BuilderImageHash: ht.genBuilderImageTag(p, targetAssertion.Hash.PackageHash),
|
||||
dependencyBuilderImageHashes: imageHashes,
|
||||
Solution: assertions,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ht *ImageHashTree) genBuilderImageTag(p *compilerspec.LuetCompilationSpec, packageImage string) string {
|
||||
// Use packageImage as salt into the fp being used
|
||||
// so the hash is unique also in cases where
|
||||
// some package deps does have completely different
|
||||
// depgraphs
|
||||
return fmt.Sprintf("builder-%s", p.GetPackage().HashFingerprint(packageImage))
|
||||
}
|
||||
|
||||
// resolve computes the dependency tree of a compilation spec and returns solver assertions
|
||||
// in order to be able to compile the spec.
|
||||
func (ht *ImageHashTree) resolve(cs *LuetCompiler, p *compilerspec.LuetCompilationSpec) (solver.PackagesAssertions, error) {
|
||||
dependencies, err := cs.ComputeDepTree(p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().HumanReadableString())
|
||||
}
|
||||
|
||||
// Get hash from buildpsecs
|
||||
salts := map[string]string{}
|
||||
for _, assertion := range dependencies { //highly dependent on the order
|
||||
if assertion.Value {
|
||||
spec, err := cs.FromPackage(assertion.Package)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "while computing hash buildspecs")
|
||||
}
|
||||
hash, err := spec.Hash()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed computing hash")
|
||||
}
|
||||
salts[assertion.Package.GetFingerPrint()] = hash
|
||||
}
|
||||
}
|
||||
|
||||
assertions := solver.PackagesAssertions{}
|
||||
for _, assertion := range dependencies { //highly dependent on the order
|
||||
if assertion.Value {
|
||||
nthsolution := dependencies.Cut(assertion.Package)
|
||||
assertion.Hash = solver.PackageHash{
|
||||
BuildHash: nthsolution.SaltedHashFrom(assertion.Package, salts),
|
||||
PackageHash: nthsolution.SaltedAssertionHash(salts),
|
||||
}
|
||||
assertion.Package.SetTreeDir(p.Package.GetTreeDir())
|
||||
assertions = append(assertions, assertion)
|
||||
}
|
||||
}
|
||||
|
||||
return assertions, nil
|
||||
}
|
150
pkg/compiler/imagehashtree_test.go
Normal file
150
pkg/compiler/imagehashtree_test.go
Normal file
@@ -0,0 +1,150 @@
|
||||
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler_test
|
||||
|
||||
import (
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
sd "github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("ImageHashTree", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
hashtree := NewHashTree(generalRecipe.GetDatabase())
|
||||
Context("Simple package definition", func() {
|
||||
BeforeEach(func() {
|
||||
generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
err := generalRecipe.Load("../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
hashtree = NewHashTree(generalRecipe.GetDatabase())
|
||||
|
||||
})
|
||||
|
||||
It("Calculates the hash correctly", func() {
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
packageHash, err := hashtree.Query(compiler, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(packageHash.Target.Hash.BuildHash).To(Equal("53993e5a02da4c21ad845371c872f5836fe45ff3a4e3c5ccb6296d0faee2b107"))
|
||||
Expect(packageHash.Target.Hash.PackageHash).To(Equal("a786d3fd29d0b8bdfe5f304c8bf8be909d5c764cd7059c0e63294a8bff17f3ef"))
|
||||
Expect(packageHash.BuilderImageHash).To(Equal("builder-0cd3c0d07fc9be568377b3bf1b699e06"))
|
||||
})
|
||||
})
|
||||
|
||||
expectedPackageHash := "0d568ac04c4ca528a4e5b67978f2ad3a75d31d443ab20f9d7683b9608cc0d494"
|
||||
|
||||
Context("complex package definition", func() {
|
||||
BeforeEach(func() {
|
||||
generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/upgrade_old_repo_revision")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
hashtree = NewHashTree(generalRecipe.GetDatabase())
|
||||
|
||||
})
|
||||
It("Calculates the hash correctly", func() {
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
packageHash, err := hashtree.Query(compiler, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(expectedPackageHash))
|
||||
Expect(packageHash.SourceHash).To(Equal(expectedPackageHash))
|
||||
Expect(packageHash.BuilderImageHash).To(Equal("builder-0f45c345f59103e84fc8bebbf02f2e2b"))
|
||||
|
||||
//Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
|
||||
Expect(packageHash.Target.Hash.PackageHash).To(Equal("2e8159583ac825acada763358290cfbea919a33873a926cab84f4f1a67ecf111"))
|
||||
a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}
|
||||
hash, err := packageHash.DependencyBuildImage(a)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(hash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
|
||||
|
||||
assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())
|
||||
Expect(assertionA.Hash.PackageHash).To(Equal(expectedPackageHash))
|
||||
b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
|
||||
assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())
|
||||
|
||||
Expect(assertionB.Hash.PackageHash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
|
||||
hashB, err := packageHash.DependencyBuildImage(b)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(hashB).To(Equal("315075265aeb2e3c04c5428d31911f53c194ec9fa3db1421e8478f44b1e0def8"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("complex package definition, with small change in build.yaml", func() {
|
||||
BeforeEach(func() {
|
||||
generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
//Definition of A here is slightly changed in the steps build.yaml file (1 character only)
|
||||
err := generalRecipe.Load("../../tests/fixtures/upgrade_old_repo_revision_content_changed")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
hashtree = NewHashTree(generalRecipe.GetDatabase())
|
||||
|
||||
})
|
||||
It("Calculates the hash correctly", func() {
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
packageHash, err := hashtree.Query(compiler, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).ToNot(Equal(expectedPackageHash))
|
||||
sourceHash := "66ec001fe72052d0e605ca96f607ae39ea4f8b53f0b7f762e377622d9c654de3"
|
||||
Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(sourceHash))
|
||||
Expect(packageHash.SourceHash).To(Equal(sourceHash))
|
||||
|
||||
Expect(packageHash.SourceHash).ToNot(Equal(expectedPackageHash))
|
||||
|
||||
Expect(packageHash.BuilderImageHash).To(Equal("builder-ffc02fd8aaa916d0e17249885b3226b1"))
|
||||
|
||||
//Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
|
||||
Expect(packageHash.Target.Hash.PackageHash).To(Equal("b9c0286ebf6d28be831926ec7da9cb3cda6b489722d656aefc363ebd7173f937"))
|
||||
a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}
|
||||
hash, err := packageHash.DependencyBuildImage(a)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(hash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
|
||||
|
||||
assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())
|
||||
|
||||
Expect(assertionA.Hash.PackageHash).To(Equal("66ec001fe72052d0e605ca96f607ae39ea4f8b53f0b7f762e377622d9c654de3"))
|
||||
Expect(assertionA.Hash.PackageHash).ToNot(Equal(expectedPackageHash))
|
||||
|
||||
b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
|
||||
assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())
|
||||
|
||||
Expect(assertionB.Hash.PackageHash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
|
||||
hashB, err := packageHash.DependencyBuildImage(b)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(hashB).To(Equal("315075265aeb2e3c04c5428d31911f53c194ec9fa3db1421e8478f44b1e0def8"))
|
||||
})
|
||||
})
|
||||
|
||||
})
|
@@ -1,201 +0,0 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
)
|
||||
|
||||
type Compiler interface {
|
||||
Compile(bool, CompilationSpec) (Artifact, error)
|
||||
CompileParallel(keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error)
|
||||
CompileWithReverseDeps(keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error)
|
||||
ComputeDepTree(p CompilationSpec) (solver.PackagesAssertions, error)
|
||||
ComputeMinimumCompilableSet(p ...CompilationSpec) ([]CompilationSpec, error)
|
||||
SetConcurrency(i int)
|
||||
FromPackage(pkg.Package) (CompilationSpec, error)
|
||||
FromDatabase(db pkg.PackageDatabase, minimum bool, dst string) ([]CompilationSpec, error)
|
||||
SetBackend(CompilerBackend)
|
||||
GetBackend() CompilerBackend
|
||||
|
||||
SetBackendArgs([]string)
|
||||
SetCompressionType(t CompressionImplementation)
|
||||
}
|
||||
|
||||
type CompilerBackendOptions struct {
|
||||
ImageName string
|
||||
SourcePath string
|
||||
DockerFileName string
|
||||
Destination string
|
||||
Context string
|
||||
BackendArgs []string
|
||||
}
|
||||
|
||||
type CompilerOptions struct {
|
||||
ImageRepository string
|
||||
PullFirst, KeepImg, Push bool
|
||||
Concurrency int
|
||||
CompressionType CompressionImplementation
|
||||
|
||||
Wait bool
|
||||
OnlyDeps bool
|
||||
NoDeps bool
|
||||
SolverOptions config.LuetSolverOptions
|
||||
BuildValuesFile string
|
||||
|
||||
PackageTargetOnly bool
|
||||
}
|
||||
|
||||
func NewDefaultCompilerOptions() *CompilerOptions {
|
||||
return &CompilerOptions{
|
||||
ImageRepository: "luet/cache",
|
||||
PullFirst: false,
|
||||
Push: false,
|
||||
CompressionType: None,
|
||||
KeepImg: true,
|
||||
Concurrency: runtime.NumCPU(),
|
||||
OnlyDeps: false,
|
||||
NoDeps: false,
|
||||
}
|
||||
}
|
||||
|
||||
type CompilerBackend interface {
|
||||
BuildImage(CompilerBackendOptions) error
|
||||
ExportImage(CompilerBackendOptions) error
|
||||
RemoveImage(CompilerBackendOptions) error
|
||||
Changes(fromImage, toImage CompilerBackendOptions) ([]ArtifactLayer, error)
|
||||
ImageDefinitionToTar(CompilerBackendOptions) error
|
||||
ExtractRootfs(opts CompilerBackendOptions, keepPerms bool) error
|
||||
|
||||
CopyImage(string, string) error
|
||||
DownloadImage(opts CompilerBackendOptions) error
|
||||
|
||||
Push(opts CompilerBackendOptions) error
|
||||
ImageAvailable(string) bool
|
||||
|
||||
ImageExists(string) bool
|
||||
}
|
||||
|
||||
type Artifact interface {
|
||||
GetPath() string
|
||||
SetPath(string)
|
||||
GetDependencies() []Artifact
|
||||
SetDependencies(d []Artifact)
|
||||
GetSourceAssertion() solver.PackagesAssertions
|
||||
SetSourceAssertion(as solver.PackagesAssertions)
|
||||
|
||||
SetCompileSpec(as CompilationSpec)
|
||||
GetCompileSpec() CompilationSpec
|
||||
WriteYaml(dst string) error
|
||||
Unpack(dst string, keepPerms bool) error
|
||||
Compress(src string, concurrency int) error
|
||||
SetCompressionType(t CompressionImplementation)
|
||||
FileList() ([]string, error)
|
||||
Hash() error
|
||||
Verify() error
|
||||
|
||||
SetFiles(f []string)
|
||||
GetFiles() []string
|
||||
GetFileName() string
|
||||
|
||||
GetChecksums() Checksums
|
||||
SetChecksums(c Checksums)
|
||||
|
||||
GenerateFinalImage(string, CompilerBackend, bool) (CompilerBackendOptions, error)
|
||||
GetUncompressedName() string
|
||||
}
|
||||
|
||||
type ArtifactNode struct {
|
||||
Name string `json:"Name"`
|
||||
Size int `json:"Size"`
|
||||
}
|
||||
type ArtifactDiffs struct {
|
||||
Additions []ArtifactNode `json:"Adds"`
|
||||
Deletions []ArtifactNode `json:"Dels"`
|
||||
Changes []ArtifactNode `json:"Mods"`
|
||||
}
|
||||
type ArtifactLayer struct {
|
||||
FromImage string `json:"Image1"`
|
||||
ToImage string `json:"Image2"`
|
||||
Diffs ArtifactDiffs `json:"Diff"`
|
||||
}
|
||||
type ArtifactLayerSummary struct {
|
||||
FromImage string `json:"image1"`
|
||||
ToImage string `json:"image2"`
|
||||
AddFiles int `json:"add_files"`
|
||||
AddSizes int64 `json:"add_sizes"`
|
||||
DelFiles int `json:"del_files"`
|
||||
DelSizes int64 `json:"del_sizes"`
|
||||
ChangeFiles int `json:"change_files"`
|
||||
ChangeSizes int64 `json:"change_sizes"`
|
||||
}
|
||||
type ArtifactLayersSummary struct {
|
||||
Layers []ArtifactLayerSummary `json:"summary"`
|
||||
}
|
||||
|
||||
// CompilationSpec represent a compilation specification derived from a package
|
||||
type CompilationSpec interface {
|
||||
ImageUnpack() bool // tells if the definition is just an image
|
||||
GetIncludes() []string
|
||||
GetExcludes() []string
|
||||
|
||||
RenderBuildImage() (string, error)
|
||||
WriteBuildImageDefinition(string) error
|
||||
|
||||
RenderStepImage(image string) (string, error)
|
||||
WriteStepImageDefinition(fromimage, path string) error
|
||||
|
||||
GetPackage() pkg.Package
|
||||
BuildSteps() []string
|
||||
|
||||
GetSeedImage() string
|
||||
SetSeedImage(string)
|
||||
|
||||
GetImage() string
|
||||
SetImage(string)
|
||||
|
||||
SetOutputPath(string)
|
||||
GetOutputPath() string
|
||||
Rel(string) string
|
||||
|
||||
GetPreBuildSteps() []string
|
||||
|
||||
GetSourceAssertion() solver.PackagesAssertions
|
||||
SetSourceAssertion(as solver.PackagesAssertions)
|
||||
|
||||
GetRetrieve() []string
|
||||
CopyRetrieves(dest string) error
|
||||
|
||||
SetPackageDir(string)
|
||||
GetPackageDir() string
|
||||
|
||||
EmptyPackage() bool
|
||||
UnpackedPackage() bool
|
||||
HasImageSource() bool
|
||||
IsVirtual() bool
|
||||
}
|
||||
|
||||
type CompilationSpecs interface {
|
||||
Unique() CompilationSpecs
|
||||
Len() int
|
||||
All() []CompilationSpec
|
||||
Add(CompilationSpec)
|
||||
Remove(s CompilationSpecs) CompilationSpecs
|
||||
}
|
@@ -13,12 +13,14 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler
|
||||
package artifact
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -27,7 +29,6 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
system "github.com/docker/docker/pkg/system"
|
||||
zstd "github.com/klauspost/compress/zstd"
|
||||
gzip "github.com/klauspost/pgzip"
|
||||
|
||||
@@ -36,8 +37,12 @@ import (
|
||||
"sync"
|
||||
|
||||
bus "github.com/mudler/luet/pkg/bus"
|
||||
backend "github.com/mudler/luet/pkg/compiler/backend"
|
||||
compression "github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
. "github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
@@ -45,54 +50,31 @@ import (
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type CompressionImplementation string
|
||||
|
||||
const (
|
||||
None CompressionImplementation = "none" // e.g. tar for standard packages
|
||||
GZip CompressionImplementation = "gzip"
|
||||
Zstandard CompressionImplementation = "zstd"
|
||||
)
|
||||
|
||||
type ArtifactIndex []Artifact
|
||||
|
||||
func (i ArtifactIndex) CleanPath() ArtifactIndex {
|
||||
newIndex := ArtifactIndex{}
|
||||
for _, n := range i {
|
||||
art := n.(*PackageArtifact)
|
||||
// FIXME: This is a dup and makes difficult to add attributes to artifacts
|
||||
newIndex = append(newIndex, &PackageArtifact{
|
||||
Path: path.Base(n.GetPath()),
|
||||
SourceAssertion: art.SourceAssertion,
|
||||
CompileSpec: art.CompileSpec,
|
||||
Dependencies: art.Dependencies,
|
||||
CompressionType: art.CompressionType,
|
||||
Checksums: art.Checksums,
|
||||
Files: art.Files,
|
||||
})
|
||||
}
|
||||
return newIndex
|
||||
//Update if exists, otherwise just create
|
||||
}
|
||||
|
||||
// When compiling, we write also a fingerprint.metadata.yaml file with PackageArtifact. In this way we can have another command to create the repository
|
||||
// which will consist in just of an repository.yaml which is just the repository structure with the list of package artifact.
|
||||
// In this way a generic client can fetch the packages and, after unpacking the tree, performing queries to install packages.
|
||||
type PackageArtifact struct {
|
||||
Path string `json:"path"`
|
||||
|
||||
Dependencies []*PackageArtifact `json:"dependencies"`
|
||||
CompileSpec *LuetCompilationSpec `json:"compilationspec"`
|
||||
Checksums Checksums `json:"checksums"`
|
||||
SourceAssertion solver.PackagesAssertions `json:"-"`
|
||||
CompressionType CompressionImplementation `json:"compressiontype"`
|
||||
Files []string `json:"files"`
|
||||
Dependencies []*PackageArtifact `json:"dependencies"`
|
||||
CompileSpec *compilerspec.LuetCompilationSpec `json:"compilationspec"`
|
||||
Checksums Checksums `json:"checksums"`
|
||||
SourceAssertion solver.PackagesAssertions `json:"-"`
|
||||
CompressionType compression.Implementation `json:"compressiontype"`
|
||||
Files []string `json:"files"`
|
||||
PackageCacheImage string `json:"package_cacheimage"`
|
||||
}
|
||||
|
||||
func NewPackageArtifact(path string) Artifact {
|
||||
return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: None}
|
||||
func (p *PackageArtifact) ShallowCopy() *PackageArtifact {
|
||||
copy := *p
|
||||
return ©
|
||||
}
|
||||
|
||||
func NewPackageArtifactFromYaml(data []byte) (Artifact, error) {
|
||||
func NewPackageArtifact(path string) *PackageArtifact {
|
||||
return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: compression.None}
|
||||
}
|
||||
|
||||
func NewPackageArtifactFromYaml(data []byte) (*PackageArtifact, error) {
|
||||
p := &PackageArtifact{Checksums: Checksums{}}
|
||||
err := yaml.Unmarshal(data, &p)
|
||||
if err != nil {
|
||||
@@ -102,42 +84,6 @@ func NewPackageArtifactFromYaml(data []byte) (Artifact, error) {
|
||||
return p, err
|
||||
}
|
||||
|
||||
func LoadArtifactFromYaml(spec CompilationSpec) (Artifact, error) {
|
||||
|
||||
metaFile := spec.GetPackage().GetFingerPrint() + ".metadata.yaml"
|
||||
dat, err := ioutil.ReadFile(spec.Rel(metaFile))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error reading file "+metaFile)
|
||||
}
|
||||
art, err := NewPackageArtifactFromYaml(dat)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error writing file "+metaFile)
|
||||
}
|
||||
// It is relative, set it back to abs
|
||||
art.SetPath(spec.Rel(art.GetPath()))
|
||||
return art, nil
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetCompressionType(t CompressionImplementation) {
|
||||
a.CompressionType = t
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetChecksums() Checksums {
|
||||
return a.Checksums
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetChecksums(c Checksums) {
|
||||
a.Checksums = c
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetFiles(f []string) {
|
||||
a.Files = f
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetFiles() []string {
|
||||
return a.Files
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) Hash() error {
|
||||
return a.Checksums.Generate(a)
|
||||
}
|
||||
@@ -181,8 +127,8 @@ func (a *PackageArtifact) WriteYaml(dst string) error {
	}
	//p := a.CompileSpec.GetPackage().GetPath()

	mangle.GetCompileSpec().GetPackage().SetPath("")
	for _, ass := range mangle.GetCompileSpec().GetSourceAssertion() {
	mangle.CompileSpec.GetPackage().SetPath("")
	for _, ass := range mangle.CompileSpec.GetSourceAssertion() {
		ass.Package.SetPath("")
	}

@@ -191,7 +137,7 @@ func (a *PackageArtifact) WriteYaml(dst string) error {
		return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
	}

	err = ioutil.WriteFile(filepath.Join(dst, a.GetCompileSpec().GetPackage().GetFingerPrint()+".metadata.yaml"), data, os.ModePerm)
	err = ioutil.WriteFile(filepath.Join(dst, a.CompileSpec.GetPackage().GetMetadataFilePath()), data, os.ModePerm)
	if err != nil {
		return errors.Wrap(err, "While writing PackageArtifact YAML")
	}
@@ -201,48 +147,8 @@ func (a *PackageArtifact) WriteYaml(dst string) error {
	return nil
}

func (a *PackageArtifact) GetSourceAssertion() solver.PackagesAssertions {
	return a.SourceAssertion
}

func (a *PackageArtifact) SetCompileSpec(as CompilationSpec) {
	a.CompileSpec = as.(*LuetCompilationSpec)
}

func (a *PackageArtifact) GetCompileSpec() CompilationSpec {
	return a.CompileSpec
}

func (a *PackageArtifact) SetSourceAssertion(as solver.PackagesAssertions) {
	a.SourceAssertion = as
}

func (a *PackageArtifact) GetDependencies() []Artifact {
	ret := []Artifact{}
	for _, d := range a.Dependencies {
		ret = append(ret, d)
	}
	return ret
}

func (a *PackageArtifact) SetDependencies(d []Artifact) {
	ret := []*PackageArtifact{}
	for _, dd := range d {
		ret = append(ret, dd.(*PackageArtifact))
	}
	a.Dependencies = ret
}

func (a *PackageArtifact) GetPath() string {
	return a.Path
}

func (a *PackageArtifact) GetFileName() string {
	return path.Base(a.GetPath())
}

func (a *PackageArtifact) SetPath(p string) {
	a.Path = p
	return path.Base(a.Path)
}

func (a *PackageArtifact) genDockerfile() string {
@@ -253,14 +159,20 @@ COPY . /`

// CreateArtifactForFile creates a new artifact from the given file
func CreateArtifactForFile(s string, opts ...func(*PackageArtifact)) (*PackageArtifact, error) {

	if _, err := os.Stat(s); os.IsNotExist(err) {
		return nil, errors.Wrap(err, "artifact path doesn't exist")
	}
	fileName := path.Base(s)
	archive, err := LuetCfg.GetSystem().TempDir("archive")
	if err != nil {
		return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
	}
	defer os.RemoveAll(archive) // clean up
	helpers.CopyFile(s, filepath.Join(archive, fileName))
	dst := filepath.Join(archive, fileName)
	if err := fileHelper.CopyFile(s, dst); err != nil {
		return nil, errors.Wrapf(err, "error while copying %s to %s", s, dst)
	}

	artifact, err := LuetCfg.GetSystem().TempDir("artifact")
	if err != nil {
		return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
@@ -274,9 +186,13 @@ func CreateArtifactForFile(s string, opts ...func(*PackageArtifact)) (*PackageAr
	return a, a.Compress(archive, 1)
}
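CreateArtifactForFile is the entry point for turning a single file into a package artifact: it stages the file in a temporary directory and compresses it with the artifact's configured compression. A hedged usage sketch, assuming the signature above; the source path is only an example.

package main

import (
	"log"

	"github.com/mudler/luet/pkg/compiler/types/artifact"
)

func main() {
	// The variadic options presumably tweak the artifact before compression
	// (e.g. setting CompressionType); none are passed here.
	a, err := artifact.CreateArtifactForFile("/etc/hosts")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("single-file artifact created at", a.Path)
}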

type ImageBuilder interface {
	BuildImage(backend.Options) error
}

// GenerateFinalImage takes an artifact and builds a Docker image with its content
func (a *PackageArtifact) GenerateFinalImage(imageName string, b CompilerBackend, keepPerms bool) (CompilerBackendOptions, error) {
	builderOpts := CompilerBackendOptions{}
func (a *PackageArtifact) GenerateFinalImage(imageName string, b ImageBuilder, keepPerms bool) (backend.Options, error) {
	builderOpts := backend.Options{}
	archive, err := LuetCfg.GetSystem().TempDir("archive")
	if err != nil {
		return builderOpts, errors.Wrap(err, "error met while creating tempdir for "+a.Path)
@@ -294,7 +210,7 @@ func (a *PackageArtifact) GenerateFinalImage(imageName string, b CompilerBackend
		return builderOpts, errors.Wrap(err, "error met while uncompressing artifact "+a.Path)
	}

	empty, err := helpers.DirectoryIsEmpty(uncompressedFiles)
	empty, err := fileHelper.DirectoryIsEmpty(uncompressedFiles)
	if err != nil {
		return builderOpts, errors.Wrap(err, "error met while checking if directory is empty "+uncompressedFiles)
	}
@@ -303,7 +219,7 @@ func (a *PackageArtifact) GenerateFinalImage(imageName string, b CompilerBackend
	// We can't generate FROM scratch empty images. Docker will refuse to export them
	// workaround: Inject a .virtual empty file
	if empty {
		helpers.Touch(filepath.Join(uncompressedFiles, ".virtual"))
		fileHelper.Touch(filepath.Join(uncompressedFiles, ".virtual"))
	}

	data := a.genDockerfile()
@@ -311,7 +227,7 @@ func (a *PackageArtifact) GenerateFinalImage(imageName string, b CompilerBackend
		return builderOpts, errors.Wrap(err, "error met while rendering artifact dockerfile "+a.Path)
	}

	builderOpts = CompilerBackendOptions{
	builderOpts = backend.Options{
		ImageName:      imageName,
		SourcePath:     archive,
		DockerFileName: dockerFile,
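GenerateFinalImage now only needs something that can build an image (the narrow ImageBuilder interface) instead of a full CompilerBackend, and it returns the plain backend.Options it used. A sketch of the intended call with a stand-in builder; in real use the builder would be the Docker or img compiler backend, and the image name here is hypothetical.

package main

import (
	"log"

	backend "github.com/mudler/luet/pkg/compiler/backend"
	"github.com/mudler/luet/pkg/compiler/types/artifact"
)

// noopBuilder satisfies artifact.ImageBuilder without actually building anything.
type noopBuilder struct{}

func (noopBuilder) BuildImage(backend.Options) error { return nil }

func main() {
	a := artifact.NewPackageArtifact("/tmp/foo-1.0.package.tar")

	opts, err := a.GenerateFinalImage("quay.io/example/foo:1.0", noopBuilder{}, false)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("would build", opts.ImageName, "from", opts.SourcePath)
}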
@@ -326,7 +242,7 @@ func (a *PackageArtifact) GenerateFinalImage(imageName string, b CompilerBackend
func (a *PackageArtifact) Compress(src string, concurrency int) error {
	switch a.CompressionType {

	case Zstandard:
	case compression.Zstandard:
		err := helpers.Tar(src, a.Path)
		if err != nil {
			return err
@@ -364,7 +280,7 @@ func (a *PackageArtifact) Compress(src string, concurrency int) error {

		a.Path = zstdFile
		return nil
	case GZip:
	case compression.GZip:
		err := helpers.Tar(src, a.Path)
		if err != nil {
			return err
@@ -404,15 +320,14 @@ func (a *PackageArtifact) Compress(src string, concurrency int) error {
	default:
		return helpers.Tar(src, a.getCompressedName())
	}
	return errors.New("Compression type must be supplied")
}

func (a *PackageArtifact) getCompressedName() string {
	switch a.CompressionType {
	case Zstandard:
	case compression.Zstandard:
		return a.Path + ".zst"

	case GZip:
	case compression.GZip:
		return a.Path + ".gz"
	}
	return a.Path
@@ -421,12 +336,34 @@ func (a *PackageArtifact) getCompressedName() string {
// GetUncompressedName returns the artifact path without the extension suffix
func (a *PackageArtifact) GetUncompressedName() string {
	switch a.CompressionType {
	case Zstandard, GZip:
	case compression.Zstandard, compression.GZip:
		return strings.TrimSuffix(a.Path, filepath.Ext(a.Path))
	}
	return a.Path
}

func hashContent(bv []byte) string {
	hasher := sha1.New()
	hasher.Write(bv)
	sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
	return sha
}

func hashFileContent(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	h := sha1.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}

	return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}

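The compressed/uncompressed naming convention is simple: getCompressedName appends .zst or .gz, and GetUncompressedName strips one extension. A tiny sketch mirroring the expectations of the artifact test suite further down in this diff:

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/compiler/types/artifact"
	"github.com/mudler/luet/pkg/compiler/types/compression"
)

func main() {
	a := artifact.NewPackageArtifact("foo.tar.gz")
	a.CompressionType = compression.GZip
	fmt.Println(a.GetUncompressedName()) // foo.tar

	a = artifact.NewPackageArtifact("foo.tar")
	a.CompressionType = compression.None
	fmt.Println(a.GetUncompressedName()) // foo.tar
}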
func tarModifierWrapperFunc(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
	// If the destination path already exists I rename target file name with postfix.
	var destPath string
@@ -438,6 +375,7 @@ func tarModifierWrapperFunc(dst, path string, header *tar.Header, content io.Rea
			return nil, nil, err
		}
	}
	tarHash := hashContent(buffer.Bytes())

	// If file is not present on archive but is defined on mods
	// I receive the callback. Prevent nil exception.
@@ -450,13 +388,26 @@ func tarModifierWrapperFunc(dst, path string, header *tar.Header, content io.Rea
		return header, buffer.Bytes(), nil
	}

	existingHash := ""
	f, err := os.Lstat(destPath)
	if err == nil {
		Debug("File exists already, computing hash for", destPath)
		hash, herr := hashFileContent(destPath)
		if herr == nil {
			existingHash = hash
		}
	}

	Debug("Existing file hash: ", existingHash, "Tar file hashsum: ", tarHash)
	// We want to protect file only if the hash of the files are differing OR the file size are
	differs := (existingHash != "" && existingHash != tarHash) || (err != nil && f != nil && header.Size != f.Size())
	// Check if exists
	if helpers.Exists(destPath) {
	if fileHelper.Exists(destPath) && differs {
		for i := 1; i < 1000; i++ {
			name := filepath.Join(filepath.Join(filepath.Dir(path),
				fmt.Sprintf("._cfg%04d_%s", i, filepath.Base(path))))

			if helpers.Exists(name) {
			if fileHelper.Exists(name) {
				continue
			}
			Info(fmt.Sprintf("Found protected file %s. Creating %s.", destPath,
@@ -509,13 +460,13 @@ func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
	tarModifier := helpers.NewTarModifierWrapper(dst, tarModifierWrapperFunc)

	switch a.CompressionType {
	case Zstandard:
	case compression.Zstandard:
		// Create the uncompressed archive
		archive, err := os.Create(a.GetPath() + ".uncompressed")
		archive, err := os.Create(a.Path + ".uncompressed")
		if err != nil {
			return err
		}
		defer os.RemoveAll(a.GetPath() + ".uncompressed")
		defer os.RemoveAll(a.Path + ".uncompressed")
		defer archive.Close()

		original, err := os.Open(a.Path)
@@ -534,22 +485,22 @@ func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {

		_, err = io.Copy(archive, d)
		if err != nil {
			return errors.Wrap(err, "Cannot copy to "+a.GetPath()+".uncompressed")
			return errors.Wrap(err, "Cannot copy to "+a.Path+".uncompressed")
		}

		err = helpers.UntarProtect(a.GetPath()+".uncompressed", dst,
		err = helpers.UntarProtect(a.Path+".uncompressed", dst,
			LuetCfg.GetGeneral().SameOwner, protectedFiles, tarModifier)
		if err != nil {
			return err
		}
		return nil
	case GZip:
	case compression.GZip:
		// Create the uncompressed archive
		archive, err := os.Create(a.GetPath() + ".uncompressed")
		archive, err := os.Create(a.Path + ".uncompressed")
		if err != nil {
			return err
		}
		defer os.RemoveAll(a.GetPath() + ".uncompressed")
		defer os.RemoveAll(a.Path + ".uncompressed")
		defer archive.Close()

		original, err := os.Open(a.Path)
@@ -567,10 +518,10 @@ func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {

		_, err = io.Copy(archive, r)
		if err != nil {
			return errors.Wrap(err, "Cannot copy to "+a.GetPath()+".uncompressed")
			return errors.Wrap(err, "Cannot copy to "+a.Path+".uncompressed")
		}

		err = helpers.UntarProtect(a.GetPath()+".uncompressed", dst,
		err = helpers.UntarProtect(a.Path+".uncompressed", dst,
			LuetCfg.GetGeneral().SameOwner, protectedFiles, tarModifier)
		if err != nil {
			return err
@@ -578,7 +529,7 @@ func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
		return nil
	// Defaults to tar only (covers when "none" is supplied)
	default:
		return helpers.UntarProtect(a.GetPath(), dst, LuetCfg.GetGeneral().SameOwner,
		return helpers.UntarProtect(a.Path, dst, LuetCfg.GetGeneral().SameOwner,
			protectedFiles, tarModifier)
	}
	return errors.New("Compression type must be supplied")
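Unpack is the consumer side of Compress: it decompresses to a temporary .uncompressed tarball and extracts it with UntarProtect, so files matching the config-protect rules are written as ._cfgNNNN_name copies instead of being overwritten. A short, assumption-laden sketch of a compress/unpack round trip (paths are illustrative):

package main

import (
	"log"

	"github.com/mudler/luet/pkg/compiler/types/artifact"
	"github.com/mudler/luet/pkg/compiler/types/compression"
)

func main() {
	a := artifact.NewPackageArtifact("/tmp/foo-1.0.package.tar")
	a.CompressionType = compression.Zstandard

	if err := a.Compress("/tmp/foo-rootfs", 2); err != nil {
		log.Fatal(err)
	}

	// Ownership handling follows LuetCfg.GetGeneral().SameOwner in the hunks above.
	if err := a.Unpack("/tmp/target-rootfs", false); err != nil {
		log.Fatal(err)
	}
}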
@@ -588,12 +539,12 @@ func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
func (a *PackageArtifact) FileList() ([]string, error) {
	var tr *tar.Reader
	switch a.CompressionType {
	case Zstandard:
		archive, err := os.Create(a.GetPath() + ".uncompressed")
	case compression.Zstandard:
		archive, err := os.Create(a.Path + ".uncompressed")
		if err != nil {
			return []string{}, err
		}
		defer os.RemoveAll(a.GetPath() + ".uncompressed")
		defer os.RemoveAll(a.Path + ".uncompressed")
		defer archive.Close()

		original, err := os.Open(a.Path)
@@ -609,13 +560,13 @@ func (a *PackageArtifact) FileList() ([]string, error) {
		}
		defer r.Close()
		tr = tar.NewReader(r)
	case GZip:
	case compression.GZip:
		// Create the uncompressed archive
		archive, err := os.Create(a.GetPath() + ".uncompressed")
		archive, err := os.Create(a.Path + ".uncompressed")
		if err != nil {
			return []string{}, err
		}
		defer os.RemoveAll(a.GetPath() + ".uncompressed")
		defer os.RemoveAll(a.Path + ".uncompressed")
		defer archive.Close()

		original, err := os.Open(a.Path)
@@ -634,7 +585,7 @@ func (a *PackageArtifact) FileList() ([]string, error) {

	// Defaults to tar only (covers when "none" is supplied)
	default:
		tarFile, err := os.Open(a.GetPath())
		tarFile, err := os.Open(a.Path)
		if err != nil {
			return []string{}, errors.Wrap(err, "Could not open package archive")
		}
@@ -671,47 +622,16 @@ type CopyJob struct {
	Artifact string
}

func copyXattr(srcPath, dstPath, attr string) error {
	data, err := system.Lgetxattr(srcPath, attr)
	if err != nil {
		return err
	}
	if data != nil {
		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
			return err
		}
	}
	return nil
}

func doCopyXattrs(srcPath, dstPath string) error {
	if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
		return err
	}

	return copyXattr(srcPath, dstPath, "trusted.overlay.opaque")
}

func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
	defer wg.Done()

	for job := range s {
		//Info("#"+strconv.Itoa(i), "copying", job.Src, "to", job.Dst)
		// if dir, err := helpers.IsDirectory(job.Src); err == nil && dir {
		// 	err = helpers.CopyDir(job.Src, job.Dst)
		// 	if err != nil {
		// 		Warning("Error copying dir", job, err)
		// 	}
		// 	continue
		// }

		_, err := os.Lstat(job.Dst)
		if err != nil {
			Debug("Copying ", job.Src)
			if err := helpers.CopyFile(job.Src, job.Dst); err != nil {
			if err := fileHelper.DeepCopyFile(job.Src, job.Dst); err != nil {
				Warning("Error copying", job, err)
			}
			doCopyXattrs(job.Src, job.Dst)
		}
	}
}
@@ -729,8 +649,24 @@ func compileRegexes(regexes []string) []*regexp.Regexp {
	return result
}

type ArtifactNode struct {
	Name string `json:"Name"`
	Size int    `json:"Size"`
}
type ArtifactDiffs struct {
	Additions []ArtifactNode `json:"Adds"`
	Deletions []ArtifactNode `json:"Dels"`
	Changes   []ArtifactNode `json:"Mods"`
}

type ArtifactLayer struct {
	FromImage string        `json:"Image1"`
	ToImage   string        `json:"Image2"`
	Diffs     ArtifactDiffs `json:"Diff"`
}

// ExtractArtifactFromDelta extracts deltas from ArtifactLayer from an image in tar format
func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, excludes []string, t CompressionImplementation) (Artifact, error) {
func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, excludes []string, t compression.Implementation) (*PackageArtifact, error) {

	archive, err := LuetCfg.GetSystem().TempDir("archive")
	if err != nil {
@@ -852,45 +788,10 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
	wg.Wait()

	a := NewPackageArtifact(dst)
	a.SetCompressionType(t)
	a.CompressionType = t
	err = a.Compress(archive, concurrency)
	if err != nil {
		return nil, errors.Wrap(err, "Error met while creating package archive")
	}
	return a, nil
}

func ComputeArtifactLayerSummary(diffs []ArtifactLayer) ArtifactLayersSummary {

	ans := ArtifactLayersSummary{
		Layers: make([]ArtifactLayerSummary, 0),
	}

	for _, layer := range diffs {
		sum := ArtifactLayerSummary{
			FromImage:   layer.FromImage,
			ToImage:     layer.ToImage,
			AddFiles:    0,
			AddSizes:    0,
			DelFiles:    0,
			DelSizes:    0,
			ChangeFiles: 0,
			ChangeSizes: 0,
		}
		for _, a := range layer.Diffs.Additions {
			sum.AddFiles++
			sum.AddSizes += int64(a.Size)
		}
		for _, d := range layer.Diffs.Deletions {
			sum.DelFiles++
			sum.DelSizes += int64(d.Size)
		}
		for _, c := range layer.Diffs.Changes {
			sum.ChangeFiles++
			sum.ChangeSizes += int64(c.Size)
		}
		ans.Layers = append(ans.Layers, sum)
	}

	return ans
}
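ExtractArtifactFromDelta and ComputeArtifactLayerSummary consume the same ArtifactLayer diff structure: the former turns the added and changed files into a package tarball, the latter just aggregates per-layer counters. A small sketch of the summary helper, assuming the field names shown above; the image names and sizes are made up.

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/compiler/types/artifact"
)

func main() {
	layers := []artifact.ArtifactLayer{{
		FromImage: "luet/base",
		ToImage:   "test",
		Diffs: artifact.ArtifactDiffs{
			Additions: []artifact.ArtifactNode{{Name: "/test", Size: 4}, {Name: "/test2", Size: 4}},
		},
	}}

	sum := artifact.ComputeArtifactLayerSummary(layers)
	for _, l := range sum.Layers {
		fmt.Printf("%s -> %s: %d files added (%d bytes)\n", l.FromImage, l.ToImage, l.AddFiles, l.AddSizes)
	}
}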
32
pkg/compiler/types/artifact/artifact_suite_test.go
Normal file
@@ -0,0 +1,32 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package artifact_test

import (
	"testing"

	. "github.com/mudler/luet/cmd"
	config "github.com/mudler/luet/pkg/config"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestArtifact(t *testing.T) {
	RegisterFailHandler(Fail)
	LoadConfig(config.LuetCfg)
	RunSpecs(t, "Artifact Suite")
}
@@ -13,7 +13,7 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler_test
|
||||
package artifact_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
@@ -22,10 +22,14 @@ import (
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
. "github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
backend "github.com/mudler/luet/pkg/compiler/backend"
|
||||
. "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
compression "github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -38,18 +42,15 @@ var _ = Describe("Artifact", func() {
|
||||
|
||||
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/buildtree")
|
||||
err := generalRecipe.Load("../../../../tests/fixtures/buildtree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
cc := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
lspec, err := cc.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
lspec, ok := spec.(*LuetCompilationSpec)
|
||||
Expect(ok).To(BeTrue())
|
||||
|
||||
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
Expect(lspec.Image).To(Equal("luet/base"))
|
||||
Expect(lspec.Seed).To(Equal("alpine"))
|
||||
@@ -71,7 +72,7 @@ var _ = Describe("Artifact", func() {
|
||||
|
||||
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM alpine
|
||||
@@ -81,7 +82,7 @@ ENV PACKAGE_NAME=enman
|
||||
ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin`))
|
||||
b := NewSimpleDockerBackend()
|
||||
opts := CompilerBackendOptions{
|
||||
opts := backend.Options{
|
||||
ImageName: "luet/base",
|
||||
SourcePath: tmpdir,
|
||||
DockerFileName: "Dockerfile",
|
||||
@@ -89,12 +90,12 @@ ENV PACKAGE_CATEGORY=app-admin`))
|
||||
}
|
||||
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
|
||||
Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
|
||||
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
|
||||
|
||||
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err = helpers.Read(filepath.Join(tmpdir, "LuetDockerfile"))
|
||||
dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "LuetDockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM luet/base
|
||||
@@ -105,7 +106,7 @@ ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin
|
||||
RUN echo foo > /test
|
||||
RUN echo bar > /test2`))
|
||||
opts2 := CompilerBackendOptions{
|
||||
opts2 := backend.Options{
|
||||
ImageName: "test",
|
||||
SourcePath: tmpdir,
|
||||
DockerFileName: "LuetDockerfile",
|
||||
@@ -113,8 +114,8 @@ RUN echo bar > /test2`))
|
||||
}
|
||||
Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
|
||||
Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
|
||||
diffs, err := b.Changes(opts, opts2)
|
||||
Expect(fileHelper.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
|
||||
diffs, err := compiler.GenerateChanges(b, opts, opts2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
artifacts := []ArtifactNode{{
|
||||
@@ -135,30 +136,30 @@ RUN echo bar > /test2`))
|
||||
Additions: artifacts,
|
||||
},
|
||||
}}))
|
||||
err = b.ExtractRootfs(CompilerBackendOptions{ImageName: "test", Destination: rootfs}, false)
|
||||
err = b.ExtractRootfs(backend.Options{ImageName: "test", Destination: rootfs}, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
artifact, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, []string{}, None)
|
||||
a, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, []string{}, compression.None)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "package.tar"))).To(BeTrue())
|
||||
err = helpers.Untar(artifact.GetPath(), unpacked, false)
|
||||
Expect(fileHelper.Exists(filepath.Join(tmpdir, "package.tar"))).To(BeTrue())
|
||||
err = helpers.Untar(a.Path, unpacked, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(unpacked, "test"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(unpacked, "test2"))).To(BeTrue())
|
||||
content1, err := helpers.Read(filepath.Join(unpacked, "test"))
|
||||
Expect(fileHelper.Exists(filepath.Join(unpacked, "test"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(unpacked, "test2"))).To(BeTrue())
|
||||
content1, err := fileHelper.Read(filepath.Join(unpacked, "test"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("foo\n"))
|
||||
content2, err := helpers.Read(filepath.Join(unpacked, "test2"))
|
||||
content2, err := fileHelper.Read(filepath.Join(unpacked, "test2"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content2).To(Equal("bar\n"))
|
||||
|
||||
err = artifact.Hash()
|
||||
err = a.Hash()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = artifact.Verify()
|
||||
err = a.Verify()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.CopyFile(filepath.Join(tmpdir, "output2.tar"), filepath.Join(tmpdir, "package.tar"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.CopyFile(filepath.Join(tmpdir, "output2.tar"), filepath.Join(tmpdir, "package.tar"))).ToNot(HaveOccurred())
|
||||
|
||||
err = artifact.Verify()
|
||||
err = a.Verify()
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
@@ -183,13 +184,13 @@ RUN echo bar > /test2`))
|
||||
err = ioutil.WriteFile(filepath.Join(tmpdir, "foo", "bar", "test"), testString, 0644)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
artifact := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
|
||||
artifact.SetCompileSpec(&LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}})
|
||||
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
|
||||
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}}
|
||||
|
||||
err = artifact.Compress(tmpdir, 1)
|
||||
err = a.Compress(tmpdir, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
resultingImage := imageprefix + "foo--1.0"
|
||||
opts, err := artifact.GenerateFinalImage(resultingImage, b, false)
|
||||
opts, err := a.GenerateFinalImage(resultingImage, b, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(opts.ImageName).To(Equal(resultingImage))
|
||||
|
||||
@@ -199,7 +200,7 @@ RUN echo bar > /test2`))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(result) // clean up
|
||||
|
||||
err = b.ExtractRootfs(CompilerBackendOptions{ImageName: resultingImage, Destination: result}, false)
|
||||
err = b.ExtractRootfs(backend.Options{ImageName: resultingImage, Destination: result}, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
content, err := ioutil.ReadFile(filepath.Join(result, "test"))
|
||||
@@ -225,13 +226,13 @@ RUN echo bar > /test2`))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpWork) // clean up
|
||||
|
||||
artifact := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
|
||||
artifact.SetCompileSpec(&LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}})
|
||||
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
|
||||
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}}
|
||||
|
||||
err = artifact.Compress(tmpdir, 1)
|
||||
err = a.Compress(tmpdir, 1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
resultingImage := imageprefix + "foo--1.0"
|
||||
opts, err := artifact.GenerateFinalImage(resultingImage, b, false)
|
||||
opts, err := a.GenerateFinalImage(resultingImage, b, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(opts.ImageName).To(Equal(resultingImage))
|
||||
|
||||
@@ -241,10 +242,10 @@ RUN echo bar > /test2`))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(result) // clean up
|
||||
|
||||
err = b.ExtractRootfs(CompilerBackendOptions{ImageName: resultingImage, Destination: result}, false)
|
||||
err = b.ExtractRootfs(backend.Options{ImageName: resultingImage, Destination: result}, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.DirectoryIsEmpty(result)).To(BeFalse())
|
||||
Expect(fileHelper.DirectoryIsEmpty(result)).To(BeFalse())
|
||||
content, err := ioutil.ReadFile(filepath.Join(result, ".virtual"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -253,15 +254,15 @@ RUN echo bar > /test2`))
|
||||
|
||||
It("Retrieves uncompressed name", func() {
|
||||
a := NewPackageArtifact("foo.tar.gz")
|
||||
a.SetCompressionType(compiler.GZip)
|
||||
a.CompressionType = (compression.GZip)
|
||||
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
|
||||
|
||||
a = NewPackageArtifact("foo.tar.zst")
|
||||
a.SetCompressionType(compiler.Zstandard)
|
||||
a.CompressionType = compression.Zstandard
|
||||
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
|
||||
|
||||
a = NewPackageArtifact("foo.tar")
|
||||
a.SetCompressionType(compiler.None)
|
||||
a.CompressionType = compression.None
|
||||
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
|
||||
})
|
||||
})
|
@@ -13,7 +13,7 @@
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package compiler
package artifact

import (

@@ -43,7 +43,7 @@ type HashOptions struct {
}

// Generate generates all Checksums supported for the artifact
func (c *Checksums) Generate(a Artifact) error {
func (c *Checksums) Generate(a *PackageArtifact) error {
	return c.generateSHA256(a)
}

@@ -56,13 +56,13 @@ func (c Checksums) Compare(d Checksums) error {
	return nil
}

func (c *Checksums) generateSHA256(a Artifact) error {
func (c *Checksums) generateSHA256(a *PackageArtifact) error {
	return c.generateSum(a, HashOptions{Hasher: sha256.New(), Type: SHA256})
}

func (c *Checksums) generateSum(a Artifact, opts HashOptions) error {
func (c *Checksums) generateSum(a *PackageArtifact, opts HashOptions) error {

	f, err := os.Open(a.GetPath())
	f, err := os.Open(a.Path)
	if err != nil {
		return err
	}
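Checksums now hashes the concrete *PackageArtifact directly. A hedged sketch of the Generate/Compare flow, mirroring the checksum test that follows; the YAML path is illustrative.

package main

import (
	"log"

	"github.com/mudler/luet/pkg/compiler/types/artifact"
)

func main() {
	local := artifact.Checksums{}
	remote := artifact.Checksums{}

	// Both sides hash the same file, so Compare should report no difference.
	if err := local.Generate(artifact.NewPackageArtifact("definition.yaml")); err != nil {
		log.Fatal(err)
	}
	if err := remote.Generate(artifact.NewPackageArtifact("definition.yaml")); err != nil {
		log.Fatal(err)
	}
	if err := local.Compare(remote); err != nil {
		log.Fatal("checksum mismatch: ", err)
	}
}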
@@ -13,13 +13,13 @@
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package compiler_test
package artifact_test

import (
	"io/ioutil"
	"os"

	. "github.com/mudler/luet/pkg/compiler"
	. "github.com/mudler/luet/pkg/compiler/types/artifact"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
@@ -40,13 +40,13 @@ var _ = Describe("Checksum", func() {
			Expect(len(definitionsum)).To(Equal(0))
			Expect(len(definitionsum2)).To(Equal(0))

			err = buildsum.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/build.yaml"))
			err = buildsum.Generate(NewPackageArtifact("../../../../tests/fixtures/layers/alpine/build.yaml"))
			Expect(err).ToNot(HaveOccurred())

			err = definitionsum.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/definition.yaml"))
			err = definitionsum.Generate(NewPackageArtifact("../../../../tests/fixtures/layers/alpine/definition.yaml"))
			Expect(err).ToNot(HaveOccurred())

			err = definitionsum2.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/definition.yaml"))
			err = definitionsum2.Generate(NewPackageArtifact("../../../../tests/fixtures/layers/alpine/definition.yaml"))
			Expect(err).ToNot(HaveOccurred())

			Expect(len(buildsum)).To(Equal(1))
9
pkg/compiler/types/compression/compression.go
Normal file
@@ -0,0 +1,9 @@
package compression

type Implementation string

const (
	None      Implementation = "none" // e.g. tar for standard packages
	GZip      Implementation = "gzip"
	Zstandard Implementation = "zstd"
)
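Implementation is a plain string type, so the compression format can be selected straight from configuration or CLI values without a lookup table. A brief sketch; the "requested" value stands in for whatever flag or config key supplies it.

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/compiler/types/compression"
)

func main() {
	// e.g. value coming from a --compression flag or a repository config file
	requested := "zstd"

	t := compression.Implementation(requested)
	switch t {
	case compression.GZip, compression.Zstandard, compression.None:
		fmt.Println("using compression:", t)
	default:
		fmt.Println("unknown compression, falling back to:", compression.None)
	}
}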
199
pkg/compiler/types/options/compiler_options.go
Normal file
@@ -0,0 +1,199 @@
|
||||
// Copyright © 2019-2021 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package options
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
)
|
||||
|
||||
type Compiler struct {
|
||||
PushImageRepository string
|
||||
PullImageRepository []string
|
||||
PullFirst, KeepImg, Push bool
|
||||
Concurrency int
|
||||
CompressionType compression.Implementation
|
||||
|
||||
Wait bool
|
||||
OnlyDeps bool
|
||||
NoDeps bool
|
||||
SolverOptions config.LuetSolverOptions
|
||||
BuildValuesFile []string
|
||||
BuildValues []map[string]interface{}
|
||||
|
||||
PackageTargetOnly bool
|
||||
Rebuild bool
|
||||
|
||||
BackendArgs []string
|
||||
|
||||
BackendType string
|
||||
}
|
||||
|
||||
func NewDefaultCompiler() *Compiler {
|
||||
return &Compiler{
|
||||
PushImageRepository: "luet/cache",
|
||||
PullFirst: false,
|
||||
Push: false,
|
||||
CompressionType: compression.None,
|
||||
KeepImg: true,
|
||||
Concurrency: runtime.NumCPU(),
|
||||
OnlyDeps: false,
|
||||
NoDeps: false,
|
||||
SolverOptions: config.LuetSolverOptions{Options: solver.Options{Concurrency: 1, Type: solver.SingleCoreSimple}},
|
||||
}
|
||||
}
|
||||
|
||||
type Option func(cfg *Compiler) error
|
||||
|
||||
func (cfg *Compiler) Apply(opts ...Option) error {
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if err := opt(cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WithOptions(opt *Compiler) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg = opt
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithBackendType(r string) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.BackendType = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithBuildValues(r []string) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.BuildValuesFile = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithPullRepositories(r []string) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.PullImageRepository = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithPushRepository(r string) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
if len(cfg.PullImageRepository) == 0 {
|
||||
cfg.PullImageRepository = []string{cfg.PushImageRepository}
|
||||
}
|
||||
cfg.PushImageRepository = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func BackendArgs(r []string) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.BackendArgs = r
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func PullFirst(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.PullFirst = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func KeepImg(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.KeepImg = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func Rebuild(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.Rebuild = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func PushImages(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.Push = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func Wait(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.Wait = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func OnlyDeps(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.OnlyDeps = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func OnlyTarget(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.PackageTargetOnly = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func NoDeps(b bool) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.NoDeps = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func Concurrency(i int) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
if i == 0 {
|
||||
i = runtime.NumCPU()
|
||||
}
|
||||
cfg.Concurrency = i
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithCompressionType(t compression.Implementation) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.CompressionType = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithSolverOptions(c config.LuetSolverOptions) func(cfg *Compiler) error {
|
||||
return func(cfg *Compiler) error {
|
||||
cfg.SolverOptions = c
|
||||
return nil
|
||||
}
|
||||
}
|
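The new options package replaces the old NewDefaultCompilerOptions struct-passing with functional options: NewDefaultCompiler builds sane defaults and Apply layers overrides on top of them. A hedged usage sketch of the constructors listed above; the repository names are examples only.

package main

import (
	"log"

	"github.com/mudler/luet/pkg/compiler/types/compression"
	options "github.com/mudler/luet/pkg/compiler/types/options"
)

func main() {
	cfg := options.NewDefaultCompiler()

	// Each helper returns an Option (func(*Compiler) error); Apply runs them in order.
	err := cfg.Apply(
		options.WithCompressionType(compression.Zstandard),
		options.Concurrency(4),
		options.WithPushRepository("quay.io/example/cache"),
		options.PullFirst(true),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("push repo:", cfg.PushImageRepository, "pull repos:", cfg.PullImageRepository)
}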
@@ -13,21 +13,26 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler
|
||||
package compilerspec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/mitchellh/hashstructure/v2"
|
||||
options "github.com/mudler/luet/pkg/compiler/types/options"
|
||||
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/otiai10/copy"
|
||||
dirhash "golang.org/x/mod/sumdb/dirhash"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type LuetCompilationspecs []LuetCompilationSpec
|
||||
|
||||
func NewLuetCompilationspecs(s ...CompilationSpec) CompilationSpecs {
|
||||
func NewLuetCompilationspecs(s ...*LuetCompilationSpec) *LuetCompilationspecs {
|
||||
all := LuetCompilationspecs{}
|
||||
|
||||
for _, spec := range s {
|
||||
@@ -40,7 +45,7 @@ func (specs LuetCompilationspecs) Len() int {
|
||||
return len(specs)
|
||||
}
|
||||
|
||||
func (specs *LuetCompilationspecs) Remove(s CompilationSpecs) CompilationSpecs {
|
||||
func (specs *LuetCompilationspecs) Remove(s *LuetCompilationspecs) *LuetCompilationspecs {
|
||||
newSpecs := LuetCompilationspecs{}
|
||||
SPECS:
|
||||
for _, spec := range specs.All() {
|
||||
@@ -54,16 +59,12 @@ SPECS:
|
||||
return &newSpecs
|
||||
}
|
||||
|
||||
func (specs *LuetCompilationspecs) Add(s CompilationSpec) {
|
||||
c, ok := s.(*LuetCompilationSpec)
|
||||
if !ok {
|
||||
panic("LuetCompilationspecs supports only []LuetCompilationSpec")
|
||||
}
|
||||
*specs = append(*specs, *c)
|
||||
func (specs *LuetCompilationspecs) Add(s *LuetCompilationSpec) {
|
||||
*specs = append(*specs, *s)
|
||||
}
|
||||
|
||||
func (specs *LuetCompilationspecs) All() []CompilationSpec {
|
||||
var cspecs []CompilationSpec
|
||||
func (specs *LuetCompilationspecs) All() []*LuetCompilationSpec {
|
||||
var cspecs []*LuetCompilationSpec
|
||||
for i, _ := range *specs {
|
||||
f := (*specs)[i]
|
||||
cspecs = append(cspecs, &f)
|
||||
@@ -72,7 +73,7 @@ func (specs *LuetCompilationspecs) All() []CompilationSpec {
|
||||
return cspecs
|
||||
}
|
||||
|
||||
func (specs *LuetCompilationspecs) Unique() CompilationSpecs {
|
||||
func (specs *LuetCompilationspecs) Unique() *LuetCompilationspecs {
|
||||
newSpecs := LuetCompilationspecs{}
|
||||
seen := map[string]bool{}
|
||||
|
||||
@@ -87,6 +88,13 @@ func (specs *LuetCompilationspecs) Unique() CompilationSpecs {
|
||||
return &newSpecs
|
||||
}
|
||||
|
||||
type CopyField struct {
|
||||
Package *pkg.DefaultPackage `json:"package"`
|
||||
Image string `json:"image"`
|
||||
Source string `json:"source"`
|
||||
Destination string `json:"destination"`
|
||||
}
|
||||
|
||||
type LuetCompilationSpec struct {
|
||||
Steps []string `json:"steps"` // Are run inside a container and the result layer diff is saved
|
||||
Env []string `json:"env"`
|
||||
@@ -103,9 +111,51 @@ type LuetCompilationSpec struct {
|
||||
Unpack bool `json:"unpack"`
|
||||
Includes []string `json:"includes"`
|
||||
Excludes []string `json:"excludes"`
|
||||
|
||||
BuildOptions *options.Compiler `json:"build_options"`
|
||||
|
||||
Copy []CopyField `json:"copy"`
|
||||
|
||||
Join pkg.DefaultPackages `json:"join"`
|
||||
RequiresFinalImages bool `json:"requires_final_images" yaml:"requires_final_images"`
|
||||
}
|
||||
|
||||
func NewLuetCompilationSpec(b []byte, p pkg.Package) (CompilationSpec, error) {
|
||||
// Signature is a portion of the spec that yields a signature for the hash
|
||||
type Signature struct {
|
||||
Image string
|
||||
Steps []string
|
||||
PackageDir string
|
||||
Prelude []string
|
||||
Seed string
|
||||
Env []string
|
||||
Retrieve []string
|
||||
Unpack bool
|
||||
Includes []string
|
||||
Excludes []string
|
||||
Copy []CopyField
|
||||
Join pkg.DefaultPackages
|
||||
RequiresFinalImages bool
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) signature() Signature {
|
||||
return Signature{
|
||||
Image: cs.Image,
|
||||
Steps: cs.Steps,
|
||||
PackageDir: cs.PackageDir,
|
||||
Prelude: cs.Prelude,
|
||||
Seed: cs.Seed,
|
||||
Env: cs.Env,
|
||||
Retrieve: cs.Retrieve,
|
||||
Unpack: cs.Unpack,
|
||||
Includes: cs.Includes,
|
||||
Excludes: cs.Excludes,
|
||||
Copy: cs.Copy,
|
||||
Join: cs.Join,
|
||||
RequiresFinalImages: cs.RequiresFinalImages,
|
||||
}
|
||||
}
|
||||
|
||||
func NewLuetCompilationSpec(b []byte, p pkg.Package) (*LuetCompilationSpec, error) {
|
||||
var spec LuetCompilationSpec
|
||||
err := yaml.Unmarshal(b, &spec)
|
||||
if err != nil {
|
||||
@@ -118,6 +168,10 @@ func (cs *LuetCompilationSpec) GetSourceAssertion() solver.PackagesAssertions {
|
||||
return cs.SourceAssertion
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) SetBuildOptions(b options.Compiler) {
|
||||
cs.BuildOptions = &b
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) SetSourceAssertion(as solver.PackagesAssertions) {
|
||||
cs.SourceAssertion = as
|
||||
}
|
||||
@@ -192,7 +246,7 @@ func (cs *LuetCompilationSpec) SetSeedImage(s string) {
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) EmptyPackage() bool {
|
||||
return len(cs.BuildSteps()) == 0 && len(cs.GetPreBuildSteps()) == 0 && !cs.UnpackedPackage()
|
||||
return len(cs.BuildSteps()) == 0 && !cs.UnpackedPackage()
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) UnpackedPackage() bool {
|
||||
@@ -209,7 +263,18 @@ func (cs *LuetCompilationSpec) UnpackedPackage() bool {
|
||||
// a compilation spec has an image source when it depends on other packages or have a source image
|
||||
// explictly supplied
|
||||
func (cs *LuetCompilationSpec) HasImageSource() bool {
|
||||
return (cs.Package != nil && len(cs.GetPackage().GetRequires()) != 0) || cs.GetImage() != ""
|
||||
return (cs.Package != nil && len(cs.GetPackage().GetRequires()) != 0) || cs.GetImage() != "" || len(cs.Join) != 0
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) Hash() (string, error) {
|
||||
// build a signature, we want to be part of the hash only the fields that are relevant for build purposes
|
||||
signature := cs.signature()
|
||||
h, err := hashstructure.Hash(signature, hashstructure.FormatV2, nil)
|
||||
sum, err := dirhash.HashDir(cs.Package.Path, "", dirhash.DefaultHash)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprint(h, sum), err
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) CopyRetrieves(dest string) error {
|
||||
@@ -252,6 +317,13 @@ ADD ` + s + ` /luetbuild/`
|
||||
}
|
||||
}
|
||||
|
||||
for _, c := range cs.Copy {
|
||||
if c.Image != "" {
|
||||
copyLine := fmt.Sprintf("\nCOPY --from=%s %s %s\n", c.Image, c.Source, c.Destination)
|
||||
spec = spec + copyLine
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range cs.Env {
|
||||
spec = spec + `
|
||||
ENV ` + s
|
32
pkg/compiler/types/spec/spec_suite_test.go
Normal file
@@ -0,0 +1,32 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package compilerspec_test

import (
	"testing"

	. "github.com/mudler/luet/cmd"
	config "github.com/mudler/luet/pkg/config"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestSpec(t *testing.T) {
	RegisterFailHandler(Fail)
	LoadConfig(config.LuetCfg)
	RunSpecs(t, "Spec Suite")
}
@@ -13,17 +13,19 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package compiler_test
|
||||
package compilerspec_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
options "github.com/mudler/luet/pkg/compiler/types/options"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
. "github.com/mudler/luet/pkg/compiler"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -32,63 +34,116 @@ import (
|
||||
var _ = Describe("Spec", func() {
|
||||
Context("Luet specs", func() {
|
||||
It("Allows normal operations", func() {
|
||||
testSpec := &LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "a", Version: "0"}}
|
||||
testSpec2 := &LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "bar", Category: "a", Version: "0"}}
|
||||
testSpec3 := &LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "baz", Category: "a", Version: "0"}}
|
||||
testSpec4 := &LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "a", Version: "0"}}
|
||||
testSpec := &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "a", Version: "0"}}
|
||||
testSpec2 := &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "bar", Category: "a", Version: "0"}}
|
||||
testSpec3 := &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "baz", Category: "a", Version: "0"}}
|
||||
testSpec4 := &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "a", Version: "0"}}
|
||||
|
||||
specs := NewLuetCompilationspecs(testSpec, testSpec2)
|
||||
specs := compilerspec.NewLuetCompilationspecs(testSpec, testSpec2)
|
||||
Expect(specs.Len()).To(Equal(2))
|
||||
Expect(specs.All()).To(Equal([]CompilationSpec{testSpec, testSpec2}))
|
||||
Expect(specs.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2}))
|
||||
specs.Add(testSpec3)
|
||||
Expect(specs.All()).To(Equal([]CompilationSpec{testSpec, testSpec2, testSpec3}))
|
||||
Expect(specs.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2, testSpec3}))
|
||||
specs.Add(testSpec4)
|
||||
Expect(specs.All()).To(Equal([]CompilationSpec{testSpec, testSpec2, testSpec3, testSpec4}))
|
||||
Expect(specs.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2, testSpec3, testSpec4}))
|
||||
newSpec := specs.Unique()
|
||||
Expect(newSpec.All()).To(Equal([]CompilationSpec{testSpec, testSpec2, testSpec3}))
|
||||
Expect(newSpec.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2, testSpec3}))
|
||||
|
||||
newSpec2 := specs.Remove(NewLuetCompilationspecs(testSpec, testSpec2))
|
||||
Expect(newSpec2.All()).To(Equal([]CompilationSpec{testSpec3}))
|
||||
newSpec2 := specs.Remove(compilerspec.NewLuetCompilationspecs(testSpec, testSpec2))
|
||||
Expect(newSpec2.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec3}))
|
||||
|
||||
})
|
||||
Context("virtuals", func() {
|
||||
When("is empty", func() {
|
||||
It("is virtual", func() {
|
||||
spec := &LuetCompilationSpec{}
|
||||
spec := &compilerspec.LuetCompilationSpec{}
|
||||
Expect(spec.IsVirtual()).To(BeTrue())
|
||||
})
|
||||
})
|
||||
When("has defined steps", func() {
|
||||
It("is not a virtual", func() {
|
||||
spec := &LuetCompilationSpec{Steps: []string{"foo"}}
|
||||
spec := &compilerspec.LuetCompilationSpec{Steps: []string{"foo"}}
|
||||
Expect(spec.IsVirtual()).To(BeFalse())
|
||||
})
|
||||
})
|
||||
When("has defined image", func() {
|
||||
It("is not a virtual", func() {
|
||||
spec := &LuetCompilationSpec{Image: "foo"}
|
||||
spec := &compilerspec.LuetCompilationSpec{Image: "foo"}
|
||||
Expect(spec.IsVirtual()).To(BeFalse())
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Context("Image hashing", func() {
|
||||
It("is stable", func() {
|
||||
spec1 := &compilerspec.LuetCompilationSpec{
|
||||
Image: "foo",
|
||||
BuildOptions: &options.Compiler{BuildValues: []map[string]interface{}{{"foo": "bar", "baz": true}}},
|
||||
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
Category: "Bar",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
"baz": "foo",
|
||||
},
|
||||
},
|
||||
}
|
||||
spec2 := &compilerspec.LuetCompilationSpec{
|
||||
Image: "foo",
|
||||
BuildOptions: &options.Compiler{BuildValues: []map[string]interface{}{{"foo": "bar", "baz": true}}},
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
Category: "Bar",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
"baz": "foo",
|
||||
},
|
||||
},
|
||||
}
|
||||
spec3 := &compilerspec.LuetCompilationSpec{
|
||||
Image: "foo",
|
||||
Steps: []string{"foo"},
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
Category: "Bar",
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
"baz": "foo",
|
||||
},
|
||||
},
|
||||
}
|
||||
hash, err := spec1.Hash()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
hash2, err := spec2.Hash()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
hash3, err := spec3.Hash()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(hash).To(Equal(hash2))
|
||||
hashagain, err := spec2.Hash()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(hash).ToNot(Equal(hash3))
|
||||
Expect(hash).To(Equal(hashagain))
|
||||
})
|
||||
})
|
||||
|
||||
Context("Simple package build definition", func() {
|
||||
It("Loads it correctly", func() {
|
||||
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/buildtree")
|
||||
err := generalRecipe.Load("../../../../tests/fixtures/buildtree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
lspec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
lspec, ok := spec.(*LuetCompilationSpec)
|
||||
Expect(ok).To(BeTrue())
|
||||
|
||||
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
Expect(lspec.Image).To(Equal("luet/base"))
|
||||
Expect(lspec.Seed).To(Equal("alpine"))
|
||||
@@ -99,7 +154,7 @@ var _ = Describe("Spec", func() {
|
||||
lspec.Env = []string{"test=1"}
|
||||
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM alpine
|
||||
@@ -112,7 +167,7 @@ ENV test=1`))
|
||||
|
||||
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err = helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM luet/base
|
||||
@@ -132,18 +187,15 @@ RUN echo bar > /test2`))
|
||||
It("Renders retrieve and env fields", func() {
|
||||
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/retrieve")
|
||||
err := generalRecipe.Load("../../../../tests/fixtures/retrieve")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.0"})
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
lspec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
lspec, ok := spec.(*LuetCompilationSpec)
|
||||
Expect(ok).To(BeTrue())
|
||||
|
||||
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
Expect(lspec.Image).To(Equal("luet/base"))
|
||||
Expect(lspec.Seed).To(Equal("alpine"))
|
||||
@@ -153,7 +205,7 @@ RUN echo bar > /test2`))
|
||||
|
||||
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM alpine
|
||||
@@ -170,7 +222,7 @@ ENV test=1`))
|
||||
|
||||
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err = helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM alpine
|
||||
@@ -185,7 +237,7 @@ ENV test=1`))
|
||||
|
||||
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err = helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(dockerfile).To(Equal(`
|
@@ -27,7 +27,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
solver "github.com/mudler/luet/pkg/solver"
|
||||
|
||||
@@ -65,6 +64,7 @@ type LuetGeneralConfig struct {
|
||||
}
|
||||
|
||||
type LuetSolverOptions struct {
|
||||
solver.Options
|
||||
Type string `mapstructure:"type"`
|
||||
LearnRate float32 `mapstructure:"rate"`
|
||||
Discount float32 `mapstructure:"discount"`
|
||||
@@ -159,8 +159,9 @@ type LuetRepository struct {
|
||||
Enable bool `json:"enable" yaml:"enable" mapstructure:"enable"`
|
||||
Cached bool `json:"cached,omitempty" yaml:"cached,omitempty" mapstructure:"cached,omitempty"`
|
||||
Authentication map[string]string `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"`
|
||||
TreePath string `json:"tree_path,omitempty" yaml:"tree_path,omitempty" mapstructure:"tree_path"`
|
||||
MetaPath string `json:"meta_path,omitempty" yaml:"meta_path,omitempty" mapstructure:"meta_path"`
|
||||
TreePath string `json:"treepath,omitempty" yaml:"treepath,omitempty" mapstructure:"treepath"`
|
||||
MetaPath string `json:"metapath,omitempty" yaml:"metapath,omitempty" mapstructure:"metapath"`
|
||||
Verify bool `json:"verify,omitempty" yaml:"verify,omitempty" mapstructure:"verify"`
|
||||
|
||||
// Serialized options not used in repository configuration
|
||||
|
||||
@@ -404,8 +405,13 @@ system:
|
||||
}
|
||||
|
||||
func (c *LuetSystemConfig) InitTmpDir() error {
|
||||
if !helpers.Exists(c.TmpDirBase) {
|
||||
return os.MkdirAll(c.TmpDirBase, os.ModePerm)
|
||||
if _, err := os.Stat(c.TmpDirBase); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = os.MkdirAll(c.TmpDirBase, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
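One change above is easy to miss: the repository serialization tags move from tree_path/meta_path to treepath/metapath, so repository definitions written against the old keys need updating. A small self-contained sketch of how the new keys decode; the struct below is a local mirror of the relevant LuetRepository fields, not the real type, and the paths are made up:

package main

import (
    "fmt"

    "gopkg.in/yaml.v2"
)

// Local mirror of the LuetRepository fields touched above, with the new yaml tags.
type repoStanza struct {
    Enable   bool   `yaml:"enable"`
    Cached   bool   `yaml:"cached,omitempty"`
    TreePath string `yaml:"treepath,omitempty"`
    MetaPath string `yaml:"metapath,omitempty"`
    Verify   bool   `yaml:"verify,omitempty"`
}

func main() {
    // Note the renamed keys: treepath / metapath (previously tree_path / meta_path).
    data := []byte(`
enable: true
cached: true
treepath: /var/cache/luet/repos/example/tree
metapath: /var/cache/luet/repos/example/meta
verify: false
`)
    var r repoStanza
    if err := yaml.Unmarshal(data, &r); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", r)
}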
@@ -22,7 +22,7 @@ import (
|
||||
"strings"
|
||||
|
||||
config "github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -38,7 +38,7 @@ var _ = Describe("Config", func() {
|
||||
tmpDir, err := config.LuetCfg.GetSystem().TempDir("test1")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(strings.HasPrefix(tmpDir, filepath.Join(os.TempDir(), "tmpluet"))).To(BeTrue())
|
||||
Expect(helpers.Exists(tmpDir)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(tmpDir)).To(BeTrue())
|
||||
|
||||
defer os.RemoveAll(tmpDir)
|
||||
})
|
||||
@@ -49,7 +49,7 @@ var _ = Describe("Config", func() {
|
||||
tmpFile, err := config.LuetCfg.GetSystem().TempFile("testfile1")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(strings.HasPrefix(tmpFile.Name(), filepath.Join(os.TempDir(), "tmpluet"))).To(BeTrue())
|
||||
Expect(helpers.Exists(tmpFile.Name())).To(BeTrue())
|
||||
Expect(fileHelper.Exists(tmpFile.Name())).To(BeTrue())
|
||||
|
||||
defer os.Remove(tmpFile.Name())
|
||||
})
|
||||
|
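The config test above pins scratch paths to a tmpluet prefix under the system temp directory. A minimal usage sketch of the same helpers, assuming only the APIs exercised by the test (the label string is arbitrary):

package main

import (
    "fmt"
    "os"

    "github.com/mudler/luet/pkg/config"
    fileHelper "github.com/mudler/luet/pkg/helpers/file"
)

func main() {
    // TempDir creates a directory under os.TempDir()/tmpluet... and returns its path.
    dir, err := config.LuetCfg.GetSystem().TempDir("example")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)

    fmt.Println(dir, fileHelper.Exists(dir)) // prints the path and "true"
}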
@@ -90,14 +90,11 @@ func UntarProtect(src, dst string, sameOwner bool, protectedFiles []string, modi
|
||||
}
|
||||
|
||||
if sameOwner {
|
||||
// PRE: i have root privileged.
|
||||
|
||||
// we do have root permissions, so we can extract keeping the same permissions.
|
||||
replacerArchive := archive.ReplaceFileTarWrapper(in, mods)
|
||||
|
||||
opts := &archive.TarOptions{
|
||||
// NOTE: NoLchown boolean is used for chmod of the symlink
|
||||
// Probably it's needed set this always to true.
|
||||
NoLchown: true,
|
||||
NoLchown: false,
|
||||
ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
|
||||
ContinueOnError: true,
|
||||
}
|
||||
@@ -201,12 +198,8 @@ func Untar(src, dest string, sameOwner bool) error {
|
||||
defer in.Close()
|
||||
|
||||
if sameOwner {
|
||||
// PRE: i have root privileged.
|
||||
|
||||
opts := &archive.TarOptions{
|
||||
// NOTE: NoLchown boolean is used for chmod of the symlink
|
||||
// Probably it's needed set this always to true.
|
||||
NoLchown: true,
|
||||
NoLchown: false,
|
||||
ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
|
||||
ContinueOnError: true,
|
||||
}
|
||||
|
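Both archive hunks flip NoLchown from true to false, so symlink ownership is also restored when extraction runs with root privileges. A hedged sketch of a caller deciding sameOwner; the helpers import path comes from the tests below, while the uid check and the paths are illustrative:

package main

import (
    "fmt"
    "os"

    "github.com/mudler/luet/pkg/helpers"
)

func main() {
    // Preserve ownership (and now symlink ownership too) only when running as root;
    // unprivileged runs extract with the caller's uid/gid instead.
    sameOwner := os.Getuid() == 0

    if err := helpers.Untar("/tmp/package.tar", "/tmp/rootfs", sameOwner); err != nil {
        fmt.Fprintln(os.Stderr, "untar failed:", err)
        os.Exit(1)
    }
}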
@@ -25,6 +25,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
. "github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -128,7 +130,7 @@ var _ = Describe("Helpers Archive", func() {
|
||||
err = archive.Untar(replacerArchive, targetDir, opts)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(Exists(filepath.Join(targetDir, "._cfg0001_file-0"))).Should(Equal(true))
|
||||
Expect(fileHelper.Exists(filepath.Join(targetDir, "._cfg0001_file-0"))).Should(Equal(true))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
261
pkg/helpers/docker/docker.go
Normal file
@@ -0,0 +1,261 @@
|
||||
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/mudler/luet/pkg/helpers/imgworker"
|
||||
|
||||
continerdarchive "github.com/containerd/containerd/archive"
|
||||
"github.com/docker/cli/cli/trust"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/mutate"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
"github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
)
|
||||
|
||||
// See also https://github.com/docker/cli/blob/88c6089300a82d3373892adf6845a4fed1a4ba8d/cli/command/image/trust.go#L171
|
||||
|
||||
func verifyImage(image string, authConfig *types.AuthConfig) (string, error) {
|
||||
ref, err := reference.ParseAnyReference(image)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "invalid reference %s", image)
|
||||
}
|
||||
|
||||
// only check if image ref doesn't contain hashes
|
||||
if _, ok := ref.(reference.Digested); !ok {
|
||||
namedRef, ok := ref.(reference.Named)
|
||||
if !ok {
|
||||
return "", errors.New("failed to resolve image digest using content trust: reference is not named")
|
||||
}
|
||||
namedRef = reference.TagNameOnly(namedRef)
|
||||
taggedRef, ok := namedRef.(reference.NamedTagged)
|
||||
if !ok {
|
||||
return "", errors.New("failed to resolve image digest using content trust: reference is not tagged")
|
||||
}
|
||||
|
||||
resolvedImage, err := trustedResolveDigest(context.Background(), taggedRef, authConfig, "luet")
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to resolve image digest using content trust")
|
||||
}
|
||||
resolvedFamiliar := reference.FamiliarString(resolvedImage)
|
||||
return resolvedFamiliar, nil
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func trustedResolveDigest(ctx context.Context, ref reference.NamedTagged, authConfig *types.AuthConfig, useragent string) (reference.Canonical, error) {
|
||||
repoInfo, err := registry.ParseRepositoryInfo(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
notaryRepo, err := trust.GetNotaryRepository(os.Stdin, os.Stdout, useragent, repoInfo, authConfig, "pull")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error establishing connection to trust repository")
|
||||
}
|
||||
|
||||
t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
|
||||
if err != nil {
|
||||
return nil, trust.NotaryError(repoInfo.Name.Name(), err)
|
||||
}
|
||||
// Only get the tag if it's in the top level targets role or the releases delegation role
|
||||
// ignore it if it's in any other delegation roles
|
||||
if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
|
||||
return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", reference.FamiliarString(ref)))
|
||||
}
|
||||
|
||||
h, ok := t.Hashes["sha256"]
|
||||
if !ok {
|
||||
return nil, errors.New("no valid hash, expecting sha256")
|
||||
}
|
||||
|
||||
dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h))
|
||||
|
||||
// Allow returning canonical reference with tag
|
||||
return reference.WithDigest(ref, dgst)
|
||||
}
|
||||
|
||||
type staticAuth struct {
|
||||
auth *types.AuthConfig
|
||||
}
|
||||
|
||||
func (s staticAuth) Authorization() (*authn.AuthConfig, error) {
|
||||
if s.auth == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return &authn.AuthConfig{
|
||||
Username: s.auth.Username,
|
||||
Password: s.auth.Password,
|
||||
Auth: s.auth.Auth,
|
||||
IdentityToken: s.auth.IdentityToken,
|
||||
RegistryToken: s.auth.RegistryToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UnpackEventData is the data structure to pass for the bus events
|
||||
type UnpackEventData struct {
|
||||
Image string
|
||||
Dest string
|
||||
}
|
||||
|
||||
// privilegedExtractImage uses the imgworker (which requires privileges) to extract a container image
|
||||
func privilegedExtractImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {
|
||||
defer os.RemoveAll(temp)
|
||||
c, err := imgworker.New(temp, auth)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed creating client")
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
listedImage, err := c.Pull(image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed listing images")
|
||||
}
|
||||
|
||||
os.RemoveAll(dest)
|
||||
|
||||
bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
err = c.Unpack(image, dest)
|
||||
|
||||
bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
return listedImage, err
|
||||
}
|
||||
|
||||
// UnarchiveLayers extract layers with archive.Untar from docker instead of containerd
|
||||
func UnarchiveLayers(temp string, img v1.Image, image, dest string, auth *types.AuthConfig, verify bool) (int64, error) {
|
||||
layers, err := img.Layers()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("reading layers from '%s' image failed: %v", image, err)
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
var size int64
|
||||
for _, l := range layers {
|
||||
s, err := l.Size()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("reading layer size from '%s' image failed: %v", image, err)
|
||||
}
|
||||
size += s
|
||||
|
||||
layerReader, err := l.Uncompressed()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("reading uncompressed layer from '%s' image failed: %v", image, err)
|
||||
}
|
||||
defer layerReader.Close()
|
||||
|
||||
// Unpack the tarfile to the rootfs path.
|
||||
// FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
|
||||
if err := archive.Untar(layerReader, dest, &archive.TarOptions{
|
||||
NoLchown: false,
|
||||
ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
|
||||
}); err != nil {
|
||||
return 0, fmt.Errorf("extracting '%s' image to directory %s failed: %v", image, dest, err)
|
||||
}
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
return size, nil
|
||||
}
|
||||
|
||||
// DownloadAndExtractDockerImage extracts a container image natively. It supports privileged/unprivileged mode
|
||||
func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {
|
||||
if verify {
|
||||
img, err := verifyImage(image, auth)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed verifying image")
|
||||
}
|
||||
image = img
|
||||
}
|
||||
|
||||
if os.Getenv("LUET_PRIVILEGED_EXTRACT") == "true" {
|
||||
return privilegedExtractImage(temp, image, dest, auth, verify)
|
||||
}
|
||||
|
||||
ref, err := name.ParseReference(image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
img, err := remote.Image(ref, remote.WithAuth(staticAuth{auth}))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m, err := img.Manifest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mt, err := img.MediaType()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d, err := img.Digest()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reader := mutate.Extract(img)
|
||||
defer reader.Close()
|
||||
defer os.RemoveAll(temp)
|
||||
|
||||
bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
c, err := continerdarchive.Apply(context.TODO(), dest, reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
return &imgworker.ListedImage{
|
||||
Image: images.Image{
|
||||
Name: image,
|
||||
Labels: m.Annotations,
|
||||
Target: specs.Descriptor{
|
||||
MediaType: string(mt),
|
||||
Digest: digest.Digest(d.String()),
|
||||
Size: c,
|
||||
},
|
||||
},
|
||||
ContentSize: c,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func StripInvalidStringsFromImage(s string) string {
|
||||
return strings.ReplaceAll(s, "+", "-")
|
||||
}
|
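pkg/helpers/docker/docker.go above is the new native image fetcher: it optionally resolves the digest through content trust, falls back to the privileged imgworker path only when LUET_PRIVILEGED_EXTRACT=true, and otherwise pulls with go-containerregistry and applies the layers via containerd's archive package. A hedged usage sketch; the image reference, directories and empty credentials are placeholders:

package main

import (
    "fmt"
    "io/ioutil"
    "os"

    "github.com/docker/docker/api/types"
    "github.com/mudler/luet/pkg/helpers/docker"
)

func main() {
    // Scratch content store used while pulling; the helper removes it when done.
    temp, err := ioutil.TempDir("", "contentstore")
    if err != nil {
        panic(err)
    }

    // "+" is not a valid tag character, so package versions get sanitized first.
    image := docker.StripInvalidStringsFromImage("quay.io/example/repo:pkg-1.0+2")
    dest := "/tmp/rootfs" // extraction target (illustrative)

    // verify=false skips the notary/content-trust resolution shown in verifyImage.
    info, err := docker.DownloadAndExtractDockerImage(temp, image, dest, &types.AuthConfig{}, false)
    if err != nil {
        fmt.Fprintln(os.Stderr, "extract failed:", err)
        os.Exit(1)
    }
    fmt.Println("pulled", info.Target.Digest, "size", info.ContentSize)
}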
30
pkg/helpers/docker/docker_test.go
Normal file
@@ -0,0 +1,30 @@
|
||||
// Copyright © 2021 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"github.com/mudler/luet/pkg/helpers/docker"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("StripInvalidStringsFromImage", func() {
|
||||
Context("Image names", func() {
|
||||
It("strips invalid chars", func() {
|
||||
Expect(docker.StripInvalidStringsFromImage("foo+bar")).To(Equal("foo-bar"))
|
||||
})
|
||||
})
|
||||
})
|
@@ -13,12 +13,13 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package helpers
|
||||
package file
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
@@ -26,10 +27,42 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/google/renameio"
|
||||
copy "github.com/otiai10/copy"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
|
||||
|
||||
func RandStringRunes(n int) string {
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = letterRunes[rand.Intn(len(letterRunes))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func Move(src, dst string) error {
|
||||
f, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
t, err := renameio.TempFile("", dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer t.Cleanup()
|
||||
|
||||
_, err = io.Copy(t, f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return t.CloseAtomicallyReplace()
|
||||
}
|
||||
|
||||
func OrderFiles(target string, files []string) ([]string, []string) {
|
||||
|
||||
var newFiles []string
|
||||
@@ -135,9 +168,27 @@ func Read(file string) (string, error) {
|
||||
return string(dat), nil
|
||||
}
|
||||
|
||||
func EnsureDirPerm(src, dst string) {
|
||||
if info, err := os.Lstat(filepath.Dir(src)); err == nil {
|
||||
if _, err := os.Lstat(filepath.Dir(dst)); os.IsNotExist(err) {
|
||||
err := os.MkdirAll(filepath.Dir(dst), info.Mode().Perm())
|
||||
if err != nil {
|
||||
fmt.Println("warning: failed creating", filepath.Dir(dst), err.Error())
|
||||
}
|
||||
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
|
||||
if err := os.Lchown(filepath.Dir(dst), int(stat.Uid), int(stat.Gid)); err != nil {
|
||||
fmt.Println("warning: failed chowning", filepath.Dir(dst), err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
EnsureDir(dst)
|
||||
}
|
||||
}
|
||||
|
||||
func EnsureDir(fileName string) error {
|
||||
dirName := filepath.Dir(fileName)
|
||||
if _, serr := os.Stat(dirName); serr != nil {
|
||||
if _, serr := os.Stat(dirName); os.IsNotExist(serr) {
|
||||
merr := os.MkdirAll(dirName, os.ModePerm) // FIXME: It should preserve permissions from src to dst instead
|
||||
if merr != nil {
|
||||
return merr
|
||||
@@ -146,12 +197,39 @@ func EnsureDir(fileName string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// CopyFile copies the contents of the file named src to the file named
|
||||
func CopyFile(src, dst string) (err error) {
|
||||
return copy.Copy(src, dst, copy.Options{
|
||||
Sync: true,
|
||||
OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
|
||||
}
|
||||
|
||||
func copyXattr(srcPath, dstPath, attr string) error {
|
||||
data, err := system.Lgetxattr(srcPath, attr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if data != nil {
|
||||
if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func doCopyXattrs(srcPath, dstPath string) error {
|
||||
if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return copyXattr(srcPath, dstPath, "trusted.overlay.opaque")
|
||||
}
|
||||
|
||||
// DeepCopyFile copies the contents of the file named src to the file named
|
||||
// by dst. The file will be created if it does not already exist. If the
|
||||
// destination file exists, all its contents will be replaced by the contents
|
||||
// of the source file. The file mode will be copied from the source and
|
||||
// the copied data is synced/flushed to stable storage.
|
||||
func CopyFile(src, dst string) (err error) {
|
||||
func DeepCopyFile(src, dst string) (err error) {
|
||||
// Workaround for https://github.com/otiai10/copy/issues/47
|
||||
fi, err := os.Lstat(src)
|
||||
if err != nil {
|
||||
@@ -161,7 +239,7 @@ func CopyFile(src, dst string) (err error) {
|
||||
fm := fi.Mode()
|
||||
switch {
|
||||
case fm&os.ModeNamedPipe != 0:
|
||||
EnsureDir(dst)
|
||||
EnsureDirPerm(src, dst)
|
||||
if err := syscall.Mkfifo(dst, uint32(fi.Mode())); err != nil {
|
||||
return errors.Wrap(err, "failed creating pipe")
|
||||
}
|
||||
@@ -173,6 +251,9 @@ func CopyFile(src, dst string) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
//filepath.Dir(src)
|
||||
EnsureDirPerm(src, dst)
|
||||
|
||||
err = copy.Copy(src, dst, copy.Options{
|
||||
Sync: true,
|
||||
OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
|
||||
@@ -180,11 +261,12 @@ func CopyFile(src, dst string) (err error) {
|
||||
return err
|
||||
}
|
||||
if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
|
||||
if err := os.Chown(dst, int(stat.Uid), int(stat.Gid)); err != nil {
|
||||
fmt.Println("failed chowning", dst, err.Error())
|
||||
if err := os.Lchown(dst, int(stat.Uid), int(stat.Gid)); err != nil {
|
||||
fmt.Println("warning: failed chowning", dst, err.Error())
|
||||
}
|
||||
}
|
||||
return err
|
||||
|
||||
return doCopyXattrs(src, dst)
|
||||
}
|
||||
|
||||
func IsDirectory(path string) (bool, error) {
|
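The file helpers now live in pkg/helpers/file and copying is split in two: CopyFile stays a plain synced copy with shallow symlinks, while DeepCopyFile additionally recreates fifos, preserves the parent directory mode and ownership via EnsureDirPerm, chowns with Lchown and carries over the security.capability and trusted.overlay.opaque xattrs. A short sketch of the two entry points; the paths are illustrative:

package main

import (
    "fmt"
    "os"

    fileHelper "github.com/mudler/luet/pkg/helpers/file"
)

func main() {
    // Plain copy: contents only, symlinks kept shallow.
    if err := fileHelper.CopyFile("/tmp/src/artifact.tar", "/tmp/cache/artifact.tar"); err != nil {
        fmt.Fprintln(os.Stderr, "copy failed:", err)
    }

    // Deep copy: also preserves ownership, fifos and the xattrs listed above.
    if err := fileHelper.DeepCopyFile("/tmp/src/etc/passwd", "/tmp/rootfs/etc/passwd"); err != nil {
        fmt.Fprintln(os.Stderr, "deep copy failed:", err)
    }
}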
@@ -13,14 +13,15 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package helpers_test
|
||||
package file_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
. "github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
@@ -28,8 +29,8 @@ import (
|
||||
var _ = Describe("Helpers", func() {
|
||||
Context("Exists", func() {
|
||||
It("Detect existing and not-existing files", func() {
|
||||
Expect(Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml")).To(BeTrue())
|
||||
Expect(Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml.not.exists")).To(BeFalse())
|
||||
Expect(fileHelper.Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml")).To(BeTrue())
|
||||
Expect(fileHelper.Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml.not.exists")).To(BeFalse())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -38,15 +39,15 @@ var _ = Describe("Helpers", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(testDir)
|
||||
Expect(DirectoryIsEmpty(testDir)).To(BeTrue())
|
||||
Expect(fileHelper.DirectoryIsEmpty(testDir)).To(BeTrue())
|
||||
})
|
||||
It("Detects directory with files", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(testDir)
|
||||
err = Touch(filepath.Join(testDir, "foo"))
|
||||
err = fileHelper.Touch(filepath.Join(testDir, "foo"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(DirectoryIsEmpty(testDir)).To(BeFalse())
|
||||
Expect(fileHelper.DirectoryIsEmpty(testDir)).To(BeFalse())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -72,7 +73,7 @@ var _ = Describe("Helpers", func() {
|
||||
err = ioutil.WriteFile(filepath.Join(testDir, "baz2", "foo"), []byte("test\n"), 0644)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ordered, notExisting := OrderFiles(testDir, []string{"bar", "baz", "bar/foo", "baz2", "foo", "baz2/foo", "notexisting"})
|
||||
ordered, notExisting := fileHelper.OrderFiles(testDir, []string{"bar", "baz", "bar/foo", "baz2", "foo", "baz2/foo", "notexisting"})
|
||||
|
||||
Expect(ordered).To(Equal([]string{"baz", "bar/foo", "foo", "baz2/foo", "bar", "baz2"}))
|
||||
Expect(notExisting).To(Equal([]string{"notexisting"}))
|
||||
@@ -96,7 +97,7 @@ var _ = Describe("Helpers", func() {
|
||||
err = os.MkdirAll(filepath.Join(testDir, "foo", "baz", "fa"), os.ModePerm)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ordered, _ := OrderFiles(testDir, []string{"foo", "foo/bar", "bar", "foo/baz/fa", "foo/baz"})
|
||||
ordered, _ := fileHelper.OrderFiles(testDir, []string{"foo", "foo/bar", "bar", "foo/baz/fa", "foo/baz"})
|
||||
Expect(ordered).To(Equal([]string{"foo/baz/fa", "foo/bar", "foo/baz", "foo", "bar"}))
|
||||
})
|
||||
})
|
@@ -3,6 +3,9 @@ package helpers
|
||||
import (
|
||||
"io/ioutil"
|
||||
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/pkg/errors"
|
||||
"gopkg.in/yaml.v2"
|
||||
"helm.sh/helm/v3/pkg/chart"
|
||||
@@ -37,13 +40,44 @@ func RenderHelm(template string, values, d map[string]interface{}) (string, erro
|
||||
|
||||
type templatedata map[string]interface{}
|
||||
|
||||
func RenderFiles(toTemplate, valuesFile string, defaultFile string) (string, error) {
|
||||
// UnMarshalValues unmarshal values files and joins them into a unique templatedata
|
||||
// the join happens from right to left, so any rightmost value file overwrites the content of the ones before it.
|
||||
func UnMarshalValues(values []string) (templatedata, error) {
|
||||
dst := templatedata{}
|
||||
if len(values) > 0 {
|
||||
for _, bv := range reverse(values) {
|
||||
current := templatedata{}
|
||||
|
||||
defBuild, err := ioutil.ReadFile(bv)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+bv)
|
||||
}
|
||||
err = yaml.Unmarshal(defBuild, &current)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+bv)
|
||||
}
|
||||
if err := mergo.Merge(&dst, current); err != nil {
|
||||
return nil, errors.Wrap(err, "merging values file "+bv)
|
||||
}
|
||||
}
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
func reverse(s []string) []string {
|
||||
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func RenderFiles(toTemplate, valuesFile string, defaultFile ...string) (string, error) {
|
||||
raw, err := ioutil.ReadFile(toTemplate)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "reading file "+toTemplate)
|
||||
}
|
||||
|
||||
if !Exists(valuesFile) {
|
||||
if !fileHelper.Exists(valuesFile) {
|
||||
return "", errors.Wrap(err, "file not existing "+valuesFile)
|
||||
}
|
||||
val, err := ioutil.ReadFile(valuesFile)
|
||||
@@ -52,20 +86,14 @@ func RenderFiles(toTemplate, valuesFile string, defaultFile string) (string, err
|
||||
}
|
||||
|
||||
var values templatedata
|
||||
d := templatedata{}
|
||||
if len(defaultFile) > 0 {
|
||||
def, err := ioutil.ReadFile(defaultFile)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "reading file "+valuesFile)
|
||||
}
|
||||
if err = yaml.Unmarshal(def, &d); err != nil {
|
||||
return "", errors.Wrap(err, "unmarshalling file "+toTemplate)
|
||||
}
|
||||
}
|
||||
|
||||
if err = yaml.Unmarshal(val, &values); err != nil {
|
||||
return "", errors.Wrap(err, "unmarshalling file "+toTemplate)
|
||||
}
|
||||
|
||||
return RenderHelm(string(raw), values, d)
|
||||
dst, err := UnMarshalValues(defaultFile)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "unmarshalling values")
|
||||
}
|
||||
|
||||
return RenderHelm(string(raw), values, dst)
|
||||
}
|
||||
|
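RenderFiles now accepts the default values files as a variadic list and merges them through the new UnMarshalValues, where the rightmost file wins on conflicting keys (mergo merges right to left and never overwrites a key that is already set). A small sketch of just the merging step, with throwaway files; only UnMarshalValues and the helpers import path are taken from this changeset:

package main

import (
    "fmt"
    "io/ioutil"
    "path/filepath"

    "github.com/mudler/luet/pkg/helpers"
)

func main() {
    dir, err := ioutil.TempDir("", "values")
    if err != nil {
        panic(err)
    }

    left := filepath.Join(dir, "default.yaml")
    right := filepath.Join(dir, "default2.yaml")
    _ = ioutil.WriteFile(left, []byte("foo: \"baz\"\n"), 0644)
    _ = ioutil.WriteFile(right, []byte("foo: \"do\"\nbar: \"nei\"\n"), 0644)

    // The rightmost file overrides the ones before it: foo resolves to "do", bar to "nei".
    merged, err := helpers.UnMarshalValues([]string{left, right})
    if err != nil {
        panic(err)
    }
    fmt.Println(merged)
}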
@@ -114,11 +114,46 @@ foo: "bar"
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
res, err := RenderFiles(toTemplate, values, "")
|
||||
res, err := RenderFiles(toTemplate, values)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("bar"))
|
||||
})
|
||||
|
||||
It("Render files merging defaults", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
toTemplate := filepath.Join(testDir, "totemplate.yaml")
|
||||
values := filepath.Join(testDir, "values.yaml")
|
||||
d := filepath.Join(testDir, "default.yaml")
|
||||
d2 := filepath.Join(testDir, "default2.yaml")
|
||||
|
||||
writeFile(toTemplate, `{{.Values.foo}}{{.Values.bar}}{{.Values.b}}`)
|
||||
writeFile(values, `
|
||||
foo: "bar"
|
||||
b: "f"
|
||||
`)
|
||||
writeFile(d, `
|
||||
foo: "baz"
|
||||
`)
|
||||
|
||||
writeFile(d2, `
|
||||
foo: "do"
|
||||
bar: "nei"
|
||||
`)
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
res, err := RenderFiles(toTemplate, values, d2, d)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("bazneif"))
|
||||
|
||||
res, err = RenderFiles(toTemplate, values, d, d2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("doneif"))
|
||||
})
|
||||
|
||||
It("doesn't interpolate if no one provides the values", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
36
pkg/helpers/imgworker/auth.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package imgworker
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/session/auth"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
func NewDockerAuthProvider(auth *types.AuthConfig) session.Attachable {
|
||||
return &authProvider{
|
||||
config: auth,
|
||||
}
|
||||
}
|
||||
|
||||
type authProvider struct {
|
||||
config *types.AuthConfig
|
||||
}
|
||||
|
||||
func (ap *authProvider) Register(server *grpc.Server) {
|
||||
// no-op
|
||||
}
|
||||
|
||||
func (ap *authProvider) Credentials(ctx context.Context, req *auth.CredentialsRequest) (*auth.CredentialsResponse, error) {
|
||||
res := &auth.CredentialsResponse{}
|
||||
if ap.config.IdentityToken != "" {
|
||||
res.Secret = ap.config.IdentityToken
|
||||
} else {
|
||||
res.Username = ap.config.Username
|
||||
res.Secret = ap.config.Password
|
||||
}
|
||||
return res, nil
|
||||
}
|
@@ -8,6 +8,7 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/genuinetools/img/types"
|
||||
"github.com/moby/buildkit/control"
|
||||
"github.com/moby/buildkit/session"
|
||||
@@ -29,10 +30,11 @@ type Client struct {
|
||||
|
||||
sess *session.Session
|
||||
ctx context.Context
|
||||
auth *dockertypes.AuthConfig
|
||||
}
|
||||
|
||||
// New returns a new client for communicating with the buildkit controller.
|
||||
func New(root string) (*Client, error) {
|
||||
func New(root string, auth *dockertypes.AuthConfig) (*Client, error) {
|
||||
// Native backend is fine, our images have just one layer. No need to depend on anything
|
||||
backend := types.NativeBackend
|
||||
|
||||
@@ -45,6 +47,7 @@ func New(root string) (*Client, error) {
|
||||
backend: types.NativeBackend,
|
||||
root: root,
|
||||
localDirs: nil,
|
||||
auth: auth,
|
||||
}
|
||||
|
||||
if err := c.prepare(); err != nil {
|
@@ -31,6 +31,7 @@ func (c *Client) Pull(image string) (*ListedImage, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse the image name and tag.
|
||||
named, err := reference.ParseNormalizedNamed(image)
|
||||
if err != nil {
|
||||
@@ -114,7 +115,6 @@ func (c *Client) Pull(image string) (*ListedImage, error) {
|
||||
if _, err := e.Export(ctx, exporter.Source{Ref: ref}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the image.
|
||||
img, err := opt.ImageStore.Get(ctx, image)
|
||||
if err != nil {
|
@@ -4,10 +4,8 @@ package imgworker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/session/auth/authprovider"
|
||||
"github.com/moby/buildkit/session/filesync"
|
||||
"github.com/moby/buildkit/session/testutil"
|
||||
"github.com/pkg/errors"
|
||||
@@ -31,7 +29,7 @@ func (c *Client) Session(ctx context.Context) (*session.Session, session.Dialer,
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed to create session manager")
|
||||
}
|
||||
sessionName := "img"
|
||||
sessionName := "luet"
|
||||
s, err := session.NewSession(ctx, sessionName, "")
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed to create session")
|
||||
@@ -41,7 +39,7 @@ func (c *Client) Session(ctx context.Context) (*session.Session, session.Dialer,
|
||||
syncedDirs = append(syncedDirs, filesync.SyncedDir{Name: name, Dir: d})
|
||||
}
|
||||
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
|
||||
s.Allow(authprovider.NewDockerAuthProvider(os.Stderr))
|
||||
s.Allow(NewDockerAuthProvider(c.auth))
|
||||
return s, sessionDialer(s, m), err
|
||||
}
|
||||
|
@@ -5,6 +5,7 @@ package imgworker
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
"os"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
@@ -18,6 +19,12 @@ import (
|
||||
// TODO: this requires root permissions to mount/unmount layers, although it shouldn't be required.
|
||||
// See how backends are unpacking images without asking for root permissions.
|
||||
|
||||
// UnpackEventData is the data structure to pass for the bus events
|
||||
type UnpackEventData struct {
|
||||
Image string
|
||||
Dest string
|
||||
}
|
||||
|
||||
// Unpack exports an image to a rootfs destination directory.
|
||||
func (c *Client) Unpack(image, dest string) error {
|
||||
|
||||
@@ -59,6 +66,8 @@ func (c *Client) Unpack(image, dest string) error {
|
||||
return fmt.Errorf("getting image manifest failed: %v", err)
|
||||
}
|
||||
|
||||
_,_ = bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
for _, desc := range manifest.Layers {
|
||||
logrus.Debugf("Unpacking layer %s", desc.Digest.String())
|
||||
|
||||
@@ -71,12 +80,14 @@ func (c *Client) Unpack(image, dest string) error {
|
||||
// Unpack the tarfile to the rootfs path.
|
||||
// FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
|
||||
if err := archive.Untar(content.NewReader(layer), dest, &archive.TarOptions{
|
||||
NoLchown: true,
|
||||
NoLchown: false,
|
||||
ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
|
||||
}); err != nil {
|
||||
return fmt.Errorf("extracting tar for %s to directory %s failed: %v", desc.Digest.String(), dest, err)
|
||||
}
|
||||
}
|
||||
|
||||
_, _ = bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
return nil
|
||||
}
|
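The imgworker changes thread an explicit *types.AuthConfig through the client (New, the session's auth provider) and publish pre/post unpack bus events; this remains the privileged path used when LUET_PRIVILEGED_EXTRACT=true. A hedged sketch of the client lifecycle; root privileges are still required, and the image name and directories are placeholders:

package main

import (
    "fmt"
    "io/ioutil"

    "github.com/docker/docker/api/types"
    "github.com/mudler/luet/pkg/helpers/imgworker"
)

func main() {
    // The client keeps its buildkit state under a scratch root directory.
    root, err := ioutil.TempDir("", "imgworker")
    if err != nil {
        panic(err)
    }

    // New now receives the registry credentials directly; an empty AuthConfig means anonymous pulls.
    c, err := imgworker.New(root, &types.AuthConfig{})
    if err != nil {
        panic(err)
    }
    defer c.Close()

    const image = "quay.io/example/repo:pkg-1.0"
    img, err := c.Pull(image)
    if err != nil {
        panic(err)
    }
    fmt.Println("pulled", img.Target.Digest)

    // Unpack publishes EventImagePreUnPack / EventImagePostUnPack around the extraction.
    if err := c.Unpack(image, "/tmp/rootfs"); err != nil {
        panic(err)
    }
}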
@@ -14,12 +14,21 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package helpers
|
||||
package match
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
func ReverseAny(s interface{}) {
|
||||
n := reflect.ValueOf(s).Len()
|
||||
swap := reflect.Swapper(s)
|
||||
for i, j := 0, n-1; i < j; i, j = i+1, j-1 {
|
||||
swap(i, j)
|
||||
}
|
||||
}
|
||||
|
||||
func MapMatchRegex(m *map[string]string, r *regexp.Regexp) bool {
|
||||
ans := false
|
||||
|
@@ -16,58 +16,43 @@
|
||||
package client
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
imgworker "github.com/mudler/luet/pkg/installer/client/imgworker"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/helpers/docker"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
errImageDownloadMsg = "failed downloading image %s: %s"
|
||||
)
|
||||
|
||||
type DockerClient struct {
|
||||
RepoData RepoData
|
||||
auth *types.AuthConfig
|
||||
verify bool
|
||||
}
|
||||
|
||||
func NewDockerClient(r RepoData) *DockerClient {
|
||||
return &DockerClient{RepoData: r}
|
||||
auth := &types.AuthConfig{}
|
||||
|
||||
dat, _ := json.Marshal(r.Authentication)
|
||||
json.Unmarshal(dat, auth)
|
||||
|
||||
return &DockerClient{RepoData: r, auth: auth}
|
||||
}
|
||||
|
||||
func downloadAndExtractDockerImage(image, dest string) error {
|
||||
temp, err := config.LuetCfg.GetSystem().TempDir("contentstore")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(temp)
|
||||
Debug("Temporary directory", temp)
|
||||
c, err := imgworker.New(temp)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed creating client")
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
// FROM Slightly adapted from genuinetools/img https://github.com/genuinetools/img/blob/54d0ca981c1260546d43961a538550eef55c87cf/pull.go
|
||||
Debug("Pulling image", image)
|
||||
listedImage, err := c.Pull(image)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed listing images")
|
||||
|
||||
}
|
||||
Debug("Pulled:", listedImage.Target.Digest)
|
||||
Debug("Size:", units.BytesSize(float64(listedImage.ContentSize)))
|
||||
Debug("Unpacking", image, "to", dest)
|
||||
os.RemoveAll(dest)
|
||||
return c.Unpack(image, dest)
|
||||
}
|
||||
|
||||
func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
|
||||
func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
|
||||
//var u *url.URL = nil
|
||||
var err error
|
||||
var temp string
|
||||
@@ -75,11 +60,11 @@ func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Ar
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
var resultingArtifact compiler.Artifact
|
||||
artifactName := path.Base(artifact.GetPath())
|
||||
var resultingArtifact *artifact.PackageArtifact
|
||||
artifactName := path.Base(a.Path)
|
||||
cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
|
||||
Debug("Cache file", cacheFile)
|
||||
if err := helpers.EnsureDir(cacheFile); err != nil {
|
||||
if err := fileHelper.EnsureDir(cacheFile); err != nil {
|
||||
return nil, errors.Wrapf(err, "could not create cache folder %s for %s", config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), cacheFile)
|
||||
}
|
||||
ok := false
|
||||
@@ -92,11 +77,11 @@ func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Ar
|
||||
// is done in such cases (see repository.go)
|
||||
|
||||
// Check if file is already in cache
|
||||
if helpers.Exists(cacheFile) {
|
||||
if fileHelper.Exists(cacheFile) {
|
||||
Debug("Cache hit for artifact", artifactName)
|
||||
resultingArtifact = artifact
|
||||
resultingArtifact.SetPath(cacheFile)
|
||||
resultingArtifact.SetChecksums(compiler.Checksums{})
|
||||
resultingArtifact = a
|
||||
resultingArtifact.Path = cacheFile
|
||||
resultingArtifact.Checksums = artifact.Checksums{}
|
||||
} else {
|
||||
|
||||
temp, err = config.LuetCfg.GetSystem().TempDir("tree")
|
||||
@@ -107,22 +92,31 @@ func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Ar
|
||||
|
||||
for _, uri := range c.RepoData.Urls {
|
||||
|
||||
imageName := fmt.Sprintf("%s:%s", uri, artifact.GetCompileSpec().GetPackage().ImageID())
|
||||
imageName := fmt.Sprintf("%s:%s", uri, a.CompileSpec.GetPackage().ImageID())
|
||||
Info("Downloading image", imageName)
|
||||
|
||||
// imageName := fmt.Sprintf("%s/%s", uri, artifact.GetCompileSpec().GetPackage().GetPackageImageName())
|
||||
err = downloadAndExtractDockerImage(imageName, temp)
|
||||
contentstore, err := config.LuetCfg.GetSystem().TempDir("contentstore")
|
||||
if err != nil {
|
||||
Debug("Failed download of image", imageName)
|
||||
Warning("Cannot create contentstore", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
// imageName := fmt.Sprintf("%s/%s", uri, artifact.GetCompileSpec().GetPackage().GetPackageImageName())
|
||||
info, err := docker.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
|
||||
if err != nil {
|
||||
Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
|
||||
continue
|
||||
}
|
||||
|
||||
Info(fmt.Sprintf("Pulled: %s", info.Target.Digest))
|
||||
Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.ContentSize))))
|
||||
Debug("\nCompressing result ", filepath.Join(temp), "to", cacheFile)
|
||||
|
||||
newart := artifact
|
||||
newart := a
|
||||
// We discard checksum, that are checked while during pull and unpack
|
||||
newart.SetChecksums(compiler.Checksums{})
|
||||
newart.SetPath(cacheFile) // First set to cache file
|
||||
newart.SetPath(newart.GetUncompressedName()) // Calculate the real path from cacheFile
|
||||
newart.Checksums = artifact.Checksums{}
|
||||
newart.Path = cacheFile // First set to cache file
|
||||
newart.Path = newart.GetUncompressedName() // Calculate the real path from cacheFile
|
||||
err = newart.Compress(temp, 1)
|
||||
if err != nil {
|
||||
Error(fmt.Sprintf("Failed compressing package %s: %s", imageName, err.Error()))
|
||||
@@ -145,8 +139,7 @@ func (c *DockerClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Ar
|
||||
func (c *DockerClient) DownloadFile(name string) (string, error) {
|
||||
var file *os.File = nil
|
||||
var err error
|
||||
var temp string
|
||||
|
||||
var temp, contentstore string
|
||||
// Files should be in URI/repository:<file>
|
||||
ok := false
|
||||
|
||||
@@ -156,25 +149,31 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
|
||||
}
|
||||
|
||||
for _, uri := range c.RepoData.Urls {
|
||||
|
||||
file, err = config.LuetCfg.GetSystem().TempFile("DockerClient")
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
Debug("Downloading file", name, "from", uri)
|
||||
|
||||
imageName := fmt.Sprintf("%s:%s", uri, name)
|
||||
//imageName := fmt.Sprintf("%s/%s:%s", uri, "repository", name)
|
||||
err = downloadAndExtractDockerImage(imageName, temp)
|
||||
contentstore, err = config.LuetCfg.GetSystem().TempDir("contentstore")
|
||||
if err != nil {
|
||||
Debug("Failed download of image", imageName)
|
||||
Warning("Cannot create contentstore", err.Error())
|
||||
continue
|
||||
}
|
||||
|
||||
Debug("\nCopying file ", filepath.Join(temp, name), "to", file.Name())
|
||||
err = helpers.CopyFile(filepath.Join(temp, name), file.Name())
|
||||
imageName := fmt.Sprintf("%s:%s", uri, docker.StripInvalidStringsFromImage(name))
|
||||
Info("Downloading", imageName)
|
||||
|
||||
info, err := docker.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
|
||||
if err != nil {
|
||||
Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
|
||||
continue
|
||||
}
|
||||
|
||||
Info(fmt.Sprintf("Pulled: %s", info.Target.Digest))
|
||||
Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.ContentSize))))
|
||||
|
||||
Debug("\nCopying file ", filepath.Join(temp, name), "to", file.Name())
|
||||
err = fileHelper.CopyFile(filepath.Join(temp, name), file.Name())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
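The Docker repository client now carries credentials and a verify flag: NewDockerClient decodes RepoData.Authentication into a types.AuthConfig and every download goes through docker.DownloadAndExtractDockerImage with them. A hedged construction sketch; the URL and credentials are placeholders and the map keys are assumed to follow types.AuthConfig's JSON tags:

package main

import (
    "fmt"
    "os"

    "github.com/mudler/luet/pkg/installer/client"
)

func main() {
    c := client.NewDockerClient(client.RepoData{
        Urls: []string{"quay.io/example/luet-repo"},
        Authentication: map[string]string{
            "username": "user", // keys mirror types.AuthConfig JSON tags (assumption)
            "password": "secret",
        },
        Verify: false, // set to true to resolve digests through notary before pulling
    })

    f, err := c.DownloadFile("repository.yaml")
    if err != nil {
        fmt.Fprintln(os.Stderr, "download failed:", err)
        os.Exit(1)
    }
    fmt.Println("repository metadata downloaded to", f)
}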
@@ -20,8 +20,10 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
. "github.com/mudler/luet/pkg/installer/client"
|
||||
@@ -30,7 +32,7 @@ import (
|
||||
)
|
||||
|
||||
// This test expect that the repository defined in UNIT_TEST_DOCKER_IMAGE is in zstd format.
|
||||
// the repository is built by the 01_simple_docker.sh integration test file.
|
||||
// the repository is built by the 01_simple_docker.sh integration test fileHelper.
|
||||
// This test also require root. At the moment, unpacking docker images with 'img' requires root permission to
|
||||
// mount/unmount layers.
|
||||
var _ = Describe("Docker client", func() {
|
||||
@@ -49,14 +51,14 @@ var _ = Describe("Docker client", func() {
|
||||
It("Downloads single files", func() {
|
||||
f, err := c.DownloadFile("repository.yaml")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(f)).To(ContainSubstring("Test Repo"))
|
||||
Expect(fileHelper.Read(f)).To(ContainSubstring("Test Repo"))
|
||||
os.RemoveAll(f)
|
||||
})
|
||||
|
||||
It("Downloads artifacts", func() {
|
||||
f, err := c.DownloadArtifact(&compiler.PackageArtifact{
|
||||
f, err := c.DownloadArtifact(&artifact.PackageArtifact{
|
||||
Path: "test.tar",
|
||||
CompileSpec: &compiler.LuetCompilationSpec{
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "c",
|
||||
Category: "test",
|
||||
@@ -69,9 +71,9 @@ var _ = Describe("Docker client", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
Expect(f.Unpack(tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(filepath.Join(tmpdir, "c"))).To(Equal("c\n"))
|
||||
Expect(helpers.Read(filepath.Join(tmpdir, "cd"))).To(Equal("c\n"))
|
||||
os.RemoveAll(f.GetPath())
|
||||
Expect(fileHelper.Read(filepath.Join(tmpdir, "c"))).To(Equal("c\n"))
|
||||
Expect(fileHelper.Read(filepath.Join(tmpdir, "cd"))).To(Equal("c\n"))
|
||||
os.RemoveAll(f.Path)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@@ -24,13 +24,12 @@ import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
"github.com/cavaliercoder/grab"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
|
||||
"github.com/schollz/progressbar/v3"
|
||||
)
|
||||
@@ -66,18 +65,18 @@ func Round(input float64) float64 {
|
||||
return math.Floor(input + 0.5)
|
||||
}
|
||||
|
||||
func (c *HttpClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
|
||||
func (c *HttpClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
|
||||
var u *url.URL = nil
|
||||
var err error
|
||||
var req *grab.Request
|
||||
var temp string
|
||||
|
||||
artifactName := path.Base(artifact.GetPath())
|
||||
artifactName := path.Base(a.Path)
|
||||
cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
|
||||
ok := false
|
||||
|
||||
// Check if file is already in cache
|
||||
if helpers.Exists(cacheFile) {
|
||||
if fileHelper.Exists(cacheFile) {
|
||||
Debug("Use artifact", artifactName, "from cache.")
|
||||
} else {
|
||||
|
||||
@@ -156,7 +155,7 @@ func (c *HttpClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Arti
|
||||
fmt.Sprintf("%.2f", (float64(resp.BytesPerSecond())/1024)/1024), "MiB/s )")
|
||||
|
||||
Debug("\nCopying file ", filepath.Join(temp, artifactName), "to", cacheFile)
|
||||
err = helpers.CopyFile(filepath.Join(temp, artifactName), cacheFile)
|
||||
err = fileHelper.CopyFile(filepath.Join(temp, artifactName), cacheFile)
|
||||
|
||||
bar.Finish()
|
||||
ok = true
|
||||
@@ -168,8 +167,8 @@ func (c *HttpClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Arti
|
||||
}
|
||||
}
|
||||
|
||||
newart := artifact
|
||||
newart.SetPath(cacheFile)
|
||||
newart := a
|
||||
newart.Path = cacheFile
|
||||
return newart, nil
|
||||
}
|
||||
|
||||
@@ -218,7 +217,7 @@ func (c *HttpClient) DownloadFile(name string) (string, error) {
|
||||
fmt.Sprintf("%.2f", (float64(resp.BytesComplete())/1000)/1000), "MB (",
|
||||
fmt.Sprintf("%.2f", (float64(resp.BytesPerSecond())/1024)/1024), "MiB/s )")
|
||||
|
||||
err = helpers.CopyFile(filepath.Join(temp, name), file.Name())
|
||||
err = fileHelper.CopyFile(filepath.Join(temp, name), file.Name())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
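HttpClient gets the same conversion to the concrete artifact type: DownloadArtifact takes and returns *artifact.PackageArtifact and reuses the package cache before downloading. A compact sketch mirroring the test below; the URL and artifact path are placeholders:

package main

import (
    "fmt"

    "github.com/mudler/luet/pkg/compiler/types/artifact"
    "github.com/mudler/luet/pkg/installer/client"
)

func main() {
    c := client.NewHttpClient(client.RepoData{Urls: []string{"https://example.org/repo"}})

    // On success the returned artifact's Path points at the local cache copy.
    a, err := c.DownloadArtifact(&artifact.PackageArtifact{Path: "test-package.tar"})
    if err != nil {
        panic(err)
    }
    fmt.Println("cached at", a.Path)
}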
@@ -22,9 +22,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/installer/client"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -47,7 +46,7 @@ var _ = Describe("Http client", func() {
|
||||
c := NewHttpClient(RepoData{Urls: []string{ts.URL}})
|
||||
path, err := c.DownloadFile("test.txt")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(path)).To(Equal("test"))
|
||||
Expect(fileHelper.Read(path)).To(Equal("test"))
|
||||
os.RemoveAll(path)
|
||||
})
|
||||
|
||||
@@ -63,10 +62,10 @@ var _ = Describe("Http client", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
c := NewHttpClient(RepoData{Urls: []string{ts.URL}})
|
||||
path, err := c.DownloadArtifact(&compiler.PackageArtifact{Path: "test.txt"})
|
||||
path, err := c.DownloadArtifact(&artifact.PackageArtifact{Path: "test.txt"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(path.GetPath())).To(Equal("test"))
|
||||
os.RemoveAll(path.GetPath())
|
||||
Expect(fileHelper.Read(path.Path)).To(Equal("test"))
|
||||
os.RemoveAll(path.Path)
|
||||
})
|
||||
|
||||
})
|
||||
|
@@ -18,4 +18,5 @@ package client
|
||||
type RepoData struct {
|
||||
Urls []string
|
||||
Authentication map[string]string
|
||||
Verify bool
|
||||
}
|
||||
|
@@ -20,11 +20,10 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
)
|
||||
|
||||
type LocalClient struct {
|
||||
@@ -35,11 +34,11 @@ func NewLocalClient(r RepoData) *LocalClient {
|
||||
return &LocalClient{RepoData: r}
|
||||
}
|
||||
|
||||
func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
|
||||
func (c *LocalClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
|
||||
var err error
|
||||
|
||||
rootfs := ""
|
||||
artifactName := path.Base(artifact.GetPath())
|
||||
artifactName := path.Base(a.Path)
|
||||
cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
|
||||
|
||||
if !config.LuetCfg.ConfigFromHost {
|
||||
@@ -50,7 +49,7 @@ func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Art
|
||||
}
|
||||
|
||||
// Check if file is already in cache
|
||||
if helpers.Exists(cacheFile) {
|
||||
if fileHelper.Exists(cacheFile) {
|
||||
Debug("Use artifact", artifactName, "from cache.")
|
||||
} else {
|
||||
ok := false
|
||||
@@ -61,7 +60,7 @@ func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Art
|
||||
Info("Downloading artifact", artifactName, "from", uri)
|
||||
|
||||
//defer os.Remove(file.Name())
|
||||
err = helpers.CopyFile(filepath.Join(uri, artifactName), cacheFile)
|
||||
err = fileHelper.CopyFile(filepath.Join(uri, artifactName), cacheFile)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
@@ -74,8 +73,8 @@ func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Art
|
||||
}
|
||||
}
|
||||
|
||||
newart := artifact
|
||||
newart.SetPath(cacheFile)
|
||||
newart := a
|
||||
newart.Path = cacheFile
|
||||
return newart, nil
|
||||
}
|
||||
|
||||
@@ -104,7 +103,7 @@ func (c *LocalClient) DownloadFile(name string) (string, error) {
|
||||
}
|
||||
//defer os.Remove(file.Name())
|
||||
|
||||
err = helpers.CopyFile(filepath.Join(uri, name), file.Name())
|
||||
err = fileHelper.CopyFile(filepath.Join(uri, name), file.Name())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
@@ -20,9 +20,8 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/installer/client"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -42,7 +41,7 @@ var _ = Describe("Local client", func() {
|
||||
c := NewLocalClient(RepoData{Urls: []string{tmpdir}})
|
||||
path, err := c.DownloadFile("test.txt")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(path)).To(Equal("test"))
|
||||
Expect(fileHelper.Read(path)).To(Equal("test"))
|
||||
os.RemoveAll(path)
|
||||
})
|
||||
|
||||
@@ -56,10 +55,10 @@ var _ = Describe("Local client", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
c := NewLocalClient(RepoData{Urls: []string{tmpdir}})
|
||||
path, err := c.DownloadArtifact(&compiler.PackageArtifact{Path: "test.txt"})
|
||||
path, err := c.DownloadArtifact(&artifact.PackageArtifact{Path: "test.txt"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(path.GetPath())).To(Equal("test"))
|
||||
os.RemoveAll(path.GetPath())
|
||||
Expect(fileHelper.Read(path.Path)).To(Equal("test"))
|
||||
os.RemoveAll(path.Path)
|
||||
})
|
||||
|
||||
})
|
||||
|
@@ -16,6 +16,7 @@
|
||||
package installer
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
@@ -48,7 +49,7 @@ func (f *LuetFinalizer) RunInstall(s *System) error {
|
||||
for _, c := range f.Install {
|
||||
toRun := append(args, c)
|
||||
Info(":shell: Executing finalizer on ", s.Target, cmd, toRun)
|
||||
if s.Target == "/" {
|
||||
if s.Target == string(os.PathSeparator) {
|
||||
cmd := exec.Command(cmd, toRun...)
|
||||
stdoutStderr, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
|
@@ -24,15 +24,16 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
. "github.com/logrusorgru/aurora"
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
"github.com/mudler/luet/pkg/helpers/match"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
|
||||
. "github.com/logrusorgru/aurora"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -58,11 +59,11 @@ type LuetInstaller struct {
|
||||
|
||||
type ArtifactMatch struct {
|
||||
Package pkg.Package
|
||||
Artifact compiler.Artifact
|
||||
Repository Repository
|
||||
Artifact *artifact.PackageArtifact
|
||||
Repository *LuetSystemRepository
|
||||
}
|
||||
|
||||
func NewLuetInstaller(opts LuetInstallerOptions) Installer {
|
||||
func NewLuetInstaller(opts LuetInstallerOptions) *LuetInstaller {
|
||||
return &LuetInstaller{Options: opts}
|
||||
}
|
||||
|
||||
@@ -106,11 +107,11 @@ func (l *LuetInstaller) computeUpgrade(syncedRepos Repositories, s *System) (pkg
|
||||
continue
|
||||
}
|
||||
for _, artefact := range matches[0].Repo.GetIndex() {
|
||||
if artefact.GetCompileSpec().GetPackage() == nil {
|
||||
if artefact.CompileSpec.GetPackage() == nil {
|
||||
return uninstall, toInstall, errors.New("Package in compilespec empty")
|
||||
|
||||
}
|
||||
if artefact.GetCompileSpec().GetPackage().Matches(p) && artefact.GetCompileSpec().GetPackage().GetBuildTimestamp() != p.GetBuildTimestamp() {
|
||||
if artefact.CompileSpec.GetPackage().Matches(p) && artefact.CompileSpec.GetPackage().GetBuildTimestamp() != p.GetBuildTimestamp() {
|
||||
toInstall = append(toInstall, matches[0].Package).Unique()
|
||||
uninstall = append(uninstall, p).Unique()
|
||||
}
|
||||
@@ -127,6 +128,8 @@ func packsToList(p pkg.Packages) string {
|
||||
for _, pp := range p {
|
||||
packs = append(packs, pp.HumanReadableString())
|
||||
}
|
||||
|
||||
sort.Strings(packs)
|
||||
return strings.Join(packs, " ")
|
||||
}
|
||||
|
||||
@@ -136,6 +139,7 @@ func matchesToList(artefacts map[string]ArtifactMatch) string {
|
||||
for fingerprint, match := range artefacts {
|
||||
packs = append(packs, fmt.Sprintf("%s (%s)", fingerprint, match.Repository.GetName()))
|
||||
}
|
||||
sort.Strings(packs)
|
||||
return strings.Join(packs, " ")
|
||||
}
|
||||
|
||||
@@ -152,39 +156,7 @@ func (l *LuetInstaller) Upgrade(s *System) error {
Info(":memo: note: will consider new build revisions while upgrading")
}

Spinner(32)
uninstall, toInstall, err := l.computeUpgrade(syncedRepos, s)
if err != nil {
return errors.Wrap(err, "failed computing upgrade")
}
SpinnerStop()

if len(uninstall) > 0 {
Info(":recycle: Packages that are going to be removed from the system:\n ", Yellow(packsToList(uninstall)).BgBlack().String())
}

if len(toInstall) > 0 {
Info(":zap:Packages that are going to be installed in the system:\n ", Green(packsToList(toInstall)).BgBlack().String())
}

if len(toInstall) == 0 && len(uninstall) == 0 {
Info("Nothing to do")
return nil
}

if l.Options.Ask {
Info("By going forward, you are also accepting the licenses of the packages that you are going to install in your system.")
if Ask() {
l.Options.Ask = false // Don't prompt anymore
return l.swap(syncedRepos, uninstall, toInstall, s, true)
} else {
return errors.New("Aborted by user")
}
}

Spinner(32)
defer SpinnerStop()
return l.swap(syncedRepos, uninstall, toInstall, s, true)
return l.checkAndUpgrade(syncedRepos, s)
}

func (l *LuetInstaller) SyncRepositories(inMemory bool) (Repositories, error) {
@@ -225,11 +197,19 @@ func (l *LuetInstaller) Swap(toRemove pkg.Packages, toInstall pkg.Packages, s *S
toRemoveFinal = append(toRemoveFinal, pp)
}
}
o := Option{
FullUninstall: false,
Force: true,
CheckConflicts: false,
FullCleanUninstall: false,
NoDeps: l.Options.NoDeps,
OnlyDeps: false,
}

return l.swap(syncedRepos, toRemoveFinal, toInstall, s, false)
return l.swap(o, syncedRepos, toRemoveFinal, toInstall, s)
}

func (l *LuetInstaller) computeSwap(syncedRepos Repositories, toRemove pkg.Packages, toInstall pkg.Packages, s *System) (map[string]ArtifactMatch, pkg.Packages, solver.PackagesAssertions, pkg.PackageDatabase, error) {
func (l *LuetInstaller) computeSwap(o Option, syncedRepos Repositories, toRemove pkg.Packages, toInstall pkg.Packages, s *System) (map[string]ArtifactMatch, pkg.Packages, solver.PackagesAssertions, pkg.PackageDatabase, error) {

allRepos := pkg.NewInMemoryDatabase(false)
syncedRepos.SyncDatabase(allRepos)
@@ -244,8 +224,8 @@ func (l *LuetInstaller) computeSwap(syncedRepos Repositories, toRemove pkg.Packa

systemAfterChanges := &System{Database: installedtmp}

packs, err := l.computeUninstall(systemAfterChanges, toRemove...)
if err != nil && !l.Options.Force {
packs, err := l.computeUninstall(o, systemAfterChanges, toRemove...)
if err != nil && !o.Force {
Error("Failed computing uninstall for ", packsToList(toRemove))
return nil, nil, nil, nil, errors.Wrap(err, "computing uninstall "+packsToList(toRemove))
}
@@ -256,30 +236,16 @@ func (l *LuetInstaller) computeSwap(syncedRepos Repositories, toRemove pkg.Packa
}
}

match, packages, assertions, allRepos, err := l.computeInstall(syncedRepos, toInstall, systemAfterChanges)
match, packages, assertions, allRepos, err := l.computeInstall(o, syncedRepos, toInstall, systemAfterChanges)
for _, p := range toInstall {
assertions = append(assertions, solver.PackageAssert{Package: p.(*pkg.DefaultPackage), Value: true})
}
return match, packages, assertions, allRepos, err
}

func (l *LuetInstaller) swap(syncedRepos Repositories, toRemove pkg.Packages, toInstall pkg.Packages, s *System, forceNodeps bool) error {
forced := l.Options.Force
nodeps := l.Options.NoDeps
func (l *LuetInstaller) swap(o Option, syncedRepos Repositories, toRemove pkg.Packages, toInstall pkg.Packages, s *System) error {

// We don't want any conflict with the installed to raise during the upgrade.
// In this way we both force uninstalls and we avoid to check with conflicts
// against the current system state which is pending to deletion
// E.g. you can't check for conflicts for an upgrade of a new version of A
// if the old A results installed in the system. This is due to the fact that
// now the solver enforces the constraints and explictly denies two packages
// of the same version installed.
l.Options.Force = true
if forceNodeps {
l.Options.NoDeps = true
}

match, packages, assertions, allRepos, err := l.computeSwap(syncedRepos, toRemove, toInstall, s)
match, packages, assertions, allRepos, err := l.computeSwap(o, syncedRepos, toRemove, toInstall, s)
if err != nil {
return errors.Wrap(err, "failed computing package replacement")
}
@@ -309,15 +275,223 @@ func (l *LuetInstaller) swap(syncedRepos Repositories, toRemove pkg.Packages, to
return nil
}

err = l.Uninstall(s, toRemove...)
if err != nil && !l.Options.Force {
Error("Failed uninstall for ", packsToList(toRemove))
return errors.Wrap(err, "uninstalling "+packsToList(toRemove))
ops := l.getOpsWithOptions(toRemove, match, Option{
Force: o.Force,
NoDeps: false,
OnlyDeps: o.OnlyDeps,
RunFinalizers: false,
}, o, syncedRepos, packages, assertions, allRepos)

err = l.runOps(ops, s)
if err != nil {
return errors.Wrap(err, "failed running installer options")
}

l.Options.Force = forced
l.Options.NoDeps = nodeps
return l.install(syncedRepos, match, packages, assertions, allRepos, s)
toFinalize, err := l.getFinalizers(allRepos, assertions, match, o.NoDeps)
if err != nil {
return errors.Wrap(err, "failed getting package to finalize")
}

return s.ExecuteFinalizers(toFinalize)
}

type Option struct {
Force bool
NoDeps bool
CheckConflicts bool
FullUninstall bool
FullCleanUninstall bool
OnlyDeps bool
RunFinalizers bool
}

type operation struct {
Option Option
Package pkg.Package
}

type installOperation struct {
operation
Reposiories Repositories
Packages pkg.Packages
Assertions solver.PackagesAssertions
Database pkg.PackageDatabase
Matches map[string]ArtifactMatch
}

// installerOp is the operation that is sent to the
// upgradeWorker's channel (todo)
type installerOp struct {
Uninstall operation
Install installOperation
}

func (l *LuetInstaller) runOps(ops []installerOp, s *System) error {
all := make(chan installerOp)

wg := new(sync.WaitGroup)

// Do the real install
for i := 0; i < l.Options.Concurrency; i++ {
wg.Add(1)
go l.installerOpWorker(i, wg, all, s)
}

for _, c := range ops {
all <- c
}
close(all)
wg.Wait()

return nil
}

// TODO: use installerOpWorker in place of all the other workers.
// This one is general enough to read a list of operations and execute them.
func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, c <-chan installerOp, s *System) error {
defer wg.Done()

for p := range c {
if p.Uninstall.Package != nil {
Debug("Replacing package inplace")
toUninstall, uninstall, err := l.generateUninstallFn(p.Uninstall.Option, s, p.Uninstall.Package)
if err != nil {
Error("Failed to generate Uninstall function for" + err.Error())
continue
//return errors.Wrap(err, "while computing uninstall")
}

err = uninstall()
if err != nil {
Error("Failed uninstall for ", packsToList(toUninstall))
continue
//return errors.Wrap(err, "uninstalling "+packsToList(toUninstall))
}
}
if p.Install.Package != nil {
artMatch := p.Install.Matches[p.Install.Package.GetFingerPrint()]
ass := p.Install.Assertions.Search(p.Install.Package.GetFingerPrint())
packageToInstall, _ := p.Install.Packages.Find(p.Install.Package.GetPackageName())

err := l.install(
p.Install.Option,
p.Install.Reposiories,
map[string]ArtifactMatch{p.Install.Package.GetFingerPrint(): artMatch},
pkg.Packages{packageToInstall},
solver.PackagesAssertions{*ass},
p.Install.Database,
s,
)
if err != nil {
Error(err)
}
}
}

return nil
}

// checks wheter we can uninstall and install in place and compose installer worker ops
func (l *LuetInstaller) getOpsWithOptions(
toUninstall pkg.Packages, installMatch map[string]ArtifactMatch, installOpt, uninstallOpt Option,
syncedRepos Repositories, toInstall pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase) []installerOp {
resOps := []installerOp{}
for _, match := range installMatch {
if pack, err := toUninstall.Find(match.Package.GetPackageName()); err == nil {
resOps = append(resOps, installerOp{
Uninstall: operation{Package: pack, Option: uninstallOpt},
Install: installOperation{
operation: operation{
Package: match.Package,
Option: installOpt,
},
Matches: installMatch,
Packages: toInstall,
Reposiories: syncedRepos,
Assertions: solution,
Database: allRepos,
},
})
} else {
resOps = append(resOps, installerOp{
Install: installOperation{
operation: operation{Package: match.Package, Option: installOpt},
Matches: installMatch,
Reposiories: syncedRepos,
Packages: toInstall,
Assertions: solution,
Database: allRepos,
},
})
}
}

for _, p := range toUninstall {
found := false

for _, match := range installMatch {
if match.Package.GetPackageName() == p.GetPackageName() {
found = true
}

}
if !found {
resOps = append(resOps, installerOp{
Uninstall: operation{Package: p, Option: uninstallOpt},
})
}
}
return resOps
}
func (l *LuetInstaller) checkAndUpgrade(r Repositories, s *System) error {
Spinner(32)
uninstall, toInstall, err := l.computeUpgrade(r, s)
if err != nil {
return errors.Wrap(err, "failed computing upgrade")
}
SpinnerStop()

if len(uninstall) > 0 {
Info(":recycle: Packages that are going to be removed from the system:\n ", Yellow(packsToList(uninstall)).BgBlack().String())
}

if len(toInstall) > 0 {
Info(":zap:Packages that are going to be installed in the system:\n ", Green(packsToList(toInstall)).BgBlack().String())
}

if len(toInstall) == 0 && len(uninstall) == 0 {
Info("Nothing to do")
return nil
}

// We don't want any conflict with the installed to raise during the upgrade.
// In this way we both force uninstalls and we avoid to check with conflicts
// against the current system state which is pending to deletion
// E.g. you can't check for conflicts for an upgrade of a new version of A
// if the old A results installed in the system. This is due to the fact that
// now the solver enforces the constraints and explictly denies two packages
// of the same version installed.
o := Option{
FullUninstall: false,
Force: true,
CheckConflicts: false,
FullCleanUninstall: false,
NoDeps: true,
OnlyDeps: false,
}

if l.Options.Ask {
Info("By going forward, you are also accepting the licenses of the packages that you are going to install in your system.")
if Ask() {
l.Options.Ask = false // Don't prompt anymore
return l.swap(o, r, uninstall, toInstall, s)
} else {
return errors.New("Aborted by user")
}
}

return l.swap(o, r, uninstall, toInstall, s)
}

func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
@@ -326,7 +500,20 @@ func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
return err
}

match, packages, assertions, allRepos, err := l.computeInstall(syncedRepos, cp, s)
if len(s.Database.World()) > 0 {
Info(":thinking: Checking for available upgrades")
if err := l.checkAndUpgrade(syncedRepos, s); err != nil {
return errors.Wrap(err, "while checking upgrades before install")
}
}

o := Option{
NoDeps: l.Options.NoDeps,
Force: l.Options.Force,
OnlyDeps: l.Options.OnlyDeps,
RunFinalizers: true,
}
match, packages, assertions, allRepos, err := l.computeInstall(o, syncedRepos, cp, s)
if err != nil {
return err
}
@@ -363,12 +550,12 @@ func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
Info("By going forward, you are also accepting the licenses of the packages that you are going to install in your system.")
if Ask() {
l.Options.Ask = false // Don't prompt anymore
return l.install(syncedRepos, match, packages, assertions, allRepos, s)
return l.install(o, syncedRepos, match, packages, assertions, allRepos, s)
} else {
return errors.New("Aborted by user")
}
}
return l.install(syncedRepos, match, packages, assertions, allRepos, s)
return l.install(o, syncedRepos, match, packages, assertions, allRepos, s)
}

func (l *LuetInstaller) download(syncedRepos Repositories, toDownload map[string]ArtifactMatch) error {
@@ -406,12 +593,12 @@ func (l *LuetInstaller) Reclaim(s *System) error {
for _, repo := range syncedRepos {
for _, artefact := range repo.GetIndex() {
Debug("Checking if",
artefact.GetCompileSpec().GetPackage().HumanReadableString(),
artefact.CompileSpec.GetPackage().HumanReadableString(),
"from", repo.GetName(), "is installed")
FILES:
for _, f := range artefact.GetFiles() {
if helpers.Exists(filepath.Join(s.Target, f)) {
p, err := repo.GetTree().GetDatabase().FindPackage(artefact.GetCompileSpec().GetPackage())
for _, f := range artefact.Files {
if fileHelper.Exists(filepath.Join(s.Target, f)) {
p, err := repo.GetTree().GetDatabase().FindPackage(artefact.CompileSpec.GetPackage())
if err != nil {
return err
}
@@ -435,7 +622,7 @@ func (l *LuetInstaller) Reclaim(s *System) error {
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Failed creating package")
}
s.Database.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: pack.GetFingerPrint(), Files: match.Artifact.GetFiles()})
s.Database.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: pack.GetFingerPrint(), Files: match.Artifact.Files})
Info(":zap:Reclaimed package:", pack.HumanReadableString())
}
Info("Done!")
@@ -443,7 +630,7 @@ func (l *LuetInstaller) Reclaim(s *System) error {
return nil
}
func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages, s *System) (map[string]ArtifactMatch, pkg.Packages, solver.PackagesAssertions, pkg.PackageDatabase, error) {
func (l *LuetInstaller) computeInstall(o Option, syncedRepos Repositories, cp pkg.Packages, s *System) (map[string]ArtifactMatch, pkg.Packages, solver.PackagesAssertions, pkg.PackageDatabase, error) {
var p pkg.Packages
toInstall := map[string]ArtifactMatch{}
allRepos := pkg.NewInMemoryDatabase(false)
@@ -476,11 +663,11 @@ func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages
var packagesToInstall pkg.Packages
var err error

if !l.Options.NoDeps {
if !o.NoDeps {
solv := solver.NewResolver(solver.Options{Type: l.Options.SolverOptions.Implementation, Concurrency: l.Options.Concurrency}, s.Database, allRepos, pkg.NewInMemoryDatabase(false), l.Options.SolverOptions.Resolver())
solution, err = solv.Install(p)
/// TODO: PackageAssertions needs to be a map[fingerprint]pack so lookup is in O(1)
if err != nil && !l.Options.Force {
if err != nil && !o.Force {
return toInstall, p, solution, allRepos, errors.Wrap(err, "Failed solving solution for package")
}
// Gathers things to install
@@ -493,7 +680,7 @@ func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages
packagesToInstall = append(packagesToInstall, assertion.Package)
}
}
} else if !l.Options.OnlyDeps {
} else if !o.OnlyDeps {
for _, currentPack := range p {
if _, err := s.Database.FindPackage(currentPack); err == nil {
// skip matching if it is installed already
@@ -512,11 +699,11 @@ func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages
}
A:
for _, artefact := range matches[0].Repo.GetIndex() {
if artefact.GetCompileSpec().GetPackage() == nil {
if artefact.CompileSpec.GetPackage() == nil {
return toInstall, p, solution, allRepos, errors.New("Package in compilespec empty")
}
if matches[0].Package.Matches(artefact.GetCompileSpec().GetPackage()) {
currentPack.SetBuildTimestamp(artefact.GetCompileSpec().GetPackage().GetBuildTimestamp())
if matches[0].Package.Matches(artefact.CompileSpec.GetPackage()) {
currentPack.SetBuildTimestamp(artefact.CompileSpec.GetPackage().GetBuildTimestamp())
// Filter out already installed
if _, err := s.Database.FindPackage(currentPack); err != nil {
toInstall[currentPack.GetFingerPrint()] = ArtifactMatch{Package: currentPack, Artifact: artefact, Repository: matches[0].Repo}
@@ -528,7 +715,47 @@ func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages
return toInstall, p, solution, allRepos, nil
}

func (l *LuetInstaller) install(syncedRepos Repositories, toInstall map[string]ArtifactMatch, p pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase, s *System) error {
func (l *LuetInstaller) getFinalizers(allRepos pkg.PackageDatabase, solution solver.PackagesAssertions, toInstall map[string]ArtifactMatch, nodeps bool) ([]pkg.Package, error) {
var toFinalize []pkg.Package
if !nodeps {
// TODO: Lower those errors as warning
for _, w := range toInstall {
// Finalizers needs to run in order and in sequence.
ordered, err := solution.Order(allRepos, w.Package.GetFingerPrint())
if err != nil {
return toFinalize, errors.Wrap(err, "While order a solution for "+w.Package.HumanReadableString())
}
ORDER:
for _, ass := range ordered {
if ass.Value {
installed, ok := toInstall[ass.Package.GetFingerPrint()]
if !ok {
// It was a dep already installed in the system, so we can skip it safely
continue ORDER
}
treePackage, err := installed.Repository.GetTree().GetDatabase().FindPackage(ass.Package)
if err != nil {
return toFinalize, errors.Wrap(err, "Error getting package "+ass.Package.HumanReadableString())
}

toFinalize = append(toFinalize, treePackage)
}
}

}
} else {
for _, c := range toInstall {
treePackage, err := c.Repository.GetTree().GetDatabase().FindPackage(c.Package)
if err != nil {
return toFinalize, errors.Wrap(err, "Error getting package "+c.Package.HumanReadableString())
}
toFinalize = append(toFinalize, treePackage)
}
}
return toFinalize, nil
}

func (l *LuetInstaller) install(o Option, syncedRepos Repositories, toInstall map[string]ArtifactMatch, p pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase, s *System) error {
// Install packages into rootfs in parallel.
if err := l.download(syncedRepos, toInstall); err != nil {
return errors.Wrap(err, "Downloading packages")
@@ -557,52 +784,25 @@ func (l *LuetInstaller) install(syncedRepos Repositories, toInstall map[string]A
for _, c := range toInstall {
// Annotate to the system that the package was installed
_, err := s.Database.CreatePackage(c.Package)
if err != nil && !l.Options.Force {
if err != nil && !o.Force {
return errors.Wrap(err, "Failed creating package")
}
bus.Manager.Publish(bus.EventPackageInstall, c)
}
var toFinalize []pkg.Package
if !l.Options.NoDeps {
// TODO: Lower those errors as warning
for _, w := range p {
// Finalizers needs to run in order and in sequence.
ordered, err := solution.Order(allRepos, w.GetFingerPrint())
if err != nil {
return errors.Wrap(err, "While order a solution for "+w.HumanReadableString())
}
ORDER:
for _, ass := range ordered {
if ass.Value {
installed, ok := toInstall[ass.Package.GetFingerPrint()]
if !ok {
// It was a dep already installed in the system, so we can skip it safely
continue ORDER
}
treePackage, err := installed.Repository.GetTree().GetDatabase().FindPackage(ass.Package)
if err != nil {
return errors.Wrap(err, "Error getting package "+ass.Package.HumanReadableString())
}

toFinalize = append(toFinalize, treePackage)
}
}
if !o.RunFinalizers {
return nil
}

}
} else {
for _, c := range toInstall {
treePackage, err := c.Repository.GetTree().GetDatabase().FindPackage(c.Package)
if err != nil {
return errors.Wrap(err, "Error getting package "+c.Package.HumanReadableString())
}
toFinalize = append(toFinalize, treePackage)
}
toFinalize, err := l.getFinalizers(allRepos, solution, toInstall, o.NoDeps)
if err != nil {
return errors.Wrap(err, "failed getting package to finalize")
}

return s.ExecuteFinalizers(toFinalize)
}
func (l *LuetInstaller) downloadPackage(a ArtifactMatch) (compiler.Artifact, error) {
func (l *LuetInstaller) downloadPackage(a ArtifactMatch) (*artifact.PackageArtifact, error) {

artifact, err := a.Repository.Client().DownloadArtifact(a.Artifact)
if err != nil {
@@ -616,26 +816,26 @@ func (l *LuetInstaller) downloadPackage(a ArtifactMatch) (compiler.Artifact, err
return artifact, nil
}

func (l *LuetInstaller) installPackage(a ArtifactMatch, s *System) error {
func (l *LuetInstaller) installPackage(m ArtifactMatch, s *System) error {

artifact, err := l.downloadPackage(a)
a, err := l.downloadPackage(m)
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Failed downloading package")
}

files, err := artifact.FileList()
files, err := a.FileList()
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Could not open package archive")
}

err = artifact.Unpack(s.Target, true)
err = a.Unpack(s.Target, true)
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Error met while unpacking rootfs")
}

// First create client and download
// Then unpack to system
return s.Database.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: a.Package.GetFingerPrint(), Files: files})
return s.Database.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: m.Package.GetFingerPrint(), Files: files})
}

func (l *LuetInstaller) downloadWorker(i int, wg *sync.WaitGroup, c <-chan ArtifactMatch) error {
@@ -676,6 +876,51 @@ func (l *LuetInstaller) installerWorker(i int, wg *sync.WaitGroup, c <-chan Arti
return nil
}

func checkAndPrunePath(path string) {
// check if now the target path is empty
targetPath := filepath.Dir(path)

fi, err := os.Lstat(targetPath)
if err != nil {
// Warning("Dir not found (it was before?) ", err.Error())
return
}

switch mode := fi.Mode(); {
case mode.IsDir():
files, err := ioutil.ReadDir(targetPath)
if err != nil {
Warning("Failed reading folder", targetPath, err.Error())
}
if len(files) != 0 {
Debug("Preserving not-empty folder", targetPath)
return
}
}
if err = os.Remove(targetPath); err != nil {
Warning("Failed removing file (maybe not present in the system target anymore ?)", targetPath, err.Error())
}
}

// We will try to cleanup every path from the file, if the folders left behind are empty
func pruneEmptyFilePath(path string) {
checkAndPrunePath(path)

// A path is for e.g. /usr/bin/bar
// we want to create an array as "/usr", "/usr/bin", "/usr/bin/bar"
paths := strings.Split(path, string(os.PathSeparator))
currentPath := filepath.Join(string(os.PathSeparator), paths[0])
allPaths := []string{currentPath}
for _, p := range paths[1:] {
currentPath = filepath.Join(currentPath, p)
allPaths = append(allPaths, currentPath)
}
match.ReverseAny(allPaths)
for _, p := range allPaths {
checkAndPrunePath(p)
}
}
func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
var cp *config.ConfigProtect
annotationDir := ""
@@ -698,7 +943,7 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
cp.Map(files)
}

toRemove, notPresent := helpers.OrderFiles(s.Target, files)
toRemove, notPresent := fileHelper.OrderFiles(s.Target, files)

// Remove from target
for _, f := range toRemove {
@@ -737,6 +982,8 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
if err = os.Remove(target); err != nil {
Warning("Failed removing file (maybe not present in the system target anymore ?)", target, err.Error())
}

pruneEmptyFilePath(target)
}

for _, f := range notPresent {
@@ -750,6 +997,8 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
if err = os.Remove(target); err != nil {
Debug("Failed removing file (not present in the system target)", target, err.Error())
}

pruneEmptyFilePath(target)
}

err = s.Database.RemovePackageFiles(p)
@@ -767,17 +1016,17 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
return nil
}

func (l *LuetInstaller) computeUninstall(s *System, packs ...pkg.Package) (pkg.Packages, error) {
func (l *LuetInstaller) computeUninstall(o Option, s *System, packs ...pkg.Package) (pkg.Packages, error) {

var toUninstall pkg.Packages
// compute uninstall from all world - remove packages in parallel - run uninstall finalizer (in order) TODO - mark the uninstallation in db
// Get installed definition
checkConflicts := l.Options.CheckConflicts
full := l.Options.FullUninstall
if l.Options.Force == true { // IF forced, we want to remove the package and all its requires
checkConflicts = false
full = false
}
checkConflicts := o.CheckConflicts
full := o.FullUninstall
// if o.Force == true { // IF forced, we want to remove the package and all its requires
// checkConflicts = false
// full = false
// }

// Create a temporary DB with the installed packages
// so the solver is much faster finding the deptree
@@ -787,11 +1036,11 @@ func (l *LuetInstaller) computeUninstall(s *System, packs ...pkg.Package) (pkg.P
return toUninstall, errors.Wrap(err, "Failed create temporary in-memory db")
}

if !l.Options.NoDeps {
if !o.NoDeps {
solv := solver.NewResolver(solver.Options{Type: l.Options.SolverOptions.Implementation, Concurrency: l.Options.Concurrency}, installedtmp, installedtmp, pkg.NewInMemoryDatabase(false), l.Options.SolverOptions.Resolver())
var solution pkg.Packages
var err error
if l.Options.FullCleanUninstall {
if o.FullCleanUninstall {
solution, err = solv.UninstallUniverse(packs)
if err != nil {
return toUninstall, errors.Wrap(err, "Could not solve the uninstall constraints. Tip: try with --solver-type qlearning or with --force, or by removing packages excluding their dependencies with --nodeps")
@@ -812,32 +1061,47 @@ func (l *LuetInstaller) computeUninstall(s *System, packs ...pkg.Package) (pkg.P

return toUninstall, nil
}
func (l *LuetInstaller) Uninstall(s *System, packs ...pkg.Package) error {

func (l *LuetInstaller) generateUninstallFn(o Option, s *System, packs ...pkg.Package) (pkg.Packages, func() error, error) {
for _, p := range packs {
if packs, _ := s.Database.FindPackages(p); len(packs) == 0 {
return errors.New("Package not found in the system")
return nil, nil, errors.New("Package not found in the system")
}

}

Spinner(32)
toUninstall, err := l.computeUninstall(s, packs...)
toUninstall, err := l.computeUninstall(o, s, packs...)
if err != nil {
return errors.Wrap(err, "while computing uninstall")
return nil, nil, errors.Wrap(err, "while computing uninstall")
}
SpinnerStop()

uninstall := func() error {
for _, p := range toUninstall {
err := l.uninstall(p, s)
if err != nil && !l.Options.Force {
if err != nil && !o.Force {
return errors.Wrap(err, "Uninstall failed")
}
}
return nil
}

return toUninstall, uninstall, nil
}

func (l *LuetInstaller) Uninstall(s *System, packs ...pkg.Package) error {

Spinner(32)
o := Option{
FullUninstall: l.Options.FullUninstall,
Force: l.Options.Force,
CheckConflicts: l.Options.CheckConflicts,
FullCleanUninstall: l.Options.FullCleanUninstall,
}
toUninstall, uninstall, err := l.generateUninstallFn(o, s, packs...)
if err != nil {
return errors.Wrap(err, "while computing uninstall")
}
SpinnerStop()

if len(toUninstall) == 0 {
Info("Nothing to do")
return nil
@@ -855,4 +1119,4 @@ func (l *LuetInstaller) Uninstall(s *System, packs ...pkg.Package) error {
return uninstall()
}

func (l *LuetInstaller) Repositories(r []Repository) { l.PackageRepositories = r }
func (l *LuetInstaller) Repositories(r []*LuetSystemRepository) { l.PackageRepositories = r }
@@ -23,9 +23,11 @@ import (
// . "github.com/mudler/luet/pkg/installer"
compiler "github.com/mudler/luet/pkg/compiler"
backend "github.com/mudler/luet/pkg/compiler/backend"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/mudler/luet/pkg/helpers"
"github.com/mudler/luet/pkg/installer"
solver "github.com/mudler/luet/pkg/solver"
fileHelper "github.com/mudler/luet/pkg/helpers/file"

. "github.com/mudler/luet/pkg/installer"
pkg "github.com/mudler/luet/pkg/package"
@@ -34,7 +36,7 @@ import (
. "github.com/onsi/gomega"
)

func stubRepo(tmpdir, tree string) (installer.Repository, error) {
func stubRepo(tmpdir, tree string) (*LuetSystemRepository, error) {
return GenerateRepository(
"test",
"description",
@@ -43,10 +45,11 @@ func stubRepo(tmpdir, tree string) (installer.Repository, error) {
1,
tmpdir,
[]string{tree},
pkg.NewInMemoryDatabase(false), nil, "", false, false)
pkg.NewInMemoryDatabase(false), nil, "", false, false, false, nil)
}

var _ = Describe("Installer", func() {

Context("Writes a repository definition", func() {
It("Writes a repo and can install packages from it", func() {
//repo:=NewLuetSystemRepository()
@@ -62,7 +65,9 @@ var _ = Describe("Installer", func() {

Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))

c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(),
generalRecipe.GetDatabase(),
options.Concurrency(2))

spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -77,38 +82,37 @@ var _ = Describe("Installer", func() {
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))

spec.SetOutputPath(tmpdir)
c.SetConcurrency(2)

artifact, err := c.Compile(false, spec)
a, err := c.Compile(false, spec)
Expect(err).ToNot(HaveOccurred())
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(a.Path)).To(BeTrue())
Expect(helpers.Untar(a.Path, tmpdir, false)).ToNot(HaveOccurred())

Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())

content1, err := helpers.Read(spec.Rel("test5"))
content1, err := fileHelper.Read(spec.Rel("test5"))
Expect(err).ToNot(HaveOccurred())
content2, err := helpers.Read(spec.Rel("test6"))
content2, err := fileHelper.Read(spec.Rel("test6"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("artifact5\n"))
Expect(content2).To(Equal("artifact6\n"))

Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())

repo, err := stubRepo(tmpdir, "../../tests/fixtures/buildable")
Expect(err).ToNot(HaveOccurred())
Expect(repo.GetName()).To(Equal("test"))
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
err = repo.Write(tmpdir, false, false)
Expect(err).ToNot(HaveOccurred())

Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
Expect(repo.GetType()).To(Equal("disk"))
@@ -133,8 +137,8 @@ urls:
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
Expect(err).ToNot(HaveOccurred())

Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())

@@ -151,8 +155,8 @@ urls:
Expect(err).ToNot(HaveOccurred())

// Nothing should be there anymore (files, packagedb entry)
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())

_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).To(HaveOccurred())
@@ -178,7 +182,7 @@ urls:
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))

c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(),
generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
generalRecipe.GetDatabase(), options.Concurrency(2))

spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -193,43 +197,42 @@ urls:
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))

spec.SetOutputPath(tmpdir)
c.SetConcurrency(2)

artifact, err := c.Compile(false, spec)
Expect(err).ToNot(HaveOccurred())
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())

Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())

content1, err := helpers.Read(spec.Rel("test5"))
content1, err := fileHelper.Read(spec.Rel("test5"))
Expect(err).ToNot(HaveOccurred())
content2, err := helpers.Read(spec.Rel("test6"))
content2, err := fileHelper.Read(spec.Rel("test6"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("artifact5\n"))
Expect(content2).To(Equal("artifact6\n"))

Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())

repo, err := stubRepo(tmpdir, "../../tests/fixtures/buildable")
Expect(err).ToNot(HaveOccurred())

treeFile := NewDefaultTreeRepositoryFile()
treeFile.SetCompressionType(compiler.None)
treeFile.SetCompressionType(compression.None)
repo.SetRepositoryFile(REPOFILE_TREE_KEY, treeFile)
Expect(err).ToNot(HaveOccurred())
Expect(repo.GetName()).To(Equal("test"))
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
err = repo.Write(tmpdir, false, false)
Expect(err).ToNot(HaveOccurred())

Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel(TREE_TARBALL))).To(BeTrue())
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
Expect(repo.GetType()).To(Equal("disk"))

@@ -254,8 +257,8 @@ urls:
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
Expect(err).ToNot(HaveOccurred())

Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())

@@ -272,8 +275,8 @@ urls:
Expect(err).ToNot(HaveOccurred())

// Nothing should be there anymore (files, packagedb entry)
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())

_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).To(HaveOccurred())
@@ -298,7 +301,8 @@ urls:

Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))

c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(),
options.Concurrency(2))

spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -313,25 +317,24 @@ urls:
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))

spec.SetOutputPath(tmpdir)
c.SetConcurrency(2)

artifact, err := c.Compile(false, spec)
Expect(err).ToNot(HaveOccurred())
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())

Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())

content1, err := helpers.Read(spec.Rel("test5"))
content1, err := fileHelper.Read(spec.Rel("test5"))
Expect(err).ToNot(HaveOccurred())
content2, err := helpers.Read(spec.Rel("test6"))
content2, err := fileHelper.Read(spec.Rel("test6"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("artifact5\n"))
Expect(content2).To(Equal("artifact6\n"))

Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())

repo, err := GenerateRepository(
"test",
@@ -340,18 +343,18 @@ urls:
[]string{tmpdir}, 1,
tmpdir,
[]string{"../../tests/fixtures/buildable"},
pkg.NewInMemoryDatabase(false), nil, "", false, false)
pkg.NewInMemoryDatabase(false), nil, "", false, false, false, nil)
Expect(err).ToNot(HaveOccurred())
Expect(repo.GetName()).To(Equal("test"))
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
err = repo.Write(tmpdir, false, false)
Expect(err).ToNot(HaveOccurred())

Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
Expect(repo.GetType()).To(Equal("disk"))

@@ -381,8 +384,8 @@ urls:
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
Expect(err).ToNot(HaveOccurred())

Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())

@@ -399,8 +402,8 @@ urls:
Expect(err).ToNot(HaveOccurred())

// Nothing should be there anymore (files, packagedb entry)
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())

_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).To(HaveOccurred())
@@ -423,7 +426,8 @@ urls:

Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))

c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(),
options.Concurrency(2))

spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -438,25 +442,24 @@ urls:
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))

spec.SetOutputPath(tmpdir)
c.SetConcurrency(2)

artifact, err := c.Compile(false, spec)
Expect(err).ToNot(HaveOccurred())
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())

Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())

content1, err := helpers.Read(spec.Rel("test5"))
content1, err := fileHelper.Read(spec.Rel("test5"))
Expect(err).ToNot(HaveOccurred())
content2, err := helpers.Read(spec.Rel("test6"))
content2, err := fileHelper.Read(spec.Rel("test6"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("artifact5\n"))
Expect(content2).To(Equal("artifact6\n"))

Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())

repo, err := GenerateRepository(
"test",
@@ -466,18 +469,18 @@ urls:
1,
tmpdir,
[]string{"../../tests/fixtures/buildable"},
pkg.NewInMemoryDatabase(false), nil, "", false, false)
pkg.NewInMemoryDatabase(false), nil, "", false, false, false, nil)
Expect(err).ToNot(HaveOccurred())
Expect(repo.GetName()).To(Equal("test"))
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
err = repo.Write(tmpdir, false, false)
Expect(err).ToNot(HaveOccurred())

Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
Expect(repo.GetType()).To(Equal("disk"))
@@ -518,7 +521,7 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe2.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
c = compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c = compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err = c.FromPackage(&pkg.DefaultPackage{Name: "alpine", Category: "seed", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -526,11 +529,10 @@ urls:
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
spec.SetOutputPath(tmpdir2)
|
||||
c.SetConcurrency(2)
|
||||
|
||||
artifact, err = c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
|
||||
repo, err = stubRepo(tmpdir2, "../../tests/fixtures/alpine")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -577,7 +579,7 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -595,24 +597,23 @@ urls:
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
|
||||
_, errs := c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -642,8 +643,8 @@ urls:
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -660,11 +661,11 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
// New version - new files
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
@@ -695,8 +696,8 @@ urls:
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
Expect(len(generalRecipeNewRepo.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c2 := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipeNewRepo.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
c2 := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipeNewRepo.GetDatabase())
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
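The constructor change in the hunk above repeats throughout this file: instead of compiler.NewDefaultCompilerOptions() plus a solver.Options struct, NewLuetCompiler now takes variadic functional options, and the old SetConcurrency/SetCompressionType calls move into the constructor. A minimal sketch of the new shape; the options and compression import paths are assumed by analogy with the compiler/types/artifact and compiler/types/spec packages visible elsewhere in this diff, and the concrete return type is likewise assumed:

package installer_test

import (
	"github.com/mudler/luet/pkg/compiler"
	backend "github.com/mudler/luet/pkg/compiler/backend"
	compression "github.com/mudler/luet/pkg/compiler/types/compression" // import path assumed
	options "github.com/mudler/luet/pkg/compiler/types/options"         // import path assumed
	pkg "github.com/mudler/luet/pkg/package"
)

// newGzipCompiler is a hypothetical helper condensing the pattern used by the
// updated tests: concurrency and compression are passed as options up front
// instead of calling SetConcurrency/SetCompressionType afterwards.
func newGzipCompiler(db pkg.PackageDatabase) *compiler.LuetCompiler {
	return compiler.NewLuetCompiler(
		backend.NewSimpleDockerBackend(),
		db,
		options.Concurrency(2),
		options.WithCompressionType(compression.GZip),
	)
}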
@@ -718,21 +719,20 @@ urls:
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdirnewrepo)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
|
||||
_, errs := c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec, spec3))
|
||||
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
_, errs = c2.CompileParallel(false, compiler.NewLuetCompilationspecs(spec2))
|
||||
_, errs = c2.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec2))
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_old_repo")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -775,8 +775,8 @@ urls:
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -794,11 +794,11 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
// New version - new files
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
@@ -827,7 +827,12 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(
|
||||
backend.NewSimpleDockerBackend(),
|
||||
generalRecipe.GetDatabase(),
|
||||
options.Concurrency(2),
|
||||
options.WithCompressionType(compression.GZip),
|
||||
)
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -844,26 +849,25 @@ urls:
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
c.SetCompressionType(compiler.GZip)
|
||||
_, errs := c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
|
||||
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.1.package.tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.1.package.tar"))).ToNot(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -893,8 +897,8 @@ urls:
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -911,11 +915,11 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
// New version - new files
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
@@ -985,7 +989,9 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(),
|
||||
options.Concurrency(2),
|
||||
options.WithCompressionType(compression.GZip))
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -1002,26 +1008,24 @@ urls:
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(2)
|
||||
c.SetCompressionType(compiler.GZip)
|
||||
_, errs := c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.1.package.tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.1.package.tar"))).ToNot(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -1056,13 +1060,13 @@ urls:
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
Expect(len(system.Database.World())).To(Equal(0))
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeFalse())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeFalse())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "c"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "c"))).To(BeFalse())
|
||||
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "test5"))).ToNot(HaveOccurred())
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "test6"))).ToNot(HaveOccurred())
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "c"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "test5"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "test6"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "c"))).ToNot(HaveOccurred())
|
||||
|
||||
err = inst.Reclaim(system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -1090,7 +1094,8 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(),
|
||||
options.WithCompressionType(compression.GZip))
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -1104,24 +1109,22 @@ urls:
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
c.SetConcurrency(1)
|
||||
c.SetCompressionType(compiler.GZip)
|
||||
_, errs := c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec, spec3))
|
||||
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_old_repo")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -1156,13 +1159,13 @@ urls:
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
Expect(len(system.Database.World())).To(Equal(0))
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeFalse())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeFalse())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "c"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "c"))).To(BeFalse())
|
||||
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "test5"))).ToNot(HaveOccurred())
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "test6"))).ToNot(HaveOccurred())
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "c"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "test5"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "test6"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "c"))).ToNot(HaveOccurred())
|
||||
|
||||
err = inst.Reclaim(system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -1182,7 +1185,7 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe2.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
c = compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
c = compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase())
|
||||
|
||||
spec, err = c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -1192,7 +1195,7 @@ urls:
|
||||
defer os.RemoveAll(tmpdir2) // clean up
|
||||
spec.SetOutputPath(tmpdir2)
|
||||
|
||||
_, errs = c.CompileParallel(false, compiler.NewLuetCompilationspecs(spec))
|
||||
_, errs = c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
@@ -1216,11 +1219,11 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
// New version - new files
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
|
@@ -16,64 +16,13 @@
|
||||
package installer
|
||||
|
||||
import (
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
//"github.com/mudler/luet/pkg/solver"
|
||||
)
|
||||
|
||||
type Installer interface {
|
||||
Install(pkg.Packages, *System) error
|
||||
Uninstall(*System, ...pkg.Package) error
|
||||
Upgrade(s *System) error
|
||||
Reclaim(s *System) error
|
||||
|
||||
Repositories([]Repository)
|
||||
SyncRepositories(bool) (Repositories, error)
|
||||
Swap(pkg.Packages, pkg.Packages, *System) error
|
||||
}
|
||||
|
||||
type Client interface {
|
||||
DownloadArtifact(compiler.Artifact) (compiler.Artifact, error)
|
||||
DownloadArtifact(*artifact.PackageArtifact) (*artifact.PackageArtifact, error)
|
||||
DownloadFile(string) (string, error)
|
||||
}
|
||||
|
||||
type Repositories []Repository
|
||||
|
||||
type Repository interface {
|
||||
GetName() string
|
||||
GetDescription() string
|
||||
GetUrls() []string
|
||||
SetUrls([]string)
|
||||
AddUrl(string)
|
||||
GetPriority() int
|
||||
GetIndex() compiler.ArtifactIndex
|
||||
SetIndex(i compiler.ArtifactIndex)
|
||||
GetTree() tree.Builder
|
||||
SetTree(tree.Builder)
|
||||
Write(path string, resetRevision, force bool) error
|
||||
Sync(bool) (Repository, error)
|
||||
GetTreePath() string
|
||||
SetTreePath(string)
|
||||
GetMetaPath() string
|
||||
SetMetaPath(string)
|
||||
GetType() string
|
||||
SetType(string)
|
||||
SetAuthentication(map[string]string)
|
||||
GetAuthentication() map[string]string
|
||||
GetRevision() int
|
||||
IncrementRevision()
|
||||
GetLastUpdate() string
|
||||
SetLastUpdate(string)
|
||||
Client() Client
|
||||
|
||||
SetPriority(int)
|
||||
GetRepositoryFile(string) (LuetRepositoryFile, error)
|
||||
SetRepositoryFile(string, LuetRepositoryFile)
|
||||
SetName(p string)
|
||||
Serialize() (*LuetSystemRepositoryMetadata, LuetSystemRepositorySerialized)
|
||||
GetBackend() compiler.CompilerBackend
|
||||
SetBackend(b compiler.CompilerBackend)
|
||||
FileSearch(pattern string) (pkg.Packages, error)
|
||||
SearchArtefact(p pkg.Package) (compiler.Artifact, error)
|
||||
}
|
||||
type Repositories []*LuetSystemRepository
|
||||
|
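With the interface trimmed down above, Client now hands back the concrete *artifact.PackageArtifact instead of the old compiler.Artifact interface. A minimal sketch of a call site after the change; fetchAndUnpack is hypothetical and not part of the diff, and Unpack is used in the same (destination, bool) form the updated tests call:

package installer

import (
	artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
	"github.com/pkg/errors"
)

// fetchAndUnpack (hypothetical) downloads an artifact through a Client and
// unpacks it into dst; no interface assertion is needed anymore since the
// concrete *artifact.PackageArtifact comes back directly.
func fetchAndUnpack(c Client, a *artifact.PackageArtifact, dst string) error {
	downloaded, err := c.DownloadArtifact(a)
	if err != nil {
		return errors.Wrap(err, "downloading artifact")
	}
	return downloaded.Unpack(dst, false)
}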
File diff suppressed because it is too large.

pkg/installer/repository_docker.go (new file, 274 lines)
@@ -0,0 +1,274 @@
|
||||
// Copyright © 2019-2021 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/helpers/docker"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type dockerRepositoryGenerator struct {
|
||||
b compiler.CompilerBackend
|
||||
imagePrefix string
|
||||
imagePush, force bool
|
||||
}
|
||||
|
||||
func (l *dockerRepositoryGenerator) Initialize(path string, db pkg.PackageDatabase) ([]*artifact.PackageArtifact, error) {
|
||||
|
||||
Info("Generating docker images for packages in", l.imagePrefix)
|
||||
var art []*artifact.PackageArtifact
|
||||
var ff = func(currentpath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
Debug("Skipping", info.Name(), err.Error())
|
||||
return nil
|
||||
}
|
||||
if info.IsDir() {
|
||||
Debug("Skipping directories")
|
||||
return nil
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(info.Name(), ".metadata.yaml") {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := l.pushImageFromArtifact(artifact.NewPackageArtifact(currentpath), l.b, true); err != nil {
|
||||
return errors.Wrap(err, "while pushing metadata file associated to the artifact")
|
||||
}
|
||||
|
||||
dat, err := ioutil.ReadFile(currentpath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading file "+currentpath)
|
||||
}
|
||||
|
||||
a, err := artifact.NewPackageArtifactFromYaml(dat)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading yaml "+currentpath)
|
||||
}
|
||||
// Set the path relative to the file.
|
||||
// The metadata contains the full path where the file was located during buildtime.
|
||||
a.Path = filepath.Join(filepath.Dir(currentpath), filepath.Base(a.Path))
|
||||
|
||||
// We want to include packages that are ONLY referenced in the tree.
|
||||
// the ones which aren't should be deleted. (TODO: by another cli command?)
|
||||
if _, notfound := db.FindPackage(a.CompileSpec.Package); notfound != nil {
|
||||
Debug(fmt.Sprintf("Package %s not found in tree. Ignoring it.",
|
||||
a.CompileSpec.Package.HumanReadableString()))
|
||||
return nil
|
||||
}
|
||||
|
||||
packageImage := fmt.Sprintf("%s:%s", l.imagePrefix, a.CompileSpec.GetPackage().ImageID())
|
||||
|
||||
if l.imagePush && l.b.ImageAvailable(packageImage) && !l.force {
|
||||
Info("Image", packageImage, "already present, skipping. use --force-push to override")
|
||||
} else {
|
||||
Info("Generating final image", packageImage,
|
||||
"for package ", a.CompileSpec.GetPackage().HumanReadableString())
|
||||
if opts, err := a.GenerateFinalImage(packageImage, l.b, true); err != nil {
|
||||
return errors.Wrap(err, "Failed generating metadata tree"+opts.ImageName)
|
||||
}
|
||||
}
|
||||
if l.imagePush {
|
||||
if err := pushImage(l.b, packageImage, l.force); err != nil {
|
||||
return errors.Wrapf(err, "Failed while pushing image: '%s'", packageImage)
|
||||
}
|
||||
}
|
||||
|
||||
art = append(art, a)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := filepath.Walk(path, ff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
}
|
||||
return art, nil
|
||||
}
|
||||
|
||||
func pushImage(b compiler.CompilerBackend, image string, force bool) error {
|
||||
if b.ImageAvailable(image) && !force {
|
||||
Debug("Image", image, "already present, skipping")
|
||||
return nil
|
||||
}
|
||||
return b.Push(backend.Options{ImageName: image})
|
||||
}
|
||||
|
||||
func (d *dockerRepositoryGenerator) pushFileFromArtifact(a *artifact.PackageArtifact, imageTree string) error {
|
||||
Debug("Generating image", imageTree)
|
||||
if opts, err := a.GenerateFinalImage(imageTree, d.b, false); err != nil {
|
||||
return errors.Wrap(err, "Failed generating metadata tree "+opts.ImageName)
|
||||
}
|
||||
if d.imagePush {
|
||||
if err := pushImage(d.b, imageTree, true); err != nil {
|
||||
return errors.Wrapf(err, "Failed while pushing image: '%s'", imageTree)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dockerRepositoryGenerator) pushRepoMetadata(repospec string, r *LuetSystemRepository) error {
|
||||
// create temp dir for metafile
|
||||
metaDir, err := config.LuetCfg.GetSystem().TempDir("metadata")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error met while creating tempdir for metadata")
|
||||
}
|
||||
defer os.RemoveAll(metaDir) // clean up
|
||||
|
||||
tempRepoFile := filepath.Join(metaDir, REPOSITORY_SPECFILE+".tar")
|
||||
if err := helpers.Tar(repospec, tempRepoFile); err != nil {
|
||||
return errors.Wrap(err, "Error met while archiving repository file")
|
||||
}
|
||||
|
||||
a := artifact.NewPackageArtifact(tempRepoFile)
|
||||
imageRepo := fmt.Sprintf("%s:%s", d.imagePrefix, REPOSITORY_SPECFILE)
|
||||
|
||||
if err := d.pushFileFromArtifact(a, imageRepo); err != nil {
|
||||
return errors.Wrap(err, "while pushing file from artifact")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dockerRepositoryGenerator) pushImageFromArtifact(a *artifact.PackageArtifact, b compiler.CompilerBackend, checkIfExists bool) error {
|
||||
// we generate a new archive containing the required compressed file.
|
||||
// TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
|
||||
treeArchive, err := artifact.CreateArtifactForFile(a.Path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed generating checksums for tree")
|
||||
}
|
||||
imageTree := fmt.Sprintf("%s:%s", d.imagePrefix, docker.StripInvalidStringsFromImage(a.GetFileName()))
|
||||
if checkIfExists && d.imagePush && d.b.ImageAvailable(imageTree) && !d.force {
|
||||
Info("Image", imageTree, "already present, skipping. use --force-push to override")
|
||||
return nil
|
||||
} else {
|
||||
return d.pushFileFromArtifact(treeArchive, imageTree)
|
||||
}
|
||||
}
|
||||
|
||||
// Generate creates a Docker luet repository
|
||||
func (d *dockerRepositoryGenerator) Generate(r *LuetSystemRepository, imagePrefix string, resetRevision bool) error {
|
||||
// - Iterate over meta, build final images, push them if necessary
|
||||
// - while pushing, check if image already exists, and if exist push them only if --force is supplied
|
||||
// - Generate final images for metadata and push
|
||||
|
||||
imageRepository := fmt.Sprintf("%s:%s", imagePrefix, REPOSITORY_SPECFILE)
|
||||
|
||||
r.LastUpdate = strconv.FormatInt(time.Now().Unix(), 10)
|
||||
|
||||
repoTemp, err := config.LuetCfg.GetSystem().TempDir("repo")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error met while creating tempdir for repository")
|
||||
}
|
||||
defer os.RemoveAll(repoTemp) // clean up
|
||||
|
||||
if r.GetBackend().ImageAvailable(imageRepository) {
|
||||
if err := r.GetBackend().DownloadImage(backend.Options{ImageName: imageRepository}); err != nil {
|
||||
return errors.Wrapf(err, "while downloading '%s'", imageRepository)
|
||||
}
|
||||
|
||||
if err := r.GetBackend().ExtractRootfs(backend.Options{ImageName: imageRepository, Destination: repoTemp}, false); err != nil {
|
||||
return errors.Wrapf(err, "while extracting '%s'", imageRepository)
|
||||
}
|
||||
}
|
||||
|
||||
repospec := filepath.Join(repoTemp, REPOSITORY_SPECFILE)
|
||||
|
||||
// Increment the internal revision version by reading the one which is already available (if any)
|
||||
if err := r.BumpRevision(repospec, resetRevision); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Info(fmt.Sprintf(
|
||||
"For repository %s creating revision %d and last update %s...",
|
||||
r.Name, r.Revision, r.LastUpdate,
|
||||
))
|
||||
|
||||
bus.Manager.Publish(bus.EventRepositoryPreBuild, struct {
|
||||
Repo LuetSystemRepository
|
||||
Path string
|
||||
}{
|
||||
Repo: *r,
|
||||
Path: imageRepository,
|
||||
})
|
||||
|
||||
// Create tree and repository file
|
||||
a, err := r.AddTree(r.GetTree(), repoTemp, REPOFILE_TREE_KEY, NewDefaultTreeRepositoryFile())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error met while adding runtime tree to repository")
|
||||
}
|
||||
|
||||
// we generate a new archive containing the required compressed file.
|
||||
// TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
|
||||
if err := d.pushImageFromArtifact(a, d.b, false); err != nil {
|
||||
return errors.Wrap(err, "error met while pushing runtime tree")
|
||||
}
|
||||
|
||||
a, err = r.AddTree(r.BuildTree, repoTemp, REPOFILE_COMPILER_TREE_KEY, NewDefaultCompilerTreeRepositoryFile())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error met while adding compiler tree to repository")
|
||||
}
|
||||
// we generate a new archive containing the required compressed file.
|
||||
// TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
|
||||
if err := d.pushImageFromArtifact(a, d.b, false); err != nil {
|
||||
return errors.Wrap(err, "error met while pushing compiler tree")
|
||||
}
|
||||
|
||||
// create temp dir for metafile
|
||||
metaDir, err := config.LuetCfg.GetSystem().TempDir("metadata")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error met while creating tempdir for metadata")
|
||||
}
|
||||
defer os.RemoveAll(metaDir) // clean up
|
||||
|
||||
a, err = r.AddMetadata(repospec, metaDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed adding Metadata file to repository")
|
||||
}
|
||||
|
||||
if err := d.pushImageFromArtifact(a, d.b, false); err != nil {
|
||||
return errors.Wrap(err, "error met while pushing docker image from artifact")
|
||||
}
|
||||
|
||||
if err := d.pushRepoMetadata(repospec, r); err != nil {
|
||||
return errors.Wrap(err, "while pushing repository metadata tree")
|
||||
}
|
||||
|
||||
bus.Manager.Publish(bus.EventRepositoryPostBuild, struct {
|
||||
Repo LuetSystemRepository
|
||||
Path string
|
||||
}{
|
||||
Repo: *r,
|
||||
Path: imagePrefix,
|
||||
})
|
||||
return nil
|
||||
}
|
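In short, the generator above publishes everything under a single image prefix: package artifacts get a tag derived from Package.ImageID(), tree and metadata archives reuse their sanitized file names, and the repository descriptor itself is pushed under the REPOSITORY_SPECFILE tag. A rough sketch of the resulting references, assuming the name-category-version fingerprint format implied by the b-test-1.0.* file names asserted in the tests:

package main

import (
	"fmt"

	pkg "github.com/mudler/luet/pkg/package"
)

func main() {
	prefix := "quay.io/example/myrepo" // hypothetical image prefix handed to the generator
	p := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}

	// Package content image, mirroring packageImage above.
	fmt.Printf("%s:%s\n", prefix, p.ImageID()) // expected: quay.io/example/myrepo:b-test-1.0

	// Repository descriptor image, mirroring imageRepository above
	// (REPOSITORY_SPECFILE appears to be "repository.yaml", judging from the tests).
	fmt.Printf("%s:%s\n", prefix, "repository.yaml")
}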
pkg/installer/repository_local.go (new file, 132 lines)
@@ -0,0 +1,132 @@
|
||||
// Copyright © 2019-2021 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type localRepositoryGenerator struct{}
|
||||
|
||||
func (l *localRepositoryGenerator) Initialize(path string, db pkg.PackageDatabase) ([]*artifact.PackageArtifact, error) {
|
||||
return buildPackageIndex(path, db)
|
||||
}
|
||||
|
||||
func buildPackageIndex(path string, db pkg.PackageDatabase) ([]*artifact.PackageArtifact, error) {
|
||||
|
||||
var art []*artifact.PackageArtifact
|
||||
var ff = func(currentpath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
Debug("Failed walking", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(info.Name(), ".metadata.yaml") {
|
||||
return nil // Skip with no errors
|
||||
}
|
||||
|
||||
dat, err := ioutil.ReadFile(currentpath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading file "+currentpath)
|
||||
}
|
||||
|
||||
a, err := artifact.NewPackageArtifactFromYaml(dat)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading yaml "+currentpath)
|
||||
}
|
||||
|
||||
// We want to include packages that are ONLY referenced in the tree.
|
||||
// the ones which aren't should be deleted. (TODO: by another cli command?)
|
||||
if _, notfound := db.FindPackage(a.CompileSpec.GetPackage()); notfound != nil {
|
||||
Debug(fmt.Sprintf("Package %s not found in tree. Ignoring it.",
|
||||
a.CompileSpec.GetPackage().HumanReadableString()))
|
||||
return nil
|
||||
}
|
||||
|
||||
art = append(art, a)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := filepath.Walk(path, ff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
}
|
||||
return art, nil
|
||||
}
|
||||
|
||||
// Generate creates a Local luet repository
|
||||
func (*localRepositoryGenerator) Generate(r *LuetSystemRepository, dst string, resetRevision bool) error {
|
||||
err := os.MkdirAll(dst, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.LastUpdate = strconv.FormatInt(time.Now().Unix(), 10)
|
||||
|
||||
repospec := filepath.Join(dst, REPOSITORY_SPECFILE)
|
||||
// Increment the internal revision version by reading the one which is already available (if any)
|
||||
if err := r.BumpRevision(repospec, resetRevision); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
Info(fmt.Sprintf(
|
||||
"Repository %s: creating revision %d and last update %s...",
|
||||
r.Name, r.Revision, r.LastUpdate,
|
||||
))
|
||||
|
||||
bus.Manager.Publish(bus.EventRepositoryPreBuild, struct {
|
||||
Repo LuetSystemRepository
|
||||
Path string
|
||||
}{
|
||||
Repo: *r,
|
||||
Path: dst,
|
||||
})
|
||||
|
||||
if _, err := r.AddTree(r.GetTree(), dst, REPOFILE_TREE_KEY, NewDefaultTreeRepositoryFile()); err != nil {
|
||||
return errors.Wrap(err, "error met while adding runtime tree to repository")
|
||||
}
|
||||
|
||||
if _, err := r.AddTree(r.BuildTree, dst, REPOFILE_COMPILER_TREE_KEY, NewDefaultCompilerTreeRepositoryFile()); err != nil {
|
||||
return errors.Wrap(err, "error met while adding compiler tree to repository")
|
||||
}
|
||||
|
||||
if _, err := r.AddMetadata(repospec, dst); err != nil {
|
||||
return errors.Wrap(err, "failed adding Metadata file to repository")
|
||||
}
|
||||
|
||||
bus.Manager.Publish(bus.EventRepositoryPostBuild, struct {
|
||||
Repo LuetSystemRepository
|
||||
Path string
|
||||
}{
|
||||
Repo: *r,
|
||||
Path: dst,
|
||||
})
|
||||
return nil
|
||||
}
|
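For the local (disk) flavour, Generate above leaves a small, fixed set of files in dst; the tests later in this diff assert exactly these names right after repo.Write. A hedged helper sketch: verifyLocalRepo is hypothetical, only REPOSITORY_SPECFILE's value ("repository.yaml") is visible in this diff, and the other two constants are referenced by name only.

package installer

import (
	"fmt"
	"path/filepath"

	fileHelper "github.com/mudler/luet/pkg/helpers/file"
)

// verifyLocalRepo (hypothetical) mirrors the test assertions: these three files
// are expected in dst once the local generator has run.
func verifyLocalRepo(dst string) error {
	for _, f := range []string{
		REPOSITORY_SPECFILE,          // the repository.yaml descriptor
		TREE_TARBALL + ".gz",         // compressed tree archive
		REPOSITORY_METAFILE + ".tar", // repository metadata archive
	} {
		if !fileHelper.Exists(filepath.Join(dst, f)) {
			return fmt.Errorf("repository generation incomplete, missing %s", f)
		}
	}
	return nil
}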
@@ -1,4 +1,4 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
@@ -26,18 +26,20 @@ import (
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
backend "github.com/mudler/luet/pkg/compiler/backend"
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
config "github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/installer"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/installer"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func dockerStubRepo(tmpdir, tree, image string, push, force bool) (installer.Repository, error) {
|
||||
func dockerStubRepo(tmpdir, tree, image string, push, force bool) (*LuetSystemRepository, error) {
|
||||
return GenerateRepository(
|
||||
"test",
|
||||
"description",
|
||||
@@ -46,7 +48,7 @@ func dockerStubRepo(tmpdir, tree, image string, push, force bool) (installer.Rep
|
||||
1,
|
||||
tmpdir,
|
||||
[]string{tree},
|
||||
pkg.NewInMemoryDatabase(false), backend.NewSimpleDockerBackend(), image, push, force)
|
||||
pkg.NewInMemoryDatabase(false), backend.NewSimpleDockerBackend(), image, push, force, false, nil)
|
||||
}
|
||||
|
||||
var _ = Describe("Repository", func() {
|
||||
@@ -64,7 +66,7 @@ var _ = Describe("Repository", func() {
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -79,38 +81,37 @@ var _ = Describe("Repository", func() {
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Generate repository metadata of files ONLY referenced in a tree", func() {
|
||||
@@ -132,11 +133,11 @@ var _ = Describe("Repository", func() {
|
||||
Expect(len(generalRecipe2.GetDatabase().GetPackages())).To(Equal(1))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler2 := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler2 := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase())
|
||||
spec2, err := compiler2.FromPackage(&pkg.DefaultPackage{Name: "alpine", Category: "seed", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -153,46 +154,44 @@ var _ = Describe("Repository", func() {
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
compiler2.SetConcurrency(1)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
artifact2, err := compiler2.Compile(false, spec2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact2.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact2.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact2.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact2.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec2.Rel("alpine-seed-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec2.Rel("alpine-seed-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("alpine-seed-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("alpine-seed-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
|
||||
// We check now that the artifact not referenced in the tree
|
||||
// (spec2) is not indexed in the repository
|
||||
@@ -254,7 +253,7 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
localcompiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
localcompiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := localcompiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -266,22 +265,21 @@ urls:
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
localcompiler.SetConcurrency(1)
|
||||
|
||||
artifact, err := localcompiler.Compile(false, spec)
|
||||
a, err := localcompiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(a.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(a.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
repo, err := dockerStubRepo(tmpdir, "../../tests/fixtures/buildable", repoImage, true, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(repoImage, false, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -298,11 +296,11 @@ urls:
|
||||
|
||||
f, err := c.DownloadFile("repository.yaml")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(f)).To(ContainSubstring("name: test"))
|
||||
Expect(fileHelper.Read(f)).To(ContainSubstring("name: test"))
|
||||
|
||||
a, err := c.DownloadArtifact(&compiler.PackageArtifact{
|
||||
a, err = c.DownloadArtifact(&artifact.PackageArtifact{
|
||||
Path: "test.tar",
|
||||
CompileSpec: &compiler.LuetCompilationSpec{
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "b",
|
||||
Category: "test",
|
||||
@@ -313,7 +311,7 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(a.Unpack(extracted, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(filepath.Join(extracted, "test6"))).To(Equal("artifact6\n"))
|
||||
Expect(fileHelper.Read(filepath.Join(extracted, "test6"))).To(Equal("artifact6\n"))
|
||||
})
|
||||
|
||||
It("generates images of virtual packages", func() {
|
||||
@@ -329,7 +327,7 @@ urls:
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(5))
|
||||
|
||||
localcompiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), compiler.NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
localcompiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := localcompiler.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.99"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -341,19 +339,18 @@ urls:
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
localcompiler.SetConcurrency(1)
|
||||
|
||||
artifact, err := localcompiler.Compile(false, spec)
|
||||
a, err := localcompiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(a.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(a.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
repo, err := dockerStubRepo(tmpdir, "../../tests/fixtures/virtuals", repoImage, true, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(repoImage, false, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -370,11 +367,11 @@ urls:
|
||||
|
||||
f, err := c.DownloadFile("repository.yaml")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(f)).To(ContainSubstring("name: test"))
|
||||
Expect(fileHelper.Read(f)).To(ContainSubstring("name: test"))
|
||||
|
||||
a, err := c.DownloadArtifact(&compiler.PackageArtifact{
|
||||
a, err = c.DownloadArtifact(&artifact.PackageArtifact{
|
||||
Path: "test.tar",
|
||||
CompileSpec: &compiler.LuetCompilationSpec{
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "a",
|
||||
Category: "test",
|
||||
@@ -386,7 +383,7 @@ urls:
|
||||
|
||||
Expect(a.Unpack(extracted, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.DirectoryIsEmpty(extracted)).To(BeFalse())
|
||||
Expect(fileHelper.DirectoryIsEmpty(extracted)).To(BeFalse())
|
||||
content, err := ioutil.ReadFile(filepath.Join(extracted, ".virtual"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -397,14 +394,14 @@ urls:
|
||||
repos := Repositories{
|
||||
&LuetSystemRepository{
|
||||
Index: compiler.ArtifactIndex{
|
||||
&compiler.PackageArtifact{
|
||||
CompileSpec: &compiler.LuetCompilationSpec{
|
||||
&artifact.PackageArtifact{
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{},
|
||||
},
|
||||
Path: "bar",
|
||||
Files: []string{"boo"},
|
||||
},
|
||||
&compiler.PackageArtifact{
|
||||
&artifact.PackageArtifact{
|
||||
Path: "d",
|
||||
Files: []string{"baz"},
|
||||
},
|
||||
@@ -414,15 +411,15 @@ urls:
|
||||
|
||||
matches := repos.SearchPackages("bo", FileSearch)
|
||||
Expect(len(matches)).To(Equal(1))
|
||||
Expect(matches[0].Artifact.GetPath()).To(Equal("bar"))
|
||||
Expect(matches[0].Artifact.Path).To(Equal("bar"))
|
||||
})
|
||||
|
||||
It("Searches packages", func() {
|
||||
repo := &LuetSystemRepository{
|
||||
Index: compiler.ArtifactIndex{
|
||||
&compiler.PackageArtifact{
|
||||
&artifact.PackageArtifact{
|
||||
Path: "foo",
|
||||
CompileSpec: &compiler.LuetCompilationSpec{
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
Category: "bar",
|
||||
@@ -430,9 +427,9 @@ urls:
|
||||
},
|
||||
},
|
||||
},
|
||||
&compiler.PackageArtifact{
|
||||
&artifact.PackageArtifact{
|
||||
Path: "baz",
|
||||
CompileSpec: &compiler.LuetCompilationSpec{
|
||||
CompileSpec: &compilerspec.LuetCompilationSpec{
|
||||
Package: &pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
Category: "baz",
|
||||
@@ -449,7 +446,7 @@ urls:
|
||||
Version: "1.0",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(a.GetPath()).To(Equal("baz"))
|
||||
Expect(a.Path).To(Equal("baz"))
|
||||
|
||||
a, err = repo.SearchArtefact(&pkg.DefaultPackage{
|
||||
Name: "foo",
|
||||
@@ -457,7 +454,7 @@ urls:
|
||||
Version: "1.0",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(a.GetPath()).To(Equal("foo"))
|
||||
Expect(a.Path).To(Equal("foo"))
|
||||
|
||||
// Doesn't exist. so must fail
|
||||
_, err = repo.SearchArtefact(&pkg.DefaultPackage{
|
||||
|
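The index literals in the last few hunks show the new shape a repository indexes: plain *artifact.PackageArtifact values with exported Path and Files fields and a *compilerspec.LuetCompilationSpec attached, instead of the old compiler.PackageArtifact with its GetPath() accessor. A condensed sketch of the same literal, with stubIndexedRepo being a hypothetical helper, not test code:

package installer_test

import (
	"github.com/mudler/luet/pkg/compiler"
	artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
	compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
	. "github.com/mudler/luet/pkg/installer"
	pkg "github.com/mudler/luet/pkg/package"
)

// stubIndexedRepo builds a repository whose index holds one artifact, using the
// same literal shape as the tests above.
func stubIndexedRepo() *LuetSystemRepository {
	return &LuetSystemRepository{
		Index: compiler.ArtifactIndex{
			&artifact.PackageArtifact{
				Path:  "bar",
				Files: []string{"boo"},
				CompileSpec: &compilerspec.LuetCompilationSpec{
					Package: &pkg.DefaultPackage{Name: "foo", Category: "bar", Version: "1.0"},
				},
			},
		},
	}
}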
@@ -3,6 +3,7 @@ package installer
|
||||
import (
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
@@ -23,8 +24,8 @@ func (s *System) ExecuteFinalizers(packs []pkg.Package) error {
|
||||
var errs error
|
||||
executedFinalizer := map[string]bool{}
|
||||
for _, p := range packs {
|
||||
if helpers.Exists(p.Rel(tree.FinalizerFile)) {
|
||||
out, err := helpers.RenderFiles(p.Rel(tree.FinalizerFile), p.Rel(tree.DefinitionFile), "")
|
||||
if fileHelper.Exists(p.Rel(tree.FinalizerFile)) {
|
||||
out, err := helpers.RenderFiles(p.Rel(tree.FinalizerFile), p.Rel(tree.DefinitionFile))
|
||||
if err != nil {
|
||||
Warning("Failed rendering finalizer for ", p.HumanReadableString(), err.Error())
|
||||
errs = multierror.Append(errs, err)
|
||||
|
@@ -26,7 +26,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/helpers/docker"
|
||||
"github.com/mudler/luet/pkg/helpers/match"
|
||||
version "github.com/mudler/luet/pkg/versioner"
|
||||
|
||||
gentoo "github.com/Sabayon/pkgs-checker/pkg/gentoo"
|
||||
@@ -113,6 +114,14 @@ type Package interface {
|
||||
GetBuildTimestamp() string
|
||||
|
||||
Clone() Package
|
||||
|
||||
GetMetadataFilePath() string
|
||||
SetTreeDir(s string)
|
||||
GetTreeDir() string
|
||||
|
||||
Mark() Package
|
||||
|
||||
JSON() ([]byte, error)
|
||||
}
|
||||
|
||||
type Tree interface {
|
||||
@@ -125,6 +134,19 @@ type Tree interface {
|
||||
|
||||
type Packages []Package
|
||||
|
||||
type DefaultPackages []*DefaultPackage
|
||||
|
||||
func (d DefaultPackages) Hash(salt string) string {
|
||||
|
||||
overallFp := ""
|
||||
for _, c := range d {
|
||||
overallFp = overallFp + c.HashFingerprint("join")
|
||||
}
|
||||
h := md5.New()
|
||||
io.WriteString(h, fmt.Sprintf("%s-%s", overallFp, salt))
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
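DefaultPackages.Hash above folds every package's HashFingerprint plus a caller-supplied salt into one md5 digest, so the same list hashed with two different salts yields two different values. A small usage sketch (package values are illustrative only):

package main

import (
	"fmt"

	pkg "github.com/mudler/luet/pkg/package"
)

func main() {
	set := pkg.DefaultPackages{
		{Name: "b", Category: "test", Version: "1.0"},
		{Name: "a", Category: "test", Version: "1.99"},
	}
	// The salt is mixed into the md5 input, so these two digests differ.
	fmt.Println(set.Hash("build"))
	fmt.Println(set.Hash("runtime"))
}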
|
||||
|
||||
// >> Unmarshallers
|
||||
// DefaultPackageFromYaml decodes a package from yaml bytes
|
||||
func DefaultPackageFromYaml(yml []byte) (DefaultPackage, error) {
|
||||
@@ -210,6 +232,11 @@ func (t *DefaultPackage) JSON() ([]byte, error) {
|
||||
return buffer.Bytes(), err
|
||||
}
|
||||
|
||||
// GetMetadataFilePath returns the canonical name of an artifact metadata file
|
||||
func (d *DefaultPackage) GetMetadataFilePath() string {
|
||||
return d.GetFingerPrint() + ".metadata.yaml"
|
||||
}
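GetMetadataFilePath is what ties a package to the *.metadata.yaml files the new repository generators walk for: the fingerprint plus a fixed suffix. A tiny example, with the fingerprint format inferred from the b-test-1.0.* names the tests assert on:

package main

import (
	"fmt"

	pkg "github.com/mudler/luet/pkg/package"
)

func main() {
	p := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
	// Expected to print "b-test-1.0.metadata.yaml", the suffix both
	// repository generators filter on while walking the build output.
	fmt.Println(p.GetMetadataFilePath())
}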
|
||||
|
||||
// DefaultPackage represents a standard package definition
type DefaultPackage struct {
ID int `storm:"id,increment" json:"id"` // primary key with auto increment
@@ -235,6 +262,8 @@ type DefaultPackage struct {
BuildTimestamp string `json:"buildtimestamp,omitempty"`

Labels map[string]string `json:"labels,omitempty"` // Affects YAML field names too.

TreeDir string `json:"treedir,omitempty"`
}

// State represents the package state
@@ -251,6 +280,12 @@ func NewPackage(name, version string, requires []*DefaultPackage, conflicts []*D
}
}

func (p *DefaultPackage) SetTreeDir(s string) {
p.TreeDir = s
}
func (p *DefaultPackage) GetTreeDir() string {
return p.TreeDir
}
func (p *DefaultPackage) String() string {
b, err := p.JSON()
if err != nil {
@@ -290,7 +325,7 @@ func (p *DefaultPackage) GetPackageName() string {
}

func (p *DefaultPackage) ImageID() string {
return strings.ReplaceAll(p.GetFingerPrint(), "+", "-")
return docker.StripInvalidStringsFromImage(p.GetFingerPrint())
}

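The ImageID change above stops hand-replacing "+" and instead strips anything that is not valid in a container image reference. A small sketch of the effect, with hypothetical field values (Name, Category and Version are assumed from the struct above):

package main

import (
	"fmt"

	pkg "github.com/mudler/luet/pkg/package"
)

func main() {
	// "+" is fine in a package version but not in a Docker tag, so ImageID()
	// sanitizes the fingerprint before it is used to name an image.
	p := &pkg.DefaultPackage{Name: "foo", Category: "test", Version: "1.0+2"}
	fmt.Println(p.ImageID())
}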
// GetBuildTimestamp returns the package build timestamp
@@ -325,19 +360,19 @@ func (p *DefaultPackage) IsHidden() bool {
}

func (p *DefaultPackage) HasLabel(label string) bool {
return helpers.MapHasKey(&p.Labels, label)
return match.MapHasKey(&p.Labels, label)
}

func (p *DefaultPackage) MatchLabel(r *regexp.Regexp) bool {
return helpers.MapMatchRegex(&p.Labels, r)
return match.MapMatchRegex(&p.Labels, r)
}

func (p *DefaultPackage) HasAnnotation(label string) bool {
return helpers.MapHasKey(&p.Annotations, label)
return match.MapHasKey(&p.Annotations, label)
}

func (p *DefaultPackage) MatchAnnotation(r *regexp.Regexp) bool {
return helpers.MapMatchRegex(&p.Annotations, r)
return match.MapMatchRegex(&p.Annotations, r)
}

// AddUse adds a use to a package
@@ -473,6 +508,12 @@ func (p *DefaultPackage) Matches(m Package) bool {
return false
}

func (p *DefaultPackage) Mark() Package {
marked := p.Clone()
marked.SetName("@@" + marked.GetName())
return marked
}

func (p *DefaultPackage) Expand(definitiondb PackageDatabase) (Packages, error) {
var versionsInWorld Packages

@@ -626,6 +667,16 @@ func (set Packages) Best(v version.Versioner) Package {
return versionsMap[sorted[len(sorted)-1]]
}

func (set Packages) Find(packageName string) (Package, error) {
for _, p := range set {
if p.GetPackageName() == packageName {
return p, nil
}
}

return &DefaultPackage{}, errors.New("package not found")
}

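A brief usage sketch for the new Packages.Find helper; the "category/name" lookup key is an assumption about what GetPackageName() returns, and the packages themselves are made up:

package main

import (
	"fmt"

	pkg "github.com/mudler/luet/pkg/package"
)

func main() {
	set := pkg.Packages{
		&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.0"},
		&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"},
	}

	// Find compares against GetPackageName() and returns an error when
	// nothing in the set matches.
	if p, err := set.Find("test/a"); err == nil {
		fmt.Println(p.GetVersion())
	}
}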
func (set Packages) Unique() Packages {
var result Packages
uniq := make(map[string]Package)

@@ -260,24 +260,42 @@ func (a PackagesAssertions) TrueLen() int {
// and checks it's not the only one. if it's unique it marks it specially - so the hash
// which is generated is unique for the selected package
func (assertions PackagesAssertions) HashFrom(p pkg.Package) string {
return assertions.SaltedHashFrom(p, map[string]string{})
}

func (assertions PackagesAssertions) AssertionHash() string {
return assertions.SaltedAssertionHash(map[string]string{})
}

func (assertions PackagesAssertions) SaltedHashFrom(p pkg.Package, salts map[string]string) string {
var assertionhash string

// When we don't have any solution to hash for, we need to generate an UUID by ourselves
latestsolution := assertions.Drop(p)
if latestsolution.TrueLen() == 0 {
assertionhash = assertions.Mark(p).AssertionHash()
// Preserve the supplied hash of marked packages
marked := p.Mark()
if markedHash, exists := salts[p.GetFingerPrint()]; exists {
salts[marked.GetFingerPrint()] = markedHash
}
assertionhash = assertions.Mark(p).SaltedAssertionHash(salts)
} else {
assertionhash = latestsolution.AssertionHash()
assertionhash = latestsolution.SaltedAssertionHash(salts)
}
return assertionhash
}

func (assertions PackagesAssertions) AssertionHash() string {
func (assertions PackagesAssertions) SaltedAssertionHash(salts map[string]string) string {
var fingerprint string
for _, assertion := range assertions { // Note: Always order them first!
if assertion.Value { // Take into account only dependencies installed (get fingerprint of subgraph)
fingerprint += assertion.ToString() + "\n"
salt, exists := salts[assertion.Package.GetFingerPrint()]
if exists {
fingerprint += assertion.ToString() + salt + "\n"

} else {
fingerprint += assertion.ToString() + "\n"
}
}
}
hash := sha256.Sum256([]byte(fingerprint))
@@ -316,8 +334,7 @@ func (assertions PackagesAssertions) Mark(p pkg.Package) PackagesAssertions {

for _, a := range assertions {
if a.Package.Matches(p) {
marked := a.Package.Clone()
marked.SetName("@@" + marked.GetName())
marked := a.Package.Mark()
a = PackageAssert{Package: marked.(*pkg.DefaultPackage), Value: a.Value, Hash: a.Hash}
}
ass = append(ass, a)
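To see what the salting changes, here is a minimal, self-contained sketch mirroring the new tests below; the pkg/solver import path and the PackageAssert fields are taken from the hunks above, while the concrete packages are made up:

package main

import (
	"fmt"

	pkg "github.com/mudler/luet/pkg/package"
	"github.com/mudler/luet/pkg/solver"
)

func main() {
	a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.0"}
	b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}

	assertions := solver.PackagesAssertions{
		{Package: a, Value: true},
		{Package: b, Value: true},
	}

	// Salting a package that stays in the solution (here b) changes the hash
	// computed from a, because the salt is appended to b's fingerprint line.
	unsalted := assertions.HashFrom(a)
	salted := assertions.SaltedHashFrom(a, map[string]string{b.GetFingerPrint(): "runtime-checksum"})
	fmt.Println(unsalted != salted) // expected: true
}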
@@ -382,6 +382,9 @@ var _ = Describe("Decoder", func() {

Expect(solution.HashFrom(X)).ToNot(Equal(solution2.HashFrom(F)))
Expect(solution3.HashFrom(D)).To(Equal(solution.HashFrom(X)))
Expect(solution3.SaltedHashFrom(D, map[string]string{D.GetFingerPrint(): "foo"})).ToNot(Equal(solution3.HashFrom(D)))

Expect(solution4.SaltedHashFrom(Y, map[string]string{X.GetFingerPrint(): "foo"})).ToNot(Equal(solution4.HashFrom(Y)))

Expect(empty.AssertionHash()).ToNot(Equal(solution3.HashFrom(D)))
Expect(empty.AssertionHash()).ToNot(Equal(solution2.HashFrom(F)))
@@ -26,6 +26,7 @@ import (
"path/filepath"

"github.com/mudler/luet/pkg/helpers"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
"github.com/pkg/errors"
)
@@ -57,6 +58,17 @@ type CompilerRecipe struct {
Recipe
}

// Save copies the source trees 1:1, as they contain the specs
// and the build context required for reproducible builds
func (r *CompilerRecipe) Save(path string) error {
for _, p := range r.SourcePath {
if err := fileHelper.CopyDir(p, filepath.Join(path, filepath.Base(p))); err != nil {
return errors.Wrap(err, "while copying source tree")
}
}
return nil
}

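With Save in place a compiler tree can be exported verbatim and loaded again elsewhere. A rough usage sketch; NewCompilerRecipe and NewInMemoryDatabase are assumed to be the existing constructors in this codebase, and the paths are hypothetical:

package main

import (
	"log"

	pkg "github.com/mudler/luet/pkg/package"
	"github.com/mudler/luet/pkg/tree"
)

func main() {
	r := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))

	// Load records every source path it reads, so Save can later copy
	// those trees 1:1 under the destination directory.
	if err := r.Load("./tree"); err != nil {
		log.Fatal(err)
	}
	if err := r.Save("/tmp/exported-tree"); err != nil {
		log.Fatal(err)
	}
}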
func (r *CompilerRecipe) Load(path string) error {

r.SourcePath = append(r.SourcePath, path)
@@ -87,12 +99,13 @@ func (r *CompilerRecipe) Load(path string) error {
}
// Path is set only internally when tree is loaded from disk
pack.SetPath(filepath.Dir(currentpath))
pack.SetTreeDir(path)

// Instead of rdeps, have a different tree for build deps.
compileDefPath := pack.Rel(CompilerDefinitionFile)
if helpers.Exists(compileDefPath) {
if fileHelper.Exists(compileDefPath) {

dat, err := helpers.RenderFiles(compileDefPath, currentpath, "")
dat, err := helpers.RenderFiles(compileDefPath, currentpath)
if err != nil {
return errors.Wrap(err,
"Error templating file "+CompilerDefinitionFile+" from "+
@@ -115,22 +128,29 @@ func (r *CompilerRecipe) Load(path string) error {
}

case CollectionFile:

dat, err := ioutil.ReadFile(currentpath)
if err != nil {
return errors.Wrap(err, "Error reading file "+currentpath)
}

packs, err := pkg.DefaultPackagesFromYaml(dat)
if err != nil {
return errors.Wrap(err, "Error reading yaml "+currentpath)
}

packsRaw, err := pkg.GetRawPackages(dat)
if err != nil {
return errors.Wrap(err, "Error reading raw packages from "+currentpath)
}

for _, pack := range packs {
pack.SetPath(filepath.Dir(currentpath))
pack.SetTreeDir(path)

// Instead of rdeps, have a different tree for build deps.
compileDefPath := pack.Rel(CompilerDefinitionFile)
if helpers.Exists(compileDefPath) {
if fileHelper.Exists(compileDefPath) {

raw := packsRaw.Find(pack.GetName(), pack.GetCategory(), pack.GetVersion())
buildyaml, err := ioutil.ReadFile(compileDefPath)
@@ -159,9 +179,7 @@ func (r *CompilerRecipe) Load(path string) error {
return errors.Wrap(err, "Error creating package "+pack.GetName())
}
}

}

return nil
}

@@ -26,8 +26,9 @@ import (
"os"
"path/filepath"

"github.com/mudler/luet/pkg/helpers"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"

"github.com/pkg/errors"
)

@@ -61,8 +62,8 @@ func (r *InstallerRecipe) Save(path string) error {
}
// Instead of rdeps, have a different tree for build deps.
finalizerPath := p.Rel(FinalizerFile)
if helpers.Exists(finalizerPath) { // copy finalizer file from the source tree
helpers.CopyFile(finalizerPath, filepath.Join(dir, FinalizerFile))
if fileHelper.Exists(finalizerPath) { // copy finalizer file from the source tree
fileHelper.CopyFile(finalizerPath, filepath.Join(dir, FinalizerFile))
}

}
@@ -71,7 +72,7 @@ func (r *InstallerRecipe) Save(path string) error {

func (r *InstallerRecipe) Load(path string) error {

if !helpers.Exists(path) {
if !fileHelper.Exists(path) {
return errors.New(fmt.Sprintf(
"Path %s doesn't exist.", path,
))
@@ -26,9 +26,10 @@ import (
"os"
"path/filepath"

helpers "github.com/mudler/luet/pkg/helpers"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
spectooling "github.com/mudler/luet/pkg/spectooling"

"github.com/pkg/errors"
)

@@ -77,7 +78,7 @@ func (r *Recipe) Load(path string) error {
// if err != nil {
// return err
// }
if !helpers.Exists(path) {
if !fileHelper.Exists(path) {
return errors.New(fmt.Sprintf(
"Path %s doesn't exist.", path,
))
tests/fixtures/copy/c/a/build.yaml (vendored, Normal file, 17 lines)
@@ -0,0 +1,17 @@
image: "alpine"

copy:
- package:
name: "a"
category: "test"
version: ">=0"
source: /test3
destination: /test3
- image: "busybox"
source: /bin/busybox
destination: /busybox

steps:
- mkdir /bina
- cp /test3 /result
- cp -rf /busybox /bina/busybox

tests/fixtures/copy/c/a/definition.yaml (vendored, Normal file, 3 lines)
@@ -0,0 +1,3 @@
category: "test"
name: "c"
version: "1.2"

tests/fixtures/copy/cat/a/a/build.yaml (vendored, Normal file, 7 lines)
@@ -0,0 +1,7 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4

tests/fixtures/copy/cat/a/a/definition.yaml (vendored, Normal file, 3 lines)
@@ -0,0 +1,3 @@
category: "test"
name: "a"
version: "1.2"

tests/fixtures/copy/cat/b-1.1/build.yaml (vendored, Normal file, 13 lines)
@@ -0,0 +1,13 @@
requires:
- category: "test"
name: "a"
version: ">=0"

prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /newc
- echo artifact6 > /newnewc
- chmod +x generate.sh
- ./generate.sh

tests/fixtures/copy/cat/b-1.1/definition.yaml (vendored, Normal file, 3 lines)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.1"

tests/fixtures/copy/cat/b-1.1/generate.sh (vendored, Normal file, 1 line)
@@ -0,0 +1 @@
echo generated > /sonewc
tests/fixtures/docker_repo/c/build.yaml (vendored, Normal file, 11 lines)
@@ -0,0 +1,11 @@
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo c > /c
- echo c > /cd
requires:
- category: "test"
name: "b"
version: "1.0"

tests/fixtures/docker_repo/c/definition.yaml (vendored, Normal file, 3 lines)
@@ -0,0 +1,3 @@
category: "test"
name: "c"
version: "1.0"

tests/fixtures/docker_repo/cat/b/build.yaml (vendored, Normal file, 9 lines)
@@ -0,0 +1,9 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- chmod +x generate.sh
- ./generate.sh

tests/fixtures/docker_repo/cat/b/definition.yaml (vendored, Normal file, 3 lines)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"

tests/fixtures/docker_repo/cat/b/generate.sh (vendored, Normal file, 1 line)
@@ -0,0 +1 @@
echo generated > /artifact42

tests/fixtures/docker_repo/cat/cat2/a/build.yaml (vendored, Normal file, 10 lines)
@@ -0,0 +1,10 @@
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4
requires:
- category: "test"
name: "b"
version: "1.0"

tests/fixtures/docker_repo/cat/cat2/a/definition.yaml (vendored, Normal file, 8 lines)
@@ -0,0 +1,8 @@
category: "test"
name: "a"
version: "1.0"
requires:
- category: "test"
name: "b"
version: "1.0"

tests/fixtures/docker_repo/d/build.yaml (vendored, Normal file, 11 lines)
@@ -0,0 +1,11 @@
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo s > /d
- echo dd > /dd
requires:
- category: "test"
name: "c"
version: "1.0"

tests/fixtures/docker_repo/d/definition.yaml (vendored, Normal file, 3 lines)
@@ -0,0 +1,3 @@
category: "test"
name: "d"
version: "1.0"
Some files were not shown because too many files have changed in this diff.