Mirror of https://github.com/mudler/luet.git (synced 2025-09-02 15:54:39 +00:00)
Compare commits
107 Commits
Short commit SHAs:

e3063985b2 a348fd4835 fc45eae80a b73ac21004 bdd51fa221 4039050449 14914f3c8e e4fff77d43 972421ae81 0bd373be2b
aba89db204 178690842f 58f4997a0f 5bb65e5b30 4e918e6bd1 0f545952cd 3402641241 b81d33f182 0cc8930708 db784597d7
35eb63a31c 16bb93e165 220f8700ce 540e8151ad 4adc0dc9b9 0a4fe57f33 ccf83b0d5f 57cefc7d6b 97ff647f07 b7ac1e03d5
ff092db97d 2789f59f53 10ae872a3e 65a55e242e 77c4bf1fd1 4d6cccb2fa ec7be63418 33b1c63815 86bd6c5fc0 658612fcf3
7128c88da6 74402fae81 9d1594c036 75906c4198 cb032dc714 2c7e495fa1 db8bf2b85e 5eb586ddb0 f9747cdf87 becac7d853
5aa5bffb48 9aa3159787 9cb6e65bb6 92b243d7aa 29ec19a8a1 440e07c418 acf74f5896 1ee1894ffa c8573f9535 76b70ebeb4
2efb17a06c 64ab3711ca eb5d7ba35b b6b91cfd7a 4d8a9a544b 654b5b48cd 92e18d5782 8780e4f16f a7b4ae67c9 68edfd58e7
0658020c60 c3b552103f 796967cc9d 5cccc34f32 5ef1d04055 1bd4d520a4 b12c7678d4 32a99a4a49 56e9c6f82e 92ea69a2b9
838899aa83 76695b2fc8 5c84e5b0a7 06fa8b1c87 ff153f367f 459676397c 93057fbf6d 5e1a7c50df 0ceaf09615 0dc78ebe41
27c2e3c51f e83f600ed3 6344e47eb3 8b1c5558b2 c277ac0f94 d8c8c2194f 4494385f5b 85a7968ecc 1ba987b0f1 c72b5be364
1ef18ed2c5 4b1b711a5c 7f047e4fc2 356350f724 9d2ee1b760 fd12227d53 1e617b0c67
.chglog/CHANGELOG.tpl.md (new file, 49 lines)
@@ -0,0 +1,49 @@
{{ if .Versions -}}
{{ if .Unreleased.CommitGroups -}}
{{ range .Unreleased.CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ else }}
{{ range .Unreleased.Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ end -}}

{{ range .Versions }}
<a name="{{ .Tag.Name }}"></a>

{{ if .CommitGroups -}}
{{ range .CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ else }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }} (https://github.com/mudler/luet/commit/{{.Hash.Short}})
{{ end }}
{{ end -}}

{{- if .NoteGroups -}}
{{ range .NoteGroups -}}
### {{ .Title }}
{{ range .Notes }}
{{ .Body }}
{{ end }}
{{ end -}}
{{ end -}}
{{ end -}}

{{- if .Versions }}
[Unreleased]: {{ .Info.RepositoryURL }}/compare/{{ $latest := index .Versions 0 }}{{ $latest.Tag.Name }}...HEAD
{{ range .Versions -}}
{{ if .Tag.Previous -}}
[{{ .Tag.Name }}]: {{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }}
{{ end -}}
{{ end -}}
{{ end -}}

.chglog/config.yml (new executable file, 27 lines)
@@ -0,0 +1,27 @@
style: github
template: CHANGELOG.tpl.md
info:
  title: CHANGELOG
  repository_url: https://github.com/mudler/luet
options:
  commits:
    # filters:
    #   Type:
    #     - feat
    #     - fix
    #     - perf
    #     - refactor
  commit_groups:
    title_maps:
      feat: Features
      fix: Bug Fixes
      perf: Performance Improvements
      refactor: Code Refactoring
      ci: Continous Integration
  header:
    pattern: "(.*)"
    pattern_maps:
      - Subject
  notes:
    keywords:
      - BREAKING CHANGE

.github/workflows/pr.yml (new file, 56 lines)
@@ -0,0 +1,56 @@
on: pull_request
name: Build and Test
jobs:
  tests-integration:
    strategy:
      matrix:
        go-version: [1.16.x]
        platform: [ubuntu-latest]
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Checkout code
        uses: actions/checkout@v2
      - name: setup-docker
        uses: docker-practice/actions-setup-docker@0.0.1
      - name: Install deps
        run: |
          sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
          sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
          sudo chmod a+x "/usr/bin/img"
      - name: Tests with Img backend
        run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
      - name: Tests
        run: sudo -E env "PATH=$PATH" make test-integration
  tests-unit:
    strategy:
      matrix:
        go-version: [1.16.x]
        platform: [ubuntu-latest]
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Checkout code
        uses: actions/checkout@v2
      - name: setup-docker
        uses: docker-practice/actions-setup-docker@0.0.1
      - name: Install deps
        run: |
          sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
          sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
          sudo chmod a+x "/usr/bin/img"
      - name: Install GoReleaser
        uses: goreleaser/goreleaser-action@v2
        with:
          install-only: true
      - name: Build
        run: sudo -E env "PATH=$PATH" make multiarch-build-small
      - name: Tests
        run: sudo -E env "PATH=$PATH" make test-coverage

.github/workflows/push.yml (new file, 74 lines)
@@ -0,0 +1,74 @@
on: push
concurrency:
  group: registries-tests

name: Build on push
jobs:
  tests-integration:
    name: Integration tests
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: setup-docker
        uses: docker-practice/actions-setup-docker@0.0.1
      - name: Login to quay
        run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
      - name: Install deps
        run: |
          sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
          sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
          sudo chmod a+x "/usr/bin/img"
      - name: Login to quay with img
        run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
      - name: Tests with Img backend
        run: |
          sudo -E env "PATH=$PATH" \
               env "LUET_BACKEND=img" \
               make test-integration
      - name: Tests
        run: |
          sudo -E \
               env "PATH=$PATH" \
               env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
               env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
               env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
               make test-integration

  tests-unit:
    name: Unit tests
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: setup-docker
        uses: docker-practice/actions-setup-docker@0.0.1
      - name: Login to quay
        run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
      - name: Install deps
        run: |
          sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
          sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
          sudo chmod a+x "/usr/bin/img"
      - name: Install GoReleaser
        uses: goreleaser/goreleaser-action@v2
        with:
          install-only: true
      - name: Build test
        run: sudo -E env "PATH=$PATH" make multiarch-build-small
      - name: Tests
        run: |
          sudo -E \
               env "PATH=$PATH" \
               env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
               env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
               env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
               make test-coverage

.github/workflows/release.yml (modified, 97 lines changed; old and new lines shown in page order)
@@ -1,14 +1,20 @@
on: push
name: Build and release on push
on:
  push:
    tags:
      - '*' # only test and release when a tag is pushed
concurrency:
  group: registries-tests

name: Test and Release on tag
jobs:
  release:
    name: Test and Release
  tests-integration:
    name: Integration tests
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.14.x
          go-version: 1.16.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: setup-docker
@@ -17,31 +23,70 @@ jobs:
        run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
      - name: Install deps
        run: |
          sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
          sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
          sudo chmod a+x "/usr/bin/img"
      - name: Build test
        run: sudo -E env "PATH=$PATH" make multiarch-build-small
          sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
          sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
          sudo chmod a+x "/usr/bin/img"
      - name: Login to quay with img
        run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
      - name: Tests with Img backend
        run: |
          sudo -E env "PATH=$PATH" \
               env "LUET_BACKEND=img" \
               make test-integration
          sudo -E env "PATH=$PATH" \
               env "LUET_BACKEND=img" \
               make test-integration
      - name: Tests
        run: |
          sudo -E \
               env "PATH=$PATH" \
               env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
               env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
               env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
               make test-integration test-coverage
      - name: Build
        run: sudo -E env "PATH=$PATH" make multiarch-build-small && sudo chmod -R 777 release/
      - name: Release
        uses: fnkr/github-action-ghr@v1
        if: startsWith(github.ref, 'refs/tags/')
          sudo -E \
               env "PATH=$PATH" \
               env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
               env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
               env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
               make test-integration

  tests-unit:
    name: Unit tests
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: setup-docker
        uses: docker-practice/actions-setup-docker@0.0.1
      - name: Login to quay
        run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
      - name: Install deps
        run: |
          sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
          sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
          sudo chmod a+x "/usr/bin/img"
      - name: Tests
        run: |
          sudo -E \
               env "PATH=$PATH" \
               env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
               env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
               env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
               make test-coverage

  release:
    name: Release
    runs-on: ubuntu-latest
    needs: [ "tests-integration","tests-unit" ]
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.16.x
      - name: Checkout code
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v2
        with:
          version: latest
          args: release --rm-dist
        env:
          GHR_PATH: release/
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

.github/workflows/test.yml (deleted, 30 lines)
@@ -1,30 +0,0 @@
on: pull_request
name: Build and Test
jobs:
  test:
    strategy:
      matrix:
        go-version: [1.14.x]
        platform: [ubuntu-latest]
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Checkout code
        uses: actions/checkout@v2
      - name: setup-docker
        uses: docker-practice/actions-setup-docker@0.0.1
      - name: Install deps
        run: |
          sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
          sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
          sudo chmod a+x "/usr/bin/img"
      - name: Build
        run: sudo -E env "PATH=$PATH" make multiarch-build-small
      - name: Tests with Img backend
        run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
      - name: Tests
        run: sudo -E env "PATH=$PATH" make test-integration test-coverage

.gitignore (modified, 3 lines changed)
@@ -1,4 +1,5 @@
*.swp
luet
tests/integration/shunit2
tests/integration/bin
tests/integration/bin
release/

.goreleaser.yml (new file, 43 lines)
@@ -0,0 +1,43 @@
before:
  hooks:
    - go mod tidy
dist: release
source:
  enabled: true
  name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
checksum:
  name_template: '{{ .ProjectName }}-{{ .Tag }}-checksums.txt'
builds:
  -
    env:
      - CGO_ENABLED=0
    ldflags:
      - -s -w
      - -X "github.com/mudler/luet/cmd.BuildTime={{ time "2006-01-02 15:04:05 MST" }}"
      - -X "github.com/mudler/luet/cmd.BuildCommit={{ .FullCommit }}"
    goos:
      - linux
    goarch:
      - amd64
      - arm
      - arm64
      - 386
    goarm:
      - 6
      - 7
archives:
  - format: binary # this removes the tar of the archives, leaving the binaries alone
    name_template: luet-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}
changelog:
  sort: asc
  filters:
    exclude:
      - '^docs:'
      - '^test:'
      - '^Merge pull request'
release:
  header: |
    Luet is a multi-platform Package Manager based off from containers - it uses Docker (and others) to build packages.

    It has zero dependencies and it is well suitable for "from scratch" environments.
    It can also version entire rootfs and enables delivery of OTA-alike updates, making it a perfect fit for the Edge computing era and IoT embedded device

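The two `-X` ldflags above overwrite package-level string variables in luet's `cmd` package at link time; the `cmd/root.go` diff later in this compare shows `BuildCommit` and `BuildTime` being folded into the CLI version string by a new `version()` helper. A minimal, self-contained sketch of that mechanism follows; the `main` package layout and variable placement here are illustrative, not luet's actual file layout.

// main.go - build with:
//   go build -ldflags '-X "main.BuildTime=2021-06-01 10:00:00 UTC" -X main.BuildCommit=abc1234'
package main

import "fmt"

// Populated at link time via -X; they stay empty in a plain `go build`.
var (
	BuildTime   string
	BuildCommit string
)

const cliVersion = "0.17.11"

// version mirrors the helper added in cmd/root.go: semantic version,
// then the commit, then the build timestamp.
func version() string {
	return fmt.Sprintf("%s-g%s %s", cliVersion, BuildCommit, BuildTime)
}

func main() {
	fmt.Println("luet", version())
}
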
CONTRIBUTING.md (new file, 53 lines)
@@ -0,0 +1,53 @@
We love your input! We want to make contributing to this project as easy and transparent as possible, whether it's:

- Reporting a bug
- Discussing the current state of the code
- Submitting a fix
- Proposing new features
- Becoming a maintainer

## We Develop with Github
We use github to host code, to track issues and feature requests, as well as accept pull requests.

## Stay in touch

Join us in [slack](https://luet.slack.com/join/shared_invite/enQtOTQxMjcyNDQ0MDUxLWQ5ODVlNTI1MTYzNDRkYzkyYmM1YWE5YjM0NTliNDEzNmQwMTkxNDRhNDIzM2Y5NDBlOTZjZTYxYWQyNDE4YzY#/) and hang out with the community! It will be much easier to get started and do your first steps in contributing to the project.

## All Code Changes Happen Through Pull Requests
Pull requests are the best way to propose changes to the codebase. We actively welcome your pull requests:

1. Fork the repo you want to contribute to and create your branch from `develop`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the [documentation](https://github.com/Luet-lab/docs).
4. Ensure the test suite passes.
5. Make sure your code lints.
6. Issue that pull request!

## Any contributions you make will be under the Software License of the repository
In short, when you submit code changes, your submissions are understood to be under the same License that covers the project. Feel free to contact the maintainers if that's a concern.

## Report bugs using Github's [issues](https://github.com/mudler/luet/issues)
We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/mudler/luet/issues/new); it's that easy!

## Write bug reports with detail, background, and sample code
Try to be as more descriptive as possible. When opening a new issue you will be prompted to choose between a bug or a feature request, with a small template to fill details with. Be specific!

**Great Bug Reports** tend to have:

- A quick summary and/or background
- Steps to reproduce
  - Be specific!
  - Give sample code if you can.
- What you expected would happen
- What actually happens
- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work)

People *love* thorough bug reports.

## License
By contributing, you agree that your contributions will be licensed under the project Licenses.

## References
This document was adapted from the open-source contribution guidelines from https://gist.github.com/briandk/3d2e8b3ec8daf5a27a62

Makefile (modified, 12 lines changed; old and new lines shown in page order)
@@ -6,10 +6,6 @@ override LDFLAGS += -X "github.com/mudler/luet/cmd.BuildCommit=$(shell git rev-p
NAME ?= luet
PACKAGE_NAME ?= $(NAME)
PACKAGE_CONFLICT ?= $(PACKAGE_NAME)-beta
REVISION := $(shell git rev-parse --short HEAD || echo dev)
VERSION := $(shell git describe --tags || echo $(REVISION))
VERSION := $(shell echo $(VERSION) | sed -e 's/^v//g')
BUILD_PLATFORMS ?= -osarch="linux/amd64" -osarch="linux/386" -osarch="linux/arm"
ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))

.PHONY: all
@@ -89,10 +85,10 @@ test-docker:
	bash -c "make test"

multiarch-build:
	CGO_ENABLED=0 gox $(BUILD_PLATFORMS) -ldflags '$(LDFLAGS)' -output="release/$(NAME)-$(VERSION)-{{.OS}}-{{.Arch}}"
	goreleaser build --snapshot --rm-dist

multiarch-build-small:
	@$(MAKE) LDFLAGS+="-s -w" multiarch-build
	for file in $(ROOT_DIR)/release/* ; do \
	@$(MAKE) multiarch-build
	for file in $(ROOT_DIR)/release/**/* ; do \
		upx --brute -1 $${file} ; \
	done
	done

README.md (modified, 13 lines changed; old and new lines shown in page order)
@@ -1,19 +1,24 @@
<p align="center">
  <img width=150 height=150 src="https://user-images.githubusercontent.com/2420543/119691600-0293d700-be4b-11eb-827f-49ff1174a07a.png">
</p>

# luet - Container-based Package manager

[](https://quay.io/repository/luet/base)
[](https://goreportcard.com/report/github.com/mudler/luet)
[](https://travis-ci.org/mudler/luet)
[](https://github.com/mudler/luet/actions/workflows/release.yml)
[](https://godoc.org/github.com/mudler/luet)
[](https://codecov.io/gh/mudler/luet)

[](https://asciinema.org/a/388348)

Luet is a multi-platform Package Manager based off from containers - it uses Docker (and others) to build packages. It has zero dependencies and it is well suitable for "from scratch" environments. It can also version entire rootfs and enables delivery of OTA-alike updates, making it a perfect fit for the Edge computing era and IoT embedded devices.

It offers a simple [specfile format](https://luet-lab.github.io/docs/docs/concepts/packages/specfile/) in YAML notation to define both [packages](https://luet-lab.github.io/docs/docs/concepts/packages/) and [rootfs](https://luet-lab.github.io/docs/docs/concepts/packages/#package-layers). As it is based on containers, it can be also used to build stages for Linux From Scratch installations and it can build and track updates for those systems.

It is written entirely in Golang and where used as package manager, it can run in from scratch environment, with zero dependencies.

[](https://asciinema.org/a/388348)

## In a glance

- Luet can reuse Gentoo's portage tree hierarchy, and it is heavily inspired from it.

cmd/build.go (modified, 90 lines changed; old and new lines shown in page order)
@@ -21,6 +21,7 @@ import (
	"github.com/ghodss/yaml"
	helpers "github.com/mudler/luet/cmd/helpers"
	"github.com/mudler/luet/cmd/util"
	"github.com/mudler/luet/pkg/compiler"
	"github.com/mudler/luet/pkg/compiler/types/artifact"
	compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
@@ -35,7 +36,6 @@ import (
	tree "github.com/mudler/luet/pkg/tree"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var buildCmd = &cobra.Command{
@@ -65,57 +65,55 @@ Build packages specifying multiple definition trees:

	$ luet build --tree overlay/path --tree overlay/path2 utils/yq ...
`, PreRun: func(cmd *cobra.Command, args []string) {
		viper.BindPFlag("tree", cmd.Flags().Lookup("tree"))
		viper.BindPFlag("destination", cmd.Flags().Lookup("destination"))
		viper.BindPFlag("backend", cmd.Flags().Lookup("backend"))
		viper.BindPFlag("privileged", cmd.Flags().Lookup("privileged"))
		viper.BindPFlag("revdeps", cmd.Flags().Lookup("revdeps"))
		viper.BindPFlag("all", cmd.Flags().Lookup("all"))
		viper.BindPFlag("compression", cmd.Flags().Lookup("compression"))
		viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
		viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
		viper.BindPFlag("values", cmd.Flags().Lookup("values"))
		viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))
		LuetCfg.Viper.BindPFlag("tree", cmd.Flags().Lookup("tree"))
		LuetCfg.Viper.BindPFlag("destination", cmd.Flags().Lookup("destination"))
		LuetCfg.Viper.BindPFlag("backend", cmd.Flags().Lookup("backend"))
		LuetCfg.Viper.BindPFlag("privileged", cmd.Flags().Lookup("privileged"))
		LuetCfg.Viper.BindPFlag("revdeps", cmd.Flags().Lookup("revdeps"))
		LuetCfg.Viper.BindPFlag("all", cmd.Flags().Lookup("all"))
		LuetCfg.Viper.BindPFlag("compression", cmd.Flags().Lookup("compression"))
		LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
		LuetCfg.Viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
		util.BindValuesFlags(cmd)
		LuetCfg.Viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))

		viper.BindPFlag("image-repository", cmd.Flags().Lookup("image-repository"))
		viper.BindPFlag("push", cmd.Flags().Lookup("push"))
		viper.BindPFlag("pull", cmd.Flags().Lookup("pull"))
		viper.BindPFlag("wait", cmd.Flags().Lookup("wait"))
		viper.BindPFlag("keep-images", cmd.Flags().Lookup("keep-images"))
		LuetCfg.Viper.BindPFlag("image-repository", cmd.Flags().Lookup("image-repository"))
		LuetCfg.Viper.BindPFlag("push", cmd.Flags().Lookup("push"))
		LuetCfg.Viper.BindPFlag("pull", cmd.Flags().Lookup("pull"))
		LuetCfg.Viper.BindPFlag("wait", cmd.Flags().Lookup("wait"))
		LuetCfg.Viper.BindPFlag("keep-images", cmd.Flags().Lookup("keep-images"))

		util.BindSolverFlags(cmd)

		LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
		LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
		LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
		LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
		LuetCfg.Viper.BindPFlag("general.show_build_output", cmd.Flags().Lookup("live-output"))
		LuetCfg.Viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))

	},
	Run: func(cmd *cobra.Command, args []string) {

		treePaths := viper.GetStringSlice("tree")
		dst := viper.GetString("destination")
		treePaths := LuetCfg.Viper.GetStringSlice("tree")
		dst := LuetCfg.Viper.GetString("destination")
		concurrency := LuetCfg.GetGeneral().Concurrency
		backendType := viper.GetString("backend")
		privileged := viper.GetBool("privileged")
		revdeps := viper.GetBool("revdeps")
		all := viper.GetBool("all")
		compressionType := viper.GetString("compression")
		imageRepository := viper.GetString("image-repository")
		values := viper.GetStringSlice("values")
		wait := viper.GetBool("wait")
		push := viper.GetBool("push")
		pull := viper.GetBool("pull")
		keepImages := viper.GetBool("keep-images")
		nodeps := viper.GetBool("nodeps")
		onlydeps := viper.GetBool("onlydeps")
		backendType := LuetCfg.Viper.GetString("backend")
		privileged := LuetCfg.Viper.GetBool("privileged")
		revdeps := LuetCfg.Viper.GetBool("revdeps")
		all := LuetCfg.Viper.GetBool("all")
		compressionType := LuetCfg.Viper.GetString("compression")
		imageRepository := LuetCfg.Viper.GetString("image-repository")
		values := util.ValuesFlags()
		wait := LuetCfg.Viper.GetBool("wait")
		push := LuetCfg.Viper.GetBool("push")
		pull := LuetCfg.Viper.GetBool("pull")
		keepImages := LuetCfg.Viper.GetBool("keep-images")
		nodeps := LuetCfg.Viper.GetBool("nodeps")
		onlydeps := LuetCfg.Viper.GetBool("onlydeps")
		onlyTarget, _ := cmd.Flags().GetBool("only-target-package")
		full, _ := cmd.Flags().GetBool("full")
		rebuild, _ := cmd.Flags().GetBool("rebuild")

		concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
		var results Results
		backendArgs := viper.GetStringSlice("backend-args")
		backendArgs := LuetCfg.Viper.GetStringSlice("backend-args")

		out, _ := cmd.Flags().GetString("output")
		if out != "terminal" {
@@ -148,21 +146,11 @@ Build packages specifying multiple definition trees:

		Info("Building in", dst)

		stype := LuetCfg.Viper.GetString("solver.type")
		discount := LuetCfg.Viper.GetFloat64("solver.discount")
		rate := LuetCfg.Viper.GetFloat64("solver.rate")
		attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
		opts := util.SetSolverConfig()
		pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")

		LuetCfg.GetGeneral().ShowBuildOutput = LuetCfg.Viper.GetBool("general.show_build_output")

		opts := &LuetSolverOptions{
			Type:        stype,
			LearnRate:   float32(rate),
			Discount:    float32(discount),
			MaxAttempts: attempts,
		}

		Debug("Solver", opts.CompactString())

		if concurrent {
@@ -179,6 +167,7 @@ Build packages specifying multiple definition trees:
			options.WithPullRepositories(pullRepo),
			options.WithPushRepository(imageRepository),
			options.Rebuild(rebuild),
			options.WithTemplateFolder(util.TemplateFolders(fromRepo, treePaths)),
			options.WithSolverOptions(*opts),
			options.Wait(wait),
			options.OnlyTarget(onlyTarget),
@@ -247,11 +236,12 @@ Build packages specifying multiple definition trees:
		}

		for _, sp := range toCalculate {
			packs, err := luetCompiler.ComputeDepTree(sp)
			ht := compiler.NewHashTree(generalRecipe.GetDatabase())
			hashTree, err := ht.Query(luetCompiler, sp)
			if err != nil {
				errs = append(errs, err)
			}
			for _, p := range packs {
			for _, p := range hashTree.Dependencies {
				results.Packages = append(results.Packages,
					PackageResult{
						Name: p.Package.GetName(),

cmd/cleanup.go
@@ -20,9 +20,9 @@ import (
	"os"
	"path/filepath"

	"github.com/mudler/luet/cmd/util"
	. "github.com/mudler/luet/pkg/config"
	config "github.com/mudler/luet/pkg/config"
	"github.com/mudler/luet/pkg/helpers"
	fileHelper "github.com/mudler/luet/pkg/helpers/file"
	. "github.com/mudler/luet/pkg/logger"

	"github.com/spf13/cobra"
@@ -33,21 +33,13 @@ var cleanupCmd = &cobra.Command{
	Short: "Clean packages cache.",
	Long:  `remove downloaded packages tarballs and clean cache directory`,
	PreRun: func(cmd *cobra.Command, args []string) {
		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
		LuetCfg.Viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
		util.BindSystemFlags(cmd)
	},
	Run: func(cmd *cobra.Command, args []string) {
		var cleaned int = 0
		dbpath := LuetCfg.Viper.GetString("system.database_path")
		rootfs := config.LuetCfg.Viper.GetString("system.rootfs")
		engine := config.LuetCfg.Viper.GetString("system.database_engine")

		LuetCfg.System.DatabaseEngine = engine
		LuetCfg.System.DatabasePath = dbpath
		LuetCfg.System.Rootfs = rootfs
		util.SetSystemConfig()
		// Check if cache dir exists
		if helpers.Exists(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath()) {
		if fileHelper.Exists(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath()) {

			files, err := ioutil.ReadDir(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath())
			if err != nil {

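The same three `system.*` flag bindings and the dbpath/rootfs/engine copy-back recur in nearly every command in this compare, and the changeset replaces them with calls to `util.BindSystemFlags` and `util.SetSystemConfig`. The helpers themselves are not part of this diff; the following is only a minimal sketch of what such a pair could look like, assuming they simply wrap the bindings and assignments deleted above. It uses the real spf13/cobra and spf13/viper APIs, but the `SystemConfig` struct is a simplified stand-in, not luet's actual config type.

package util

import (
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

// SystemConfig is a stand-in for the system section of luet's configuration.
type SystemConfig struct {
	DatabasePath   string
	Rootfs         string
	DatabaseEngine string
}

var System SystemConfig

// BindSystemFlags wires the per-command flags to the shared viper keys,
// replacing the three BindPFlag calls each command used to carry in PreRun.
func BindSystemFlags(cmd *cobra.Command) {
	viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
	viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
	viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
}

// SetSystemConfig copies the resolved values back into the config struct,
// replacing the dbpath/rootfs/engine assignments deleted from each Run function.
func SetSystemConfig() {
	System.DatabasePath = viper.GetString("system.database_path")
	System.Rootfs = viper.GetString("system.rootfs")
	System.DatabaseEngine = viper.GetString("system.database_engine")
}
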
cmd/config.go
@@ -19,7 +19,7 @@ import (
	"fmt"

	config "github.com/mudler/luet/pkg/config"
	installer "github.com/mudler/luet/pkg/installer"
	. "github.com/mudler/luet/pkg/logger"

	"github.com/spf13/cobra"
)
@@ -30,46 +30,12 @@ var configCmd = &cobra.Command{
	Long:    `Show luet configuration`,
	Aliases: []string{"c"},
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println(config.LuetCfg.GetLogging())
		fmt.Println(config.LuetCfg.GetGeneral())
		fmt.Println(config.LuetCfg.GetSystem())
		if len(config.LuetCfg.CacheRepositories) > 0 {
			fmt.Println("repetitors:")
			for _, r := range config.LuetCfg.CacheRepositories {
				fmt.Println("  - ", r.String())
			}
		}
		if len(config.LuetCfg.SystemRepositories) > 0 {
			fmt.Println("repositories:")
			for _, r := range config.LuetCfg.SystemRepositories {
				fmt.Println("  - ", r.String())
			}
		data, err := config.LuetCfg.YAML()
		if err != nil {
			Fatal(err.Error())
		}

		if len(config.LuetCfg.RepositoriesConfDir) > 0 {
			fmt.Println("repos_confdir:")
			for _, dir := range config.LuetCfg.RepositoriesConfDir {
				fmt.Println("  - ", dir)
			}
		}

		if len(config.LuetCfg.ConfigProtectConfDir) > 0 {

			// Load config protect configs
			installer.LoadConfigProtectConfs(config.LuetCfg)

			fmt.Println("config_protect_confdir:")
			for _, dir := range config.LuetCfg.ConfigProtectConfDir {
				fmt.Println("  - ", dir)
			}

			if len(config.LuetCfg.GetConfigProtectConfFiles()) > 0 {
				fmt.Println("protect_conf_files:")
				for _, file := range config.LuetCfg.GetConfigProtectConfFiles() {
					fmt.Println("  - ", file.String())
				}
			}
		}
		fmt.Println(string(data))
	},
}

cmd/create-repo.go
@@ -69,7 +69,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
		viper.BindPFlag("meta-filename", cmd.Flags().Lookup("meta-filename"))
		viper.BindPFlag("reset-revision", cmd.Flags().Lookup("reset-revision"))
		viper.BindPFlag("repo", cmd.Flags().Lookup("repo"))

		viper.BindPFlag("from-metadata", cmd.Flags().Lookup("from-metadata"))
		viper.BindPFlag("force-push", cmd.Flags().Lookup("force-push"))
		viper.BindPFlag("push-images", cmd.Flags().Lookup("push-images"))
@@ -80,7 +80,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
		treePaths := viper.GetStringSlice("tree")
		dst := viper.GetString("output")
		packages := viper.GetString("packages")

		name := viper.GetString("name")
		descr := viper.GetString("descr")
		urls := viper.GetStringSlice("urls")
@@ -101,6 +101,18 @@ Create a repository from the metadata description defined in the luet.yaml confi
		force := viper.GetBool("force-push")
		imagePush := viper.GetBool("push-images")

		opts := []installer.RepositoryOption{
			installer.WithSource(viper.GetString("packages")),
			installer.WithPushImages(imagePush),
			installer.WithForce(force),
			installer.FromRepository(fromRepo),
			installer.WithConfig(LuetCfg),
			installer.WithImagePrefix(dst),
			installer.WithDatabase(pkg.NewInMemoryDatabase(false)),
			installer.WithCompilerBackend(compilerBackend),
			installer.FromMetadata(viper.GetBool("from-metadata")),
		}

		if source_repo != "" {
			// Search for system repository
			lrepo, err := LuetCfg.GetSystemRepository(source_repo)
@@ -114,27 +126,28 @@ Create a repository from the metadata description defined in the luet.yaml confi
				t = lrepo.Type
			}

			repo, err = installer.GenerateRepository(lrepo.Name,
				lrepo.Description, t,
				lrepo.Urls,
				lrepo.Priority,
				packages,
				treePaths,
				pkg.NewInMemoryDatabase(false),
				compilerBackend,
				dst,
				imagePush,
				force,
				fromRepo,
				LuetCfg)
			helpers.CheckErr(err)
			opts = append(opts,
				installer.WithName(lrepo.Name),
				installer.WithDescription(lrepo.Description),
				installer.WithType(t),
				installer.WithUrls(lrepo.Urls...),
				installer.WithPriority(lrepo.Priority),
				installer.WithTree(treePaths...),
			)

		} else {
			repo, err = installer.GenerateRepository(name, descr, t, urls, 1, packages,
				treePaths, pkg.NewInMemoryDatabase(false), compilerBackend, dst, imagePush, force, fromRepo, LuetCfg)
			helpers.CheckErr(err)
			opts = append(opts,
				installer.WithName(name),
				installer.WithDescription(descr),
				installer.WithType(t),
				installer.WithUrls(urls...),
				installer.WithTree(treePaths...),
			)
		}

		repo, err = installer.GenerateRepository(opts...)
		helpers.CheckErr(err)

		if treetype != "" {
			treeFile.SetCompressionType(compression.Implementation(treetype))
		}
@@ -177,6 +190,7 @@ func init() {
	createrepoCmd.Flags().Bool("force-push", false, "Force overwrite of docker images if already present online")
	createrepoCmd.Flags().Bool("push-images", false, "Enable/Disable docker image push for docker repositories")
	createrepoCmd.Flags().Bool("from-metadata", false, "Consider metadata files from the packages folder while indexing the new tree")

	createrepoCmd.Flags().String("tree-compression", "gzip", "Compression alg: none, gzip, zstd")
	createrepoCmd.Flags().String("tree-filename", installer.TREE_TARBALL, "Repository tree filename")

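The `create-repo` change above swaps `GenerateRepository`'s long positional parameter list for a slice of `installer.RepositoryOption` values that both branches assemble incrementally and a single call then applies. This is the standard Go functional-options idiom; below is a small, self-contained sketch of the pattern for illustration only, where the `Repository` type and option names are generic placeholders rather than luet's actual API.

package main

import "fmt"

type Repository struct {
	Name        string
	Description string
	Urls        []string
	Priority    int
}

// RepositoryOption mutates a Repository under construction.
type RepositoryOption func(*Repository)

func WithName(n string) RepositoryOption        { return func(r *Repository) { r.Name = n } }
func WithDescription(d string) RepositoryOption { return func(r *Repository) { r.Description = d } }
func WithUrls(u ...string) RepositoryOption     { return func(r *Repository) { r.Urls = append(r.Urls, u...) } }
func WithPriority(p int) RepositoryOption       { return func(r *Repository) { r.Priority = p } }

// GenerateRepository applies whatever options the caller collected, so call
// sites can build the option slice conditionally, as cmd/create-repo.go now does.
func GenerateRepository(opts ...RepositoryOption) (*Repository, error) {
	r := &Repository{Priority: 1}
	for _, o := range opts {
		o(r)
	}
	return r, nil
}

func main() {
	opts := []RepositoryOption{WithName("demo"), WithUrls("https://example.org/repo")}
	opts = append(opts, WithDescription("demo repository"))
	repo, _ := GenerateRepository(opts...)
	fmt.Printf("%+v\n", repo)
}
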
cmd/database/create.go
@@ -18,6 +18,7 @@ package cmd_database
import (
	"io/ioutil"

	"github.com/mudler/luet/cmd/util"
	artifact "github.com/mudler/luet/pkg/compiler/types/artifact"

	. "github.com/mudler/luet/pkg/logger"
@@ -45,21 +46,11 @@ The yaml must contain the package definition, and the file list at least.
For reference, inspect a "metadata.yaml" file generated while running "luet build"`,
	Args: cobra.OnlyValidArgs,
	PreRun: func(cmd *cobra.Command, args []string) {
		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))

		util.BindSystemFlags(cmd)
	},
	Run: func(cmd *cobra.Command, args []string) {

		dbpath := LuetCfg.Viper.GetString("system.database_path")
		rootfs := LuetCfg.Viper.GetString("system.rootfs")
		engine := LuetCfg.Viper.GetString("system.database_engine")

		LuetCfg.System.DatabaseEngine = engine
		LuetCfg.System.DatabasePath = dbpath
		LuetCfg.System.Rootfs = rootfs

		util.SetSystemConfig()
		systemDB := LuetCfg.GetSystemDB()

		for _, a := range args {
@@ -74,6 +65,12 @@ For reference, inspect a "metadata.yaml" file generated while running "luet buil
			files := art.Files

			// Check if the package is already present
			if p, err := systemDB.FindPackage(art.CompileSpec.GetPackage()); err == nil && p.GetName() != "" {
				Fatal("Package", art.CompileSpec.GetPackage().HumanReadableString(),
					" already present.")
			}

			if _, err := systemDB.CreatePackage(art.CompileSpec.GetPackage()); err != nil {
				Fatal("Failed to create ", a, ": ", err.Error())
			}

cmd/database/get.go
@@ -19,6 +19,7 @@ import (
	"fmt"

	helpers "github.com/mudler/luet/cmd/helpers"
	"github.com/mudler/luet/cmd/util"
	"gopkg.in/yaml.v2"

	. "github.com/mudler/luet/pkg/config"
@@ -38,20 +39,11 @@ To return also files:
	$ luet database get --files system/foo`,
	Args: cobra.OnlyValidArgs,
	PreRun: func(cmd *cobra.Command, args []string) {
		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))

		util.BindSystemFlags(cmd)
	},
	Run: func(cmd *cobra.Command, args []string) {
		showFiles, _ := cmd.Flags().GetBool("files")
		dbpath := LuetCfg.Viper.GetString("system.database_path")
		rootfs := LuetCfg.Viper.GetString("system.rootfs")
		engine := LuetCfg.Viper.GetString("system.database_engine")

		LuetCfg.System.DatabaseEngine = engine
		LuetCfg.System.DatabasePath = dbpath
		LuetCfg.System.Rootfs = rootfs
		util.SetSystemConfig()

		systemDB := LuetCfg.GetSystemDB()

cmd/database/remove.go
@@ -19,6 +19,7 @@ import (
	. "github.com/mudler/luet/pkg/logger"

	helpers "github.com/mudler/luet/cmd/helpers"
	"github.com/mudler/luet/cmd/util"
	. "github.com/mudler/luet/pkg/config"

	"github.com/spf13/cobra"
@@ -36,19 +37,10 @@ This commands takes multiple packages as arguments and prunes their entries from
`,
	Args: cobra.OnlyValidArgs,
	PreRun: func(cmd *cobra.Command, args []string) {
		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))

		util.BindSystemFlags(cmd)
	},
	Run: func(cmd *cobra.Command, args []string) {
		dbpath := LuetCfg.Viper.GetString("system.database_path")
		rootfs := LuetCfg.Viper.GetString("system.rootfs")
		engine := LuetCfg.Viper.GetString("system.database_engine")

		LuetCfg.System.DatabaseEngine = engine
		LuetCfg.System.DatabasePath = dbpath
		LuetCfg.System.Rootfs = rootfs
		util.SetSystemConfig()

		systemDB := LuetCfg.GetSystemDB()

cmd/install.go
@@ -19,6 +19,7 @@ import (
	"github.com/mudler/luet/pkg/solver"

	helpers "github.com/mudler/luet/cmd/helpers"
	"github.com/mudler/luet/cmd/util"
	. "github.com/mudler/luet/pkg/config"
	. "github.com/mudler/luet/pkg/logger"
	pkg "github.com/mudler/luet/pkg/package"
@@ -47,13 +48,8 @@ To force install a package:
`,
	Aliases: []string{"i"},
	PreRun: func(cmd *cobra.Command, args []string) {
		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
		LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
		LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
		LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
		LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
		util.BindSystemFlags(cmd)
		util.BindSolverFlags(cmd)
		LuetCfg.Viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
		LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
		LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
@@ -70,29 +66,16 @@ To force install a package:
			toInstall = append(toInstall, pack)
		}

		stype := LuetCfg.Viper.GetString("solver.type")
		discount := LuetCfg.Viper.GetFloat64("solver.discount")
		rate := LuetCfg.Viper.GetFloat64("solver.rate")
		attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
		force := LuetCfg.Viper.GetBool("force")
		nodeps := LuetCfg.Viper.GetBool("nodeps")
		onlydeps := LuetCfg.Viper.GetBool("onlydeps")
		concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
		yes := LuetCfg.Viper.GetBool("yes")
		downloadOnly, _ := cmd.Flags().GetBool("download-only")
		finalizerEnvs, _ := cmd.Flags().GetStringArray("finalizer-env")

		dbpath := LuetCfg.Viper.GetString("system.database_path")
		rootfs := LuetCfg.Viper.GetString("system.rootfs")
		engine := LuetCfg.Viper.GetString("system.database_engine")

		LuetCfg.System.DatabaseEngine = engine
		LuetCfg.System.DatabasePath = dbpath
		LuetCfg.System.Rootfs = rootfs

		LuetCfg.GetSolverOptions().Type = stype
		LuetCfg.GetSolverOptions().LearnRate = float32(rate)
		LuetCfg.GetSolverOptions().Discount = float32(discount)
		LuetCfg.GetSolverOptions().MaxAttempts = attempts
		util.SetSystemConfig()
		util.SetSolverConfig()

		if concurrent {
			LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
@@ -106,6 +89,12 @@ To force install a package:
		// Load config protect configs
		installer.LoadConfigProtectConfs(LuetCfg)

		// Load finalizer runtime environments
		err := util.SetCliFinalizerEnvs(finalizerEnvs)
		if err != nil {
			Fatal(err.Error())
		}

		inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
			Concurrency:   LuetCfg.GetGeneral().Concurrency,
			SolverOptions: *LuetCfg.GetSolverOptions(),
@@ -119,7 +108,7 @@ To force install a package:
		inst.Repositories(repos)

		system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}
		err := inst.Install(toInstall, system)
		err = inst.Install(toInstall, system)
		if err != nil {
			Fatal("Error: " + err.Error())
		}
@@ -141,6 +130,8 @@ func init() {
	installCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
	installCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
	installCmd.Flags().Bool("download-only", false, "Download only")
	installCmd.Flags().StringArray("finalizer-env", []string{},
		"Set finalizer environment in the format key=value.")

	RootCmd.AddCommand(installCmd)
}

@@ -80,7 +80,7 @@ Afterwards, you can use the content generated and associate it with a tree and a
		}
		a.Files = filelist
		a.CompileSpec.GetPackage().SetBuildTimestamp(time.Now().String())
		err = a.WriteYaml(dst)
		err = a.WriteYAML(dst)
		if err != nil {
			Fatal("failed writing metadata yaml file for ", packageName, ": ", err.Error())
		}

cmd/reclaim.go
@@ -15,6 +15,7 @@
package cmd

import (
	"github.com/mudler/luet/cmd/util"
	installer "github.com/mudler/luet/pkg/installer"

	. "github.com/mudler/luet/pkg/config"
@@ -27,9 +28,7 @@ var reclaimCmd = &cobra.Command{
	Use:   "reclaim",
	Short: "Reclaim packages to Luet database from available repositories",
	PreRun: func(cmd *cobra.Command, args []string) {
		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
		util.BindSystemFlags(cmd)
		LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
	},
	Long: `Reclaim tries to find association between packages in the online repositories and the system one.
@@ -39,13 +38,7 @@ var reclaimCmd = &cobra.Command{
It scans the target file system, and if finds a match with a package available in the repositories, it marks as installed in the system database.
`,
	Run: func(cmd *cobra.Command, args []string) {
		dbpath := LuetCfg.Viper.GetString("system.database_path")
		rootfs := LuetCfg.Viper.GetString("system.rootfs")
		engine := LuetCfg.Viper.GetString("system.database_engine")

		LuetCfg.System.DatabaseEngine = engine
		LuetCfg.System.DatabasePath = dbpath
		LuetCfg.System.Rootfs = rootfs
		util.SetSystemConfig()

		// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
		repos := installer.Repositories{}

cmd/reinstall.go (new file, 128 lines)
@@ -0,0 +1,128 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd

import (
	installer "github.com/mudler/luet/pkg/installer"
	"github.com/mudler/luet/pkg/solver"

	helpers "github.com/mudler/luet/cmd/helpers"
	"github.com/mudler/luet/cmd/util"
	. "github.com/mudler/luet/pkg/config"
	. "github.com/mudler/luet/pkg/logger"
	pkg "github.com/mudler/luet/pkg/package"

	"github.com/spf13/cobra"
)

var reinstallCmd = &cobra.Command{
	Use:   "reinstall <pkg1> <pkg2> <pkg3>",
	Short: "reinstall a set of packages",
	Long: `Reinstall a group of packages in the system:

	$ luet reinstall -y system/busybox shells/bash system/coreutils ...
`,
	PreRun: func(cmd *cobra.Command, args []string) {
		util.BindSystemFlags(cmd)
		util.BindSolverFlags(cmd)
		LuetCfg.Viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
		LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
		LuetCfg.Viper.BindPFlag("for", cmd.Flags().Lookup("for"))

		LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
	},
	Run: func(cmd *cobra.Command, args []string) {
		var toUninstall pkg.Packages
		var toAdd pkg.Packages

		force := LuetCfg.Viper.GetBool("force")
		onlydeps := LuetCfg.Viper.GetBool("onlydeps")
		concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
		yes := LuetCfg.Viper.GetBool("yes")

		downloadOnly, _ := cmd.Flags().GetBool("download-only")

		util.SetSystemConfig()

		for _, a := range args {
			pack, err := helpers.ParsePackageStr(a)
			if err != nil {
				Fatal("Invalid package string ", a, ": ", err.Error())
			}
			toUninstall = append(toUninstall, pack)
			toAdd = append(toAdd, pack)
		}

		// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
		repos := installer.Repositories{}
		for _, repo := range LuetCfg.SystemRepositories {
			if !repo.Enable {
				continue
			}
			r := installer.NewSystemRepository(repo)
			repos = append(repos, r)
		}

		util.SetSolverConfig()

		if concurrent {
			LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
		} else {
			LuetCfg.GetSolverOptions().Implementation = solver.SingleCoreSimple
		}

		Debug("Solver", LuetCfg.GetSolverOptions().CompactString())

		// Load config protect configs
		installer.LoadConfigProtectConfs(LuetCfg)

		inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
			Concurrency:                 LuetCfg.GetGeneral().Concurrency,
			SolverOptions:               *LuetCfg.GetSolverOptions(),
			NoDeps:                      true,
			Force:                       force,
			OnlyDeps:                    onlydeps,
			PreserveSystemEssentialData: true,
			Ask:                         !yes,
			DownloadOnly:                downloadOnly,
		})
		inst.Repositories(repos)

		system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}
		err := inst.Swap(toUninstall, toAdd, system)
		if err != nil {
			Fatal("Error: " + err.Error())
		}
	},
}

func init() {

	reinstallCmd.Flags().String("system-dbpath", "", "System db path")
	reinstallCmd.Flags().String("system-target", "", "System rootpath")
	reinstallCmd.Flags().String("system-engine", "", "System DB engine")

	reinstallCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
	reinstallCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
	reinstallCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
	reinstallCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
	reinstallCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
	reinstallCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
	reinstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
	reinstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
	reinstallCmd.Flags().Bool("download-only", false, "Download only")

	RootCmd.AddCommand(reinstallCmd)
}

cmd/replace.go
@@ -19,6 +19,7 @@ import (
	"github.com/mudler/luet/pkg/solver"

	helpers "github.com/mudler/luet/cmd/helpers"
	"github.com/mudler/luet/cmd/util"
	. "github.com/mudler/luet/pkg/config"
	. "github.com/mudler/luet/pkg/logger"
	pkg "github.com/mudler/luet/pkg/package"
@@ -35,13 +36,8 @@ var replaceCmd = &cobra.Command{
	$ luet replace -y system/busybox ... --for shells/bash --for system/coreutils ...
`,
	PreRun: func(cmd *cobra.Command, args []string) {
		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
		LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
		LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
		LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
		LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
		util.BindSystemFlags(cmd)
		util.BindSolverFlags(cmd)
		LuetCfg.Viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
		LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
		LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
@@ -54,24 +50,15 @@ var replaceCmd = &cobra.Command{
		var toAdd pkg.Packages

		f := LuetCfg.Viper.GetStringSlice("for")
		stype := LuetCfg.Viper.GetString("solver.type")
		discount := LuetCfg.Viper.GetFloat64("solver.discount")
		rate := LuetCfg.Viper.GetFloat64("solver.rate")
		attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
		force := LuetCfg.Viper.GetBool("force")
		nodeps := LuetCfg.Viper.GetBool("nodeps")
		onlydeps := LuetCfg.Viper.GetBool("onlydeps")
		concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
		yes := LuetCfg.Viper.GetBool("yes")
		dbpath := LuetCfg.Viper.GetString("system.database_path")
		rootfs := LuetCfg.Viper.GetString("system.rootfs")
		engine := LuetCfg.Viper.GetString("system.database_engine")
		downloadOnly, _ := cmd.Flags().GetBool("download-only")

		LuetCfg.System.DatabaseEngine = engine
		LuetCfg.System.DatabasePath = dbpath
		LuetCfg.System.Rootfs = rootfs

		util.SetSystemConfig()
		util.SetSolverConfig()
		for _, a := range args {
			pack, err := helpers.ParsePackageStr(a)
			if err != nil {
@@ -98,11 +85,6 @@ var replaceCmd = &cobra.Command{
			repos = append(repos, r)
		}

		LuetCfg.GetSolverOptions().Type = stype
		LuetCfg.GetSolverOptions().LearnRate = float32(rate)
		LuetCfg.GetSolverOptions().Discount = float32(discount)
		LuetCfg.GetSolverOptions().MaxAttempts = attempts

		if concurrent {
			LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
		} else {

76 cmd/root.go
@@ -25,6 +25,7 @@ import (

"github.com/marcsauter/single"
bus "github.com/mudler/luet/pkg/bus"
fileHelper "github.com/mudler/luet/pkg/helpers/file"

extensions "github.com/mudler/cobra-extensions"
config "github.com/mudler/luet/pkg/config"
@@ -40,8 +41,14 @@ var Verbose bool
var LockedCommands = []string{"install", "uninstall", "upgrade"}

const (
LuetCLIVersion = "0.14.6"
LuetCLIVersion = "0.17.11"
LuetEnvPrefix = "LUET"
license = `
Luet Copyright (C) 2019-2021 Ettore Di Giacinto
This program comes with ABSOLUTELY NO WARRANTY.
This is free software, and you are welcome to redistribute it
under certain conditions.
`
)

// Build time and commit information.
@@ -52,6 +59,47 @@ var (
BuildCommit string
)

func version() string {
return fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime)
}

var noBannerCommands = []string{"search", "exec", "tree", "database", "box", "cleanup"}

func displayVersionBanner() {
display := true
if len(os.Args) > 1 {
for _, c := range noBannerCommands {
if os.Args[1] == c {
display = false
}
}
}
if display {
Info("Luet version", version())
Info(license)
}
}

func handleLock() {
if os.Getenv("LUET_NOLOCK") != "true" {
if len(os.Args) > 1 {
for _, lockedCmd := range LockedCommands {
if os.Args[1] == lockedCmd {
s := single.New("luet")
if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
Fatal("another instance of the app is already running, exiting")
} else if err != nil {
// Another error occurred, might be worth handling it as well
Fatal("failed to acquire exclusive app lock:", err.Error())
}
defer s.TryUnlock()
break
}
}
}
}
}

// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "luet",
@@ -79,8 +127,9 @@ To build a package, from a tree definition:
$ luet build --tree tree/path package

`,
Version: fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime),
Version: version(),
PersistentPreRun: func(cmd *cobra.Command, args []string) {

err := LoadConfig(config.LuetCfg)
if err != nil {
Fatal("failed to load configuration:", err.Error())
@@ -97,7 +146,7 @@ To build a package, from a tree definition:

plugin := viper.GetStringSlice("plugin")

bus.Manager.Load(plugin...).Register()
bus.Manager.Initialize(plugin...)
if len(bus.Manager.Plugins) != 0 {
Info(":lollipop:Enabled plugins:")
for _, p := range bus.Manager.Plugins {
@@ -154,23 +203,8 @@ func LoadConfig(c *config.LuetConfig) error {
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {

if os.Getenv("LUET_NOLOCK") != "true" {
if len(os.Args) > 1 {
for _, lockedCmd := range LockedCommands {
if os.Args[1] == lockedCmd {
s := single.New("luet")
if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
Fatal("another instance of the app is already running, exiting")
} else if err != nil {
// Another error occurred, might be worth handling it as well
Fatal("failed to acquire exclusive app lock:", err.Error())
}
defer s.TryUnlock()
break
}
}
}
}
handleLock()
displayVersionBanner()

if err := RootCmd.Execute(); err != nil {
fmt.Println(err)
@@ -252,7 +286,7 @@ func initConfig() {
}
homeDir := helpers.GetHomeDir()

if helpers.Exists(filepath.Join(pwdDir, ".luet.yaml")) || (homeDir != "" && helpers.Exists(filepath.Join(homeDir, ".luet.yaml"))) {
if fileHelper.Exists(filepath.Join(pwdDir, ".luet.yaml")) || (homeDir != "" && fileHelper.Exists(filepath.Join(homeDir, ".luet.yaml"))) {
viper.AddConfigPath(".")
if homeDir != "" {
viper.AddConfigPath(homeDir)
@@ -21,6 +21,7 @@ import (
"github.com/ghodss/yaml"
"github.com/jedib0t/go-pretty/table"
"github.com/jedib0t/go-pretty/v6/list"
"github.com/mudler/luet/cmd/util"
. "github.com/mudler/luet/pkg/config"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
@@ -306,14 +307,9 @@ Search can also return results in the terminal in different ways: as terminal ou
`,
Aliases: []string{"s"},
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
LuetCfg.Viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
},
Run: func(cmd *cobra.Command, args []string) {
var results Results
@@ -325,32 +321,20 @@ Search can also return results in the terminal in different ways: as terminal ou
hidden, _ := cmd.Flags().GetBool("hidden")

installed := LuetCfg.Viper.GetBool("installed")
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
searchWithLabel, _ := cmd.Flags().GetBool("by-label")
searchWithLabelMatch, _ := cmd.Flags().GetBool("by-label-regex")
revdeps, _ := cmd.Flags().GetBool("revdeps")
tableMode, _ := cmd.Flags().GetBool("table")
files, _ := cmd.Flags().GetBool("files")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
util.SetSystemConfig()
util.SetSolverConfig()

out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
LuetCfg.GetLogging().SetLogLevel("error")
}

LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts

l := list.NewWriter()
t := table.NewWriter()
t.AppendHeader(rows)
@@ -23,6 +23,7 @@ import (
|
||||
//. "github.com/mudler/luet/pkg/config"
|
||||
"github.com/ghodss/yaml"
|
||||
helpers "github.com/mudler/luet/cmd/helpers"
|
||||
"github.com/mudler/luet/cmd/util"
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
@@ -50,6 +51,7 @@ func NewTreeImageCommand() *cobra.Command {
|
||||
if len(args) != 1 {
|
||||
Fatal("Expects one package as parameter")
|
||||
}
|
||||
util.BindValuesFlags(cmd)
|
||||
viper.BindPFlag("image-repository", cmd.Flags().Lookup("image-repository"))
|
||||
|
||||
},
|
||||
@@ -59,6 +61,7 @@ func NewTreeImageCommand() *cobra.Command {
|
||||
treePath, _ := cmd.Flags().GetStringArray("tree")
|
||||
imageRepository := viper.GetString("image-repository")
|
||||
pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")
|
||||
values := util.ValuesFlags()
|
||||
|
||||
out, _ := cmd.Flags().GetString("output")
|
||||
if out != "terminal" {
|
||||
@@ -80,8 +83,10 @@ func NewTreeImageCommand() *cobra.Command {
|
||||
luetCompiler := compiler.NewLuetCompiler(
|
||||
compilerBackend,
|
||||
reciper.GetDatabase(),
|
||||
options.WithBuildValues(values),
|
||||
options.WithPushRepository(imageRepository),
|
||||
options.WithPullRepositories(pullRepo),
|
||||
options.WithTemplateFolder(util.TemplateFolders(false, treePath)),
|
||||
options.WithSolverOptions(opts),
|
||||
)
|
||||
|
||||
@@ -96,9 +101,14 @@ func NewTreeImageCommand() *cobra.Command {
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
asserts, err := luetCompiler.ComputeDepTree(spec)
|
||||
|
||||
for _, assertion := range asserts { //highly dependent on the order
|
||||
ht := compiler.NewHashTree(reciper.GetDatabase())
|
||||
hashtree, err := ht.Query(luetCompiler, spec)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
for _, assertion := range hashtree.Solution { //highly dependent on the order
|
||||
|
||||
//buildImageHash := imageRepository + ":" + assertion.Hash.BuildHash
|
||||
currentPackageImageHash := imageRepository + ":" + assertion.Hash.PackageHash
|
||||
|
@@ -167,78 +167,79 @@ func NewTreePkglistCommand() *cobra.Command {
|
||||
}
|
||||
}
|
||||
|
||||
if addPkg {
|
||||
if revdeps {
|
||||
packs, _ := reciper.GetDatabase().GetRevdeps(p)
|
||||
for _, revdep := range packs {
|
||||
if !addPkg {
|
||||
continue
|
||||
}
|
||||
|
||||
if revdeps {
|
||||
packs, _ := reciper.GetDatabase().GetRevdeps(p)
|
||||
for i := range packs {
|
||||
revdep := packs[i]
|
||||
if full {
|
||||
pkgstr = pkgDetail(revdep)
|
||||
} else if verbose {
|
||||
pkgstr = revdep.HumanReadableString()
|
||||
} else {
|
||||
pkgstr = fmt.Sprintf("%s/%s", revdep.GetCategory(), revdep.GetName())
|
||||
}
|
||||
plist = append(plist, pkgstr)
|
||||
results.Packages = append(results.Packages, TreePackageResult{
|
||||
Name: revdep.GetName(),
|
||||
Version: revdep.GetVersion(),
|
||||
Category: revdep.GetCategory(),
|
||||
Path: revdep.GetPath(),
|
||||
})
|
||||
}
|
||||
} else if deps {
|
||||
|
||||
solution, err := depSolver.Install(pkg.Packages{p})
|
||||
if err != nil {
|
||||
Fatal(err.Error())
|
||||
}
|
||||
ass := solution.SearchByName(p.GetPackageName())
|
||||
solution, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
|
||||
if err != nil {
|
||||
Fatal(err.Error())
|
||||
}
|
||||
|
||||
for _, pa := range solution {
|
||||
|
||||
if pa.Value {
|
||||
// Exclude itself
|
||||
if pa.Package.GetName() == p.GetName() && pa.Package.GetCategory() == p.GetCategory() {
|
||||
continue
|
||||
}
|
||||
|
||||
if full {
|
||||
pkgstr = pkgDetail(revdep)
|
||||
pkgstr = pkgDetail(pa.Package)
|
||||
} else if verbose {
|
||||
pkgstr = revdep.HumanReadableString()
|
||||
pkgstr = pa.Package.HumanReadableString()
|
||||
} else {
|
||||
pkgstr = fmt.Sprintf("%s/%s", revdep.GetCategory(), revdep.GetName())
|
||||
pkgstr = fmt.Sprintf("%s/%s", pa.Package.GetCategory(), pa.Package.GetName())
|
||||
}
|
||||
plist = append(plist, pkgstr)
|
||||
results.Packages = append(results.Packages, TreePackageResult{
|
||||
Name: revdep.GetName(),
|
||||
Version: revdep.GetVersion(),
|
||||
Category: revdep.GetCategory(),
|
||||
Path: revdep.GetPath(),
|
||||
Name: pa.Package.GetName(),
|
||||
Version: pa.Package.GetVersion(),
|
||||
Category: pa.Package.GetCategory(),
|
||||
Path: pa.Package.GetPath(),
|
||||
})
|
||||
}
|
||||
} else if deps {
|
||||
|
||||
Spinner(32)
|
||||
solution, err := depSolver.Install(pkg.Packages{p})
|
||||
if err != nil {
|
||||
Fatal(err.Error())
|
||||
}
|
||||
ass := solution.SearchByName(p.GetPackageName())
|
||||
solution, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
|
||||
if err != nil {
|
||||
Fatal(err.Error())
|
||||
}
|
||||
SpinnerStop()
|
||||
|
||||
for _, pa := range solution {
|
||||
|
||||
if pa.Value {
|
||||
// Exclude itself
|
||||
if pa.Package.GetName() == p.GetName() && pa.Package.GetCategory() == p.GetCategory() {
|
||||
continue
|
||||
}
|
||||
|
||||
if full {
|
||||
pkgstr = pkgDetail(pa.Package)
|
||||
} else if verbose {
|
||||
pkgstr = pa.Package.HumanReadableString()
|
||||
} else {
|
||||
pkgstr = fmt.Sprintf("%s/%s", pa.Package.GetCategory(), pa.Package.GetName())
|
||||
}
|
||||
plist = append(plist, pkgstr)
|
||||
results.Packages = append(results.Packages, TreePackageResult{
|
||||
Name: pa.Package.GetName(),
|
||||
Version: pa.Package.GetVersion(),
|
||||
Category: pa.Package.GetCategory(),
|
||||
Path: pa.Package.GetPath(),
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
plist = append(plist, pkgstr)
|
||||
results.Packages = append(results.Packages, TreePackageResult{
|
||||
Name: p.GetName(),
|
||||
Version: p.GetVersion(),
|
||||
Category: p.GetCategory(),
|
||||
Path: p.GetPath(),
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
} else {
|
||||
|
||||
plist = append(plist, pkgstr)
|
||||
results.Packages = append(results.Packages, TreePackageResult{
|
||||
Name: p.GetName(),
|
||||
Version: p.GetVersion(),
|
||||
Category: p.GetCategory(),
|
||||
Path: p.GetPath(),
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
y, err := yaml.Marshal(results)
|
||||
|
@@ -16,6 +16,7 @@ package cmd

import (
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
. "github.com/mudler/luet/pkg/config"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
@@ -31,16 +32,11 @@ var uninstallCmd = &cobra.Command{
Long: `Uninstall packages`,
Aliases: []string{"rm", "un"},
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
},
Run: func(cmd *cobra.Command, args []string) {
toRemove := []pkg.Package{}
@@ -53,10 +49,6 @@ var uninstallCmd = &cobra.Command{
toRemove = append(toRemove, pack)
}

stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
force := LuetCfg.Viper.GetBool("force")
nodeps, _ := cmd.Flags().GetBool("nodeps")
full, _ := cmd.Flags().GetBool("full")
@@ -64,18 +56,12 @@ var uninstallCmd = &cobra.Command{
fullClean, _ := cmd.Flags().GetBool("full-clean")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
yes := LuetCfg.Viper.GetBool("yes")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
keepProtected, _ := cmd.Flags().GetBool("keep-protected-files")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
util.SetSystemConfig()
util.SetSolverConfig()

LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
LuetCfg.ConfigProtectSkip = !keepProtected
if concurrent {
LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
} else {
@@ -123,6 +109,7 @@ func init() {
uninstallCmd.Flags().Bool("full-clean", false, "(experimental) Uninstall packages and all the other deps/revdeps of it.")
uninstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
uninstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
uninstallCmd.Flags().BoolP("keep-protected-files", "k", false, "Keep package protected files around")

RootCmd.AddCommand(uninstallCmd)
}
@@ -15,6 +15,7 @@
package cmd

import (
"github.com/mudler/luet/cmd/util"
. "github.com/mudler/luet/pkg/config"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
@@ -28,15 +29,10 @@ var upgradeCmd = &cobra.Command{
Short: "Upgrades the system",
Aliases: []string{"u"},
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", installCmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", installCmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
},
Long: `Upgrades packages in parallel`,
Run: func(cmd *cobra.Command, args []string) {
@@ -51,10 +47,6 @@ var upgradeCmd = &cobra.Command{
repos = append(repos, r)
}

stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
force := LuetCfg.Viper.GetBool("force")
nodeps, _ := cmd.Flags().GetBool("nodeps")
full, _ := cmd.Flags().GetBool("full")
@@ -63,25 +55,18 @@ var upgradeCmd = &cobra.Command{
sync, _ := cmd.Flags().GetBool("sync")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
yes := LuetCfg.Viper.GetBool("yes")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
downloadOnly, _ := cmd.Flags().GetBool("download-only")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
util.SetSystemConfig()
opts := util.SetSolverConfig()

if concurrent {
LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
} else {
LuetCfg.GetSolverOptions().Implementation = solver.SingleCoreSimple
}

Debug("Solver", LuetCfg.GetSolverOptions().String())
Debug("Solver", opts.CompactString())

// Load config protect configs
installer.LoadConfigProtectConfs(LuetCfg)
82 cmd/util.go
@@ -1,5 +1,4 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
//                  Daniele Rondina <geaaru@sabayonlinux.org>
// Copyright © 2020-2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
@@ -17,11 +16,88 @@
package cmd

import (
. "github.com/mudler/luet/cmd/util"
"fmt"
"os"
"path/filepath"

"github.com/docker/docker/api/types"
"github.com/docker/go-units"

config "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/helpers/docker"
. "github.com/mudler/luet/pkg/logger"

"github.com/spf13/cobra"
)

func NewUnpackCommand() *cobra.Command {

c := &cobra.Command{
Use: "unpack image path",
Short: "Unpack a docker image natively",
Long: `unpack doesn't need the docker daemon to run, and unpacks a docker image in the specified directory:

luet util unpack golang:alpine /alpine
`,
PreRun: func(cmd *cobra.Command, args []string) {

if len(args) != 2 {
Fatal("Expects an image and a path")
}

},
Run: func(cmd *cobra.Command, args []string) {

image := args[0]
destination, err := filepath.Abs(args[1])
if err != nil {
Error("Invalid path %s", destination)
os.Exit(1)
}

verify, _ := cmd.Flags().GetBool("verify")
user, _ := cmd.Flags().GetString("auth-username")
pass, _ := cmd.Flags().GetString("auth-password")
authType, _ := cmd.Flags().GetString("auth-type")
server, _ := cmd.Flags().GetString("auth-server-address")
identity, _ := cmd.Flags().GetString("auth-identity-token")
registryToken, _ := cmd.Flags().GetString("auth-registry-token")

temp, err := config.LuetCfg.GetSystem().TempDir("contentstore")
if err != nil {
Fatal("Cannot create a tempdir", err.Error())
}

Info("Downloading", image, "to", destination)
auth := &types.AuthConfig{
Username: user,
Password: pass,
ServerAddress: server,
Auth: authType,
IdentityToken: identity,
RegistryToken: registryToken,
}

info, err := docker.DownloadAndExtractDockerImage(temp, image, destination, auth, verify)
if err != nil {
Error(err.Error())
os.Exit(1)
}
Info(fmt.Sprintf("Pulled: %s %s", info.Target.Digest, info.Name))
Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.ContentSize))))
},
}

c.Flags().String("auth-username", "", "Username to authenticate to registry/notary")
c.Flags().String("auth-password", "", "Password to authenticate to registry")
c.Flags().String("auth-type", "", "Auth type")
c.Flags().String("auth-server-address", "", "Authentication server address")
c.Flags().String("auth-identity-token", "", "Authentication identity token")
c.Flags().String("auth-registry-token", "", "Authentication registry token")
c.Flags().Bool("verify", false, "Verify signed images to notary before to pull")
return c
}

var utilGroup = &cobra.Command{
Use: "util [command] [OPTIONS]",
Short: "General luet internal utilities exposed",
109 cmd/util/cli.go Normal file
@@ -0,0 +1,109 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package util

import (
"errors"
"path/filepath"
"strings"

"github.com/spf13/cobra"

"github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/installer"
)

func BindSystemFlags(cmd *cobra.Command) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
}

func BindSolverFlags(cmd *cobra.Command) {
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
}

func BindValuesFlags(cmd *cobra.Command) {
LuetCfg.Viper.BindPFlag("values", cmd.Flags().Lookup("values"))
}

func ValuesFlags() []string {
return LuetCfg.Viper.GetStringSlice("values")
}

func SetSystemConfig() {
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")

LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.SetRootFS(rootfs)
}

func SetSolverConfig() (c *config.LuetSolverOptions) {
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")

LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts

return &config.LuetSolverOptions{
Type: stype,
LearnRate: float32(rate),
Discount: float32(discount),
MaxAttempts: attempts,
}
}

func SetCliFinalizerEnvs(finalizerEnvs []string) error {
if len(finalizerEnvs) > 0 {
for _, v := range finalizerEnvs {
idx := strings.Index(v, "=")
if idx < 0 {
return errors.New("Found invalid runtime finalizer environment: " + v)
}

LuetCfg.SetFinalizerEnv(v[0:idx], v[idx+1:])
}

}

return nil
}

// TemplateFolders returns the default folders which holds shared template between packages in a given tree path
func TemplateFolders(fromRepo bool, treePaths []string) []string {
templateFolders := []string{}
if !fromRepo {
for _, t := range treePaths {
templateFolders = append(templateFolders, filepath.Join(t, "templates"))
}
} else {
for _, s := range installer.SystemRepositories(LuetCfg) {
templateFolders = append(templateFolders, filepath.Join(s.TreePath, "templates"))
}
}
return templateFolders
}
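A minimal usage sketch, for orientation only: this is how the refactored commands in this changeset wire the new helpers from their PreRun/Run hooks. The command name here is hypothetical; BindSystemFlags, BindSolverFlags, SetSystemConfig, SetSolverConfig and CompactString are all taken from hunks shown above, so this mirrors (but is not copied from) the real call sites.

package cmd

import (
	"github.com/mudler/luet/cmd/util"
	. "github.com/mudler/luet/pkg/logger"

	"github.com/spf13/cobra"
)

// exampleCmd is illustrative only; replace, search, uninstall and upgrade
// follow the same pattern elsewhere in this diff.
var exampleCmd = &cobra.Command{
	Use: "example",
	PreRun: func(cmd *cobra.Command, args []string) {
		// One call each instead of repeating every Viper BindPFlag line per command.
		util.BindSystemFlags(cmd)
		util.BindSolverFlags(cmd)
	},
	Run: func(cmd *cobra.Command, args []string) {
		// Copy the bound values into LuetCfg and keep the solver options for logging.
		util.SetSystemConfig()
		opts := util.SetSolverConfig()
		Debug("Solver", opts.CompactString())
	},
}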
@@ -1,98 +0,0 @@
|
||||
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
func NewUnpackCommand() *cobra.Command {
|
||||
|
||||
c := &cobra.Command{
|
||||
Use: "unpack image path",
|
||||
Short: "Unpack a docker image natively",
|
||||
Long: `unpack doesn't need the docker daemon to run, and unpacks a docker image in the specified directory:
|
||||
|
||||
luet util unpack golang:alpine /alpine
|
||||
`,
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
if len(args) != 2 {
|
||||
Fatal("Expects an image and a path")
|
||||
}
|
||||
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
image := args[0]
|
||||
destination, err := filepath.Abs(args[1])
|
||||
if err != nil {
|
||||
Error("Invalid path %s", destination)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
verify, _ := cmd.Flags().GetBool("verify")
|
||||
user, _ := cmd.Flags().GetString("auth-username")
|
||||
pass, _ := cmd.Flags().GetString("auth-password")
|
||||
authType, _ := cmd.Flags().GetString("auth-type")
|
||||
server, _ := cmd.Flags().GetString("auth-server-address")
|
||||
identity, _ := cmd.Flags().GetString("auth-identity-token")
|
||||
registryToken, _ := cmd.Flags().GetString("auth-registry-token")
|
||||
|
||||
temp, err := config.LuetCfg.GetSystem().TempDir("contentstore")
|
||||
if err != nil {
|
||||
Fatal("Cannot create a tempdir", err.Error())
|
||||
}
|
||||
|
||||
Info("Downloading", image, "to", destination)
|
||||
auth := &types.AuthConfig{
|
||||
Username: user,
|
||||
Password: pass,
|
||||
ServerAddress: server,
|
||||
Auth: authType,
|
||||
IdentityToken: identity,
|
||||
RegistryToken: registryToken,
|
||||
}
|
||||
|
||||
info, err := helpers.DownloadAndExtractDockerImage(temp, image, destination, auth, verify)
|
||||
if err != nil {
|
||||
Error(err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
Info(fmt.Sprintf("Pulled: %s %s", info.Target.Digest, info.Name))
|
||||
Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.ContentSize))))
|
||||
},
|
||||
}
|
||||
|
||||
c.Flags().String("auth-username", "", "Username to authenticate to registry/notary")
|
||||
c.Flags().String("auth-password", "", "Password to authenticate to registry")
|
||||
c.Flags().String("auth-type", "", "Auth type")
|
||||
c.Flags().String("auth-server-address", "", "Authentication server address")
|
||||
c.Flags().String("auth-identity-token", "", "Authentication identity token")
|
||||
c.Flags().String("auth-registry-token", "", "Authentication registry token")
|
||||
c.Flags().Bool("verify", false, "Verify signed images to notary before to pull")
|
||||
return c
|
||||
}
|
@@ -7,7 +7,7 @@ fi
set -ex
export LUET_NOLOCK=true

LUET_VERSION=$(curl -s https://api.github.com/repos/mudler/luet/releases/latest | ( grep -oP '"tag_name": "\K(.*)(?=")' || echo "0.9.24" ))
LUET_VERSION=$(curl -s https://api.github.com/repos/mudler/luet/releases/latest | grep tag_name | awk '{ print $2 }' | sed -e 's/\"//g' -e 's/,//g' || echo "0.9.24" )
LUET_ROOTFS=${LUET_ROOTFS:-/}
LUET_DATABASE_PATH=${LUET_DATABASE_PATH:-/var/luet/db}
LUET_DATABASE_ENGINE=${LUET_DATABASE_ENGINE:-boltdb}
@@ -99,6 +99,14 @@
# If set to false rootfs path is used as prefix.
# config_from_host: true
#
#
# ------------------------------------------------
# Finalizer Environment Variables
# -----------------------------------------------
# finalizer_envs:
#  - key: "BUILD_ISO"
#    value: "1"
#
# System repositories
# ---------------------------------------------
# In alternative to define repositories files
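The finalizer_envs block above has a CLI-side counterpart in the new SetCliFinalizerEnvs helper from cmd/util/cli.go. A hedged sketch of the equivalence; the call site below is illustrative and not part of this changeset:

	// Same effect as the YAML example above: registers BUILD_ISO=1 for finalizers,
	// rejecting entries that are not in KEY=value form.
	if err := util.SetCliFinalizerEnvs([]string{"BUILD_ISO=1"}); err != nil {
		Fatal(err.Error())
	}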
50 go.mod
@@ -1,37 +1,51 @@
module github.com/mudler/luet

go 1.14
go 1.16

require (
github.com/DataDog/zstd v1.4.4 // indirect
github.com/DataDog/zstd v1.4.5 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1 // indirect
github.com/Microsoft/go-winio v0.5.0 // indirect
github.com/Sabayon/pkgs-checker v0.8.1
github.com/apex/log v1.9.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef
github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154
github.com/briandowns/spinner v1.12.1-0.20201220203425-e201aaea0a31
github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765
github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/crillab/gophersat v1.3.2-0.20210701121804-72b19f5b6b38
github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
github.com/docker/go-units v0.4.0
github.com/ecooper/qlearning v0.0.0-20160612200101-3075011a69fd
github.com/fatih/color v1.12.0 // indirect
github.com/genuinetools/img v0.5.11
github.com/ghodss/yaml v1.0.0
github.com/go-sql-driver/mysql v1.6.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/go-cmp v0.5.6 // indirect
github.com/google/go-containerregistry v0.2.1
github.com/google/renameio v1.0.0
github.com/google/uuid v1.3.0 // indirect
github.com/hashicorp/go-multierror v1.0.0
github.com/hashicorp/go-version v1.2.1
github.com/imdario/mergo v0.3.8
github.com/hashicorp/go-version v1.3.0
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12
github.com/jedib0t/go-pretty v4.3.0+incompatible
github.com/jedib0t/go-pretty/v6 v6.0.5
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
github.com/klauspost/compress v1.8.3
github.com/klauspost/compress v1.12.2
github.com/klauspost/pgzip v1.2.1
github.com/knqyf263/go-deb-version v0.0.0-20190517075300-09fca494f03d
github.com/kyokomi/emoji v2.1.0+incompatible
github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e
github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0
github.com/mattn/go-isatty v0.0.13 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.1
github.com/moby/buildkit v0.7.2
github.com/moby/sys/mount v0.2.0 // indirect
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
@@ -47,15 +61,23 @@ require (
github.com/pkg/errors v0.9.1
github.com/schollz/progressbar/v3 v3.7.1
github.com/sirupsen/logrus v1.7.0
github.com/spf13/cobra v1.1.1
github.com/spf13/viper v1.7.1
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.8.1
github.com/theupdateframework/notary v0.7.0
go.etcd.io/bbolt v1.3.5
go.uber.org/atomic v1.5.1 // indirect
go.uber.org/multierr v1.4.0
go.uber.org/zap v1.13.0
google.golang.org/grpc v1.29.1
gopkg.in/yaml.v2 v2.3.0
go.uber.org/multierr v1.6.0
go.uber.org/zap v1.17.0
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
golang.org/x/mod v0.4.2
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d // indirect
golang.org/x/oauth2 v0.0.0-20210810183815-faf39c7919d5 // indirect
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
google.golang.org/genproto v0.0.0-20210811021853-ddbe55d93216 // indirect
google.golang.org/grpc v1.39.1
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.3.4

)
@@ -23,9 +23,9 @@ import (
"strings"
"syscall"

"github.com/pkg/errors"
fileHelper "github.com/mudler/luet/pkg/helpers/file"

helpers "github.com/mudler/luet/pkg/helpers"
"github.com/pkg/errors"
)

type Box interface {
@@ -107,7 +107,7 @@ func (b *DefaultBox) Exec() error {

func (b *DefaultBox) Run() error {

if !helpers.Exists(b.Root) {
if !fileHelper.Exists(b.Root) {
return errors.New(b.Root + " does not exist")
}

@@ -2,6 +2,7 @@ package bus

import (
"github.com/mudler/go-pluggable"
"github.com/sirupsen/logrus"
)

var (
@@ -44,24 +45,59 @@ var (
EventRepositoryPreBuild pluggable.EventType = "repository.pre.build"
// EventRepositoryPostBuild is the event fired after a repository was built
EventRepositoryPostBuild pluggable.EventType = "repository.post.build"

// Image unpack

// EventImagePreUnPack is the event fired before unpacking an image to a local dir
EventImagePreUnPack pluggable.EventType = "image.pre.unpack"
// EventImagePostUnPack is the event fired after unpacking an image to a local dir
EventImagePostUnPack pluggable.EventType = "image.post.unpack"
)

// Manager is the bus instance manager, which subscribes plugins to events emitted by Luet
var Manager *pluggable.Manager = pluggable.NewManager(
[]pluggable.EventType{
EventPackageInstall,
EventPackageUnInstall,
EventPackagePreBuild,
EventPackagePreBuildArtifact,
EventPackagePostBuildArtifact,
EventPackagePostBuild,
EventRepositoryPreBuild,
EventRepositoryPostBuild,
EventImagePreBuild,
EventImagePrePull,
EventImagePrePush,
EventImagePostBuild,
EventImagePostPull,
EventImagePostPush,
},
)
var Manager *Bus = &Bus{
Manager: pluggable.NewManager(
[]pluggable.EventType{
EventPackageInstall,
EventPackageUnInstall,
EventPackagePreBuild,
EventPackagePreBuildArtifact,
EventPackagePostBuildArtifact,
EventPackagePostBuild,
EventRepositoryPreBuild,
EventRepositoryPostBuild,
EventImagePreBuild,
EventImagePrePull,
EventImagePrePush,
EventImagePostBuild,
EventImagePostPull,
EventImagePostPush,
EventImagePreUnPack,
EventImagePostUnPack,
},
),
}

type Bus struct {
*pluggable.Manager
}

func (b *Bus) Initialize(plugin ...string) {
b.Manager.Load(plugin...).Register()

for _, e := range b.Manager.Events {
b.Manager.Response(e, func(p *pluggable.Plugin, r *pluggable.EventResponse) {
if r.Errored() {
logrus.Fatal("Plugin", p.Name, "at", p.Executable, "Error", r.Error)
}
logrus.Debug(
"plugin_event",
"received from",
p.Name,
"at",
p.Executable,
r,
)
})
}
}
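The new Bus wrapper and the two unpack events are easiest to read side by side. Below is a hypothetical helper showing how they could bracket an image extraction: Initialize and the event names come from the hunk above, while Publish is go-pluggable's emit call and is assumed here rather than shown in this diff.

package main

import (
	bus "github.com/mudler/luet/pkg/bus"
)

// unpackWithEvents brackets an unpack callback with the new pre/post unpack events,
// so plugins subscribed through Bus.Initialize can observe both sides of the operation.
func unpackWithEvents(image, dest string, unpack func(image, dest string) error) error {
	// Register plugin response logging; with no plugin paths this loads nothing.
	bus.Manager.Initialize()

	// Publish is assumed from go-pluggable; payload shape is illustrative only.
	bus.Manager.Publish(bus.EventImagePreUnPack, map[string]string{"image": image, "dest": dest})
	if err := unpack(image, dest); err != nil {
		return err
	}
	bus.Manager.Publish(bus.EventImagePostUnPack, map[string]string{"image": image, "dest": dest})
	return nil
}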
@@ -24,6 +24,7 @@ import (
"strings"

bus "github.com/mudler/luet/pkg/bus"
fileHelper "github.com/mudler/luet/pkg/helpers/file"

capi "github.com/mudler/docker-companion/api"

@@ -181,6 +182,12 @@ func (b *SimpleDocker) ExtractRootfs(opts Options, keepPerms bool) error {
name := opts.ImageName
dst := opts.Destination

if !b.ImageExists(name) {
if err := b.DownloadImage(opts); err != nil {
return errors.Wrap(err, "failed pulling image "+name+" during extraction")
}
}

tempexport, err := ioutil.TempDir(dst, "tmprootfs")
if err != nil {
return errors.Wrap(err, "Error met while creating tempdir for rootfs")
@@ -231,7 +238,7 @@ func (b *SimpleDocker) ExtractRootfs(opts Options, keepPerms bool) error {
return errors.Wrap(err, "Error met while unpacking rootfs")
}

manifest, err := helpers.Read(filepath.Join(rootfs, "manifest.json"))
manifest, err := fileHelper.Read(filepath.Join(rootfs, "manifest.json"))
if err != nil {
return errors.Wrap(err, "Error met while reading image manifest")
}
@@ -21,12 +21,12 @@ import (
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
. "github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -60,7 +60,7 @@ var _ = Describe("Docker backend", func() {
|
||||
|
||||
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM alpine
|
||||
@@ -79,11 +79,11 @@ ENV PACKAGE_CATEGORY=app-admin`))
|
||||
|
||||
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
|
||||
Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
|
||||
|
||||
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err = helpers.Read(filepath.Join(tmpdir, "LuetDockerfile"))
|
||||
dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "LuetDockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM luet/base
|
||||
@@ -103,7 +103,7 @@ RUN echo bar > /test2`))
|
||||
|
||||
Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
|
||||
Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
|
||||
|
||||
artifacts := []artifact.ArtifactNode{{
|
||||
Name: "/luetbuild/LuetDockerfile",
|
||||
@@ -132,7 +132,7 @@ RUN echo bar > /test2`))
|
||||
}
|
||||
|
||||
Expect(b.ImageDefinitionToTar(opts2)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "output3.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(tmpdir, "output3.tar"))).To(BeTrue())
|
||||
Expect(b.ImageExists(opts2.ImageName)).To(BeFalse())
|
||||
})
|
||||
|
||||
|
@@ -16,7 +16,9 @@
|
||||
package compiler
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
@@ -34,11 +36,13 @@ import (
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/pkg/errors"
|
||||
"gopkg.in/yaml.v2"
|
||||
"helm.sh/helm/v3/pkg/chart"
|
||||
)
|
||||
|
||||
const BuildFile = "build.yaml"
|
||||
@@ -89,7 +93,7 @@ func (cs *LuetCompiler) compilerWorker(i int, wg *sync.WaitGroup, cspecs chan *c
|
||||
defer wg.Done()
|
||||
|
||||
for s := range cspecs {
|
||||
ar, err := cs.compile(concurrency, keepPermissions, s)
|
||||
ar, err := cs.compile(concurrency, keepPermissions, nil, nil, s)
|
||||
if err != nil {
|
||||
errors <- err
|
||||
}
|
||||
@@ -146,11 +150,6 @@ func (cs *LuetCompiler) CompileParallel(keepPermissions bool, ps *compilerspec.L
|
||||
}
|
||||
|
||||
for _, p := range ps.All() {
|
||||
asserts, err := cs.ComputeDepTree(p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
p.SetSourceAssertion(asserts)
|
||||
all <- p
|
||||
}
|
||||
|
||||
@@ -196,11 +195,15 @@ func (cs *LuetCompiler) stripFromRootfs(includes []string, rootfs string, includ
|
||||
for _, i := range includeRegexp {
|
||||
if i.MatchString(abspath) {
|
||||
match = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if include && !match || !include && match {
|
||||
toRemove = append(toRemove, currentpath)
|
||||
Debug(":scissors: Removing file", currentpath)
|
||||
} else {
|
||||
Debug(":sun: Matched file", currentpath)
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -295,17 +298,6 @@ func (cs *LuetCompiler) unpackDelta(concurrency int, keepPermissions bool, p *co
|
||||
return artifact, nil
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) genBuilderImageTag(p *compilerspec.LuetCompilationSpec, packageImage string) string {
|
||||
// Use packageImage as salt into the fp being used
|
||||
// so the hash is unique also in cases where
|
||||
// some package deps does have completely different
|
||||
// depgraphs
|
||||
// TODO: We should use the image tag, or pass by the package assertion hash which is unique
|
||||
// and identifies the deptree of the package.
|
||||
return fmt.Sprintf("builder-%s", p.GetPackage().HashFingerprint(helpers.StripRegistryFromImage(packageImage)))
|
||||
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImage string,
|
||||
concurrency int, keepPermissions bool,
|
||||
p *compilerspec.LuetCompilationSpec) (backend.Options, backend.Options, error) {
|
||||
@@ -333,7 +325,7 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
|
||||
defer os.RemoveAll(buildDir) // clean up
|
||||
|
||||
// First we copy the source definitions into the output - we create a copy which the builds will need (we need to cache this phase somehow)
|
||||
err = helpers.CopyDir(p.GetPackage().GetPath(), buildDir)
|
||||
err = fileHelper.CopyDir(p.GetPackage().GetPath(), buildDir)
|
||||
if err != nil {
|
||||
return builderOpts, runnerOpts, errors.Wrap(err, "Could not copy package sources")
|
||||
}
|
||||
@@ -450,7 +442,7 @@ func (cs *LuetCompiler) genArtifact(p *compilerspec.LuetCompilationSpec, builder
|
||||
a.CompileSpec = p
|
||||
a.CompileSpec.GetPackage().SetBuildTimestamp(time.Now().String())
|
||||
|
||||
err = a.WriteYaml(p.GetOutputPath())
|
||||
err = a.WriteYAML(p.GetOutputPath())
|
||||
if err != nil {
|
||||
return a, errors.Wrap(err, "Failed while writing metadata file")
|
||||
}
|
||||
@@ -480,7 +472,7 @@ func (cs *LuetCompiler) genArtifact(p *compilerspec.LuetCompilationSpec, builder
|
||||
a.Files = filelist
|
||||
a.CompileSpec.GetPackage().SetBuildTimestamp(time.Now().String())
|
||||
|
||||
err = a.WriteYaml(p.GetOutputPath())
|
||||
err = a.WriteYAML(p.GetOutputPath())
|
||||
if err != nil {
|
||||
return a, errors.Wrap(err, "Failed while writing metadata file")
|
||||
}
|
||||
@@ -522,7 +514,7 @@ func oneOfImagesAvailable(images []string, b CompilerBackend) (bool, string) {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) resolveExistingImageHash(imageHash string, p *compilerspec.LuetCompilationSpec) string {
|
||||
func (cs *LuetCompiler) findImageHash(imageHash string, p *compilerspec.LuetCompilationSpec) string {
|
||||
var resolvedImage string
|
||||
Debug("Resolving image hash for", p.Package.HumanReadableString(), "hash", imageHash, "Pull repositories", p.BuildOptions.PullImageRepository)
|
||||
toChecklist := append([]string{fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, imageHash)},
|
||||
@@ -535,6 +527,11 @@ func (cs *LuetCompiler) resolveExistingImageHash(imageHash string, p *compilersp
|
||||
resolvedImage = which
|
||||
}
|
||||
}
|
||||
return resolvedImage
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) resolveExistingImageHash(imageHash string, p *compilerspec.LuetCompilationSpec) string {
|
||||
resolvedImage := cs.findImageHash(imageHash, p)
|
||||
|
||||
if resolvedImage == "" {
|
||||
resolvedImage = fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, imageHash)
|
||||
@@ -619,12 +616,24 @@ func (cs *LuetCompiler) compileWithImage(image, builderHash string, packageTagHa
|
||||
Debug("Checking if an image is already available")
|
||||
// FIXUP here. If packageimage hash exists and pull is true, generate package
|
||||
resolved := cs.resolveExistingImageHash(packageTagHash, p)
|
||||
Debug("Resolved: " + resolved)
|
||||
Debug("Expected remote: " + resolved)
|
||||
Debug("Package image: " + packageImage)
|
||||
Debug("Resolved builder image: " + builderResolved)
|
||||
|
||||
//
|
||||
if resolved != packageImage && remoteBuildertaggedImage != builderResolved { // an image is there already
|
||||
Debug("Images available for", p.Package.HumanReadableString(), "generating artifact from remote images:", resolved)
|
||||
// a remote image is there already
|
||||
remoteImageAvailable := resolved != packageImage && remoteBuildertaggedImage != builderResolved
|
||||
// or a local one is available
|
||||
localImageAvailable := cs.Backend.ImageExists(remoteBuildertaggedImage) && cs.Backend.ImageExists(packageImage)
|
||||
|
||||
switch {
|
||||
case remoteImageAvailable:
|
||||
Debug("Images available remotely for", p.Package.HumanReadableString(), "generating artifact from remote images:", resolved)
|
||||
return cs.genArtifact(p, backend.Options{ImageName: builderResolved}, backend.Options{ImageName: resolved}, concurrency, keepPermissions)
|
||||
} else {
|
||||
case localImageAvailable:
|
||||
Debug("Images locally available for", p.Package.HumanReadableString(), "generating artifact from image:", resolved)
|
||||
return cs.genArtifact(p, backend.Options{ImageName: remoteBuildertaggedImage}, backend.Options{ImageName: packageImage}, concurrency, keepPermissions)
|
||||
default:
|
||||
Debug("Images not available for", p.Package.HumanReadableString())
|
||||
}
|
||||
}
|
||||
@@ -680,33 +689,7 @@ func (cs *LuetCompiler) FromDatabase(db pkg.PackageDatabase, minimum bool, dst s
|
||||
}
|
||||
}
|
||||
|
||||
// ComputeMinimumCompilableSet strips specs that are eventually compiled by leafs
|
||||
func (cs *LuetCompiler) ComputeMinimumCompilableSet(p ...*compilerspec.LuetCompilationSpec) ([]*compilerspec.LuetCompilationSpec, error) {
|
||||
// Generate a set with all the deps of the provided specs
|
||||
// we will use that set to remove the deps from the list of provided compilation specs
|
||||
allDependencies := solver.PackagesAssertions{} // Get all packages that will be in deps
|
||||
result := []*compilerspec.LuetCompilationSpec{}
|
||||
for _, spec := range p {
|
||||
ass, err := cs.ComputeDepTree(spec)
|
||||
if err != nil {
|
||||
return result, errors.Wrap(err, "computin specs deptree")
|
||||
}
|
||||
|
||||
allDependencies = append(allDependencies, ass.Drop(spec.GetPackage())...)
|
||||
}
|
||||
|
||||
for _, spec := range p {
|
||||
if found := allDependencies.Search(spec.GetPackage().GetFingerPrint()); found == nil {
|
||||
result = append(result, spec)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ComputeDepTree computes the dependency tree of a compilation spec and returns solver assertions
|
||||
// in order to be able to compile the spec.
|
||||
func (cs *LuetCompiler) ComputeDepTree(p *compilerspec.LuetCompilationSpec) (solver.PackagesAssertions, error) {
|
||||
|
||||
s := solver.NewResolver(cs.Options.SolverOptions.Options, pkg.NewInMemoryDatabase(false), cs.Database, pkg.NewInMemoryDatabase(false), cs.Options.SolverOptions.Resolver())
|
||||
|
||||
solution, err := s.Install(pkg.Packages{p.GetPackage()})
|
||||
@@ -718,32 +701,35 @@ func (cs *LuetCompiler) ComputeDepTree(p *compilerspec.LuetCompilationSpec) (sol
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "While order a solution for "+p.GetPackage().HumanReadableString())
|
||||
}
|
||||
return dependencies, nil
|
||||
}
|
||||
|
||||
assertions := solver.PackagesAssertions{}
|
||||
for _, assertion := range dependencies { //highly dependent on the order
|
||||
if assertion.Value {
|
||||
nthsolution := dependencies.Cut(assertion.Package)
|
||||
assertion.Hash = solver.PackageHash{
|
||||
BuildHash: nthsolution.HashFrom(assertion.Package),
|
||||
PackageHash: nthsolution.AssertionHash(),
|
||||
}
|
||||
assertion.Package.SetTreeDir(p.Package.GetTreeDir())
|
||||
assertions = append(assertions, assertion)
|
||||
// ComputeMinimumCompilableSet strips specs that are eventually compiled by leafs
|
||||
func (cs *LuetCompiler) ComputeMinimumCompilableSet(p ...*compilerspec.LuetCompilationSpec) ([]*compilerspec.LuetCompilationSpec, error) {
|
||||
// Generate a set with all the deps of the provided specs
|
||||
// we will use that set to remove the deps from the list of provided compilation specs
|
||||
allDependencies := solver.PackagesAssertions{} // Get all packages that will be in deps
|
||||
result := []*compilerspec.LuetCompilationSpec{}
|
||||
for _, spec := range p {
|
||||
sol, err := cs.ComputeDepTree(spec)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed querying hashtree")
|
||||
}
|
||||
allDependencies = append(allDependencies, sol.Drop(spec.GetPackage())...)
|
||||
}
|
||||
|
||||
for _, spec := range p {
|
||||
if found := allDependencies.Search(spec.GetPackage().GetFingerPrint()); found == nil {
|
||||
result = append(result, spec)
|
||||
}
|
||||
}
|
||||
p.SetSourceAssertion(assertions)
|
||||
return assertions, nil
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Compile is a non-parallel version of CompileParallel. It builds the compilation specs and generates
|
||||
// an artifact
|
||||
func (cs *LuetCompiler) Compile(keepPermissions bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
|
||||
asserts, err := cs.ComputeDepTree(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.SetSourceAssertion(asserts)
|
||||
return cs.compile(cs.Options.Concurrency, keepPermissions, p)
|
||||
return cs.compile(cs.Options.Concurrency, keepPermissions, nil, nil, p)
|
||||
}
|
||||
|
||||
func genImageList(refs []string, hash string) []string {
|
||||
@@ -769,17 +755,197 @@ func (cs *LuetCompiler) inheritSpecBuildOptions(p *compilerspec.LuetCompilationS
|
||||
p.BuildOptions.PullImageRepository = append(p.BuildOptions.PullImageRepository, cs.Options.PullImageRepository...)
|
||||
Debug("Inheriting pull repository from PullImageRepository buildoptions", p.BuildOptions.PullImageRepository)
|
||||
}
|
||||
|
||||
Debug(p.GetPackage().HumanReadableString(), "Build options after inherit", p.BuildOptions)
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
|
||||
// TODO: Racy, remove it
|
||||
// Inherit build options from compilation specs metadata
|
||||
// orig := cs.Options.PullImageRepository
|
||||
// defer func() { cs.Options.PullImageRepository = orig }()
|
||||
func (cs *LuetCompiler) getSpecHash(pkgs pkg.DefaultPackages, salt string) (string, error) {
	ht := NewHashTree(cs.Database)
	overallFp := ""
	for _, p := range pkgs {
		compileSpec, err := cs.FromPackage(p)
		if err != nil {
			return "", errors.Wrap(err, "Error while generating compilespec for "+p.GetName())
		}
		packageHashTree, err := ht.Query(cs, compileSpec)
		if err != nil {
			return "nil", errors.Wrap(err, "failed querying hashtree")
		}
		overallFp = overallFp + packageHashTree.Target.Hash.PackageHash + p.GetFingerPrint()
	}

	h := md5.New()
	io.WriteString(h, fmt.Sprintf("%s-%s", overallFp, salt))
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}

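getSpecHash folds every package's hash-tree fingerprint into a single md5 digest and mixes in a salt, so the same set of packages yields different tags for different uses (the joined parent image uses the "join" salt). A minimal, self-contained sketch of that scheme follows; the fingerprint values are made up and the helper is illustrative, not the repository's actual function.

package main

import (
	"crypto/md5"
	"fmt"
	"io"
)

// saltedSpecHash mirrors the overallFp+salt scheme above: fingerprints are
// concatenated in order, the salt is mixed in last, and the md5 digest is
// rendered in hex to be used as an image tag suffix.
func saltedSpecHash(fingerprints []string, salt string) string {
	overallFp := ""
	for _, fp := range fingerprints {
		overallFp += fp
	}
	h := md5.New()
	io.WriteString(h, fmt.Sprintf("%s-%s", overallFp, salt))
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	// Hypothetical fingerprints; the real ones come from the package hash tree.
	fps := []string{"a786d3fd-b-test-1.0", "0d568ac0-a-test-1.1"}
	fmt.Println(saltedSpecHash(fps, "join"))  // tag for the joined/final-image case
	fmt.Println(saltedSpecHash(fps, "other")) // same packages, different salt, different tag
}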
|
||||
func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) error {
|
||||
|
||||
joinTag := ">:loop: final images<"
|
||||
var fromPackages pkg.DefaultPackages
|
||||
|
||||
if len(p.Join) > 0 {
|
||||
fromPackages = p.Join
|
||||
Warning(joinTag, `
|
||||
Attention! the 'join' keyword is going to be deprecated in Luet >=0.18.x.
|
||||
Use 'requires_final_images: true' instead in the build.yaml file`)
|
||||
} else if p.RequiresFinalImages {
|
||||
Info(joinTag, "Generating a parent image from final packages")
|
||||
fromPackages = p.Package.GetRequires()
|
||||
} else {
|
||||
// No source image to resolve
|
||||
return nil
|
||||
}
|
||||
|
||||
// First compute a hash and check if image is available. if it is, then directly consume that
|
||||
overallFp, err := cs.getSpecHash(fromPackages, "join")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not generate image hash")
|
||||
}
|
||||
|
||||
Info(joinTag, "Searching existing image with hash", overallFp)
|
||||
|
||||
image := cs.findImageHash(overallFp, p)
|
||||
if image != "" {
|
||||
Info("Image already found", image)
|
||||
p.SetImage(image)
|
||||
return nil
|
||||
}
|
||||
Info(joinTag, "Image not found. Generating image join with hash ", overallFp)
|
||||
|
||||
// Make sure there is an output path
|
||||
if err := os.MkdirAll(p.GetOutputPath(), os.ModePerm); err != nil {
|
||||
return errors.Wrap(err, "while creating output path")
|
||||
}
|
||||
|
||||
// otherwise, generate it and push it aside
|
||||
joinDir, err := ioutil.TempDir(p.GetOutputPath(), "join")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not create tempdir for joining images")
|
||||
}
|
||||
defer os.RemoveAll(joinDir) // clean up
|
||||
|
||||
for _, p := range fromPackages {
|
||||
Info(joinTag, ":arrow_right_hook:", p.HumanReadableString(), ":leaves:")
|
||||
}
|
||||
|
||||
current := 0
|
||||
for _, c := range fromPackages {
|
||||
current++
|
||||
if c != nil && c.Name != "" && c.Version != "" {
|
||||
joinTag2 := fmt.Sprintf("%s %d/%d ⤑ :hammer: build %s", joinTag, current, len(p.Join), c.HumanReadableString())
|
||||
|
||||
Info(joinTag2, "compilation starts")
|
||||
spec, err := cs.FromPackage(c)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "while generating images to join from")
|
||||
}
|
||||
wantsArtifact := true
|
||||
genDepsArtifact := !cs.Options.PackageTargetOnly
|
||||
|
||||
spec.SetOutputPath(p.GetOutputPath())
|
||||
|
||||
artifact, err := cs.compile(concurrency, keepPermissions, &wantsArtifact, &genDepsArtifact, spec)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed building join image")
|
||||
}
|
||||
|
||||
err = artifact.Unpack(joinDir, keepPermissions)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed building join image")
|
||||
}
|
||||
Info(joinTag2, ":white_check_mark: Done")
|
||||
}
|
||||
}
|
||||
|
||||
artifactDir, err := ioutil.TempDir(p.GetOutputPath(), "artifact")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not create tempdir for final artifact")
|
||||
}
|
||||
defer os.RemoveAll(joinDir) // clean up
|
||||
|
||||
Info(joinTag, ":droplet: generating artifact for source image of", p.GetPackage().HumanReadableString())
|
||||
|
||||
// After unpack, create a new artifact and a new final image from it.
|
||||
// no need to compress, as we are going to toss it away.
|
||||
a := artifact.NewPackageArtifact(filepath.Join(artifactDir, p.GetPackage().GetFingerPrint()+".join.tar"))
|
||||
if err := a.Compress(joinDir, concurrency); err != nil {
|
||||
return errors.Wrap(err, "error met while creating package archive")
|
||||
}
|
||||
|
||||
joinImageName := fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, overallFp)
|
||||
Info(joinTag, ":droplet: generating image from artifact", joinImageName)
|
||||
opts, err := a.GenerateFinalImage(joinImageName, cs.Backend, keepPermissions)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not create final image")
|
||||
}
|
||||
if cs.Options.Push {
|
||||
Info(joinTag, ":droplet: pushing image from artifact", joinImageName)
|
||||
if err = cs.Backend.Push(opts); err != nil {
|
||||
return errors.Wrapf(err, "Could not push image: %s %s", image, opts.DockerFileName)
|
||||
}
|
||||
}
|
||||
Info(joinTag, ":droplet: Consuming image", joinImageName)
|
||||
p.SetImage(joinImageName)
|
||||
return nil
|
||||
}
|
||||
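resolveFinalImages is essentially a lookup-or-build step: derive a deterministic tag from the final packages' hash, reuse the image if the backend already has it, otherwise assemble it from the unpacked artifacts and push it. The sketch below shows only that control flow; imageExists and buildAndPush are hypothetical stand-ins for the backend image lookup and the Compress/GenerateFinalImage/Push sequence above.

package main

import "fmt"

// backend is a hypothetical stand-in for the container backend used above.
type backend struct{ known map[string]bool }

func (b backend) imageExists(tag string) bool { return b.known[tag] }

func (b backend) buildAndPush(tag string) error {
	// In luet this step unpacks the dependency artifacts, compresses the
	// result, generates a final image from it and optionally pushes it.
	fmt.Println("building and pushing", tag)
	return nil
}

// resolveParentImage returns the tag of the parent image for a set of final
// packages, reusing it when a previously pushed image already matches the hash.
func resolveParentImage(b backend, repo, specHash string) (string, error) {
	tag := fmt.Sprintf("%s:%s", repo, specHash)
	if b.imageExists(tag) {
		return tag, nil // already available: consume it directly
	}
	if err := b.buildAndPush(tag); err != nil {
		return "", err
	}
	return tag, nil
}

func main() {
	b := backend{known: map[string]bool{"quay.io/example/cache:deadbeef": true}}
	tag, _ := resolveParentImage(b, "quay.io/example/cache", "deadbeef")
	fmt.Println("using", tag)
}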
|
||||
func (cs *LuetCompiler) resolveMultiStageImages(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) error {
|
||||
resolvedCopyFields := []compilerspec.CopyField{}
|
||||
copyTag := ">:droplet: copy<"
|
||||
|
||||
if len(p.Copy) != 0 {
|
||||
Info(copyTag, "Package has multi-stage copy, generating required images")
|
||||
}
|
||||
|
||||
current := 0
|
||||
// TODO: we should run this only if we are going to build the image
|
||||
for _, c := range p.Copy {
|
||||
current++
|
||||
if c.Package != nil && c.Package.Name != "" && c.Package.Version != "" {
|
||||
copyTag2 := fmt.Sprintf("%s %d/%d ⤑ :hammer: build %s", copyTag, current, len(p.Copy), c.Package.HumanReadableString())
|
||||
|
||||
Info(copyTag2, "generating multi-stage images for", c.Package.HumanReadableString())
|
||||
spec, err := cs.FromPackage(c.Package)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "while generating images to copy from")
|
||||
}
|
||||
|
||||
// If we specify --only-target package, we don't want any artifact, otherwise we do
|
||||
genArtifact := !cs.Options.PackageTargetOnly
|
||||
spec.SetOutputPath(p.GetOutputPath())
|
||||
artifact, err := cs.compile(concurrency, keepPermissions, &genArtifact, &genArtifact, spec)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed building multi-stage image")
|
||||
}
|
||||
|
||||
resolvedCopyFields = append(resolvedCopyFields, compilerspec.CopyField{
|
||||
Image: cs.resolveExistingImageHash(artifact.PackageCacheImage, spec),
|
||||
Source: c.Source,
|
||||
Destination: c.Destination,
|
||||
})
|
||||
Info(copyTag2, ":white_check_mark: Done")
|
||||
} else {
|
||||
resolvedCopyFields = append(resolvedCopyFields, c)
|
||||
}
|
||||
}
|
||||
p.Copy = resolvedCopyFields
|
||||
return nil
|
||||
}
|
||||
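The net effect of resolveMultiStageImages is that a copy stanza naming a package is rewritten into one naming that package's cache image, which the generated Dockerfile can then consume with COPY --from. A hedged illustration of the before/after shape, using a local stand-in for compilerspec.CopyField and a made-up image tag:

package main

import "fmt"

// copyField is a local stand-in for compilerspec.CopyField, only to show the
// shape of the rewrite; the real type lives in pkg/compiler/types/spec.
type copyField struct {
	Package     string // set when copying out of another package's build
	Image       string // cache image resolved by the compiler
	Source      string
	Destination string
}

func main() {
	// Before resolution: the spec references a package to copy from.
	c := copyField{Package: "test/c-1.2", Source: "/usr/bin/busybox", Destination: "bina/busybox"}

	// After resolution: the package has been compiled and its cache image
	// (hypothetical tag) substituted, ready for a multi-stage COPY.
	c.Image = "quay.io/example/cache:<package-hash>"
	fmt.Printf("COPY --from=%s %s %s\n", c.Image, c.Source, c.Destination)
}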
|
||||
func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateFinalArtifact *bool, generateDependenciesFinalArtifact *bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
|
||||
Info(":package: Compiling", p.GetPackage().HumanReadableString(), ".... :coffee:")
|
||||
|
||||
//Before multistage : join - same as multistage, but keep artifacts, join them, create a new one and generate a final image.
|
||||
// When the image is there, use it as a source here, in place of GetImage().
|
||||
if err := cs.resolveFinalImages(concurrency, keepPermissions, p); err != nil {
|
||||
return nil, errors.Wrap(err, "while resolving join images")
|
||||
}
|
||||
|
||||
if err := cs.resolveMultiStageImages(concurrency, keepPermissions, p); err != nil {
|
||||
return nil, errors.Wrap(err, "while resolving multi-stage images")
|
||||
}
|
||||
|
||||
Debug(fmt.Sprintf("%s: has images %t, empty package: %t", p.GetPackage().HumanReadableString(), p.HasImageSource(), p.EmptyPackage()))
|
||||
if !p.HasImageSource() && !p.EmptyPackage() {
|
||||
return nil,
|
||||
@@ -789,40 +955,62 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
|
||||
)
|
||||
}
|
||||
|
||||
targetAssertion := p.GetSourceAssertion().Search(p.GetPackage().GetFingerPrint())
|
||||
ht := NewHashTree(cs.Database)
|
||||
|
||||
packageHashTree, err := ht.Query(cs, p)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed querying hashtree")
|
||||
}
|
||||
|
||||
// This is in order to have the metadata in the yaml
|
||||
p.SetSourceAssertion(packageHashTree.Solution)
|
||||
targetAssertion := packageHashTree.Target
|
||||
|
||||
bus.Manager.Publish(bus.EventPackagePreBuild, struct {
|
||||
CompileSpec *compilerspec.LuetCompilationSpec
|
||||
Assert solver.PackageAssert
|
||||
CompileSpec *compilerspec.LuetCompilationSpec
|
||||
Assert solver.PackageAssert
|
||||
PackageHashTree *PackageImageHashTree
|
||||
}{
|
||||
CompileSpec: p,
|
||||
Assert: *targetAssertion,
|
||||
CompileSpec: p,
|
||||
Assert: *targetAssertion,
|
||||
PackageHashTree: packageHashTree,
|
||||
})
|
||||
|
||||
// Update compilespec build options - it will be then serialized into the compilation metadata file
|
||||
//p.SetBuildOptions(cs.Options)
|
||||
p.BuildOptions.PushImageRepository = cs.Options.PushImageRepository
|
||||
//p.BuildOptions.BuildValues = cs.Options.BuildValues
|
||||
//p.BuildOptions.BuildValuesFile = cs.Options.BuildValuesFile
|
||||
|
||||
// - If image is set we just generate a plain dockerfile
|
||||
// Treat last case (easier) first. The image is provided and we just compute a plain dockerfile with the images listed as above
|
||||
if p.GetImage() != "" {
|
||||
return cs.compileWithImage(p.GetImage(), cs.genBuilderImageTag(p, targetAssertion.Hash.PackageHash), targetAssertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, p, true)
|
||||
localGenerateArtifact := true
|
||||
if generateFinalArtifact != nil {
|
||||
localGenerateArtifact = *generateFinalArtifact
|
||||
}
|
||||
|
||||
a, err := cs.compileWithImage(p.GetImage(), packageHashTree.BuilderImageHash, targetAssertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, p, localGenerateArtifact)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "building direct image")
|
||||
}
|
||||
a.SourceAssertion = p.GetSourceAssertion()
|
||||
|
||||
a.PackageCacheImage = targetAssertion.Hash.PackageHash
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// - If image is not set, we read a base_image. Then we will build one image from it to kick-off our build based
|
||||
// on how we compute the resolvable tree.
|
||||
// This means to recursively build all the build-images needed to reach that tree part.
|
||||
// - We later on compute an hash used to identify the image, so each similar deptree keeps the same build image.
|
||||
|
||||
dependencies := p.GetSourceAssertion().Drop(p.GetPackage()) // at this point we should have a flattened list of deps to build, including all of them (with all constraints propagated already)
|
||||
departifacts := []*artifact.PackageArtifact{} // TODO: Return this somehow
|
||||
var lastHash string
|
||||
dependencies := packageHashTree.Dependencies // at this point we should have a flattened list of deps to build, including all of them (with all constraints propagated already)
|
||||
departifacts := []*artifact.PackageArtifact{} // TODO: Return this somehow
|
||||
depsN := 0
|
||||
currentN := 0
|
||||
|
||||
packageDeps := !cs.Options.PackageTargetOnly
|
||||
if generateDependenciesFinalArtifact != nil {
|
||||
packageDeps = *generateDependenciesFinalArtifact
|
||||
}
|
||||
|
||||
buildDeps := !cs.Options.NoDeps
|
||||
buildTarget := !cs.Options.OnlyDeps
|
||||
|
||||
@@ -845,8 +1033,6 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
|
||||
Debug("PullImage repos:", compileSpec.BuildOptions.PullImageRepository)
|
||||
|
||||
compileSpec.SetOutputPath(p.GetOutputPath())
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Builder image from hash", assertion.Hash.BuildHash)
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Package image from hash", assertion.Hash.PackageHash)
|
||||
|
||||
bus.Manager.Publish(bus.EventPackagePreBuild, struct {
|
||||
CompileSpec *compilerspec.LuetCompilationSpec
|
||||
@@ -856,29 +1042,53 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
|
||||
Assert: assertion,
|
||||
})
|
||||
|
||||
lastHash = assertion.Hash.PackageHash
|
||||
// for the source instead, pick an image and a buildertaggedImage from hashes if they exists.
|
||||
// otherways fallback to the pushed repo
|
||||
// Resolve images from the hashtree
|
||||
resolvedBuildImage := cs.resolveExistingImageHash(assertion.Hash.BuildHash, compileSpec)
|
||||
if compileSpec.GetImage() != "" {
|
||||
Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().HumanReadableString()+" from image")
|
||||
|
||||
a, err := cs.compileWithImage(compileSpec.GetImage(), assertion.Hash.BuildHash, assertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, compileSpec, packageDeps)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().HumanReadableString())
|
||||
}
|
||||
departifacts = append(departifacts, a)
|
||||
Info(pkgTag, ":white_check_mark: Done")
|
||||
continue
|
||||
if err := cs.resolveFinalImages(concurrency, keepPermissions, compileSpec); err != nil {
|
||||
return nil, errors.Wrap(err, "while resolving join images")
|
||||
}
|
||||
|
||||
Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().HumanReadableString()+" from tree")
|
||||
a, err := cs.compileWithImage(resolvedBuildImage, cs.genBuilderImageTag(compileSpec, targetAssertion.Hash.PackageHash), assertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, compileSpec, packageDeps)
|
||||
if err := cs.resolveMultiStageImages(concurrency, keepPermissions, compileSpec); err != nil {
|
||||
return nil, errors.Wrap(err, "while resolving multi-stage images")
|
||||
}
|
||||
|
||||
buildHash, err := packageHashTree.DependencyBuildImage(assertion.Package)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed looking for dependency in hashtree")
|
||||
}
|
||||
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Builder image from hash", assertion.Hash.BuildHash)
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Package image from hash", assertion.Hash.PackageHash)
|
||||
|
||||
var sourceImage string
|
||||
|
||||
if compileSpec.GetImage() != "" {
|
||||
Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().HumanReadableString()+" from image")
|
||||
sourceImage = compileSpec.GetImage()
|
||||
} else {
|
||||
// for the source instead, pick an image and a buildertaggedImage from hashes if they exist.
// otherwise fall back to the pushed repo
|
||||
// Resolve images from the hashtree
|
||||
sourceImage = cs.resolveExistingImageHash(assertion.Hash.BuildHash, compileSpec)
|
||||
Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().HumanReadableString()+" from tree")
|
||||
}
|
||||
|
||||
a, err := cs.compileWithImage(
|
||||
sourceImage,
|
||||
buildHash,
|
||||
assertion.Hash.PackageHash,
|
||||
concurrency,
|
||||
keepPermissions,
|
||||
cs.Options.KeepImg,
|
||||
compileSpec,
|
||||
packageDeps,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().HumanReadableString())
|
||||
}
|
||||
|
||||
a.PackageCacheImage = assertion.Hash.PackageHash
|
||||
|
||||
Info(pkgTag, ":white_check_mark: Done")
|
||||
|
||||
bus.Manager.Publish(bus.EventPackagePostBuild, struct {
|
||||
CompileSpec *compilerspec.LuetCompilationSpec
|
||||
Artifact *artifact.PackageArtifact
|
||||
@@ -888,24 +1098,24 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
|
||||
})
|
||||
|
||||
departifacts = append(departifacts, a)
|
||||
Info(pkgTag, ":white_check_mark: Done")
|
||||
}
|
||||
|
||||
} else if len(dependencies) > 0 {
|
||||
lastHash = dependencies[len(dependencies)-1].Hash.PackageHash
|
||||
}
|
||||
|
||||
if buildTarget {
|
||||
resolvedBuildImage := cs.resolveExistingImageHash(lastHash, p)
|
||||
localGenerateArtifact := true
|
||||
if generateFinalArtifact != nil {
|
||||
localGenerateArtifact = *generateFinalArtifact
|
||||
}
|
||||
resolvedSourceImage := cs.resolveExistingImageHash(packageHashTree.SourceHash, p)
|
||||
Info(":rocket: All dependencies are satisfied, building package requested by the user", p.GetPackage().HumanReadableString())
|
||||
Info(":package:", p.GetPackage().HumanReadableString(), " Using image: ", resolvedBuildImage)
|
||||
a, err := cs.compileWithImage(resolvedBuildImage, cs.genBuilderImageTag(p, targetAssertion.Hash.PackageHash), targetAssertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, p, true)
|
||||
Info(":package:", p.GetPackage().HumanReadableString(), " Using image: ", resolvedSourceImage)
|
||||
a, err := cs.compileWithImage(resolvedSourceImage, packageHashTree.BuilderImageHash, targetAssertion.Hash.PackageHash, concurrency, keepPermissions, cs.Options.KeepImg, p, localGenerateArtifact)
|
||||
if err != nil {
|
||||
return a, err
|
||||
}
|
||||
a.Dependencies = departifacts
|
||||
a.SourceAssertion = p.GetSourceAssertion()
|
||||
|
||||
a.PackageCacheImage = targetAssertion.Hash.PackageHash
|
||||
bus.Manager.Publish(bus.EventPackagePostBuild, struct {
|
||||
CompileSpec *compilerspec.LuetCompilationSpec
|
||||
Artifact *artifact.PackageArtifact
|
||||
@@ -923,6 +1133,14 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p *compil
|
||||
type templatedata map[string]interface{}
|
||||
|
||||
func (cs *LuetCompiler) templatePackage(vals []map[string]interface{}, pack pkg.Package, dst templatedata) ([]byte, error) {
|
||||
// Grab shared templates first
|
||||
var chartFiles []*chart.File
|
||||
if len(cs.Options.TemplatesFolder) != 0 {
|
||||
c, err := helpers.ChartFiles(cs.Options.TemplatesFolder)
|
||||
if err == nil {
|
||||
chartFiles = c
|
||||
}
|
||||
}
|
||||
|
||||
var dataresult []byte
|
||||
val := pack.Rel(DefinitionFile)
|
||||
@@ -960,7 +1178,7 @@ func (cs *LuetCompiler) templatePackage(vals []map[string]interface{}, pack pkg.
|
||||
return nil, errors.Wrap(err, "merging values maps")
|
||||
}
|
||||
|
||||
dat, err := helpers.RenderHelm(string(dataBuild), td, dst)
|
||||
dat, err := helpers.RenderHelm(append(chartFiles, helpers.ChartFileB(dataBuild)...), td, dst)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+pack.Rel(BuildFile))
|
||||
}
|
||||
@@ -978,14 +1196,20 @@ func (cs *LuetCompiler) templatePackage(vals []map[string]interface{}, pack pkg.
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "while marshalling values file")
|
||||
}
|
||||
f := filepath.Join(valuesdir, helpers.RandStringRunes(20))
|
||||
f := filepath.Join(valuesdir, fileHelper.RandStringRunes(20))
|
||||
if err := ioutil.WriteFile(f, out, os.ModePerm); err != nil {
|
||||
return nil, errors.Wrap(err, "while writing temporary values file")
|
||||
}
|
||||
bv = append([]string{f}, bv...)
|
||||
}
|
||||
}
|
||||
out, err := helpers.RenderFiles(pack.Rel(BuildFile), val, bv...)
|
||||
|
||||
raw, err := ioutil.ReadFile(pack.Rel(BuildFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
out, err := helpers.RenderFiles(append(chartFiles, helpers.ChartFileB(raw)...), val, bv...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "rendering file "+pack.Rel(BuildFile))
|
||||
}
|
||||
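With this change build.yaml is no longer rendered in isolation: chart files discovered under the configured templates folder are prepended, so shared named templates can be referenced from any package's build.yaml. The sketch below illustrates the idea with the standard library's text/template rather than the helm engine luet actually uses; the file contents and values are assumptions.

package main

import (
	"os"
	"text/template"
)

func main() {
	// A shared template, analogous to a file under the templates folder.
	shared := `{{define "image"}}quay.io/example/{{.name}}:{{.version}}{{end}}`
	// A package build.yaml that can now call the shared definition.
	build := "image: {{template \"image\" .}}\nsteps: [\"echo building {{.name}}\"]\n"

	t := template.Must(template.New("shared").Parse(shared))
	t = template.Must(t.New("build.yaml").Parse(build))

	// Values would normally come from default values files and --values overrides.
	if err := t.ExecuteTemplate(os.Stdout, "build.yaml", map[string]string{"name": "foo", "version": "1.0"}); err != nil {
		panic(err)
	}
}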
@@ -1051,6 +1275,11 @@ func (cs *LuetCompiler) FromPackage(p pkg.Package) (*compilerspec.LuetCompilatio
|
||||
|
||||
cs.inheritSpecBuildOptions(newSpec)
|
||||
|
||||
// Update the package in the compiler database to catch updates from NewLuetCompilationSpec
|
||||
if err := cs.Database.UpdatePackage(newSpec.Package); err != nil {
|
||||
return nil, errors.Wrap(err, "failed updating new package entry in compiler database")
|
||||
}
|
||||
|
||||
return newSpec, err
|
||||
}
|
||||
|
||||
|
@@ -25,6 +25,7 @@ import (
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
@@ -59,15 +60,15 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
@@ -75,6 +76,68 @@ var _ = Describe("Compiler", func() {
|
||||
})
|
||||
})
|
||||
|
||||
Context("Copy and Join", func() {
|
||||
It("Compiles it correctly with Copy", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/copy")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.2"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(fileHelper.Exists(spec.Rel("result"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("bina/busybox"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles it correctly with Join", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/join")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.2"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(spec.Rel("newc"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
Context("Simple package build definition", func() {
|
||||
It("Compiles it in parallel", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
@@ -102,7 +165,7 @@ var _ = Describe("Compiler", func() {
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2))
|
||||
Expect(errs).To(BeNil())
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
@@ -165,23 +228,23 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(3))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("c"))
|
||||
content1, err := fileHelper.Read(spec.Rel("c"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("cd"))
|
||||
content2, err := fileHelper.Read(spec.Rel("cd"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("c\n"))
|
||||
Expect(content2).To(Equal("c\n"))
|
||||
|
||||
content1, err = helpers.Read(spec.Rel("d"))
|
||||
content1, err = fileHelper.Read(spec.Rel("d"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err = helpers.Read(spec.Rel("dd"))
|
||||
content2, err = fileHelper.Read(spec.Rel("dd"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("s\n"))
|
||||
Expect(content2).To(Equal("dd\n"))
|
||||
@@ -215,17 +278,17 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts2)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, artifact := range artifacts2 {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("etc/hosts"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test1"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("etc/hosts"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test1"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and includes ony wanted files", func() {
|
||||
@@ -254,12 +317,12 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and excludes files", func() {
|
||||
@@ -288,13 +351,13 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles includes and excludes files", func() {
|
||||
@@ -323,13 +386,13 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and excludes ony wanted files also from unpacked packages", func() {
|
||||
@@ -357,12 +420,12 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles includes and excludes ony wanted files also from unpacked packages", func() {
|
||||
@@ -390,12 +453,12 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and includes ony wanted files also from unpacked packages", func() {
|
||||
@@ -423,16 +486,16 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("var/lib/udhcpd"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test2"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("lib/firmware"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("var/lib/udhcpd"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test2"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("lib/firmware"))).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles a more complex tree", func() {
|
||||
@@ -461,18 +524,18 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("extra-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("extra-layer"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("base-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("base-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("extra-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles with provides support", func() {
|
||||
@@ -502,19 +565,19 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("c-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("d"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("dd"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("c"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("cd"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("d"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("dd"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("c"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("cd"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles with provides and selectors support", func() {
|
||||
@@ -545,19 +608,19 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("c-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("d"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("dd"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("c"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("cd"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("d"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("dd"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("c"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("cd"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
})
|
||||
It("Compiles revdeps", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
@@ -585,16 +648,16 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(2))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("extra-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("extra-layer"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles complex dependencies trees with best matches", func() {
|
||||
@@ -623,17 +686,17 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(6))
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Untar(spec.Rel("vhba-sys-fs-5.4.2-20190410.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("sabayon-build-portage-layer-0.20191126.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("build-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("build-sabayon-overlay-layer-0.20191212.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("build-sabayon-overlays-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("linux-sabayon-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("sabayon-sources-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("vhba"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("sabayon-build-portage-layer-0.20191126.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("build-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("build-sabayon-overlay-layer-0.20191212.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("build-sabayon-overlays-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("linux-sabayon-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("sabayon-sources-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("vhba"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles revdeps with seeds", func() {
|
||||
@@ -658,31 +721,31 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(4))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
// A deps on B, so A artifacts are here:
|
||||
Expect(helpers.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
|
||||
// B
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("artifact42"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("artifact42"))).To(BeTrue())
|
||||
|
||||
// C depends on B, so B is here
|
||||
content1, err := helpers.Read(spec.Rel("c"))
|
||||
content1, err := fileHelper.Read(spec.Rel("c"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("cd"))
|
||||
content2, err := fileHelper.Read(spec.Rel("cd"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("c\n"))
|
||||
Expect(content2).To(Equal("c\n"))
|
||||
|
||||
// D is here as it requires C, and C was recompiled
|
||||
content1, err = helpers.Read(spec.Rel("d"))
|
||||
content1, err = fileHelper.Read(spec.Rel("d"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err = helpers.Read(spec.Rel("dd"))
|
||||
content2, err = fileHelper.Read(spec.Rel("dd"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("s\n"))
|
||||
Expect(content2).To(Equal("dd\n"))
|
||||
@@ -715,19 +778,19 @@ var _ = Describe("Compiler", func() {
|
||||
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
for _, d := range artifact.Dependencies {
|
||||
Expect(helpers.Exists(d.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(d.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(d.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
}
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
})
|
||||
})
|
||||
@@ -759,8 +822,8 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
Expect(helpers.Untar(spec.Rel("runtime-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("var"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("var"))).ToNot(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -807,13 +870,13 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(0))
|
||||
|
||||
Expect(helpers.Untar(spec.Rel("dironly-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("test1"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test2"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test1"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test2"))).To(BeTrue())
|
||||
|
||||
Expect(helpers.Untar(spec2.Rel("dironly_filter-test-1.0.package.tar"), tmpdir2, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec2.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec2.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec2.Rel("artifact42"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("artifact42"))).ToNot(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -843,12 +906,12 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.package.tar"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(spec.Rel("runtime-layer-0.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("runtime-layer-0.1.package.tar"))).To(BeFalse())
|
||||
Expect(artifacts[0].Unpack(tmpdir, false)).ToNot(HaveOccurred())
|
||||
// Expect(helpers.Untar(spec.Rel("runtime-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("var"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("var"))).ToNot(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -899,7 +962,7 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
|
||||
Expect(artifacts[0].Files).To(ContainElement("bin/busybox"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("runtime-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
art, err := LoadArtifactFromYaml(spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
161
pkg/compiler/imagehashtree.go
Normal file
@@ -0,0 +1,161 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package compiler

import (
	"fmt"

	compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
	"github.com/mudler/luet/pkg/config"
	pkg "github.com/mudler/luet/pkg/package"
	"github.com/mudler/luet/pkg/solver"
	"github.com/pkg/errors"
)

// ImageHashTree is holding the Database
// and the options to resolve PackageImageHashTrees
// for a given specfile
// It is responsible for returning a concrete result
// which identifies a Package in a HashTree
type ImageHashTree struct {
	Database      pkg.PackageDatabase
	SolverOptions config.LuetSolverOptions
}

// PackageImageHashTree represents the Package in a given image hash tree
// The hash tree is constructed by a set of images representing
// the package during its build stage. A Hash is assigned to each image
// from the package fingerprint, plus the SAT solver assertion result (which is hashed as well)
// and the specfile signatures. This guarantees that each image of the build stage
// is unique and can be identified later on.
type PackageImageHashTree struct {
	Target                       *solver.PackageAssert
	Dependencies                 solver.PackagesAssertions
	Solution                     solver.PackagesAssertions
	dependencyBuilderImageHashes map[string]string
	SourceHash                   string
	BuilderImageHash             string
}

func NewHashTree(db pkg.PackageDatabase) *ImageHashTree {
	return &ImageHashTree{
		Database: db,
	}
}

func (ht *PackageImageHashTree) DependencyBuildImage(p pkg.Package) (string, error) {
	found, ok := ht.dependencyBuilderImageHashes[p.GetFingerPrint()]
	if !ok {
		return "", errors.New("package hash not found")
	}
	return found, nil
}

func (ht *PackageImageHashTree) String() string {
	return fmt.Sprintf(
		"Target buildhash: %s\nTarget packagehash: %s\nBuilder Imagehash: %s\nSource Imagehash: %s\n",
		ht.Target.Hash.BuildHash,
		ht.Target.Hash.PackageHash,
		ht.BuilderImageHash,
		ht.SourceHash,
	)
}

// Query takes a compiler and a compilation spec and returns a PackageImageHashTree tied to it.
// PackageImageHashTree contains all the information to resolve the spec build images in order to
// reproducibly re-build images from packages
func (ht *ImageHashTree) Query(cs *LuetCompiler, p *compilerspec.LuetCompilationSpec) (*PackageImageHashTree, error) {
	assertions, err := ht.resolve(cs, p)
	if err != nil {
		return nil, err
	}
	targetAssertion := assertions.Search(p.GetPackage().GetFingerPrint())

	dependencies := assertions.Drop(p.GetPackage())
	var sourceHash string
	imageHashes := map[string]string{}
	for _, assertion := range dependencies {
		var depbuildImageTag string
		compileSpec, err := cs.FromPackage(assertion.Package)
		if err != nil {
			return nil, errors.Wrap(err, "Error while generating compilespec for "+assertion.Package.GetName())
		}
		if compileSpec.GetImage() != "" {
			depbuildImageTag = assertion.Hash.BuildHash
		} else {
			depbuildImageTag = ht.genBuilderImageTag(compileSpec, targetAssertion.Hash.PackageHash)
		}
		imageHashes[assertion.Package.GetFingerPrint()] = depbuildImageTag
		sourceHash = assertion.Hash.PackageHash
	}

	return &PackageImageHashTree{
		Dependencies:                 dependencies,
		Target:                       targetAssertion,
		SourceHash:                   sourceHash,
		BuilderImageHash:             ht.genBuilderImageTag(p, targetAssertion.Hash.PackageHash),
		dependencyBuilderImageHashes: imageHashes,
		Solution:                     assertions,
	}, nil
}

func (ht *ImageHashTree) genBuilderImageTag(p *compilerspec.LuetCompilationSpec, packageImage string) string {
	// Use packageImage as salt into the fp being used
	// so the hash is unique also in cases where
	// some package deps do have completely different
	// depgraphs
	return fmt.Sprintf("builder-%s", p.GetPackage().HashFingerprint(packageImage))
}

// resolve computes the dependency tree of a compilation spec and returns solver assertions
// in order to be able to compile the spec.
func (ht *ImageHashTree) resolve(cs *LuetCompiler, p *compilerspec.LuetCompilationSpec) (solver.PackagesAssertions, error) {
	dependencies, err := cs.ComputeDepTree(p)
	if err != nil {
		return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().HumanReadableString())
	}

	// Get hash from buildspecs
	salts := map[string]string{}
	for _, assertion := range dependencies { //highly dependent on the order
		if assertion.Value {
			spec, err := cs.FromPackage(assertion.Package)
			if err != nil {
				return nil, errors.Wrap(err, "while computing hash buildspecs")
			}
			hash, err := spec.Hash()
			if err != nil {
				return nil, errors.Wrap(err, "failed computing hash")
			}
			salts[assertion.Package.GetFingerPrint()] = hash
		}
	}

	assertions := solver.PackagesAssertions{}
	for _, assertion := range dependencies { //highly dependent on the order
		if assertion.Value {
			nthsolution := dependencies.Cut(assertion.Package)
			assertion.Hash = solver.PackageHash{
				BuildHash:   nthsolution.SaltedHashFrom(assertion.Package, salts),
				PackageHash: nthsolution.SaltedAssertionHash(salts),
			}
			assertion.Package.SetTreeDir(p.Package.GetTreeDir())
			assertions = append(assertions, assertion)
		}
	}

	return assertions, nil
}
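Taken together, ImageHashTree.Query hands the compiler every image reference it needs for a spec: the builder image tag, the per-dependency build hashes, and the target package hash. A hedged usage sketch mirroring the test file below; the fixture path and the printed fields are assumptions about how a caller might consume the result.

package main

import (
	"fmt"

	luetcompiler "github.com/mudler/luet/pkg/compiler"
	backend "github.com/mudler/luet/pkg/compiler/backend"
	"github.com/mudler/luet/pkg/compiler/types/options"
	pkg "github.com/mudler/luet/pkg/package"
	"github.com/mudler/luet/pkg/tree"
)

func main() {
	// Load a compilation tree; the fixture path is an assumption for the example.
	recipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
	if err := recipe.Load("tests/fixtures/buildable"); err != nil {
		panic(err)
	}

	c := luetcompiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), recipe.GetDatabase(), options.Concurrency(2))
	spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
	if err != nil {
		panic(err)
	}

	// Resolve the per-image hashes for the spec without building anything.
	hashTree := luetcompiler.NewHashTree(recipe.GetDatabase())
	packageHashTree, err := hashTree.Query(c, spec)
	if err != nil {
		panic(err)
	}

	fmt.Println("builder image tag:", packageHashTree.BuilderImageHash)
	fmt.Println("package image hash:", packageHashTree.Target.Hash.PackageHash)
	fmt.Print(packageHashTree) // String() reports build, package, builder and source hashes
}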
150
pkg/compiler/imagehashtree_test.go
Normal file
@@ -0,0 +1,150 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package compiler_test

import (
  . "github.com/mudler/luet/pkg/compiler"
  sd "github.com/mudler/luet/pkg/compiler/backend"
  "github.com/mudler/luet/pkg/compiler/types/options"
  pkg "github.com/mudler/luet/pkg/package"
  "github.com/mudler/luet/pkg/tree"
  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
)

var _ = Describe("ImageHashTree", func() {
  generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
  compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
  hashtree := NewHashTree(generalRecipe.GetDatabase())

  Context("Simple package definition", func() {
    BeforeEach(func() {
      generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
      err := generalRecipe.Load("../../tests/fixtures/buildable")
      Expect(err).ToNot(HaveOccurred())
      compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
      hashtree = NewHashTree(generalRecipe.GetDatabase())
    })

    It("Calculates the hash correctly", func() {
      spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
      Expect(err).ToNot(HaveOccurred())

      packageHash, err := hashtree.Query(compiler, spec)
      Expect(err).ToNot(HaveOccurred())
      Expect(packageHash.Target.Hash.BuildHash).To(Equal("53993e5a02da4c21ad845371c872f5836fe45ff3a4e3c5ccb6296d0faee2b107"))
      Expect(packageHash.Target.Hash.PackageHash).To(Equal("a786d3fd29d0b8bdfe5f304c8bf8be909d5c764cd7059c0e63294a8bff17f3ef"))
      Expect(packageHash.BuilderImageHash).To(Equal("builder-0cd3c0d07fc9be568377b3bf1b699e06"))
    })
  })

  expectedPackageHash := "0d568ac04c4ca528a4e5b67978f2ad3a75d31d443ab20f9d7683b9608cc0d494"

  Context("complex package definition", func() {
    BeforeEach(func() {
      generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))

      err := generalRecipe.Load("../../tests/fixtures/upgrade_old_repo_revision")
      Expect(err).ToNot(HaveOccurred())
      compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
      hashtree = NewHashTree(generalRecipe.GetDatabase())
    })
    It("Calculates the hash correctly", func() {
      spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
      Expect(err).ToNot(HaveOccurred())

      packageHash, err := hashtree.Query(compiler, spec)
      Expect(err).ToNot(HaveOccurred())

      Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(expectedPackageHash))
      Expect(packageHash.SourceHash).To(Equal(expectedPackageHash))
      Expect(packageHash.BuilderImageHash).To(Equal("builder-0f45c345f59103e84fc8bebbf02f2e2b"))

      //Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
      Expect(packageHash.Target.Hash.PackageHash).To(Equal("2e8159583ac825acada763358290cfbea919a33873a926cab84f4f1a67ecf111"))
      a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}
      hash, err := packageHash.DependencyBuildImage(a)
      Expect(err).ToNot(HaveOccurred())

      Expect(hash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))

      assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())
      Expect(assertionA.Hash.PackageHash).To(Equal(expectedPackageHash))
      b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
      assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())

      Expect(assertionB.Hash.PackageHash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
      hashB, err := packageHash.DependencyBuildImage(b)
      Expect(err).ToNot(HaveOccurred())

      Expect(hashB).To(Equal("315075265aeb2e3c04c5428d31911f53c194ec9fa3db1421e8478f44b1e0def8"))
    })
  })

  Context("complex package definition, with small change in build.yaml", func() {
    BeforeEach(func() {
      generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))

      //Definition of A here is slightly changed in the steps build.yaml file (1 character only)
      err := generalRecipe.Load("../../tests/fixtures/upgrade_old_repo_revision_content_changed")
      Expect(err).ToNot(HaveOccurred())
      compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
      hashtree = NewHashTree(generalRecipe.GetDatabase())
    })
    It("Calculates the hash correctly", func() {
      spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
      Expect(err).ToNot(HaveOccurred())

      packageHash, err := hashtree.Query(compiler, spec)
      Expect(err).ToNot(HaveOccurred())

      Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).ToNot(Equal(expectedPackageHash))
      sourceHash := "66ec001fe72052d0e605ca96f607ae39ea4f8b53f0b7f762e377622d9c654de3"
      Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(sourceHash))
      Expect(packageHash.SourceHash).To(Equal(sourceHash))

      Expect(packageHash.SourceHash).ToNot(Equal(expectedPackageHash))

      Expect(packageHash.BuilderImageHash).To(Equal("builder-ffc02fd8aaa916d0e17249885b3226b1"))

      //Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
      Expect(packageHash.Target.Hash.PackageHash).To(Equal("b9c0286ebf6d28be831926ec7da9cb3cda6b489722d656aefc363ebd7173f937"))
      a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}
      hash, err := packageHash.DependencyBuildImage(a)
      Expect(err).ToNot(HaveOccurred())

      Expect(hash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))

      assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())

      Expect(assertionA.Hash.PackageHash).To(Equal("66ec001fe72052d0e605ca96f607ae39ea4f8b53f0b7f762e377622d9c654de3"))
      Expect(assertionA.Hash.PackageHash).ToNot(Equal(expectedPackageHash))

      b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
      assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())

      Expect(assertionB.Hash.PackageHash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
      hashB, err := packageHash.DependencyBuildImage(b)
      Expect(err).ToNot(HaveOccurred())

      Expect(hashB).To(Equal("315075265aeb2e3c04c5428d31911f53c194ec9fa3db1421e8478f44b1e0def8"))
    })
  })

})
@@ -42,6 +42,7 @@ import (
  compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
  . "github.com/mudler/luet/pkg/config"
  "github.com/mudler/luet/pkg/helpers"
  fileHelper "github.com/mudler/luet/pkg/helpers/file"
  . "github.com/mudler/luet/pkg/logger"
  pkg "github.com/mudler/luet/pkg/package"
  "github.com/mudler/luet/pkg/solver"
@@ -55,12 +56,14 @@ import (
type PackageArtifact struct {
  Path string `json:"path"`

  Dependencies []*PackageArtifact `json:"dependencies"`
  CompileSpec *compilerspec.LuetCompilationSpec `json:"compilationspec"`
  Checksums Checksums `json:"checksums"`
  SourceAssertion solver.PackagesAssertions `json:"-"`
  CompressionType compression.Implementation `json:"compressiontype"`
  Files []string `json:"files"`
  Dependencies []*PackageArtifact `json:"dependencies"`
  CompileSpec *compilerspec.LuetCompilationSpec `json:"compilationspec"`
  Checksums Checksums `json:"checksums"`
  SourceAssertion solver.PackagesAssertions `json:"-"`
  CompressionType compression.Implementation `json:"compressiontype"`
  Files []string `json:"files"`
  PackageCacheImage string `json:"package_cacheimage"`
  Runtime *pkg.DefaultPackage `json:"runtime,omitempty"`
}

func (p *PackageArtifact) ShallowCopy() *PackageArtifact {
@@ -99,19 +102,23 @@ func (a *PackageArtifact) Verify() error {
  return nil
}

func (a *PackageArtifact) WriteYaml(dst string) error {
func (a *PackageArtifact) WriteYAML(dst string) error {
  // First compute checksum of artifact. When we write the yaml we want to write up-to-date informations.
  err := a.Hash()
  if err != nil {
    return errors.Wrap(err, "Failed generating checksums for artifact")
  }

  //p := a.CompileSpec.GetPackage().GetPath()
  // Update runtime package information
  if a.CompileSpec != nil && a.CompileSpec.Package != nil {
    runtime, err := a.CompileSpec.Package.GetRuntimePackage()
    if err != nil {
      return errors.Wrapf(err, "getting runtime package for '%s'", a.CompileSpec.Package.HumanReadableString())
    }
    Debug(fmt.Sprintf("embedding runtime package (%s) definition to artifact metadata", a.CompileSpec.Package.HumanReadableString()))
    a.Runtime = runtime
  }

  //a.CompileSpec.GetPackage().SetPath("")
  // for _, ass := range a.CompileSpec.GetSourceAssertion() {
  // ass.Package.SetPath("")
  // }
  data, err := yaml.Marshal(a)
  if err != nil {
    return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
@@ -167,7 +174,7 @@ func CreateArtifactForFile(s string, opts ...func(*PackageArtifact)) (*PackageAr
  }
  defer os.RemoveAll(archive) // clean up
  dst := filepath.Join(archive, fileName)
  if err := helpers.CopyFile(s, dst); err != nil {
  if err := fileHelper.CopyFile(s, dst); err != nil {
    return nil, errors.Wrapf(err, "error while copying %s to %s", s, dst)
  }

@@ -208,7 +215,7 @@ func (a *PackageArtifact) GenerateFinalImage(imageName string, b ImageBuilder, k
    return builderOpts, errors.Wrap(err, "error met while uncompressing artifact "+a.Path)
  }

  empty, err := helpers.DirectoryIsEmpty(uncompressedFiles)
  empty, err := fileHelper.DirectoryIsEmpty(uncompressedFiles)
  if err != nil {
    return builderOpts, errors.Wrap(err, "error met while checking if directory is empty "+uncompressedFiles)
  }
@@ -217,7 +224,7 @@ func (a *PackageArtifact) GenerateFinalImage(imageName string, b ImageBuilder, k
  // We can't generate FROM scratch empty images. Docker will refuse to export them
  // workaround: Inject a .virtual empty file
  if empty {
    helpers.Touch(filepath.Join(uncompressedFiles, ".virtual"))
    fileHelper.Touch(filepath.Join(uncompressedFiles, ".virtual"))
  }

  data := a.genDockerfile()
@@ -400,12 +407,12 @@ func tarModifierWrapperFunc(dst, path string, header *tar.Header, content io.Rea
  // We want to protect file only if the hash of the files are differing OR the file size are
  differs := (existingHash != "" && existingHash != tarHash) || (err != nil && f != nil && header.Size != f.Size())
  // Check if exists
  if helpers.Exists(destPath) && differs {
  if fileHelper.Exists(destPath) && differs {
    for i := 1; i < 1000; i++ {
      name := filepath.Join(filepath.Join(filepath.Dir(path),
        fmt.Sprintf("._cfg%04d_%s", i, filepath.Base(path))))

      if helpers.Exists(name) {
      if fileHelper.Exists(name) {
        continue
      }
      Info(fmt.Sprintf("Found protected file %s. Creating %s.", destPath,
@@ -451,6 +458,9 @@ func (a *PackageArtifact) GetProtectFiles() []string {

// Unpack Untar and decompress (TODO) to the given path
func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
  if !strings.HasPrefix(dst, "/") {
    return errors.New("destination must be an absolute path")
  }

  // Create
  protectedFiles := a.GetProtectFiles()
@@ -627,7 +637,7 @@ func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
  _, err := os.Lstat(job.Dst)
  if err != nil {
    Debug("Copying ", job.Src)
    if err := helpers.DeepCopyFile(job.Src, job.Dst); err != nil {
    if err := fileHelper.DeepCopyFile(job.Src, job.Dst); err != nil {
      Warning("Error copying", job, err)
    }
  }
32  pkg/compiler/types/artifact/artifact_suite_test.go  Normal file
@@ -0,0 +1,32 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package artifact_test

import (
  "testing"

  . "github.com/mudler/luet/cmd"
  config "github.com/mudler/luet/pkg/config"

  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
)

func TestArtifact(t *testing.T) {
  RegisterFailHandler(Fail)
  LoadConfig(config.LuetCfg)
  RunSpecs(t, "Artifact Suite")
}
@@ -29,6 +29,7 @@ import (

  . "github.com/mudler/luet/pkg/compiler"
  helpers "github.com/mudler/luet/pkg/helpers"
  fileHelper "github.com/mudler/luet/pkg/helpers/file"
  pkg "github.com/mudler/luet/pkg/package"
  "github.com/mudler/luet/pkg/tree"
  . "github.com/onsi/ginkgo"
@@ -41,7 +42,7 @@ var _ = Describe("Artifact", func() {

    generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))

    err := generalRecipe.Load("../../tests/fixtures/buildtree")
    err := generalRecipe.Load("../../../../tests/fixtures/buildtree")
    Expect(err).ToNot(HaveOccurred())

    Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
@@ -71,7 +72,7 @@ var _ = Describe("Artifact", func() {

    err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
    Expect(err).ToNot(HaveOccurred())
    dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
    dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
    Expect(err).ToNot(HaveOccurred())
    Expect(dockerfile).To(Equal(`
FROM alpine
@@ -89,12 +90,12 @@ ENV PACKAGE_CATEGORY=app-admin`))
    }
    Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
    Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
    Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
    Expect(fileHelper.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
    Expect(b.BuildImage(opts)).ToNot(HaveOccurred())

    err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
    Expect(err).ToNot(HaveOccurred())
    dockerfile, err = helpers.Read(filepath.Join(tmpdir, "LuetDockerfile"))
    dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "LuetDockerfile"))
    Expect(err).ToNot(HaveOccurred())
    Expect(dockerfile).To(Equal(`
FROM luet/base
@@ -113,7 +114,7 @@ RUN echo bar > /test2`))
    }
    Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
    Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
    Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
    Expect(fileHelper.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
    diffs, err := compiler.GenerateChanges(b, opts, opts2)
    Expect(err).ToNot(HaveOccurred())

@@ -140,15 +141,15 @@ RUN echo bar > /test2`))

    a, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, []string{}, compression.None)
    Expect(err).ToNot(HaveOccurred())
    Expect(helpers.Exists(filepath.Join(tmpdir, "package.tar"))).To(BeTrue())
    Expect(fileHelper.Exists(filepath.Join(tmpdir, "package.tar"))).To(BeTrue())
    err = helpers.Untar(a.Path, unpacked, false)
    Expect(err).ToNot(HaveOccurred())
    Expect(helpers.Exists(filepath.Join(unpacked, "test"))).To(BeTrue())
    Expect(helpers.Exists(filepath.Join(unpacked, "test2"))).To(BeTrue())
    content1, err := helpers.Read(filepath.Join(unpacked, "test"))
    Expect(fileHelper.Exists(filepath.Join(unpacked, "test"))).To(BeTrue())
    Expect(fileHelper.Exists(filepath.Join(unpacked, "test2"))).To(BeTrue())
    content1, err := fileHelper.Read(filepath.Join(unpacked, "test"))
    Expect(err).ToNot(HaveOccurred())
    Expect(content1).To(Equal("foo\n"))
    content2, err := helpers.Read(filepath.Join(unpacked, "test2"))
    content2, err := fileHelper.Read(filepath.Join(unpacked, "test2"))
    Expect(err).ToNot(HaveOccurred())
    Expect(content2).To(Equal("bar\n"))

@@ -156,7 +157,7 @@ RUN echo bar > /test2`))
    Expect(err).ToNot(HaveOccurred())
    err = a.Verify()
    Expect(err).ToNot(HaveOccurred())
    Expect(helpers.CopyFile(filepath.Join(tmpdir, "output2.tar"), filepath.Join(tmpdir, "package.tar"))).ToNot(HaveOccurred())
    Expect(fileHelper.CopyFile(filepath.Join(tmpdir, "output2.tar"), filepath.Join(tmpdir, "package.tar"))).ToNot(HaveOccurred())

    err = a.Verify()
    Expect(err).To(HaveOccurred())
@@ -244,7 +245,7 @@ RUN echo bar > /test2`))
    err = b.ExtractRootfs(backend.Options{ImageName: resultingImage, Destination: result}, false)
    Expect(err).ToNot(HaveOccurred())

    Expect(helpers.DirectoryIsEmpty(result)).To(BeFalse())
    Expect(fileHelper.DirectoryIsEmpty(result)).To(BeFalse())
    content, err := ioutil.ReadFile(filepath.Join(result, ".virtual"))
    Expect(err).ToNot(HaveOccurred())
@@ -40,13 +40,13 @@ var _ = Describe("Checksum", func() {
    Expect(len(definitionsum)).To(Equal(0))
    Expect(len(definitionsum2)).To(Equal(0))

    err = buildsum.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/build.yaml"))
    err = buildsum.Generate(NewPackageArtifact("../../../../tests/fixtures/layers/alpine/build.yaml"))
    Expect(err).ToNot(HaveOccurred())

    err = definitionsum.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/definition.yaml"))
    err = definitionsum.Generate(NewPackageArtifact("../../../../tests/fixtures/layers/alpine/definition.yaml"))
    Expect(err).ToNot(HaveOccurred())

    err = definitionsum2.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/definition.yaml"))
    err = definitionsum2.Generate(NewPackageArtifact("../../../../tests/fixtures/layers/alpine/definition.yaml"))
    Expect(err).ToNot(HaveOccurred())

    Expect(len(buildsum)).To(Equal(1))
@@ -43,6 +43,9 @@ type Compiler struct {
  BackendArgs []string

  BackendType string

  // TemplatesFolder. should default to tree/templates
  TemplatesFolder []string
}

func NewDefaultCompiler() *Compiler {
@@ -87,6 +90,13 @@ func WithBackendType(r string) func(cfg *Compiler) error {
  }
}

func WithTemplateFolder(r []string) func(cfg *Compiler) error {
  return func(cfg *Compiler) error {
    cfg.TemplatesFolder = r
    return nil
  }
}
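WithTemplateFolder follows the same functional-options pattern as the other With* helpers in this file: each returns a closure that mutates the Compiler configuration when applied. A small, self-contained re-creation of the pattern for illustration; the type and constructor names below are local to the sketch, not the luet options package.

package main

import "fmt"

// Compiler mimics the options struct above with only the fields needed here.
type Compiler struct {
  TemplatesFolder []string
  BackendType     string
}

// Option is the functional-option shape: a closure that edits the config.
type Option func(cfg *Compiler) error

func WithTemplateFolder(r []string) Option {
  return func(cfg *Compiler) error {
    cfg.TemplatesFolder = r
    return nil
  }
}

// New applies the supplied options on top of defaults.
func New(opts ...Option) (*Compiler, error) {
  c := &Compiler{BackendType: "docker"}
  for _, o := range opts {
    if err := o(c); err != nil {
      return nil, err
    }
  }
  return c, nil
}

func main() {
  c, _ := New(WithTemplateFolder([]string{"tree/templates"}))
  fmt.Println(c.TemplatesFolder)
}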

func WithBuildValues(r []string) func(cfg *Compiler) error {
  return func(cfg *Compiler) error {
    cfg.BuildValuesFile = r
@@ -16,15 +16,18 @@
package compilerspec

import (
  "fmt"
  "io/ioutil"
  "path/filepath"

  "github.com/mitchellh/hashstructure/v2"
  options "github.com/mudler/luet/pkg/compiler/types/options"

  "github.com/ghodss/yaml"
  pkg "github.com/mudler/luet/pkg/package"
  "github.com/mudler/luet/pkg/solver"
  "github.com/otiai10/copy"
  yaml "gopkg.in/yaml.v2"
  dirhash "golang.org/x/mod/sumdb/dirhash"
)

type LuetCompilationspecs []LuetCompilationSpec
@@ -85,6 +88,13 @@ func (specs *LuetCompilationspecs) Unique() *LuetCompilationspecs {
  return &newSpecs
}

type CopyField struct {
  Package *pkg.DefaultPackage `json:"package"`
  Image string `json:"image"`
  Source string `json:"source"`
  Destination string `json:"destination"`
}

type LuetCompilationSpec struct {
  Steps []string `json:"steps"` // Are run inside a container and the result layer diff is saved
  Env []string `json:"env"`
@@ -103,15 +113,75 @@ type LuetCompilationSpec struct {
  Excludes []string `json:"excludes"`

  BuildOptions *options.Compiler `json:"build_options"`

  Copy []CopyField `json:"copy"`

  Join pkg.DefaultPackages `json:"join"`
  RequiresFinalImages bool `json:"requires_final_images" yaml:"requires_final_images"`
}

// Signature is a portion of the spec that yields a signature for the hash
type Signature struct {
  Image string
  Steps []string
  PackageDir string
  Prelude []string
  Seed string
  Env []string
  Retrieve []string
  Unpack bool
  Includes []string
  Excludes []string
  Copy []CopyField
  Join pkg.DefaultPackages
  RequiresFinalImages bool
}

func (cs *LuetCompilationSpec) signature() Signature {
  return Signature{
    Image: cs.Image,
    Steps: cs.Steps,
    PackageDir: cs.PackageDir,
    Prelude: cs.Prelude,
    Seed: cs.Seed,
    Env: cs.Env,
    Retrieve: cs.Retrieve,
    Unpack: cs.Unpack,
    Includes: cs.Includes,
    Excludes: cs.Excludes,
    Copy: cs.Copy,
    Join: cs.Join,
    RequiresFinalImages: cs.RequiresFinalImages,
  }
}

func NewLuetCompilationSpec(b []byte, p pkg.Package) (*LuetCompilationSpec, error) {
  var spec LuetCompilationSpec
  var packageDefinition pkg.DefaultPackage
  err := yaml.Unmarshal(b, &spec)
  if err != nil {
    return &spec, err
  }
  spec.Package = p.(*pkg.DefaultPackage)
  err = yaml.Unmarshal(b, &packageDefinition)
  if err != nil {
    return &spec, err
  }

  // Update requires/conflict/provides
  // When we have been passed a bytes slice, parse it as a package
  // and updates requires/conflicts/provides.
  // This is required in order to allow manipulation of such fields with templating
  copy := *p.(*pkg.DefaultPackage)
  spec.Package = &copy
  if len(packageDefinition.GetRequires()) != 0 {
    spec.Package.Requires(packageDefinition.GetRequires())
  }
  if len(packageDefinition.GetConflicts()) != 0 {
    spec.Package.Conflicts(packageDefinition.GetConflicts())
  }
  if len(packageDefinition.GetProvides()) != 0 {
    spec.Package.SetProvides(packageDefinition.GetProvides())
  }
  return &spec, nil
}
func (cs *LuetCompilationSpec) GetSourceAssertion() solver.PackagesAssertions {
@@ -196,7 +266,7 @@ func (cs *LuetCompilationSpec) SetSeedImage(s string) {
}

func (cs *LuetCompilationSpec) EmptyPackage() bool {
  return len(cs.BuildSteps()) == 0 && len(cs.GetPreBuildSteps()) == 0 && !cs.UnpackedPackage()
  return len(cs.BuildSteps()) == 0 && !cs.UnpackedPackage()
}

func (cs *LuetCompilationSpec) UnpackedPackage() bool {
@@ -213,7 +283,21 @@ func (cs *LuetCompilationSpec) UnpackedPackage() bool {
// a compilation spec has an image source when it depends on other packages or have a source image
// explictly supplied
func (cs *LuetCompilationSpec) HasImageSource() bool {
  return (cs.Package != nil && len(cs.GetPackage().GetRequires()) != 0) || cs.GetImage() != ""
  return (cs.Package != nil && len(cs.GetPackage().GetRequires()) != 0) || cs.GetImage() != "" || len(cs.Join) != 0
}

func (cs *LuetCompilationSpec) Hash() (string, error) {
  // build a signature, we want to be part of the hash only the fields that are relevant for build purposes
  signature := cs.signature()
  h, err := hashstructure.Hash(signature, hashstructure.FormatV2, nil)
  if err != nil {
    return "", err
  }
  sum, err := dirhash.HashDir(cs.Package.Path, "", dirhash.DefaultHash)
  if err != nil {
    return fmt.Sprint(h), err
  }
  return fmt.Sprint(h, sum), err
}
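Hash() combines two independent pieces: a structural hash of the build-relevant Signature fields (via hashstructure) and a content hash of the package directory (via dirhash). A minimal sketch of the same combination follows; the signature fields and the directory path are assumptions made only for the example, not luet's exact inputs.

package main

import (
  "fmt"
  "log"

  "github.com/mitchellh/hashstructure/v2"
  dirhash "golang.org/x/mod/sumdb/dirhash"
)

// signature mirrors the idea above: hash only the build-relevant fields.
type signature struct {
  Image string
  Steps []string
  Env   []string
}

func main() {
  sig := signature{Image: "alpine", Steps: []string{"echo foo > /foo"}, Env: []string{"test=1"}}

  // Structural hash of the build-relevant fields.
  h, err := hashstructure.Hash(sig, hashstructure.FormatV2, nil)
  if err != nil {
    log.Fatal(err)
  }

  // Content hash of the package directory (the path must exist; it is an
  // assumption for this example).
  sum, err := dirhash.HashDir("./tests/fixtures/buildable/b", "", dirhash.DefaultHash)
  if err != nil {
    log.Fatal(err)
  }

  // The final hash is simply the concatenation of both values.
  fmt.Println(fmt.Sprint(h, sum))
}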

func (cs *LuetCompilationSpec) CopyRetrieves(dest string) error {
@@ -256,6 +340,13 @@ ADD ` + s + ` /luetbuild/`
    }
  }

  for _, c := range cs.Copy {
    if c.Image != "" {
      copyLine := fmt.Sprintf("\nCOPY --from=%s %s %s\n", c.Image, c.Source, c.Destination)
      spec = spec + copyLine
    }
  }

  for _, s := range cs.Env {
    spec = spec + `
ENV ` + s
32  pkg/compiler/types/spec/spec_suite_test.go  Normal file
@@ -0,0 +1,32 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package compilerspec_test

import (
  "testing"

  . "github.com/mudler/luet/cmd"
  config "github.com/mudler/luet/pkg/config"

  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
)

func TestSpec(t *testing.T) {
  RegisterFailHandler(Fail)
  LoadConfig(config.LuetCfg)
  RunSpecs(t, "Spec Suite")
}
@@ -20,10 +20,11 @@ import (
  "os"
  "path/filepath"

  options "github.com/mudler/luet/pkg/compiler/types/options"
  compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
  fileHelper "github.com/mudler/luet/pkg/helpers/file"

  . "github.com/mudler/luet/pkg/compiler"
  helpers "github.com/mudler/luet/pkg/helpers"
  pkg "github.com/mudler/luet/pkg/package"
  "github.com/mudler/luet/pkg/tree"
  . "github.com/onsi/ginkgo"
@@ -74,11 +75,67 @@ var _ = Describe("Spec", func() {
    })
  })

  Context("Image hashing", func() {
    It("is stable", func() {
      spec1 := &compilerspec.LuetCompilationSpec{
        Image: "foo",
        BuildOptions: &options.Compiler{BuildValues: []map[string]interface{}{{"foo": "bar", "baz": true}}},

        Package: &pkg.DefaultPackage{
          Name: "foo",
          Category: "Bar",
          Labels: map[string]string{
            "foo": "bar",
            "baz": "foo",
          },
        },
      }
      spec2 := &compilerspec.LuetCompilationSpec{
        Image: "foo",
        BuildOptions: &options.Compiler{BuildValues: []map[string]interface{}{{"foo": "bar", "baz": true}}},
        Package: &pkg.DefaultPackage{
          Name: "foo",
          Category: "Bar",
          Labels: map[string]string{
            "foo": "bar",
            "baz": "foo",
          },
        },
      }
      spec3 := &compilerspec.LuetCompilationSpec{
        Image: "foo",
        Steps: []string{"foo"},
        Package: &pkg.DefaultPackage{
          Name: "foo",
          Category: "Bar",
          Labels: map[string]string{
            "foo": "bar",
            "baz": "foo",
          },
        },
      }
      hash, err := spec1.Hash()
      Expect(err).ToNot(HaveOccurred())

      hash2, err := spec2.Hash()
      Expect(err).ToNot(HaveOccurred())

      hash3, err := spec3.Hash()
      Expect(err).ToNot(HaveOccurred())

      Expect(hash).To(Equal(hash2))
      hashagain, err := spec2.Hash()
      Expect(err).ToNot(HaveOccurred())
      Expect(hash).ToNot(Equal(hash3))
      Expect(hash).To(Equal(hashagain))
    })
  })

  Context("Simple package build definition", func() {
    It("Loads it correctly", func() {
      generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))

      err := generalRecipe.Load("../../tests/fixtures/buildtree")
      err := generalRecipe.Load("../../../../tests/fixtures/buildtree")
      Expect(err).ToNot(HaveOccurred())

      Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
@@ -97,7 +154,7 @@ var _ = Describe("Spec", func() {
      lspec.Env = []string{"test=1"}
      err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
      Expect(err).ToNot(HaveOccurred())
      dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
      dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
      Expect(err).ToNot(HaveOccurred())
      Expect(dockerfile).To(Equal(`
FROM alpine
@@ -110,7 +167,7 @@ ENV test=1`))

      err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "Dockerfile"))
      Expect(err).ToNot(HaveOccurred())
      dockerfile, err = helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
      dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
      Expect(err).ToNot(HaveOccurred())
      Expect(dockerfile).To(Equal(`
FROM luet/base
@@ -130,7 +187,7 @@ RUN echo bar > /test2`))
    It("Renders retrieve and env fields", func() {
      generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))

      err := generalRecipe.Load("../../tests/fixtures/retrieve")
      err := generalRecipe.Load("../../../../tests/fixtures/retrieve")
      Expect(err).ToNot(HaveOccurred())

      Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
@@ -148,7 +205,7 @@ RUN echo bar > /test2`))

      err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
      Expect(err).ToNot(HaveOccurred())
      dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
      dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
      Expect(err).ToNot(HaveOccurred())
      Expect(dockerfile).To(Equal(`
FROM alpine
@@ -165,7 +222,7 @@ ENV test=1`))

      err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
      Expect(err).ToNot(HaveOccurred())
      dockerfile, err = helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
      dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
      Expect(err).ToNot(HaveOccurred())
      Expect(dockerfile).To(Equal(`
FROM alpine
@@ -180,7 +237,7 @@ ENV test=1`))

      err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "Dockerfile"))
      Expect(err).ToNot(HaveOccurred())
      dockerfile, err = helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
      dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
      Expect(err).ToNot(HaveOccurred())

      Expect(dockerfile).To(Equal(`
@@ -17,7 +17,6 @@
package config

import (
  "errors"
  "fmt"
  "io/ioutil"
  "os"
@@ -27,11 +26,13 @@ import (
  "strings"
  "time"

  "github.com/mudler/luet/pkg/helpers"
  fileHelper "github.com/mudler/luet/pkg/helpers/file"
  pkg "github.com/mudler/luet/pkg/package"
  solver "github.com/mudler/luet/pkg/solver"

  "github.com/pkg/errors"
  v "github.com/spf13/viper"
  "gopkg.in/yaml.v2"
)

var LuetCfg = NewLuetConfig(v.GetViper())
@@ -55,22 +56,22 @@ type LuetLoggingConfig struct {
}

type LuetGeneralConfig struct {
  SameOwner bool `mapstructure:"same_owner"`
  Concurrency int `mapstructure:"concurrency"`
  Debug bool `mapstructure:"debug"`
  ShowBuildOutput bool `mapstructure:"show_build_output"`
  SpinnerMs int `mapstructure:"spinner_ms"`
  SpinnerCharset int `mapstructure:"spinner_charset"`
  FatalWarns bool `mapstructure:"fatal_warnings"`
  SameOwner bool `yaml:"same_owner,omitempty" mapstructure:"same_owner"`
  Concurrency int `yaml:"concurrency,omitempty" mapstructure:"concurrency"`
  Debug bool `yaml:"debug,omitempty" mapstructure:"debug"`
  ShowBuildOutput bool `yaml:"show_build_output,omitempty" mapstructure:"show_build_output"`
  SpinnerMs int `yaml:"spinner_ms,omitempty" mapstructure:"spinner_ms"`
  SpinnerCharset int `yaml:"spinner_charset,omitempty" mapstructure:"spinner_charset"`
  FatalWarns bool `yaml:"fatal_warnings,omitempty" mapstructure:"fatal_warnings"`
}

type LuetSolverOptions struct {
  solver.Options
  Type string `mapstructure:"type"`
  LearnRate float32 `mapstructure:"rate"`
  Discount float32 `mapstructure:"discount"`
  MaxAttempts int `mapstructure:"max_attempts"`
  Implementation solver.SolverType `mapstructure:"implementation"`
  solver.Options `yaml:"options,omitempty"`
  Type string `yaml:"type,omitempty" mapstructure:"type"`
  LearnRate float32 `yaml:"rate,omitempty" mapstructure:"rate"`
  Discount float32 `yaml:"discount,omitempty" mapstructure:"discount"`
  MaxAttempts int `yaml:"max_attempts,omitempty" mapstructure:"max_attempts"`
  Implementation solver.SolverType `yaml:"implementation,omitempty" mapstructure:"implementation"`
}

func (opts LuetSolverOptions) ResolverIsSet() bool {
@@ -92,7 +93,7 @@ func (opts LuetSolverOptions) Resolver() solver.PackageResolver {
    return solver.SimpleQLearningSolver()
  }

  return &solver.DummyPackageResolver{}
  return &solver.Explainer{}
}

func (opts *LuetSolverOptions) CompactString() string {
@@ -108,6 +109,16 @@ type LuetSystemConfig struct {
  TmpDirBase string `yaml:"tmpdir_base" mapstructure:"tmpdir_base"`
}

func (s *LuetSystemConfig) SetRootFS(path string) error {
  p, err := fileHelper.Rel2Abs(path)
  if err != nil {
    return err
  }

  s.Rootfs = p
  return nil
}

func (sc *LuetSystemConfig) GetRepoDatabaseDirPath(name string) string {
  dbpath := filepath.Join(sc.Rootfs, sc.DatabasePath)
  dbpath = filepath.Join(dbpath, "repos/"+name)
@@ -167,9 +178,9 @@ type LuetRepository struct {
  // Serialized options not used in repository configuration

  // Incremented value that identify revision of the repository in a user-friendly way.
  Revision int `json:"revision,omitempty" yaml:"-,omitempty" mapstructure:"-,omitempty"`
  Revision int `json:"revision,omitempty" yaml:"-" mapstructure:"-"`
  // Epoch time in seconds
  LastUpdate string `json:"last_update,omitempty" yaml:"-,omitempty" mapstructure:"-,omitempty"`
  LastUpdate string `json:"last_update,omitempty" yaml:"-" mapstructure:"-"`
}

func NewLuetRepository(name, t, descr string, urls []string, priority int, enable, cached bool) *LuetRepository {
@@ -209,22 +220,29 @@ func (r *LuetRepository) String() string {
    r.Name, r.Priority, r.Type, r.Enable, r.Cached)
}

type LuetKV struct {
  Key string `json:"key" yaml:"key" mapstructure:"key"`
  Value string `json:"value" yaml:"value" mapstructure:"value"`
}

type LuetConfig struct {
  Viper *v.Viper
  Viper *v.Viper `yaml:"-"`

  Logging LuetLoggingConfig `mapstructure:"logging"`
  General LuetGeneralConfig `mapstructure:"general"`
  System LuetSystemConfig `mapstructure:"system"`
  Solver LuetSolverOptions `mapstructure:"solver"`
  Logging LuetLoggingConfig `yaml:"logging,omitempty" mapstructure:"logging"`
  General LuetGeneralConfig `yaml:"general,omitempty" mapstructure:"general"`
  System LuetSystemConfig `yaml:"system" mapstructure:"system"`
  Solver LuetSolverOptions `yaml:"solver,omitempty" mapstructure:"solver"`

  RepositoriesConfDir []string `mapstructure:"repos_confdir"`
  ConfigProtectConfDir []string `mapstructure:"config_protect_confdir"`
  ConfigProtectSkip bool `mapstructure:"config_protect_skip"`
  ConfigFromHost bool `mapstructure:"config_from_host"`
  CacheRepositories []LuetRepository `mapstructure:"repetitors"`
  SystemRepositories []LuetRepository `mapstructure:"repositories"`
  RepositoriesConfDir []string `yaml:"repos_confdir,omitempty" mapstructure:"repos_confdir"`
  ConfigProtectConfDir []string `yaml:"config_protect_confdir,omitempty" mapstructure:"config_protect_confdir"`
  ConfigProtectSkip bool `yaml:"config_protect_skip,omitempty" mapstructure:"config_protect_skip"`
  ConfigFromHost bool `yaml:"config_from_host,omitempty" mapstructure:"config_from_host"`
  CacheRepositories []LuetRepository `yaml:"repetitors,omitempty" mapstructure:"repetitors"`
  SystemRepositories []LuetRepository `yaml:"repositories,omitempty" mapstructure:"repositories"`

  ConfigProtectConfFiles []ConfigProtectConfFile
  FinalizerEnvs []LuetKV `json:"finalizer_envs,omitempty" yaml:"finalizer_envs,omitempty" mapstructure:"finalizer_envs,omitempty"`

  ConfigProtectConfFiles []ConfigProtectConfFile `yaml:"-" mapstructure:"-"`
}

func NewLuetConfig(viper *v.Viper) *LuetConfig {
@@ -272,6 +290,7 @@ func GenDefault(viper *v.Viper) {
  viper.SetDefault("config_from_host", true)
  viper.SetDefault("cache_repositories", []string{})
  viper.SetDefault("system_repositories", []string{})
  viper.SetDefault("finalizer_envs", make(map[string]string, 0))

  viper.SetDefault("solver.type", "")
  viper.SetDefault("solver.rate", 0.7)
@@ -293,6 +312,58 @@ func (c *LuetConfig) AddSystemRepository(r LuetRepository) {
  c.SystemRepositories = append(c.SystemRepositories, r)
}

func (c *LuetConfig) GetFinalizerEnvsMap() map[string]string {
  ans := make(map[string]string, 0)

  for _, kv := range c.FinalizerEnvs {
    ans[kv.Key] = kv.Value
  }
  return ans
}

func (c *LuetConfig) SetFinalizerEnv(k, v string) {
  keyPresent := false
  envs := []LuetKV{}

  for _, kv := range c.FinalizerEnvs {
    if kv.Key == k {
      keyPresent = true
      envs = append(envs, LuetKV{Key: kv.Key, Value: v})
    } else {
      envs = append(envs, kv)
    }
  }
  if !keyPresent {
    envs = append(envs, LuetKV{Key: k, Value: v})
  }

  c.FinalizerEnvs = envs
}

func (c *LuetConfig) GetFinalizerEnvs() []string {
  ans := []string{}
  for _, kv := range c.FinalizerEnvs {
    ans = append(ans, fmt.Sprintf("%s=%s", kv.Key, kv.Value))
  }
  return ans
}

func (c *LuetConfig) GetFinalizerEnv(k string) (string, error) {
  keyNotPresent := true
  ans := ""
  for _, kv := range c.FinalizerEnvs {
    if kv.Key == k {
      keyNotPresent = false
      ans = kv.Value
    }
  }

  if keyNotPresent {
    return "", errors.New("Finalizer key " + k + " not found")
  }
  return ans, nil
}
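A short usage sketch of the finalizer-environment helpers added above: SetFinalizerEnv overwrites an existing key rather than appending a duplicate, and GetFinalizerEnv returns an error for unknown keys. The key and values below are made up for the example.

package main

import (
  "fmt"

  config "github.com/mudler/luet/pkg/config"
)

func main() {
  cfg := config.LuetCfg

  // Setting the same key twice replaces the previous value.
  cfg.SetFinalizerEnv("BUILD_ID", "1")
  cfg.SetFinalizerEnv("BUILD_ID", "2")

  v, err := cfg.GetFinalizerEnv("BUILD_ID")
  fmt.Println(v, err) // "2" <nil>

  // As a KEY=VALUE slice, ready to be passed to a finalizer's environment.
  fmt.Println(cfg.GetFinalizerEnvs()) // [BUILD_ID=2]
}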

func (c *LuetConfig) GetLogging() *LuetLoggingConfig {
  return &c.Logging
}
@@ -309,6 +380,10 @@ func (c *LuetConfig) GetSolverOptions() *LuetSolverOptions {
  return &c.Solver
}

func (c *LuetConfig) YAML() ([]byte, error) {
  return yaml.Marshal(c)
}
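The new YAML() method, combined with the `omitempty` yaml tags introduced above, makes it possible to dump the effective configuration without the unset sections. A minimal sketch:

package main

import (
  "fmt"
  "log"

  config "github.com/mudler/luet/pkg/config"
)

func main() {
  // Serialize the in-memory configuration; unset sections marked
  // `omitempty` are dropped from the output.
  data, err := config.LuetCfg.YAML()
  if err != nil {
    log.Fatal(err)
  }
  fmt.Println(string(data))
}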

func (c *LuetConfig) GetConfigProtectConfFiles() []ConfigProtectConfFile {
  return c.ConfigProtectConfFiles
}
@@ -337,34 +412,6 @@ func (c *LuetConfig) GetSystemRepository(name string) (*LuetRepository, error) {
  return ans, nil
}

func (c *LuetSolverOptions) String() string {
  ans := fmt.Sprintf(`
solver:
  type: %s
  rate: %f
  discount: %f
  max_attempts: %d`, c.Type, c.LearnRate, c.Discount,
    c.MaxAttempts)

  return ans
}

func (c *LuetGeneralConfig) String() string {
  ans := fmt.Sprintf(`
general:
  concurrency: %d
  same_owner: %t
  debug: %t
  fatal_warnings: %t
  show_build_output: %t
  spinner_ms: %d
  spinner_charset: %d`, c.Concurrency, c.SameOwner, c.Debug,
    c.FatalWarns, c.ShowBuildOutput,
    c.SpinnerMs, c.SpinnerCharset)

  return ans
}

func (c *LuetGeneralConfig) GetSpinnerMs() time.Duration {
  duration, err := time.ParseDuration(fmt.Sprintf("%dms", c.SpinnerMs))
  if err != nil {
@@ -377,37 +424,22 @@ func (c *LuetLoggingConfig) SetLogLevel(s string) {
  c.Level = s
}

func (c *LuetLoggingConfig) String() string {
  ans := fmt.Sprintf(`
logging:
  enable_logfile: %t
  path: %s
  json_format: %t
  color: %t
  enable_emoji: %t
  level: %s`, c.EnableLogFile, c.Path, c.JsonFormat,
    c.Color, c.EnableEmoji, c.Level)

  return ans
}

func (c *LuetSystemConfig) String() string {
  ans := fmt.Sprintf(`
system:
  database_engine: %s
  database_path: %s
  pkgs_cache_path: %s
  tmpdir_base: %s
  rootfs: %s`,
    c.DatabaseEngine, c.DatabasePath, c.PkgsCachePath,
    c.TmpDirBase, c.Rootfs)

  return ans
}

func (c *LuetSystemConfig) InitTmpDir() error {
  if !helpers.Exists(c.TmpDirBase) {
    return os.MkdirAll(c.TmpDirBase, os.ModePerm)
  if !filepath.IsAbs(c.TmpDirBase) {
    abs, err := fileHelper.Rel2Abs(c.TmpDirBase)
    if err != nil {
      return errors.Wrap(err, "while converting relative path to absolute path")
    }
    c.TmpDirBase = abs
  }

  if _, err := os.Stat(c.TmpDirBase); err != nil {
    if os.IsNotExist(err) {
      err = os.MkdirAll(c.TmpDirBase, os.ModePerm)
      if err != nil {
        return err
      }
    }
  }
  return nil
}
@@ -22,7 +22,7 @@ import (
  "strings"

  config "github.com/mudler/luet/pkg/config"
  "github.com/mudler/luet/pkg/helpers"
  fileHelper "github.com/mudler/luet/pkg/helpers/file"

  . "github.com/onsi/ginkgo"
  . "github.com/onsi/gomega"
@@ -38,7 +38,7 @@ var _ = Describe("Config", func() {
      tmpDir, err := config.LuetCfg.GetSystem().TempDir("test1")
      Expect(err).ToNot(HaveOccurred())
      Expect(strings.HasPrefix(tmpDir, filepath.Join(os.TempDir(), "tmpluet"))).To(BeTrue())
      Expect(helpers.Exists(tmpDir)).To(BeTrue())
      Expect(fileHelper.Exists(tmpDir)).To(BeTrue())

      defer os.RemoveAll(tmpDir)
    })
@@ -49,7 +49,7 @@ var _ = Describe("Config", func() {
      tmpFile, err := config.LuetCfg.GetSystem().TempFile("testfile1")
      Expect(err).ToNot(HaveOccurred())
      Expect(strings.HasPrefix(tmpFile.Name(), filepath.Join(os.TempDir(), "tmpluet"))).To(BeTrue())
      Expect(helpers.Exists(tmpFile.Name())).To(BeTrue())
      Expect(fileHelper.Exists(tmpFile.Name())).To(BeTrue())

      defer os.Remove(tmpFile.Name())
    })
@@ -25,6 +25,8 @@ import (
  "os"
  "path/filepath"

  fileHelper "github.com/mudler/luet/pkg/helpers/file"

  "github.com/docker/docker/pkg/archive"
  . "github.com/mudler/luet/pkg/helpers"
  . "github.com/onsi/ginkgo"
@@ -128,7 +130,7 @@ var _ = Describe("Helpers Archive", func() {
      err = archive.Untar(replacerArchive, targetDir, opts)
      Expect(err).ToNot(HaveOccurred())

      Expect(Exists(filepath.Join(targetDir, "._cfg0001_file-0"))).Should(Equal(true))
      Expect(fileHelper.Exists(filepath.Join(targetDir, "._cfg0001_file-0"))).Should(Equal(true))
    })
  })
})
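The `._cfg0001_file-0` name checked by this test comes from the config-protect scheme in artifact.go above: the first free `._cfgNNNN_<basename>` slot next to the original path is used instead of overwriting a protected file. A tiny sketch of how those names are formed; the path below is made up for the example.

package main

import (
  "fmt"
  "path/filepath"
)

func main() {
  // Same format string as tarModifierWrapperFunc: "._cfg%04d_%s".
  path := "/etc/luet/file-0"
  for i := 1; i <= 2; i++ {
    name := filepath.Join(filepath.Dir(path), fmt.Sprintf("._cfg%04d_%s", i, filepath.Base(path)))
    fmt.Println(name) // /etc/luet/._cfg0001_file-0, then /etc/luet/._cfg0002_file-0
  }
}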
@@ -1,129 +0,0 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package helpers

import (
  "context"
  "encoding/hex"
  "os"
  "strings"

  "github.com/docker/cli/cli/trust"
  "github.com/docker/distribution/reference"
  "github.com/docker/docker/api/types"
  "github.com/docker/docker/registry"
  "github.com/mudler/luet/pkg/helpers/imgworker"
  "github.com/opencontainers/go-digest"
  "github.com/pkg/errors"
  "github.com/theupdateframework/notary/tuf/data"
)

// See also https://github.com/docker/cli/blob/88c6089300a82d3373892adf6845a4fed1a4ba8d/cli/command/image/trust.go#L171

func verifyImage(image string, authConfig *types.AuthConfig) (string, error) {
  ref, err := reference.ParseAnyReference(image)
  if err != nil {
    return "", errors.Wrapf(err, "invalid reference %s", image)
  }

  // only check if image ref doesn't contain hashes
  if _, ok := ref.(reference.Digested); !ok {
    namedRef, ok := ref.(reference.Named)
    if !ok {
      return "", errors.New("failed to resolve image digest using content trust: reference is not named")
    }
    namedRef = reference.TagNameOnly(namedRef)
    taggedRef, ok := namedRef.(reference.NamedTagged)
    if !ok {
      return "", errors.New("failed to resolve image digest using content trust: reference is not tagged")
    }

    resolvedImage, err := trustedResolveDigest(context.Background(), taggedRef, authConfig, "luet")
    if err != nil {
      return "", errors.Wrap(err, "failed to resolve image digest using content trust")
    }
    resolvedFamiliar := reference.FamiliarString(resolvedImage)
    return resolvedFamiliar, nil
  }

  return "", nil
}

func trustedResolveDigest(ctx context.Context, ref reference.NamedTagged, authConfig *types.AuthConfig, useragent string) (reference.Canonical, error) {
  repoInfo, err := registry.ParseRepositoryInfo(ref)
  if err != nil {
    return nil, err
  }

  notaryRepo, err := trust.GetNotaryRepository(os.Stdin, os.Stdout, useragent, repoInfo, authConfig, "pull")
  if err != nil {
    return nil, errors.Wrap(err, "error establishing connection to trust repository")
  }

  t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
  if err != nil {
    return nil, trust.NotaryError(repoInfo.Name.Name(), err)
  }
  // Only get the tag if it's in the top level targets role or the releases delegation role
  // ignore it if it's in any other delegation roles
  if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
    return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", reference.FamiliarString(ref)))
  }

  h, ok := t.Hashes["sha256"]
  if !ok {
    return nil, errors.New("no valid hash, expecting sha256")
  }

  dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h))

  // Allow returning canonical reference with tag
  return reference.WithDigest(ref, dgst)
}

// DownloadAndExtractDockerImage is a re-adaption
// from genuinetools/img https://github.com/genuinetools/img/blob/54d0ca981c1260546d43961a538550eef55c87cf/pull.go
func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {

  if verify {
    img, err := verifyImage(image, auth)
    if err != nil {
      return nil, errors.Wrapf(err, "failed verifying image")
    }
    image = img
  }

  defer os.RemoveAll(temp)
  c, err := imgworker.New(temp, auth)
  if err != nil {
    return nil, errors.Wrapf(err, "failed creating client")
  }
  defer c.Close()

  listedImage, err := c.Pull(image)
  if err != nil {
    return nil, errors.Wrapf(err, "failed listing images")

  }

  os.RemoveAll(dest)
  err = c.Unpack(image, dest)
  return listedImage, err
}

func StripInvalidStringsFromImage(s string) string {
  return strings.ReplaceAll(s, "+", "-")
}
268  pkg/helpers/docker/docker.go  Normal file
@@ -0,0 +1,268 @@
|
||||
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/images"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
"github.com/mudler/luet/pkg/helpers/imgworker"
|
||||
|
||||
continerdarchive "github.com/containerd/containerd/archive"
|
||||
"github.com/docker/cli/cli/trust"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/registry"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
v1 "github.com/google/go-containerregistry/pkg/v1"
|
||||
"github.com/google/go-containerregistry/pkg/v1/mutate"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
"github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/theupdateframework/notary/tuf/data"
|
||||
)
|
||||
|
||||
// See also https://github.com/docker/cli/blob/88c6089300a82d3373892adf6845a4fed1a4ba8d/cli/command/image/trust.go#L171
|
||||
|
||||
func verifyImage(image string, authConfig *types.AuthConfig) (string, error) {
|
||||
ref, err := reference.ParseAnyReference(image)
|
||||
if err != nil {
|
||||
return "", errors.Wrapf(err, "invalid reference %s", image)
|
||||
}
|
||||
|
||||
// only check if image ref doesn't contain hashes
|
||||
if _, ok := ref.(reference.Digested); !ok {
|
||||
namedRef, ok := ref.(reference.Named)
|
||||
if !ok {
|
||||
return "", errors.New("failed to resolve image digest using content trust: reference is not named")
|
||||
}
|
||||
namedRef = reference.TagNameOnly(namedRef)
|
||||
taggedRef, ok := namedRef.(reference.NamedTagged)
|
||||
if !ok {
|
||||
return "", errors.New("failed to resolve image digest using content trust: reference is not tagged")
|
||||
}
|
||||
|
||||
resolvedImage, err := trustedResolveDigest(context.Background(), taggedRef, authConfig, "luet")
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to resolve image digest using content trust")
|
||||
}
|
||||
resolvedFamiliar := reference.FamiliarString(resolvedImage)
|
||||
return resolvedFamiliar, nil
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func trustedResolveDigest(ctx context.Context, ref reference.NamedTagged, authConfig *types.AuthConfig, useragent string) (reference.Canonical, error) {
|
||||
repoInfo, err := registry.ParseRepositoryInfo(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
notaryRepo, err := trust.GetNotaryRepository(os.Stdin, os.Stdout, useragent, repoInfo, authConfig, "pull")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error establishing connection to trust repository")
|
||||
}
|
||||
|
||||
t, err := notaryRepo.GetTargetByName(ref.Tag(), trust.ReleasesRole, data.CanonicalTargetsRole)
|
||||
if err != nil {
|
||||
return nil, trust.NotaryError(repoInfo.Name.Name(), err)
|
||||
}
|
||||
// Only get the tag if it's in the top level targets role or the releases delegation role
|
||||
// ignore it if it's in any other delegation roles
|
||||
if t.Role != trust.ReleasesRole && t.Role != data.CanonicalTargetsRole {
|
||||
return nil, trust.NotaryError(repoInfo.Name.Name(), errors.Errorf("No trust data for %s", reference.FamiliarString(ref)))
|
||||
}
|
||||
|
||||
h, ok := t.Hashes["sha256"]
|
||||
if !ok {
|
||||
return nil, errors.New("no valid hash, expecting sha256")
|
||||
}
|
||||
|
||||
dgst := digest.NewDigestFromHex("sha256", hex.EncodeToString(h))
|
||||
|
||||
// Allow returning canonical reference with tag
|
||||
return reference.WithDigest(ref, dgst)
|
||||
}
|
||||
|
||||
type staticAuth struct {
|
||||
auth *types.AuthConfig
|
||||
}
|
||||
|
||||
func (s staticAuth) Authorization() (*authn.AuthConfig, error) {
|
||||
if s.auth == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return &authn.AuthConfig{
|
||||
Username: s.auth.Username,
|
||||
Password: s.auth.Password,
|
||||
Auth: s.auth.Auth,
|
||||
IdentityToken: s.auth.IdentityToken,
|
||||
RegistryToken: s.auth.RegistryToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UnpackEventData is the data structure to pass for the bus events
|
||||
type UnpackEventData struct {
|
||||
Image string
|
||||
Dest string
|
||||
}
|
||||
|
||||
// privilegedExtractImage uses the imgworker (which requires privileges) to extract a container image
|
||||
func privilegedExtractImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {
|
||||
defer os.RemoveAll(temp)
|
||||
c, err := imgworker.New(temp, auth)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed creating client")
|
||||
}
|
||||
defer c.Close()
|
||||
|
||||
listedImage, err := c.Pull(image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed listing images")
|
||||
}
|
||||
|
||||
os.RemoveAll(dest)
|
||||
|
||||
bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
err = c.Unpack(image, dest)
|
||||
|
||||
bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
return listedImage, err
|
||||
}
|
||||
|
||||
// UnarchiveLayers extract layers with archive.Untar from docker instead of containerd
|
||||
func UnarchiveLayers(temp string, img v1.Image, image, dest string, auth *types.AuthConfig, verify bool) (int64, error) {
|
||||
layers, err := img.Layers()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("reading layers from '%s' image failed: %v", image, err)
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
var size int64
|
||||
for _, l := range layers {
|
||||
s, err := l.Size()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("reading layer size from '%s' image failed: %v", image, err)
|
||||
}
|
||||
size += s
|
||||
|
||||
layerReader, err := l.Uncompressed()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("reading uncompressed layer from '%s' image failed: %v", image, err)
|
||||
}
|
||||
defer layerReader.Close()
|
||||
|
||||
// Unpack the tarfile to the rootfs path.
|
||||
// FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
|
||||
if err := archive.Untar(layerReader, dest, &archive.TarOptions{
|
||||
NoLchown: false,
|
||||
ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
|
||||
}); err != nil {
|
||||
return 0, fmt.Errorf("extracting '%s' image to directory %s failed: %v", image, dest, err)
|
||||
}
|
||||
}
|
||||
bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
|
||||
|
||||
return size, nil
|
||||
}
|
||||
|
||||
// DownloadAndExtractDockerImage extracts a container image natively. It supports privileged/unprivileged mode
func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {
	if verify {
		img, err := verifyImage(image, auth)
		if err != nil {
			return nil, errors.Wrapf(err, "failed verifying image")
		}
		image = img
	}

	if os.Getenv("LUET_PRIVILEGED_EXTRACT") == "true" {
		return privilegedExtractImage(temp, image, dest, auth, verify)
	}

	if !fileHelper.Exists(dest) {
		if err := os.MkdirAll(dest, os.ModePerm); err != nil {
			return nil, errors.Wrapf(err, "cannot create destination directory")
		}
	}

	ref, err := name.ParseReference(image)
	if err != nil {
		return nil, err
	}

	img, err := remote.Image(ref, remote.WithAuth(staticAuth{auth}))
	if err != nil {
		return nil, err
	}

	m, err := img.Manifest()
	if err != nil {
		return nil, err
	}

	mt, err := img.MediaType()
	if err != nil {
		return nil, err
	}

	d, err := img.Digest()
	if err != nil {
		return nil, err
	}

	reader := mutate.Extract(img)
	defer reader.Close()
	defer os.RemoveAll(temp)

	bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})

	c, err := continerdarchive.Apply(context.TODO(), dest, reader)
	if err != nil {
		return nil, err
	}

	bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})

	return &imgworker.ListedImage{
		Image: images.Image{
			Name:   image,
			Labels: m.Annotations,
			Target: specs.Descriptor{
				MediaType: string(mt),
				Digest:    digest.Digest(d.String()),
				Size:      c,
			},
		},
		ContentSize: c,
	}, nil
}
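Callers elsewhere in this diff (the Docker installer client) invoke this helper as `docker.DownloadAndExtractDockerImage(contentstore, imageName, temp, auth, verify)`. A hedged standalone sketch of the default, unprivileged path; setting `LUET_PRIVILEGED_EXTRACT=true` would route through the `imgworker` path instead (image name and directories are placeholders):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/mudler/luet/pkg/helpers/docker"
)

func main() {
	temp, _ := ioutil.TempDir("", "contentstore") // scratch dir, removed by the helper
	dest, _ := ioutil.TempDir("", "rootfs")       // extraction target

	// Keep the unprivileged (go-containerregistry + containerd archive.Apply) path.
	os.Setenv("LUET_PRIVILEGED_EXTRACT", "false")

	// empty credentials: anonymous pull; last arg disables image verification
	info, err := docker.DownloadAndExtractDockerImage(temp, "alpine:latest", dest, &types.AuthConfig{}, false)
	if err != nil {
		panic(err)
	}
	fmt.Println("image:", info.Image.Name, "content size:", info.ContentSize)
}
```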
func StripInvalidStringsFromImage(s string) string {
	return strings.ReplaceAll(s, "+", "-")
}
@@ -13,10 +13,10 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package helpers_test
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
. "github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/helpers/docker"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
var _ = Describe("StripInvalidStringsFromImage", func() {
|
||||
Context("Image names", func() {
|
||||
It("strips invalid chars", func() {
|
||||
Expect(StripInvalidStringsFromImage("foo+bar")).To(Equal("foo-bar"))
|
||||
Expect(docker.StripInvalidStringsFromImage("foo+bar")).To(Equal("foo-bar"))
|
||||
})
|
||||
})
|
||||
})
|
@@ -13,7 +13,7 @@
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package helpers
package file

import (
	"fmt"
@@ -287,3 +287,15 @@ func CopyDir(src string, dst string) (err error) {
		Sync:      true,
		OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
}

func Rel2Abs(s string) (string, error) {
	pathToSet := s
	if !filepath.IsAbs(s) {
		abs, err := filepath.Abs(s)
		if err != nil {
			return "", err
		}
		pathToSet = abs
	}
	return pathToSet, nil
}
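`Rel2Abs` simply normalizes a possibly-relative path to an absolute one (it is what `ChartFiles` below uses before listing template directories). A tiny sketch:

```go
package main

import (
	"fmt"

	fileHelper "github.com/mudler/luet/pkg/helpers/file"
)

func main() {
	p, err := fileHelper.Rel2Abs("./templates") // relative input
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // e.g. /current/working/dir/templates
}
```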
@@ -13,14 +13,15 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package helpers_test
|
||||
package file_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
. "github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
@@ -28,8 +29,8 @@ import (
|
||||
var _ = Describe("Helpers", func() {
|
||||
Context("Exists", func() {
|
||||
It("Detect existing and not-existing files", func() {
|
||||
Expect(Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml")).To(BeTrue())
|
||||
Expect(Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml.not.exists")).To(BeFalse())
|
||||
Expect(fileHelper.Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml")).To(BeTrue())
|
||||
Expect(fileHelper.Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml.not.exists")).To(BeFalse())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -38,15 +39,15 @@ var _ = Describe("Helpers", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(testDir)
|
||||
Expect(DirectoryIsEmpty(testDir)).To(BeTrue())
|
||||
Expect(fileHelper.DirectoryIsEmpty(testDir)).To(BeTrue())
|
||||
})
|
||||
It("Detects directory with files", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(testDir)
|
||||
err = Touch(filepath.Join(testDir, "foo"))
|
||||
err = fileHelper.Touch(filepath.Join(testDir, "foo"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(DirectoryIsEmpty(testDir)).To(BeFalse())
|
||||
Expect(fileHelper.DirectoryIsEmpty(testDir)).To(BeFalse())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -72,7 +73,7 @@ var _ = Describe("Helpers", func() {
|
||||
err = ioutil.WriteFile(filepath.Join(testDir, "baz2", "foo"), []byte("test\n"), 0644)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ordered, notExisting := OrderFiles(testDir, []string{"bar", "baz", "bar/foo", "baz2", "foo", "baz2/foo", "notexisting"})
|
||||
ordered, notExisting := fileHelper.OrderFiles(testDir, []string{"bar", "baz", "bar/foo", "baz2", "foo", "baz2/foo", "notexisting"})
|
||||
|
||||
Expect(ordered).To(Equal([]string{"baz", "bar/foo", "foo", "baz2/foo", "bar", "baz2"}))
|
||||
Expect(notExisting).To(Equal([]string{"notexisting"}))
|
||||
@@ -96,7 +97,7 @@ var _ = Describe("Helpers", func() {
|
||||
err = os.MkdirAll(filepath.Join(testDir, "foo", "baz", "fa"), os.ModePerm)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ordered, _ := OrderFiles(testDir, []string{"foo", "foo/bar", "bar", "foo/baz/fa", "foo/baz"})
|
||||
ordered, _ := fileHelper.OrderFiles(testDir, []string{"foo", "foo/bar", "bar", "foo/baz/fa", "foo/baz"})
|
||||
Expect(ordered).To(Equal([]string{"foo/baz/fa", "foo/bar", "foo/baz", "foo", "bar"}))
|
||||
})
|
||||
})
|
@@ -2,6 +2,10 @@ package helpers

import (
	"io/ioutil"
	"path/filepath"
	"strings"

	fileHelper "github.com/mudler/luet/pkg/helpers/file"

	"github.com/imdario/mergo"
	"github.com/pkg/errors"
@@ -11,17 +15,86 @@ import (
	"helm.sh/helm/v3/pkg/engine"
)

// ChartFileB is an helper that takes a slice of bytes and construct a chart.File slice from it
func ChartFileB(s []byte) []*chart.File {
	return []*chart.File{
		{Name: "templates", Data: s},
	}
}

// ChartFileS is an helper that takes a string and construct a chart.File slice from it
func ChartFileS(s string) []*chart.File {
	return []*chart.File{
		{Name: "templates", Data: []byte(s)},
	}
}

// ChartFile reads all the given files and returns a slice of []*chart.File
// containing the raw content and the file name for each file
func ChartFile(s ...string) []*chart.File {
	files := []*chart.File{}
	for _, c := range s {
		raw, err := ioutil.ReadFile(c)
		if err != nil {
			return files
		}
		files = append(files, &chart.File{Name: c, Data: raw})
	}

	return files
}

// ChartFiles reads a list of paths and reads all yaml file inside. It returns a
// slice of pointers of chart.File(s) with the raw content of the yaml
func ChartFiles(path []string) ([]*chart.File, error) {
	var chartFiles []*chart.File
	for _, t := range path {
		rel, err := fileHelper.Rel2Abs(t)
		if err != nil {
			return nil, err
		}

		if !fileHelper.Exists(rel) {
			continue
		}
		files, err := fileHelper.ListDir(rel)
		if err != nil {
			return nil, err
		}

		for _, f := range files {
			if strings.ToLower(filepath.Ext(f)) == ".yaml" {
				raw, err := ioutil.ReadFile(f)
				if err != nil {
					return nil, err
				}
				chartFiles = append(chartFiles, &chart.File{Name: f, Data: raw})
			}
		}
	}
	return chartFiles, nil
}
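All four helpers produce the `[]*chart.File` slices that the reworked `RenderHelm`/`RenderFiles` below consume. A small sketch of how they compose (paths are placeholders; `ChartFile` stops and returns what it has collected if a file cannot be read):

```go
package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/helpers"
)

func main() {
	// From an inline string (the pattern the tests below use)
	inline := helpers.ChartFileS("{{.Values.name}}")

	// From explicit files on disk (placeholder path)
	fromFiles := helpers.ChartFile("build.yaml")

	// From directories: every *.yaml under each path is collected (placeholder dir)
	fromDirs, err := helpers.ChartFiles([]string{"templates"})
	if err != nil {
		panic(err)
	}

	fmt.Println(len(inline), len(fromFiles), len(fromDirs))
}
```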
// RenderHelm renders the template string with helm
func RenderHelm(template string, values, d map[string]interface{}) (string, error) {
func RenderHelm(files []*chart.File, values, d map[string]interface{}) (string, error) {

	// We slurp all the files into one here. This is not elegant, but still works.
	// As a reminder, the files passed here have on the head the templates in the 'templates/' folder
	// of each luet tree, and it have at the bottom the package buildpsec to be templated.
	// TODO: Replace by correctly populating the files so that the helm render engine templates it
	// correctly
	toTemplate := ""
	for _, f := range files {
		toTemplate += string(f.Data)
	}

	c := &chart.Chart{
		Metadata: &chart.Metadata{
			Name:    "",
			Version: "",
		},
		Templates: []*chart.File{
			{Name: "templates", Data: []byte(template)},
		},
		Values: map[string]interface{}{"Values": values},
		Templates: ChartFileS(toTemplate),
		Values:    map[string]interface{}{"Values": values},
	}

	v, err := chartutil.CoalesceValues(c, map[string]interface{}{"Values": d})
@@ -69,23 +142,18 @@ func reverse(s []string) []string {
	return s
}

func RenderFiles(toTemplate, valuesFile string, defaultFile ...string) (string, error) {
	raw, err := ioutil.ReadFile(toTemplate)
	if err != nil {
		return "", errors.Wrap(err, "reading file "+toTemplate)
	}

	if !Exists(valuesFile) {
		return "", errors.Wrap(err, "file not existing "+valuesFile)
func RenderFiles(files []*chart.File, valuesFile string, defaultFile ...string) (string, error) {
	if !fileHelper.Exists(valuesFile) {
		return "", errors.New("file does not exist: " + valuesFile)
	}
	val, err := ioutil.ReadFile(valuesFile)
	if err != nil {
		return "", errors.Wrap(err, "reading file "+valuesFile)
		return "", errors.Wrap(err, "reading file: "+valuesFile)
	}

	var values templatedata
	if err = yaml.Unmarshal(val, &values); err != nil {
		return "", errors.Wrap(err, "unmarshalling file "+toTemplate)
		return "", errors.Wrap(err, "unmarshalling values")
	}

	dst, err := UnMarshalValues(defaultFile)
@@ -93,5 +161,5 @@ func RenderFiles(toTemplate, valuesFile string, defaultFile ...string) (string,
		return "", errors.Wrap(err, "unmarshalling values")
	}

	return RenderHelm(string(raw), values, dst)
	return RenderHelm(files, values, dst)
}
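After this change `RenderFiles` takes the already-loaded chart files instead of a single template path, so callers pair it with one of the ChartFile helpers (the updated tests below do exactly this). A hedged standalone sketch with throwaway files, mirroring the test fixtures:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/mudler/luet/pkg/helpers"
)

func main() {
	// Hypothetical template and values files, written only for this example.
	tpl := "template.yaml"
	vals := "values.yaml"
	ioutil.WriteFile(tpl, []byte(`image: "{{.Values.image}}"`), 0644)
	ioutil.WriteFile(vals, []byte(`image: "alpine"`), 0644)

	// No default values files are passed; .Values comes from values.yaml alone.
	out, err := helpers.RenderFiles(helpers.ChartFile(tpl), vals)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // image: "alpine"
}
```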
@@ -30,21 +30,21 @@ func writeFile(path string, content string) {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
var _ = Describe("Helpers", func() {
|
||||
var _ = Describe("Helm", func() {
|
||||
Context("RenderHelm", func() {
|
||||
It("Renders templates", func() {
|
||||
out, err := RenderHelm("{{.Values.Test}}{{.Values.Bar}}", map[string]interface{}{"Test": "foo"}, map[string]interface{}{"Bar": "bar"})
|
||||
out, err := RenderHelm(ChartFileS("{{.Values.Test}}{{.Values.Bar}}"), map[string]interface{}{"Test": "foo"}, map[string]interface{}{"Bar": "bar"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(out).To(Equal("foobar"))
|
||||
})
|
||||
It("Renders templates with overrides", func() {
|
||||
out, err := RenderHelm("{{.Values.Test}}{{.Values.Bar}}", map[string]interface{}{"Test": "foo", "Bar": "baz"}, map[string]interface{}{"Bar": "bar"})
|
||||
out, err := RenderHelm(ChartFileS("{{.Values.Test}}{{.Values.Bar}}"), map[string]interface{}{"Test": "foo", "Bar": "baz"}, map[string]interface{}{"Bar": "bar"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(out).To(Equal("foobar"))
|
||||
})
|
||||
|
||||
It("Renders templates", func() {
|
||||
out, err := RenderHelm("{{.Values.Test}}{{.Values.Bar}}", map[string]interface{}{"Test": "foo", "Bar": "bar"}, map[string]interface{}{})
|
||||
out, err := RenderHelm(ChartFileS("{{.Values.Test}}{{.Values.Bar}}"), map[string]interface{}{"Test": "foo", "Bar": "bar"}, map[string]interface{}{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(out).To(Equal("foobar"))
|
||||
})
|
||||
@@ -68,7 +68,7 @@ foo: "baz"
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
res, err := RenderFiles(toTemplate, values, d)
|
||||
res, err := RenderFiles(ChartFile(toTemplate), values, d)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("baz"))
|
||||
|
||||
@@ -93,7 +93,7 @@ faa: "baz"
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
res, err := RenderFiles(toTemplate, values, d)
|
||||
res, err := RenderFiles(ChartFile(toTemplate), values, d)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("bar"))
|
||||
|
||||
@@ -114,7 +114,7 @@ foo: "bar"
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
res, err := RenderFiles(toTemplate, values)
|
||||
res, err := RenderFiles(ChartFile(toTemplate), values)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("bar"))
|
||||
})
|
||||
@@ -145,11 +145,11 @@ bar: "nei"
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
res, err := RenderFiles(toTemplate, values, d2, d)
|
||||
res, err := RenderFiles(ChartFile(toTemplate), values, d2, d)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("bazneif"))
|
||||
|
||||
res, err = RenderFiles(toTemplate, values, d, d2)
|
||||
res, err = RenderFiles(ChartFile(toTemplate), values, d, d2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal("doneif"))
|
||||
})
|
||||
@@ -173,7 +173,7 @@ faa: "baz"
|
||||
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
res, err := RenderFiles(toTemplate, values, d)
|
||||
res, err := RenderFiles(ChartFile(toTemplate), values, d)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(res).To(Equal(""))
|
||||
|
||||
|
@@ -5,6 +5,7 @@ package imgworker
import (
	"errors"
	"fmt"
	"github.com/mudler/luet/pkg/bus"
	"os"

	"github.com/containerd/containerd/content"
@@ -18,6 +19,12 @@ import (
// TODO: this requires root permissions to mount/unmount layers, althrought it shouldn't be required.
// See how backends are unpacking images without asking for root permissions.

// UnpackEventData is the data structure to pass for the bus events
type UnpackEventData struct {
	Image string
	Dest  string
}

// Unpack exports an image to a rootfs destination directory.
func (c *Client) Unpack(image, dest string) error {

@@ -59,6 +66,8 @@ func (c *Client) Unpack(image, dest string) error {
		return fmt.Errorf("getting image manifest failed: %v", err)
	}

	_, _ = bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})

	for _, desc := range manifest.Layers {
		logrus.Debugf("Unpacking layer %s", desc.Digest.String())

@@ -78,5 +87,7 @@ func (c *Client) Unpack(image, dest string) error {
		}
	}

	_, _ = bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})

	return nil
}
@@ -14,7 +14,7 @@
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package helpers
|
||||
package match
|
||||
|
||||
import (
|
||||
"reflect"
|
10
pkg/helpers/slice.go
Normal file
@@ -0,0 +1,10 @@
package helpers

func Contains(s []string, e string) bool {
	for _, a := range s {
		if a == e {
			return true
		}
	}
	return false
}
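`Contains` is the linear membership check the installer's new file-conflict logic relies on. For example:

```go
package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/helpers"
)

func main() {
	files := []string{"usr/bin/foo", "etc/foo.conf"}
	fmt.Println(helpers.Contains(files, "etc/foo.conf")) // true
	fmt.Println(helpers.Contains(files, "usr/bin/bar"))  // false
}
```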
@@ -28,8 +28,8 @@ import (
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/helpers/imgworker"
|
||||
"github.com/mudler/luet/pkg/helpers/docker"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
)
|
||||
|
||||
@@ -64,7 +64,7 @@ func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.
|
||||
artifactName := path.Base(a.Path)
|
||||
cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
|
||||
Debug("Cache file", cacheFile)
|
||||
if err := helpers.EnsureDir(cacheFile); err != nil {
|
||||
if err := fileHelper.EnsureDir(cacheFile); err != nil {
|
||||
return nil, errors.Wrapf(err, "could not create cache folder %s for %s", config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), cacheFile)
|
||||
}
|
||||
ok := false
|
||||
@@ -77,7 +77,7 @@ func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.
|
||||
// is done in such cases (see repository.go)
|
||||
|
||||
// Check if file is already in cache
|
||||
if helpers.Exists(cacheFile) {
|
||||
if fileHelper.Exists(cacheFile) {
|
||||
Debug("Cache hit for artifact", artifactName)
|
||||
resultingArtifact = a
|
||||
resultingArtifact.Path = cacheFile
|
||||
@@ -102,7 +102,7 @@ func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.
|
||||
}
|
||||
|
||||
// imageName := fmt.Sprintf("%s/%s", uri, artifact.GetCompileSpec().GetPackage().GetPackageImageName())
|
||||
info, err := helpers.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
|
||||
info, err := docker.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
|
||||
if err != nil {
|
||||
Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
|
||||
continue
|
||||
@@ -140,7 +140,6 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
|
||||
var file *os.File = nil
|
||||
var err error
|
||||
var temp, contentstore string
|
||||
var info *imgworker.ListedImage
|
||||
// Files should be in URI/repository:<file>
|
||||
ok := false
|
||||
|
||||
@@ -161,10 +160,10 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
imageName := fmt.Sprintf("%s:%s", uri, helpers.StripInvalidStringsFromImage(name))
|
||||
imageName := fmt.Sprintf("%s:%s", uri, docker.StripInvalidStringsFromImage(name))
|
||||
Info("Downloading", imageName)
|
||||
|
||||
info, err = helpers.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
|
||||
info, err := docker.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
|
||||
if err != nil {
|
||||
Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
|
||||
continue
|
||||
@@ -174,7 +173,7 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
|
||||
Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.ContentSize))))
|
||||
|
||||
Debug("\nCopying file ", filepath.Join(temp, name), "to", file.Name())
|
||||
err = helpers.CopyFile(filepath.Join(temp, name), file.Name())
|
||||
err = fileHelper.CopyFile(filepath.Join(temp, name), file.Name())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
@@ -22,8 +22,8 @@ import (
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
. "github.com/mudler/luet/pkg/installer/client"
|
||||
@@ -32,7 +32,7 @@ import (
|
||||
)
|
||||
|
||||
// This test expect that the repository defined in UNIT_TEST_DOCKER_IMAGE is in zstd format.
|
||||
// the repository is built by the 01_simple_docker.sh integration test file.
|
||||
// the repository is built by the 01_simple_docker.sh integration test fileHelper.
|
||||
// This test also require root. At the moment, unpacking docker images with 'img' requires root permission to
|
||||
// mount/unmount layers.
|
||||
var _ = Describe("Docker client", func() {
|
||||
@@ -51,7 +51,7 @@ var _ = Describe("Docker client", func() {
|
||||
It("Downloads single files", func() {
|
||||
f, err := c.DownloadFile("repository.yaml")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(f)).To(ContainSubstring("Test Repo"))
|
||||
Expect(fileHelper.Read(f)).To(ContainSubstring("Test Repo"))
|
||||
os.RemoveAll(f)
|
||||
})
|
||||
|
||||
@@ -71,8 +71,8 @@ var _ = Describe("Docker client", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
Expect(f.Unpack(tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(filepath.Join(tmpdir, "c"))).To(Equal("c\n"))
|
||||
Expect(helpers.Read(filepath.Join(tmpdir, "cd"))).To(Equal("c\n"))
|
||||
Expect(fileHelper.Read(filepath.Join(tmpdir, "c"))).To(Equal("c\n"))
|
||||
Expect(fileHelper.Read(filepath.Join(tmpdir, "cd"))).To(Equal("c\n"))
|
||||
os.RemoveAll(f.Path)
|
||||
})
|
||||
})
|
||||
|
@@ -18,19 +18,20 @@ package client
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
"github.com/cavaliercoder/grab"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
|
||||
"github.com/schollz/progressbar/v3"
|
||||
)
|
||||
@@ -43,6 +44,27 @@ func NewHttpClient(r RepoData) *HttpClient {
|
||||
return &HttpClient{RepoData: r}
|
||||
}
|
||||
|
||||
func NewGrabClient() *grab.Client {
	httpTimeout := 120
	timeout := os.Getenv("HTTP_TIMEOUT")
	if timeout != "" {
		timeoutI, err := strconv.Atoi(timeout)
		if err == nil {
			httpTimeout = timeoutI
		}
	}

	return &grab.Client{
		UserAgent: "grab",
		HTTPClient: &http.Client{
			Timeout: time.Duration(httpTimeout) * time.Second,
			Transport: &http.Transport{
				Proxy: http.ProxyFromEnvironment,
			},
		},
	}
}
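`NewGrabClient` centralizes the HTTP settings (120-second default timeout, overridable via the `HTTP_TIMEOUT` environment variable, proxy taken from the environment) that `DownloadArtifact` and `DownloadFile` now use in place of `grab.NewClient()`. A small sketch of a one-off download with it (URL and destination are placeholders):

```go
package main

import (
	"fmt"
	"os"

	"github.com/cavaliercoder/grab"
	"github.com/mudler/luet/pkg/installer/client"
)

func main() {
	os.Setenv("HTTP_TIMEOUT", "30") // override the 120s default

	c := client.NewGrabClient()

	req, err := grab.NewRequest("/tmp/repository.yaml", "https://example.org/repo/repository.yaml")
	if err != nil {
		panic(err)
	}

	resp := c.Do(req)
	if err := resp.Err(); err != nil { // blocks until the transfer completes
		panic(err)
	}
	fmt.Println("saved to", resp.Filename)
}
```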
func (c *HttpClient) PrepareReq(dst, url string) (*grab.Request, error) {
|
||||
|
||||
req, err := grab.NewRequest(dst, url)
|
||||
@@ -77,7 +99,7 @@ func (c *HttpClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.Pa
|
||||
ok := false
|
||||
|
||||
// Check if file is already in cache
|
||||
if helpers.Exists(cacheFile) {
|
||||
if fileHelper.Exists(cacheFile) {
|
||||
Debug("Use artifact", artifactName, "from cache.")
|
||||
} else {
|
||||
|
||||
@@ -87,7 +109,7 @@ func (c *HttpClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.Pa
|
||||
}
|
||||
defer os.RemoveAll(temp)
|
||||
|
||||
client := grab.NewClient()
|
||||
client := NewGrabClient()
|
||||
|
||||
for _, uri := range c.RepoData.Urls {
|
||||
Debug("Downloading artifact", artifactName, "from", uri)
|
||||
@@ -156,7 +178,7 @@ func (c *HttpClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.Pa
|
||||
fmt.Sprintf("%.2f", (float64(resp.BytesPerSecond())/1024)/1024), "MiB/s )")
|
||||
|
||||
Debug("\nCopying file ", filepath.Join(temp, artifactName), "to", cacheFile)
|
||||
err = helpers.CopyFile(filepath.Join(temp, artifactName), cacheFile)
|
||||
err = fileHelper.CopyFile(filepath.Join(temp, artifactName), cacheFile)
|
||||
|
||||
bar.Finish()
|
||||
ok = true
|
||||
@@ -187,7 +209,7 @@ func (c *HttpClient) DownloadFile(name string) (string, error) {
|
||||
return "", err
|
||||
}
|
||||
|
||||
client := grab.NewClient()
|
||||
client := NewGrabClient()
|
||||
|
||||
for _, uri := range c.RepoData.Urls {
|
||||
|
||||
@@ -218,7 +240,7 @@ func (c *HttpClient) DownloadFile(name string) (string, error) {
|
||||
fmt.Sprintf("%.2f", (float64(resp.BytesComplete())/1000)/1000), "MB (",
|
||||
fmt.Sprintf("%.2f", (float64(resp.BytesPerSecond())/1024)/1024), "MiB/s )")
|
||||
|
||||
err = helpers.CopyFile(filepath.Join(temp, name), file.Name())
|
||||
err = fileHelper.CopyFile(filepath.Join(temp, name), file.Name())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
@@ -23,8 +23,7 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/installer/client"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -47,7 +46,7 @@ var _ = Describe("Http client", func() {
|
||||
c := NewHttpClient(RepoData{Urls: []string{ts.URL}})
|
||||
path, err := c.DownloadFile("test.txt")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(path)).To(Equal("test"))
|
||||
Expect(fileHelper.Read(path)).To(Equal("test"))
|
||||
os.RemoveAll(path)
|
||||
})
|
||||
|
||||
@@ -65,7 +64,7 @@ var _ = Describe("Http client", func() {
|
||||
c := NewHttpClient(RepoData{Urls: []string{ts.URL}})
|
||||
path, err := c.DownloadArtifact(&artifact.PackageArtifact{Path: "test.txt"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(path.Path)).To(Equal("test"))
|
||||
Expect(fileHelper.Read(path.Path)).To(Equal("test"))
|
||||
os.RemoveAll(path.Path)
|
||||
})
|
||||
|
||||
|
@@ -22,9 +22,8 @@ import (
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
)
|
||||
|
||||
type LocalClient struct {
|
||||
@@ -50,7 +49,7 @@ func (c *LocalClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.P
|
||||
}
|
||||
|
||||
// Check if file is already in cache
|
||||
if helpers.Exists(cacheFile) {
|
||||
if fileHelper.Exists(cacheFile) {
|
||||
Debug("Use artifact", artifactName, "from cache.")
|
||||
} else {
|
||||
ok := false
|
||||
@@ -61,7 +60,7 @@ func (c *LocalClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.P
|
||||
Info("Downloading artifact", artifactName, "from", uri)
|
||||
|
||||
//defer os.Remove(file.Name())
|
||||
err = helpers.CopyFile(filepath.Join(uri, artifactName), cacheFile)
|
||||
err = fileHelper.CopyFile(filepath.Join(uri, artifactName), cacheFile)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
@@ -104,7 +103,7 @@ func (c *LocalClient) DownloadFile(name string) (string, error) {
|
||||
}
|
||||
//defer os.Remove(file.Name())
|
||||
|
||||
err = helpers.CopyFile(filepath.Join(uri, name), file.Name())
|
||||
err = fileHelper.CopyFile(filepath.Join(uri, name), file.Name())
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
@@ -21,8 +21,7 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/installer/client"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -42,7 +41,7 @@ var _ = Describe("Local client", func() {
|
||||
c := NewLocalClient(RepoData{Urls: []string{tmpdir}})
|
||||
path, err := c.DownloadFile("test.txt")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(path)).To(Equal("test"))
|
||||
Expect(fileHelper.Read(path)).To(Equal("test"))
|
||||
os.RemoveAll(path)
|
||||
})
|
||||
|
||||
@@ -58,7 +57,7 @@ var _ = Describe("Local client", func() {
|
||||
c := NewLocalClient(RepoData{Urls: []string{tmpdir}})
|
||||
path, err := c.DownloadArtifact(&artifact.PackageArtifact{Path: "test.txt"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(path.Path)).To(Equal("test"))
|
||||
Expect(fileHelper.Read(path.Path)).To(Equal("test"))
|
||||
os.RemoveAll(path.Path)
|
||||
})
|
||||
|
||||
|
@@ -21,6 +21,7 @@ import (
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
box "github.com/mudler/luet/pkg/box"
|
||||
. "github.com/mudler/luet/pkg/config"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -51,13 +52,14 @@ func (f *LuetFinalizer) RunInstall(s *System) error {
|
||||
Info(":shell: Executing finalizer on ", s.Target, cmd, toRun)
|
||||
if s.Target == string(os.PathSeparator) {
|
||||
cmd := exec.Command(cmd, toRun...)
|
||||
cmd.Env = LuetCfg.GetFinalizerEnvs()
|
||||
stdoutStderr, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed running command: "+string(stdoutStderr))
|
||||
}
|
||||
Info(string(stdoutStderr))
|
||||
} else {
|
||||
b := box.NewBox(cmd, toRun, []string{}, []string{}, s.Target, false, true, true)
|
||||
b := box.NewBox(cmd, toRun, []string{}, LuetCfg.GetFinalizerEnvs(), s.Target, false, true, true)
|
||||
err := b.Run()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed running command ")
|
||||
|
@@ -24,16 +24,17 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
|
||||
. "github.com/logrusorgru/aurora"
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
"github.com/mudler/luet/pkg/helpers/match"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
|
||||
. "github.com/logrusorgru/aurora"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -271,15 +272,25 @@ func (l *LuetInstaller) swap(o Option, syncedRepos Repositories, toRemove pkg.Pa
|
||||
if err := l.download(syncedRepos, match); err != nil {
|
||||
return errors.Wrap(err, "Pre-downloading packages")
|
||||
}
|
||||
|
||||
if err := l.checkFileconflicts(match, false, s); err != nil {
|
||||
if !l.Options.Force {
|
||||
return errors.Wrap(err, "file conflict found")
|
||||
} else {
|
||||
Warning("file conflict found", err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
if l.Options.DownloadOnly {
|
||||
return nil
|
||||
}
|
||||
|
||||
ops := l.getOpsWithOptions(toRemove, match, Option{
|
||||
Force: o.Force,
|
||||
NoDeps: false,
|
||||
OnlyDeps: o.OnlyDeps,
|
||||
RunFinalizers: false,
|
||||
Force: o.Force,
|
||||
NoDeps: false,
|
||||
OnlyDeps: o.OnlyDeps,
|
||||
RunFinalizers: false,
|
||||
CheckFileConflicts: false,
|
||||
}, o, syncedRepos, packages, assertions, allRepos)
|
||||
|
||||
err = l.runOps(ops, s)
|
||||
@@ -303,6 +314,8 @@ type Option struct {
|
||||
FullCleanUninstall bool
|
||||
OnlyDeps bool
|
||||
RunFinalizers bool
|
||||
|
||||
CheckFileConflicts bool
|
||||
}
|
||||
|
||||
type operation struct {
|
||||
@@ -508,10 +521,11 @@ func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
|
||||
}
|
||||
|
||||
o := Option{
|
||||
NoDeps: l.Options.NoDeps,
|
||||
Force: l.Options.Force,
|
||||
OnlyDeps: l.Options.OnlyDeps,
|
||||
RunFinalizers: true,
|
||||
NoDeps: l.Options.NoDeps,
|
||||
Force: l.Options.Force,
|
||||
OnlyDeps: l.Options.OnlyDeps,
|
||||
CheckFileConflicts: true,
|
||||
RunFinalizers: true,
|
||||
}
|
||||
match, packages, assertions, allRepos, err := l.computeInstall(o, syncedRepos, cp, s)
|
||||
if err != nil {
|
||||
@@ -537,6 +551,11 @@ func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
|
||||
if m.Package.GetName() == p.GetName() {
|
||||
found = true
|
||||
}
|
||||
for _, pack := range m.Package.GetProvides() {
|
||||
if pack.GetName() == p.GetName() {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
@@ -597,7 +616,7 @@ func (l *LuetInstaller) Reclaim(s *System) error {
|
||||
"from", repo.GetName(), "is installed")
|
||||
FILES:
|
||||
for _, f := range artefact.Files {
|
||||
if helpers.Exists(filepath.Join(s.Target, f)) {
|
||||
if fileHelper.Exists(filepath.Join(s.Target, f)) {
|
||||
p, err := repo.GetTree().GetDatabase().FindPackage(artefact.CompileSpec.GetPackage())
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -755,12 +774,66 @@ func (l *LuetInstaller) getFinalizers(allRepos pkg.PackageDatabase, solution sol
	return toFinalize, nil
}

func (l *LuetInstaller) checkFileconflicts(toInstall map[string]ArtifactMatch, checkSystem bool, s *System) error {
	Info("Checking for file conflicts..")
	defer s.Clean() // Release memory

	filesToInstall := []string{}
	for _, m := range toInstall {
		a, err := l.downloadPackage(m)
		if err != nil && !l.Options.Force {
			return errors.Wrap(err, "Failed downloading package")
		}
		files, err := a.FileList()
		if err != nil && !l.Options.Force {
			return errors.Wrapf(err, "Could not get filelist for %s", a.CompileSpec.Package.HumanReadableString())
		}

		for _, f := range files {
			if helpers.Contains(filesToInstall, f) {
				return fmt.Errorf(
					"file conflict between packages to be installed",
				)
			}
			if checkSystem {
				exists, p, err := s.ExistsPackageFile(f)
				if err != nil {
					return errors.Wrap(err, "failed checking into system db")
				}
				if exists {
					return fmt.Errorf(
						"file conflict between '%s' and '%s' ( file: %s )",
						p.HumanReadableString(),
						m.Package.HumanReadableString(),
						f,
					)
				}
			}
		}
		filesToInstall = append(filesToInstall, files...)
	}

	return nil
}
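The core of `checkFileconflicts` is two membership checks per file: against the files of the other packages queued for install (via `helpers.Contains`) and, when `checkSystem` is set, against the system database (`s.ExistsPackageFile`). A stripped-down, standalone sketch of the first check only (package names and file lists are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/helpers"
)

func main() {
	// hypothetical file lists of two packages queued for installation
	queued := map[string][]string{
		"app/foo-1.0": {"usr/bin/foo", "etc/foo.conf"},
		"app/bar-1.0": {"usr/bin/bar", "etc/foo.conf"}, // clashes on etc/foo.conf
	}

	seen := []string{}
	for name, files := range queued {
		for _, f := range files {
			if helpers.Contains(seen, f) {
				fmt.Printf("file conflict: %s already provided by another queued package (%s)\n", f, name)
				return
			}
		}
		seen = append(seen, files...)
	}
	fmt.Println("no conflicts between queued packages")
}
```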
|
||||
func (l *LuetInstaller) install(o Option, syncedRepos Repositories, toInstall map[string]ArtifactMatch, p pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase, s *System) error {
|
||||
// Install packages into rootfs in parallel.
|
||||
|
||||
// Download packages in parallel first
|
||||
if err := l.download(syncedRepos, toInstall); err != nil {
|
||||
return errors.Wrap(err, "Downloading packages")
|
||||
}
|
||||
|
||||
if o.CheckFileConflicts {
|
||||
// Check file conflicts
|
||||
if err := l.checkFileconflicts(toInstall, true, s); err != nil {
|
||||
if !l.Options.Force {
|
||||
return errors.Wrap(err, "file conflict found")
|
||||
} else {
|
||||
Warning("file conflict found", err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if l.Options.DownloadOnly {
|
||||
return nil
|
||||
}
|
||||
@@ -915,7 +988,7 @@ func pruneEmptyFilePath(path string) {
|
||||
currentPath = filepath.Join(currentPath, p)
|
||||
allPaths = append(allPaths, currentPath)
|
||||
}
|
||||
helpers.ReverseAny(allPaths)
|
||||
match.ReverseAny(allPaths)
|
||||
for _, p := range allPaths {
|
||||
checkAndPrunePath(p)
|
||||
}
|
||||
@@ -943,7 +1016,7 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
|
||||
cp.Map(files)
|
||||
}
|
||||
|
||||
toRemove, notPresent := helpers.OrderFiles(s.Target, files)
|
||||
toRemove, notPresent := fileHelper.OrderFiles(s.Target, files)
|
||||
|
||||
// Remove from target
|
||||
for _, f := range toRemove {
|
||||
@@ -1065,7 +1138,7 @@ func (l *LuetInstaller) computeUninstall(o Option, s *System, packs ...pkg.Packa
|
||||
func (l *LuetInstaller) generateUninstallFn(o Option, s *System, packs ...pkg.Package) (pkg.Packages, func() error, error) {
|
||||
for _, p := range packs {
|
||||
if packs, _ := s.Database.FindPackages(p); len(packs) == 0 {
|
||||
return nil, nil, errors.New("Package not found in the system")
|
||||
return nil, nil, errors.New(fmt.Sprintf("Package %s not found in the system", p.HumanReadableString()))
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -27,6 +27,7 @@ import (
|
||||
"github.com/mudler/luet/pkg/compiler/types/options"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
. "github.com/mudler/luet/pkg/installer"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
@@ -37,14 +38,15 @@ import (
|
||||
|
||||
func stubRepo(tmpdir, tree string) (*LuetSystemRepository, error) {
|
||||
return GenerateRepository(
|
||||
"test",
|
||||
"description",
|
||||
"disk",
|
||||
[]string{tmpdir},
|
||||
1,
|
||||
tmpdir,
|
||||
[]string{tree},
|
||||
pkg.NewInMemoryDatabase(false), nil, "", false, false, false, nil)
|
||||
WithName("test"),
|
||||
WithDescription("description"),
|
||||
WithType("disk"),
|
||||
WithUrls(tmpdir),
|
||||
WithPriority(1),
|
||||
WithSource(tmpdir),
|
||||
WithTree(tree),
|
||||
WithDatabase(pkg.NewInMemoryDatabase(false)),
|
||||
)
|
||||
}
|
||||
|
||||
var _ = Describe("Installer", func() {
|
||||
@@ -84,34 +86,34 @@ var _ = Describe("Installer", func() {
|
||||
|
||||
a, err := c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(a.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(a.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(a.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -136,8 +138,8 @@ urls:
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -154,8 +156,8 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
@@ -199,21 +201,21 @@ urls:
|
||||
|
||||
artifact, err := c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -223,15 +225,15 @@ urls:
|
||||
repo.SetRepositoryFile(REPOFILE_TREE_KEY, treeFile)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -256,8 +258,8 @@ urls:
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -274,8 +276,8 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
@@ -319,41 +321,43 @@ urls:
|
||||
|
||||
artifact, err := c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := GenerateRepository(
|
||||
"test",
|
||||
"description",
|
||||
"disk",
|
||||
[]string{tmpdir}, 1,
|
||||
tmpdir,
|
||||
[]string{"../../tests/fixtures/buildable"},
|
||||
pkg.NewInMemoryDatabase(false), nil, "", false, false, false, nil)
|
||||
WithName("test"),
|
||||
WithDescription("description"),
|
||||
WithType("disk"),
|
||||
WithUrls(tmpdir),
|
||||
WithPriority(1),
|
||||
WithSource(tmpdir),
|
||||
WithTree("../../tests/fixtures/buildable"),
|
||||
WithDatabase(pkg.NewInMemoryDatabase(false)),
|
||||
)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -383,8 +387,8 @@ urls:
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -401,8 +405,8 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
@@ -444,42 +448,43 @@ urls:
|
||||
|
||||
artifact, err := c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := GenerateRepository(
|
||||
"test",
|
||||
"description",
|
||||
"disk",
|
||||
[]string{tmpdir},
|
||||
1,
|
||||
tmpdir,
|
||||
[]string{"../../tests/fixtures/buildable"},
|
||||
pkg.NewInMemoryDatabase(false), nil, "", false, false, false, nil)
|
||||
WithName("test"),
|
||||
WithDescription("description"),
|
||||
WithType("disk"),
|
||||
WithUrls(tmpdir),
|
||||
WithPriority(1),
|
||||
WithSource(tmpdir),
|
||||
WithTree("../../tests/fixtures/buildable"),
|
||||
WithDatabase(pkg.NewInMemoryDatabase(false)),
|
||||
)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -531,7 +536,7 @@ urls:
|
||||
|
||||
artifact, err = c.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
|
||||
repo, err = stubRepo(tmpdir2, "../../tests/fixtures/alpine")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -604,15 +609,15 @@ urls:
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -642,8 +647,8 @@ urls:
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -660,11 +665,11 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
// New version - new files
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
@@ -729,9 +734,9 @@ urls:
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_old_repo")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -774,8 +779,8 @@ urls:
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -793,11 +798,11 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
// New version - new files
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
@@ -856,17 +861,17 @@ urls:
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.1.package.tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.1.package.tar"))).ToNot(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -896,8 +901,8 @@ urls:
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -914,11 +919,11 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
// New version - new files
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
@@ -1014,17 +1019,17 @@ urls:
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.1.package.tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.1.package.tar.gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.1.package.tar"))).ToNot(BeTrue())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -1059,13 +1064,13 @@ urls:
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
Expect(len(system.Database.World())).To(Equal(0))
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeFalse())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeFalse())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "c"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "c"))).To(BeFalse())
|
||||
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "test5"))).ToNot(HaveOccurred())
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "test6"))).ToNot(HaveOccurred())
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "c"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "test5"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "test6"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "c"))).ToNot(HaveOccurred())
|
||||
|
||||
err = inst.Reclaim(system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -1115,15 +1120,15 @@ urls:
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_old_repo")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("disk"))
|
||||
|
||||
@@ -1158,13 +1163,13 @@ urls:
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
Expect(len(system.Database.World())).To(Equal(0))
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeFalse())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeFalse())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "c"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeFalse())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "c"))).To(BeFalse())
|
||||
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "test5"))).ToNot(HaveOccurred())
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "test6"))).ToNot(HaveOccurred())
|
||||
Expect(helpers.Touch(filepath.Join(fakeroot, "c"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "test5"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "test6"))).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Touch(filepath.Join(fakeroot, "c"))).ToNot(HaveOccurred())
|
||||
|
||||
err = inst.Reclaim(system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -1218,11 +1223,11 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
// New version - new files
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
|
@@ -24,15 +24,16 @@ import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
compression "github.com/mudler/luet/pkg/compiler/types/compression"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
"go.uber.org/multierr"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/installer/client"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
@@ -252,11 +253,9 @@ func (f *LuetRepositoryFile) GetChecksums() artifact.Checksums {
|
||||
// GenerateRepository generates a new repository from the given options.
// If the repository is of the docker type, it will also push the package images.
// If the repository is local, it will build the package index.
|
||||
func GenerateRepository(name, descr, t string, urls []string,
|
||||
priority int, src string, treesDir []string, db pkg.PackageDatabase,
|
||||
b compiler.CompilerBackend, imagePrefix string, pushImages, force, fromRepo bool, c *config.LuetConfig) (*LuetSystemRepository, error) {
|
||||
|
||||
// 1: First filter the runtime db to only the metadata we actually have
|
||||
func GenerateRepository(p ...RepositoryOption) (*LuetSystemRepository, error) {
|
||||
c := RepositoryConfig{}
|
||||
c.Apply(p...)
|
||||
|
||||
btr := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
runtimeTree := pkg.NewInMemoryDatabase(false)
|
||||
@@ -264,7 +263,7 @@ func GenerateRepository(name, descr, t string, urls []string,
|
||||
tempTree := pkg.NewInMemoryDatabase(false)
|
||||
temptr := tree.NewInstallerRecipe(tempTree)
|
||||
|
||||
for _, treeDir := range treesDir {
|
||||
for _, treeDir := range c.Tree {
|
||||
if err := temptr.Load(treeDir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -279,8 +278,8 @@ func GenerateRepository(name, descr, t string, urls []string,
|
||||
repodb := pkg.NewInMemoryDatabase(false)
|
||||
generalRecipe := tree.NewCompilerRecipe(repodb)
|
||||
|
||||
if fromRepo {
|
||||
if err := LoadBuildTree(generalRecipe, repodb, c); err != nil {
|
||||
if c.FromRepository {
|
||||
if err := LoadBuildTree(generalRecipe, repodb, c.config); err != nil {
|
||||
Warning("errors while loading trees from repositories", err.Error())
|
||||
}
|
||||
|
||||
@@ -292,23 +291,66 @@ func GenerateRepository(name, descr, t string, urls []string,
|
||||
|
||||
// Pick only atoms in db which have a real metadata for runtime db (tr)
|
||||
for _, p := range tempTree.World() {
|
||||
if _, err := os.Stat(filepath.Join(src, p.GetMetadataFilePath())); err == nil {
|
||||
if _, err := os.Stat(filepath.Join(c.Src, p.GetMetadataFilePath())); err == nil {
|
||||
runtimeTree.CreatePackage(p)
|
||||
}
|
||||
}
|
||||
|
||||
// Load packages from metadata files if not present already.
|
||||
var ff = func(currentpath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Only those which are metadata
|
||||
if !strings.HasSuffix(info.Name(), pkg.PackageMetaSuffix) {
|
||||
return nil
|
||||
}
|
||||
|
||||
dat, err := ioutil.ReadFile(currentpath)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
art, err := artifact.NewPackageArtifactFromYaml(dat)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if _, err := runtimeTree.FindPackage(art.CompileSpec.Package); err != nil && art.CompileSpec.Package.Name != "" {
|
||||
Debug("Adding", art.CompileSpec.Package.HumanReadableString(), "from metadata file", currentpath)
|
||||
if art.Runtime != nil && art.Runtime.Name != "" {
|
||||
runtimeTree.CreatePackage(art.Runtime)
|
||||
} else {
|
||||
// We don't have runtime at this point. So we import the package as is
|
||||
r := []*pkg.DefaultPackage{}
|
||||
p := art.CompileSpec.Package.Clone()
|
||||
p.Requires(r)
|
||||
p.SetProvides(r)
|
||||
p.Conflicts(r)
|
||||
runtimeTree.CreatePackage(p)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if c.FromMetadata {
|
||||
// Best effort
|
||||
filepath.Walk(c.Src, ff)
|
||||
}
|
||||
|
||||
repo := &LuetSystemRepository{
|
||||
LuetRepository: config.NewLuetRepository(name, t, descr, urls, priority, true, false),
|
||||
LuetRepository: config.NewLuetRepository(c.Name, c.Type, c.Description, c.Urls, c.Priority, true, false),
|
||||
Tree: tree.NewInstallerRecipe(runtimeTree),
|
||||
BuildTree: btr,
|
||||
RepositoryFiles: map[string]LuetRepositoryFile{},
|
||||
PushImages: pushImages,
|
||||
ForcePush: force,
|
||||
Backend: b,
|
||||
imagePrefix: imagePrefix,
|
||||
PushImages: c.PushImages,
|
||||
ForcePush: c.Force,
|
||||
Backend: c.CompilerBackend,
|
||||
imagePrefix: c.ImagePrefix,
|
||||
}
|
||||
|
||||
if err := repo.initialize(src); err != nil {
|
||||
if err := repo.initialize(c.Src); err != nil {
|
||||
return nil, errors.Wrap(err, "while building repository artifact index")
|
||||
}
|
||||
|
||||
@@ -723,7 +765,7 @@ func (r *LuetSystemRepository) SyncBuildMetadata(path string) error {
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "while downloading metadata for %s", ai.HumanReadableString())
|
||||
}
|
||||
if err := helpers.Move(file, filepath.Join(path, ai.GetMetadataFilePath())); err != nil {
|
||||
if err := fileHelper.Move(file, filepath.Join(path, ai.GetMetadataFilePath())); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -810,7 +852,7 @@ func (r *LuetSystemRepository) Sync(force bool) (*LuetSystemRepository, error) {
|
||||
|
||||
if r.Cached {
|
||||
// Copy updated repository.yaml file to repo dir now that the tree is synced.
|
||||
err = helpers.CopyFile(file, filepath.Join(repobasedir, REPOSITORY_SPECFILE))
|
||||
err = fileHelper.CopyFile(file, filepath.Join(repobasedir, REPOSITORY_SPECFILE))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error on update "+REPOSITORY_SPECFILE)
|
||||
}
|
||||
|
@@ -24,16 +24,16 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/compiler/backend"
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/mudler/luet/pkg/helpers/docker"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/mudler/luet/pkg/bus"
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -44,7 +44,6 @@ type dockerRepositoryGenerator struct {
|
||||
}
|
||||
|
||||
func (l *dockerRepositoryGenerator) Initialize(path string, db pkg.PackageDatabase) ([]*artifact.PackageArtifact, error) {
|
||||
|
||||
Info("Generating docker images for packages in", l.imagePrefix)
|
||||
var art []*artifact.PackageArtifact
|
||||
var ff = func(currentpath string, info os.FileInfo, err error) error {
|
||||
@@ -61,7 +60,7 @@ func (l *dockerRepositoryGenerator) Initialize(path string, db pkg.PackageDataba
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := l.pushImageFromArtifact(artifact.NewPackageArtifact(currentpath), l.b); err != nil {
|
||||
if err := l.pushImageFromArtifact(artifact.NewPackageArtifact(currentpath), l.b, true); err != nil {
|
||||
return errors.Wrap(err, "while pushing metadata file associated to the artifact")
|
||||
}
|
||||
|
||||
@@ -159,16 +158,20 @@ func (d *dockerRepositoryGenerator) pushRepoMetadata(repospec string, r *LuetSys
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *dockerRepositoryGenerator) pushImageFromArtifact(a *artifact.PackageArtifact, b compiler.CompilerBackend) error {
|
||||
func (d *dockerRepositoryGenerator) pushImageFromArtifact(a *artifact.PackageArtifact, b compiler.CompilerBackend, checkIfExists bool) error {
|
||||
// we generate a new archive containing the required compressed file.
|
||||
// TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
|
||||
treeArchive, err := artifact.CreateArtifactForFile(a.Path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed generating checksums for tree")
|
||||
}
|
||||
imageTree := fmt.Sprintf("%s:%s", d.imagePrefix, helpers.StripInvalidStringsFromImage(a.GetFileName()))
|
||||
|
||||
return d.pushFileFromArtifact(treeArchive, imageTree)
|
||||
imageTree := fmt.Sprintf("%s:%s", d.imagePrefix, docker.StripInvalidStringsFromImage(a.GetFileName()))
|
||||
if checkIfExists && d.imagePush && d.b.ImageAvailable(imageTree) && !d.force {
|
||||
Info("Image", imageTree, "already present, skipping. use --force-push to override")
|
||||
return nil
|
||||
} else {
|
||||
return d.pushFileFromArtifact(treeArchive, imageTree)
|
||||
}
|
||||
}
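
The new checkIfExists parameter only matters for the metadata images pushed during Initialize: when pushes are enabled, the tag is already available on the registry, and force-push was not requested, the upload is skipped, while the tree and repository images (pushed with checkIfExists=false) are always re-pushed. A minimal standalone sketch of that guard; the helper name and package are illustrative and not part of luet:

package main

import "fmt"

// skipPush mirrors the guard above: skip only when the caller asked for the
// existence check, pushing is enabled, the image is already available, and
// force-push was not requested.
func skipPush(checkIfExists, pushEnabled, available, force bool) bool {
	return checkIfExists && pushEnabled && available && !force
}

func main() {
	fmt.Println(skipPush(true, true, true, false))  // true  – metadata image reused
	fmt.Println(skipPush(false, true, true, false)) // false – tree/repo files are always pushed
}
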
|
||||
|
||||
// Generate creates a Docker luet repository
|
||||
@@ -225,7 +228,7 @@ func (d *dockerRepositoryGenerator) Generate(r *LuetSystemRepository, imagePrefi
|
||||
|
||||
// we generate a new archive containing the required compressed file.
|
||||
// TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
|
||||
if err := d.pushImageFromArtifact(a, d.b); err != nil {
|
||||
if err := d.pushImageFromArtifact(a, d.b, false); err != nil {
|
||||
return errors.Wrap(err, "error met while pushing runtime tree")
|
||||
}
|
||||
|
||||
@@ -235,7 +238,7 @@ func (d *dockerRepositoryGenerator) Generate(r *LuetSystemRepository, imagePrefi
|
||||
}
|
||||
// we generate a new archive containing the required compressed file.
|
||||
// TODO: Bundle all the extra files in 1 docker image only, instead of an image for each file
|
||||
if err := d.pushImageFromArtifact(a, d.b); err != nil {
|
||||
if err := d.pushImageFromArtifact(a, d.b, false); err != nil {
|
||||
return errors.Wrap(err, "error met while pushing compiler tree")
|
||||
}
|
||||
|
||||
@@ -251,7 +254,7 @@ func (d *dockerRepositoryGenerator) Generate(r *LuetSystemRepository, imagePrefi
|
||||
return errors.Wrap(err, "failed adding Metadata file to repository")
|
||||
}
|
||||
|
||||
if err := d.pushImageFromArtifact(a, d.b); err != nil {
|
||||
if err := d.pushImageFromArtifact(a, d.b, false); err != nil {
|
||||
return errors.Wrap(err, "error met while pushing docker image from artifact")
|
||||
}
|
||||
|
||||
|
pkg/installer/repository_options.go (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
// Copyright © 2021 Ettore Di Giacinto <mudler@sabayon.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer
|
||||
|
||||
import (
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/config"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
)
|
||||
|
||||
type RepositoryOption func(cfg *RepositoryConfig) error
|
||||
|
||||
type RepositoryConfig struct {
|
||||
Name, Description, Type string
|
||||
Urls []string
|
||||
Priority int
|
||||
Src string
|
||||
Tree []string
|
||||
DB pkg.PackageDatabase
|
||||
CompilerBackend compiler.CompilerBackend
|
||||
ImagePrefix string
|
||||
|
||||
config *config.LuetConfig
|
||||
PushImages, Force, FromRepository, FromMetadata bool
|
||||
}
|
||||
|
||||
// Apply applies the given options to the config, returning the first error
|
||||
// encountered (if any).
|
||||
func (cfg *RepositoryConfig) Apply(opts ...RepositoryOption) error {
|
||||
for _, opt := range opts {
|
||||
if opt == nil {
|
||||
continue
|
||||
}
|
||||
if err := opt(cfg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WithConfig(c *config.LuetConfig) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.config = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithDatabase(b pkg.PackageDatabase) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.DB = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithCompilerBackend(b compiler.CompilerBackend) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.CompilerBackend = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithTree(s ...string) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.Tree = append(cfg.Tree, s...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithUrls(s ...string) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.Urls = append(cfg.Urls, s...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithSource(s string) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.Src = s
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithName(s string) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.Name = s
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithDescription(s string) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.Description = s
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithType(s string) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.Type = s
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithImagePrefix(s string) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.ImagePrefix = s
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithPushImages(b bool) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.PushImages = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithForce(b bool) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.Force = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// FromRepository when enabled
|
||||
// considers packages metadata
|
||||
// from remote repositories when building
|
||||
// the new repository index
|
||||
func FromRepository(b bool) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.FromRepository = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// FromMetadata when enabled
|
||||
// considers packages metadata
|
||||
// when building repository indexes
|
||||
func FromMetadata(b bool) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.FromMetadata = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithPriority(b int) func(cfg *RepositoryConfig) error {
|
||||
return func(cfg *RepositoryConfig) error {
|
||||
cfg.Priority = b
|
||||
return nil
|
||||
}
|
||||
}
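
Taken together, these options make repository construction declarative. A small, self-contained sketch of how they compose on a bare RepositoryConfig; the paths and names below are placeholders, not taken from the repository:

package main

import (
	"fmt"

	installer "github.com/mudler/luet/pkg/installer"
)

func main() {
	// Apply runs each option in order and returns the first error encountered.
	cfg := installer.RepositoryConfig{}
	err := cfg.Apply(
		installer.WithName("test"),
		installer.WithDescription("example repository"),
		installer.WithType("disk"),
		installer.WithUrls("/srv/luet/repo"),   // placeholder path
		installer.WithPriority(1),
		installer.WithSource("/srv/luet/repo"), // placeholder path
		installer.WithTree("/srv/luet/tree"),   // placeholder path
		installer.FromMetadata(true),
	)
	fmt.Println(cfg.Name, cfg.Type, cfg.Urls, err)
}
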
|
@@ -26,28 +26,33 @@ import (
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
backend "github.com/mudler/luet/pkg/compiler/backend"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
|
||||
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
|
||||
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
|
||||
config "github.com/mudler/luet/pkg/config"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/installer"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func dockerStubRepo(tmpdir, tree, image string, push, force bool) (*LuetSystemRepository, error) {
|
||||
return GenerateRepository(
|
||||
"test",
|
||||
"description",
|
||||
"docker",
|
||||
[]string{image},
|
||||
1,
|
||||
tmpdir,
|
||||
[]string{tree},
|
||||
pkg.NewInMemoryDatabase(false), backend.NewSimpleDockerBackend(), image, push, force, false, nil)
|
||||
WithName("test"),
|
||||
WithDescription("description"),
|
||||
WithType("docker"),
|
||||
WithUrls(image),
|
||||
WithPriority(1),
|
||||
WithSource(tmpdir),
|
||||
WithTree(tree),
|
||||
WithDatabase(pkg.NewInMemoryDatabase(false)),
|
||||
WithCompilerBackend(backend.NewSimpleDockerBackend()),
|
||||
WithImagePrefix(image),
|
||||
WithPushImages(push),
|
||||
WithForce(force))
|
||||
}
|
||||
|
||||
var _ = Describe("Repository", func() {
|
||||
@@ -83,34 +88,34 @@ var _ = Describe("Repository", func() {
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Generate repository metadata of files ONLY referenced in a tree", func() {
|
||||
@@ -156,41 +161,41 @@ var _ = Describe("Repository", func() {
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
artifact2, err := compiler2.Compile(false, spec2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact2.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(artifact2.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact2.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec2.Rel("alpine-seed-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec2.Rel("alpine-seed-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("alpine-seed-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("alpine-seed-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := stubRepo(tmpdir, "../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
|
||||
// We check now that the artifact not referenced in the tree
|
||||
// (spec2) is not indexed in the repository
|
||||
@@ -209,6 +214,114 @@ urls:
|
||||
_, err = repos.GetTree().GetDatabase().FindPackage(spec2.GetPackage())
|
||||
Expect(err).To(HaveOccurred()) // should throw error
|
||||
})
|
||||
|
||||
It("Generate repository metadata of files referenced in a tree and from packages", func() {
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
generalRecipe2 := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err = generalRecipe2.Load("../../tests/fixtures/finalizers")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe2.GetDatabase().GetPackages())).To(Equal(1))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler2 := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe2.GetDatabase())
|
||||
spec2, err := compiler2.FromPackage(&pkg.DefaultPackage{Name: "alpine", Category: "seed", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
Expect(spec2.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err = ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
Expect(spec.BuildSteps()).To(Equal([]string{"echo artifact5 > /test5", "echo artifact6 > /test6", "chmod +x generate.sh", "./generate.sh"}))
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
|
||||
artifact, err := compiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
artifact2, err := compiler2.Compile(false, spec2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(fileHelper.Exists(artifact2.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact2.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := fileHelper.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := fileHelper.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
// will contain both
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("alpine-seed-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec2.Rel("alpine-seed-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := GenerateRepository(
|
||||
WithName("test"),
|
||||
WithDescription("description"),
|
||||
WithType("disk"),
|
||||
WithUrls(tmpdir),
|
||||
WithPriority(1),
|
||||
WithSource(tmpdir),
|
||||
FromMetadata(true), // Enabling from metadata makes the package visible
|
||||
WithTree("../../tests/fixtures/buildable"),
|
||||
WithDatabase(pkg.NewInMemoryDatabase(false)),
|
||||
)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir, false, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
|
||||
|
||||
// We check now that the artifact not referenced in the tree
// (spec2) IS indexed in the repository, since FromMetadata is enabled
|
||||
repository, err := NewLuetSystemRepositoryFromYaml([]byte(`
|
||||
name: "test"
|
||||
type: "disk"
|
||||
urls:
|
||||
- "`+tmpdir+`"
|
||||
`), pkg.NewInMemoryDatabase(false))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
repos, err := repository.Sync(true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = repos.GetTree().GetDatabase().FindPackage(spec.GetPackage())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
_, err = repos.GetTree().GetDatabase().FindPackage(spec2.GetPackage())
|
||||
Expect(err).ToNot(HaveOccurred()) // should NOT throw error
|
||||
})
|
||||
})
|
||||
Context("Matching packages", func() {
|
||||
It("Matches packages in different repositories by priority", func() {
|
||||
@@ -267,18 +380,18 @@ urls:
|
||||
|
||||
a, err := localcompiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(a.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(a.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(a.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
repo, err := dockerStubRepo(tmpdir, "../../tests/fixtures/buildable", repoImage, true, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(repoImage, false, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -295,7 +408,7 @@ urls:
|
||||
|
||||
f, err := c.DownloadFile("repository.yaml")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(f)).To(ContainSubstring("name: test"))
|
||||
Expect(fileHelper.Read(f)).To(ContainSubstring("name: test"))
|
||||
|
||||
a, err = c.DownloadArtifact(&artifact.PackageArtifact{
|
||||
Path: "test.tar",
|
||||
@@ -310,7 +423,7 @@ urls:
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(a.Unpack(extracted, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(filepath.Join(extracted, "test6"))).To(Equal("artifact6\n"))
|
||||
Expect(fileHelper.Read(filepath.Join(extracted, "test6"))).To(Equal("artifact6\n"))
|
||||
})
|
||||
|
||||
It("generates images of virtual packages", func() {
|
||||
@@ -341,15 +454,15 @@ urls:
|
||||
|
||||
a, err := localcompiler.Compile(false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(a.Path)).To(BeTrue())
|
||||
Expect(fileHelper.Exists(a.Path)).To(BeTrue())
|
||||
Expect(helpers.Untar(a.Path, tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
repo, err := dockerStubRepo(tmpdir, "../../tests/fixtures/virtuals", repoImage, true, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_SPECFILE))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
|
||||
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(repoImage, false, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -366,7 +479,7 @@ urls:
|
||||
|
||||
f, err := c.DownloadFile("repository.yaml")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Read(f)).To(ContainSubstring("name: test"))
|
||||
Expect(fileHelper.Read(f)).To(ContainSubstring("name: test"))
|
||||
|
||||
a, err = c.DownloadArtifact(&artifact.PackageArtifact{
|
||||
Path: "test.tar",
|
||||
@@ -382,7 +495,7 @@ urls:
|
||||
|
||||
Expect(a.Unpack(extracted, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.DirectoryIsEmpty(extracted)).To(BeFalse())
|
||||
Expect(fileHelper.DirectoryIsEmpty(extracted)).To(BeFalse())
|
||||
content, err := ioutil.ReadFile(filepath.Join(extracted, ".virtual"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
|
@@ -1,16 +1,21 @@
|
||||
package installer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
)
|
||||
|
||||
type System struct {
|
||||
Database pkg.PackageDatabase
|
||||
Target string
|
||||
Database pkg.PackageDatabase
|
||||
Target string
|
||||
fileIndex map[string]pkg.Package
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func (s *System) World() (pkg.Packages, error) {
|
||||
@@ -23,8 +28,8 @@ func (s *System) ExecuteFinalizers(packs []pkg.Package) error {
|
||||
var errs error
|
||||
executedFinalizer := map[string]bool{}
|
||||
for _, p := range packs {
|
||||
if helpers.Exists(p.Rel(tree.FinalizerFile)) {
|
||||
out, err := helpers.RenderFiles(p.Rel(tree.FinalizerFile), p.Rel(tree.DefinitionFile))
|
||||
if fileHelper.Exists(p.Rel(tree.FinalizerFile)) {
|
||||
out, err := helpers.RenderFiles(helpers.ChartFile(p.Rel(tree.FinalizerFile)), p.Rel(pkg.PackageDefinitionFile))
|
||||
if err != nil {
|
||||
Warning("Failed rendering finalizer for ", p.HumanReadableString(), err.Error())
|
||||
errs = multierror.Append(errs, err)
|
||||
@@ -51,3 +56,38 @@ func (s *System) ExecuteFinalizers(packs []pkg.Package) error {
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
func (s *System) buildFileIndex() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
// Check if cache is empty or if it got modified
|
||||
if s.fileIndex == nil { //|| len(s.Database.GetPackages()) != len(s.fileIndex) {
|
||||
s.fileIndex = make(map[string]pkg.Package)
|
||||
for _, p := range s.Database.World() {
|
||||
files, _ := s.Database.GetPackageFiles(p)
|
||||
for _, f := range files {
|
||||
s.fileIndex[f] = p
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *System) Clean() {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.fileIndex = nil
|
||||
}
|
||||
|
||||
func (s *System) ExistsPackageFile(file string) (bool, pkg.Package, error) {
|
||||
Debug("Checking if file ", file, "belongs to any package")
|
||||
s.buildFileIndex()
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if p, exists := s.fileIndex[file]; exists {
|
||||
Debug(file, "belongs already to", p.HumanReadableString())
|
||||
|
||||
return exists, p, nil
|
||||
}
|
||||
Debug(file, "doesn't belong to any package")
|
||||
return false, nil, nil
|
||||
}
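
Because the file index is built lazily and only while fileIndex is nil, callers that mutate the database afterwards should drop the cache with Clean() before querying again. A short usage sketch; the queried path is illustrative:

package main

import (
	"fmt"

	installer "github.com/mudler/luet/pkg/installer"
	pkg "github.com/mudler/luet/pkg/package"
)

func main() {
	db := pkg.NewInMemoryDatabase(false)
	s := &installer.System{Database: db}

	// The first query builds the file index from the package database.
	owned, owner, err := s.ExistsPackageFile("/usr/bin/foo") // illustrative path
	fmt.Println(owned, owner, err)

	// After packages are added or removed, drop the cached index so the next
	// query rebuilds it instead of answering from stale data.
	s.Clean()
}
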
|
||||
|
pkg/installer/system_test.go (new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer_test
|
||||
|
||||
import (
|
||||
|
||||
// . "github.com/mudler/luet/pkg/installer"
|
||||
|
||||
. "github.com/mudler/luet/pkg/installer"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("System", func() {
|
||||
Context("Files", func() {
|
||||
var s *System
|
||||
var db pkg.PackageDatabase
|
||||
var a, b *pkg.DefaultPackage
|
||||
|
||||
BeforeEach(func() {
|
||||
db = pkg.NewInMemoryDatabase(false)
|
||||
s = &System{Database: db}
|
||||
|
||||
a = &pkg.DefaultPackage{Name: "test", Version: "1", Category: "t"}
|
||||
|
||||
db.CreatePackage(a)
|
||||
db.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: a.GetFingerPrint(), Files: []string{"foo", "f"}})
|
||||
|
||||
b = &pkg.DefaultPackage{Name: "test2", Version: "1", Category: "t"}
|
||||
|
||||
db.CreatePackage(b)
|
||||
db.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: b.GetFingerPrint(), Files: []string{"barz", "f"}})
|
||||
})
|
||||
|
||||
It("detects when are already shipped by other packages", func() {
|
||||
r, p, err := s.ExistsPackageFile("foo")
|
||||
Expect(r).To(BeTrue())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(p).To(Equal(a))
|
||||
r, p, err = s.ExistsPackageFile("baz")
|
||||
Expect(r).To(BeFalse())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(p).To(BeNil())
|
||||
|
||||
r, p, err = s.ExistsPackageFile("f")
|
||||
Expect(r).To(BeTrue())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(p).To(Equal(b))
|
||||
r, p, err = s.ExistsPackageFile("barz")
|
||||
Expect(r).To(BeTrue())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(p).To(Equal(b))
|
||||
})
|
||||
})
|
||||
})
|
@@ -143,6 +143,7 @@ func (db *InMemoryDatabase) getRevdeps(p Package, visited map[string]interface{}
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
|
||||
for _, pp := range packs {
|
||||
// db.Lock()
|
||||
list := db.RevDepsDatabase[pp.GetFingerPrint()]
|
||||
@@ -195,6 +196,14 @@ func (db *InMemoryDatabase) CreatePackage(p Package) (string, error) {
|
||||
return ID, nil
|
||||
}
|
||||
|
||||
func (db *InMemoryDatabase) updateRevDep(k, v string, b Package) {
|
||||
_, ok := db.RevDepsDatabase[k]
|
||||
if !ok {
|
||||
db.RevDepsDatabase[k] = make(map[string]Package)
|
||||
}
|
||||
db.RevDepsDatabase[k][v] = b.Clone()
|
||||
}
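
updateRevDep factors out the recurring "allocate the inner map on first use, then set" idiom that populateCaches previously repeated inline. The same pattern in isolation, with generic names that are not luet APIs:

package main

import "fmt"

// setNested stores v under outer[k1][k2], allocating the inner map on first use.
func setNested(outer map[string]map[string]string, k1, k2, v string) {
	if _, ok := outer[k1]; !ok {
		outer[k1] = map[string]string{}
	}
	outer[k1][k2] = v
}

func main() {
	revdeps := map[string]map[string]string{}
	setNested(revdeps, "a-test-1.0", "b-test-1.0", "b")
	fmt.Println(revdeps) // map[a-test-1.0:map[b-test-1.0:b]]
}
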
|
||||
|
||||
func (db *InMemoryDatabase) populateCaches(p Package) {
|
||||
pd, _ := p.(*DefaultPackage)
|
||||
|
||||
@@ -226,20 +235,19 @@ func (db *InMemoryDatabase) populateCaches(p Package) {
|
||||
}
|
||||
db.CacheNoVersion[p.GetPackageName()][p.GetVersion()] = nil
|
||||
|
||||
db.Unlock()
|
||||
|
||||
// Updating Revdeps
|
||||
// Given that when we populate the cache we don't have the full db at hand
|
||||
// We cycle over reverse dependency of a package to update their entry if they are matching
|
||||
// the version selector
|
||||
db.Lock()
|
||||
toUpdate, ok := db.RevDepsDatabase[pd.GetPackageName()]
|
||||
if ok {
|
||||
for _, pp := range toUpdate {
|
||||
for _, re := range pp.GetRequires() {
|
||||
if match, _ := pd.VersionMatchSelector(re.GetVersion(), nil); match {
|
||||
_, ok = db.RevDepsDatabase[pd.GetFingerPrint()]
|
||||
if !ok {
|
||||
db.RevDepsDatabase[pd.GetFingerPrint()] = make(map[string]Package)
|
||||
}
|
||||
db.RevDepsDatabase[pd.GetFingerPrint()][pp.GetFingerPrint()] = pp
|
||||
db.updateRevDep(pd.GetFingerPrint(), pp.GetFingerPrint(), pp)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -249,30 +257,12 @@ func (db *InMemoryDatabase) populateCaches(p Package) {
|
||||
for _, re := range pd.GetRequires() {
|
||||
packages, _ := db.FindPackages(re)
|
||||
db.Lock()
|
||||
|
||||
for _, pa := range packages {
|
||||
_, ok := db.RevDepsDatabase[pa.GetFingerPrint()]
|
||||
if !ok {
|
||||
db.RevDepsDatabase[pa.GetFingerPrint()] = make(map[string]Package)
|
||||
}
|
||||
db.RevDepsDatabase[pa.GetFingerPrint()][pd.GetFingerPrint()] = pd
|
||||
_, ok = db.RevDepsDatabase[pa.GetPackageName()]
|
||||
if !ok {
|
||||
db.RevDepsDatabase[pa.GetPackageName()] = make(map[string]Package)
|
||||
}
|
||||
db.RevDepsDatabase[pa.GetPackageName()][pd.GetPackageName()] = pd
|
||||
db.updateRevDep(pa.GetFingerPrint(), pd.GetFingerPrint(), pd)
|
||||
db.updateRevDep(pa.GetPackageName(), pd.GetPackageName(), pd)
|
||||
}
|
||||
_, ok := db.RevDepsDatabase[re.GetFingerPrint()]
|
||||
if !ok {
|
||||
db.RevDepsDatabase[re.GetFingerPrint()] = make(map[string]Package)
|
||||
}
|
||||
db.RevDepsDatabase[re.GetFingerPrint()][pd.GetFingerPrint()] = pd
|
||||
_, ok = db.RevDepsDatabase[re.GetPackageName()]
|
||||
if !ok {
|
||||
db.RevDepsDatabase[re.GetPackageName()] = make(map[string]Package)
|
||||
}
|
||||
db.RevDepsDatabase[re.GetPackageName()][pd.GetPackageName()] = pd
|
||||
|
||||
db.updateRevDep(re.GetFingerPrint(), pd.GetFingerPrint(), pd)
|
||||
db.updateRevDep(re.GetPackageName(), pd.GetPackageName(), pd)
|
||||
db.Unlock()
|
||||
}
|
||||
}
|
||||
|
@@ -21,12 +21,16 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
fileHelper "github.com/mudler/luet/pkg/helpers/file"
|
||||
|
||||
"github.com/mudler/luet/pkg/helpers/docker"
|
||||
"github.com/mudler/luet/pkg/helpers/match"
|
||||
version "github.com/mudler/luet/pkg/versioner"
|
||||
|
||||
gentoo "github.com/Sabayon/pkgs-checker/pkg/gentoo"
|
||||
@@ -118,9 +122,17 @@ type Package interface {
|
||||
SetTreeDir(s string)
|
||||
GetTreeDir() string
|
||||
|
||||
Mark() Package
|
||||
|
||||
JSON() ([]byte, error)
|
||||
}
|
||||
|
||||
const (
|
||||
PackageMetaSuffix = "metadata.yaml"
|
||||
PackageCollectionFile = "collection.yaml"
|
||||
PackageDefinitionFile = "definition.yaml"
|
||||
)
|
||||
|
||||
type Tree interface {
|
||||
GetPackageSet() PackageDatabase
|
||||
Prelude() string // A tree might have a prelude to be able to consume a tree
|
||||
@@ -131,6 +143,31 @@ type Tree interface {
|
||||
|
||||
type Packages []Package
|
||||
|
||||
type DefaultPackages []*DefaultPackage
|
||||
|
||||
type PackageMap map[string]Package
|
||||
|
||||
func (pm PackageMap) String() string {
|
||||
rr := []string{}
|
||||
for _, r := range pm {
|
||||
|
||||
rr = append(rr, r.HumanReadableString())
|
||||
|
||||
}
|
||||
return fmt.Sprint(rr)
|
||||
}
|
||||
|
||||
func (d DefaultPackages) Hash(salt string) string {
|
||||
|
||||
overallFp := ""
|
||||
for _, c := range d {
|
||||
overallFp = overallFp + c.HashFingerprint("join")
|
||||
}
|
||||
h := md5.New()
|
||||
io.WriteString(h, fmt.Sprintf("%s-%s", overallFp, salt))
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
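
Hash concatenates the salted fingerprints of every package in the slice and md5-sums the result, so the same set of packages with the same salt always yields the same 32-character hex digest. A small illustrative call; the package fields are made up:

package main

import (
	"fmt"

	pkg "github.com/mudler/luet/pkg/package"
)

func main() {
	ps := pkg.DefaultPackages{
		{Name: "a", Category: "test", Version: "1.0"},
		{Name: "b", Category: "test", Version: "1.0"},
	}
	// Deterministic for a given package set and salt; changes if either changes.
	fmt.Println(ps.Hash("build-salt"))
}
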
|
||||
|
||||
// >> Unmarshallers
|
||||
// DefaultPackageFromYaml decodes a package from yaml bytes
|
||||
func DefaultPackageFromYaml(yml []byte) (DefaultPackage, error) {
|
||||
@@ -185,11 +222,14 @@ func GetRawPackages(yml []byte) (rawPackages, error) {
|
||||
return rawPackages.Packages, nil
|
||||
|
||||
}
|
||||
func DefaultPackagesFromYaml(yml []byte) ([]DefaultPackage, error) {
|
||||
|
||||
var unescaped struct {
|
||||
Packages []DefaultPackage `json:"packages"`
|
||||
}
|
||||
type Collection struct {
|
||||
Packages []DefaultPackage `json:"packages"`
|
||||
}
|
||||
|
||||
func DefaultPackagesFromYAML(yml []byte) ([]DefaultPackage, error) {
|
||||
|
||||
var unescaped Collection
|
||||
source, err := yaml.YAMLToJSON(yml)
|
||||
if err != nil {
|
||||
return []DefaultPackage{}, err
|
||||
@@ -218,7 +258,7 @@ func (t *DefaultPackage) JSON() ([]byte, error) {
|
||||
|
||||
// GetMetadataFilePath returns the canonical name of an artifact metadata file
|
||||
func (d *DefaultPackage) GetMetadataFilePath() string {
|
||||
return d.GetFingerPrint() + ".metadata.yaml"
|
||||
return fmt.Sprintf("%s.%s", d.GetFingerPrint(), PackageMetaSuffix)
|
||||
}
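
With PackageMetaSuffix set to "metadata.yaml", the metadata path is simply the package fingerprint plus that suffix, which is how names like b-test-1.0.metadata.yaml in the tests above are produced:

package main

import (
	"fmt"

	pkg "github.com/mudler/luet/pkg/package"
)

func main() {
	p := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
	fmt.Println(p.GetMetadataFilePath()) // b-test-1.0.metadata.yaml
}
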
|
||||
|
||||
// DefaultPackage represent a standard package definition
|
||||
@@ -309,7 +349,7 @@ func (p *DefaultPackage) GetPackageName() string {
|
||||
}
|
||||
|
||||
func (p *DefaultPackage) ImageID() string {
|
||||
return helpers.StripInvalidStringsFromImage(p.GetFingerPrint())
|
||||
return docker.StripInvalidStringsFromImage(p.GetFingerPrint())
|
||||
}
|
||||
|
||||
// GetBuildTimestamp returns the package build timestamp
|
||||
@@ -344,19 +384,23 @@ func (p *DefaultPackage) IsHidden() bool {
}

func (p *DefaultPackage) HasLabel(label string) bool {
    return helpers.MapHasKey(&p.Labels, label)
    return match.MapHasKey(&p.Labels, label)
}

func (p *DefaultPackage) MatchLabel(r *regexp.Regexp) bool {
    return helpers.MapMatchRegex(&p.Labels, r)
    return match.MapMatchRegex(&p.Labels, r)
}

func (p DefaultPackage) IsCollection() bool {
    return fileHelper.Exists(filepath.Join(p.Path, PackageCollectionFile))
}

func (p *DefaultPackage) HasAnnotation(label string) bool {
    return helpers.MapHasKey(&p.Annotations, label)
    return match.MapHasKey(&p.Annotations, label)
}

func (p *DefaultPackage) MatchAnnotation(r *regexp.Regexp) bool {
    return helpers.MapMatchRegex(&p.Annotations, r)
    return match.MapMatchRegex(&p.Annotations, r)
}

// AddUse adds a use to a package
@@ -492,6 +536,12 @@ func (p *DefaultPackage) Matches(m Package) bool {
    return false
}

func (p *DefaultPackage) Mark() Package {
    marked := p.Clone()
    marked.SetName("@@" + marked.GetName())
    return marked
}

func (p *DefaultPackage) Expand(definitiondb PackageDatabase) (Packages, error) {
    var versionsInWorld Packages

@@ -667,6 +717,39 @@ func (set Packages) Unique() Packages {
    return result
}

func (p *DefaultPackage) GetRuntimePackage() (*DefaultPackage, error) {
    var r *DefaultPackage
    if p.IsCollection() {
        collectionFile := filepath.Join(p.Path, PackageCollectionFile)
        dat, err := ioutil.ReadFile(collectionFile)
        if err != nil {
            return r, errors.Wrapf(err, "failed while reading '%s'", collectionFile)
        }
        coll, err := DefaultPackagesFromYAML(dat)
        if err != nil {
            return r, errors.Wrapf(err, "failed while parsing YAML '%s'", collectionFile)
        }
        for _, c := range coll {
            if c.Matches(p) {
                r = &c
                break
            }
        }
    } else {
        definitionFile := filepath.Join(p.Path, PackageDefinitionFile)
        dat, err := ioutil.ReadFile(definitionFile)
        if err != nil {
            return r, errors.Wrapf(err, "failed while reading '%s'", definitionFile)
        }
        d, err := DefaultPackageFromYaml(dat)
        if err != nil {
            return r, errors.Wrapf(err, "failed while parsing YAML '%s'", definitionFile)
        }
        r = &d
    }
    return r, nil
}

func (pack *DefaultPackage) buildFormula(definitiondb PackageDatabase, db PackageDatabase, visited map[string]interface{}) ([]bf.Formula, error) {
    if _, ok := visited[pack.HumanReadableString()]; ok {
        return nil, nil
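The new `GetRuntimePackage` above picks its source file based on the package layout: a `collection.yaml` that lists several packages, or a single `definition.yaml`. A rough sketch of that decision, using only helpers and constants that appear in this diff (the fixture path in `main` is just an example):

```go
package main

import (
	"fmt"
	"path/filepath"

	fileHelper "github.com/mudler/luet/pkg/helpers/file"
	pkg "github.com/mudler/luet/pkg/package"
)

// describeLayout reports which file would drive GetRuntimePackage for a
// package rooted at path; it mirrors the branch taken in the function above.
func describeLayout(path string) string {
	if fileHelper.Exists(filepath.Join(path, pkg.PackageCollectionFile)) {
		// collection.yaml: parse all entries and keep the one that Matches(p)
		return "collection"
	}
	// definition.yaml: a single package definition
	return "definition"
}

func main() {
	fmt.Println(describeLayout("tests/fixtures/fileconflicts/conflict1"))
}
```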
@@ -260,24 +260,42 @@ func (a PackagesAssertions) TrueLen() int {
// and checks it's not the only one. if it's unique it marks it specially - so the hash
// which is generated is unique for the selected package
func (assertions PackagesAssertions) HashFrom(p pkg.Package) string {
    return assertions.SaltedHashFrom(p, map[string]string{})
}

func (assertions PackagesAssertions) AssertionHash() string {
    return assertions.SaltedAssertionHash(map[string]string{})
}

func (assertions PackagesAssertions) SaltedHashFrom(p pkg.Package, salts map[string]string) string {
    var assertionhash string

    // When we don't have any solution to hash for, we need to generate an UUID by ourselves
    latestsolution := assertions.Drop(p)
    if latestsolution.TrueLen() == 0 {
        assertionhash = assertions.Mark(p).AssertionHash()
        // Preserve the hash if supplied of marked packages
        marked := p.Mark()
        if markedHash, exists := salts[p.GetFingerPrint()]; exists {
            salts[marked.GetFingerPrint()] = markedHash
        }
        assertionhash = assertions.Mark(p).SaltedAssertionHash(salts)
    } else {
        assertionhash = latestsolution.AssertionHash()
        assertionhash = latestsolution.SaltedAssertionHash(salts)
    }
    return assertionhash
}

func (assertions PackagesAssertions) AssertionHash() string {
func (assertions PackagesAssertions) SaltedAssertionHash(salts map[string]string) string {
    var fingerprint string
    for _, assertion := range assertions { // Note: Always order them first!
        if assertion.Value { // Tke into account only dependencies installed (get fingerprint of subgraph)
            fingerprint += assertion.ToString() + "\n"
            salt, exists := salts[assertion.Package.GetFingerPrint()]
            if exists {
                fingerprint += assertion.ToString() + salt + "\n"

            } else {
                fingerprint += assertion.ToString() + "\n"
            }
        }
    }
    hash := sha256.Sum256([]byte(fingerprint))
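The salted variants above only change the string that ends up being hashed: for every assertion whose package has an entry in the salts map, the salt is appended to the assertion's string form before the sha256 is taken. A minimal sketch of that idea, with plain strings standing in for assertions:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// saltedHash mirrors SaltedAssertionHash: build a fingerprint string from the
// (ordered) assertion strings, appending a salt where one is supplied.
func saltedHash(assertions []string, salts map[string]string) string {
	fingerprint := ""
	for _, a := range assertions {
		if salt, ok := salts[a]; ok {
			fingerprint += a + salt + "\n"
		} else {
			fingerprint += a + "\n"
		}
	}
	hash := sha256.Sum256([]byte(fingerprint))
	return fmt.Sprintf("%x", hash)
}

func main() {
	solution := []string{"A-1.0", "B-2.0"}
	// Salting a single package changes the overall hash.
	fmt.Println(saltedHash(solution, nil))
	fmt.Println(saltedHash(solution, map[string]string{"A-1.0": "buildhash"}))
}
```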
@@ -316,8 +334,7 @@ func (assertions PackagesAssertions) Mark(p pkg.Package) PackagesAssertions {

    for _, a := range assertions {
        if a.Package.Matches(p) {
            marked := a.Package.Clone()
            marked.SetName("@@" + marked.GetName())
            marked := a.Package.Mark()
            a = PackageAssert{Package: marked.(*pkg.DefaultPackage), Value: a.Value, Hash: a.Hash}
        }
        ass = append(ass, a)
@@ -382,6 +382,9 @@ var _ = Describe("Decoder", func() {

    Expect(solution.HashFrom(X)).ToNot(Equal(solution2.HashFrom(F)))
    Expect(solution3.HashFrom(D)).To(Equal(solution.HashFrom(X)))
    Expect(solution3.SaltedHashFrom(D, map[string]string{D.GetFingerPrint(): "foo"})).ToNot(Equal(solution3.HashFrom(D)))

    Expect(solution4.SaltedHashFrom(Y, map[string]string{X.GetFingerPrint(): "foo"})).ToNot(Equal(solution4.HashFrom(Y)))

    Expect(empty.AssertionHash()).ToNot(Equal(solution3.HashFrom(D)))
    Expect(empty.AssertionHash()).ToNot(Equal(solution2.HashFrom(F)))
@@ -1,4 +1,4 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
// Copyright © 2020-2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
@@ -16,11 +16,16 @@
package solver

import (
    "bufio"
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "strconv"
    "strings"

    "github.com/crillab/gophersat/bf"
    "github.com/crillab/gophersat/explain"
    "github.com/mudler/luet/pkg/helpers"
    "gopkg.in/yaml.v2"

@@ -51,18 +56,115 @@ const (
    QLearningResolverType = "qlearning"
)

//. "github.com/mudler/luet/pkg/logger"

// PackageResolver assists PackageSolver on unsat cases
type PackageResolver interface {
    Solve(bf.Formula, PackageSolver) (PackagesAssertions, error)
}

type DummyPackageResolver struct {
type Explainer struct{}

func decodeDimacs(vars map[string]string, dimacs string) (string, error) {
    res := ""
    sc := bufio.NewScanner(bytes.NewBufferString(dimacs))
    lines := strings.Split(dimacs, "\n")
    linenum := 1
SCAN:
    for sc.Scan() {

        line := sc.Text()
        fields := strings.Fields(line)
        if len(fields) == 0 {
            continue
        }
        switch fields[0] {
        case "p":
            continue SCAN
        default:
            for i := 0; i < len(fields)-1; i++ {
                v := fields[i]
                negative := false
                if strings.HasPrefix(fields[i], "-") {
                    v = strings.TrimLeft(fields[i], "-")
                    negative = true
                }
                variable := vars[v]
                if negative {
                    res += fmt.Sprintf("!(%s)", variable)
                } else {
                    res += variable
                }

                if i != len(fields)-2 {
                    res += fmt.Sprintf(" or ")
                }
            }
            if linenum != len(lines)-1 {
                res += fmt.Sprintf(" and \n")
            }
        }
        linenum++
    }
    if err := sc.Err(); err != nil {
        return res, fmt.Errorf("could not parse problem: %v", err)
    }
    return res, nil
}

func (*DummyPackageResolver) Solve(bf.Formula, PackageSolver) (PackagesAssertions, error) {
    return nil, errors.New("Could not satisfy the constraints. Try again by removing deps ")
func parseVars(r io.Reader) (map[string]string, error) {
    sc := bufio.NewScanner(r)
    res := map[string]string{}
    for sc.Scan() {
        line := sc.Text()
        fields := strings.Fields(line)
        if len(fields) == 0 {
            continue
        }
        switch fields[0] {
        case "c":
            data := strings.Split(fields[1], "=")
            res[data[1]] = data[0]

        default:
            continue

        }
    }
    if err := sc.Err(); err != nil {
        return nil, fmt.Errorf("could not parse problem: %v", err)
    }
    return res, nil
}

// Solve tries to find the MUS (minimum unsat) formula from the original problem.
// it returns an error with the decoded dimacs
func (*Explainer) Solve(f bf.Formula, s PackageSolver) (PackagesAssertions, error) {
    buf := bytes.NewBufferString("")
    if err := bf.Dimacs(f, buf); err != nil {
        return nil, errors.Wrap(err, "cannot extract dimacs from formula")
    }

    copy := *buf

    pb, err := explain.ParseCNF(&copy)
    if err != nil {
        return nil, errors.Wrap(err, "could not parse problem")
    }
    pb2, err := pb.MUS()
    if err != nil {
        return nil, errors.Wrap(err, "could not extract subset")
    }

    variables, err := parseVars(buf)
    if err != nil {
        return nil, errors.Wrap(err, "could not parse variables")
    }

    res, err := decodeDimacs(variables, pb2.CNF())
    if err != nil {
        return nil, errors.Wrap(err, "could not parse dimacs")
    }

    return nil, fmt.Errorf("could not satisfy the constraints: \n%s", res)
}

type QLearningResolver struct {
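To make the MUS output readable, the code above expects the package names to be carried as comments in the DIMACS dump: `parseVars` reads lines of the form `c <name>=<number>` into a number-to-name map, and `decodeDimacs` then rewrites each clause with those names (negated literals become `!(...)`, clauses are joined with `or`/`and`), which is the error text the resolver test below asserts on. A sketch of the input/output shape (the literal DIMACS text here is made up for illustration):

```go
package main

import "fmt"

func main() {
	// What parseVars consumes: comment lines mapping variable numbers to
	// package strings, followed by the CNF clauses.
	dimacs := `c A--=1
c B--=2
p cnf 2 2
1 0
-1 2 0
`
	fmt.Print(dimacs)

	// Roughly what decodeDimacs would produce from the clauses above once
	// the comment mapping is applied:
	fmt.Println("A-- and")
	fmt.Println("!(A--) or B--")
}
```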
@@ -103,8 +205,8 @@ func (resolver *QLearningResolver) Solve(f bf.Formula, s PackageSolver) (Package
    // Info("Using QLearning solver to resolve conflicts. Please be patient.")
    resolver.Solver = s

    s.SetResolver(&DummyPackageResolver{}) // Set dummy. Otherwise the attempts will run again a QLearning instance.
    defer s.SetResolver(resolver)          // Set back ourselves as resolver
    s.SetResolver(&Explainer{}) // Set dummy. Otherwise the attempts will run again a QLearning instance.
    defer s.SetResolver(resolver) // Set back ourselves as resolver

    resolver.Formula = f

@@ -38,7 +38,7 @@ var _ = Describe("Resolver", func() {
    })

    Context("Conflict set", func() {
        Context("DummyPackageResolver", func() {
        Context("Explainer", func() {
            It("is unsolvable - as we something we ask to install conflict with system stuff", func() {
                C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
                B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{C})
@@ -152,7 +152,7 @@ var _ = Describe("Resolver", func() {
        })
    })

    Context("DummyPackageResolver", func() {
    Context("Explainer", func() {
        It("cannot find a solution", func() {
            C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
            B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{C})
@@ -171,6 +171,11 @@ var _ = Describe("Resolver", func() {

            solution, err := s.Install([]pkg.Package{A, D})
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(Equal(`could not satisfy the constraints:
A-- and
C-- and
!(A--) or B-- and
!(B--) or !(C--)`))

            Expect(len(solution)).To(Equal(0))
        })
@@ -64,14 +64,14 @@ type Solver struct {
}

type Options struct {
    Type        SolverType
    Concurrency int
    Type        SolverType `yaml:"type,omitempty"`
    Concurrency int        `yaml:"concurrency,omitempty"`
}

// NewSolver accepts as argument two lists of packages, the first is the initial set,
// the second represent all the known packages.
func NewSolver(t Options, installed pkg.PackageDatabase, definitiondb pkg.PackageDatabase, solverdb pkg.PackageDatabase) PackageSolver {
    return NewResolver(t, installed, definitiondb, solverdb, &DummyPackageResolver{})
    return NewResolver(t, installed, definitiondb, solverdb, &Explainer{})
}

// NewResolver accepts as argument two lists of packages, the first is the initial set,
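With the new struct tags, solver Options can be serialized to and from YAML configuration. A small sketch using a local mirror of the struct (only the tag names come from the diff above; the local type and the example value are stand-ins):

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// options mirrors solver.Options with the yaml tags introduced above.
type options struct {
	Type        string `yaml:"type,omitempty"`
	Concurrency int    `yaml:"concurrency,omitempty"`
}

func main() {
	out, err := yaml.Marshal(options{Type: "qlearning", Concurrency: 2})
	if err != nil {
		panic(err)
	}
	// Prints:
	// type: qlearning
	// concurrency: 2
	fmt.Print(string(out))
}
```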
@@ -26,6 +26,7 @@ import (
    "path/filepath"

    "github.com/mudler/luet/pkg/helpers"
    fileHelper "github.com/mudler/luet/pkg/helpers/file"
    pkg "github.com/mudler/luet/pkg/package"
    "github.com/pkg/errors"
)
@@ -61,7 +62,7 @@ type CompilerRecipe struct {
// and the build context required for reproducible builds
func (r *CompilerRecipe) Save(path string) error {
    for _, p := range r.SourcePath {
        if err := helpers.CopyDir(p, filepath.Join(path, filepath.Base(p))); err != nil {
        if err := fileHelper.CopyDir(p, filepath.Join(path, filepath.Base(p))); err != nil {
            return errors.Wrap(err, "while copying source tree")
        }
    }
@@ -75,6 +76,10 @@ func (r *CompilerRecipe) Load(path string) error {
    //if err != nil {
    //  return err
    //}
    c, err := helpers.ChartFiles([]string{filepath.Join(path, "templates")})
    if err != nil {
        return err
    }

    //r.Tree().SetPackageSet(pkg.NewBoltDatabase(tmpfile.Name()))
    // TODO: Handle cleaning after? Cleanup implemented in GetPackageSet().Clean()
@@ -85,12 +90,12 @@ func (r *CompilerRecipe) Load(path string) error {
            return errors.Wrap(err, "Error on walk path "+currentpath)
        }

        if info.Name() != DefinitionFile && info.Name() != CollectionFile {
        if info.Name() != pkg.PackageDefinitionFile && info.Name() != pkg.PackageCollectionFile {
            return nil // Skip with no errors
        }

        switch info.Name() {
        case DefinitionFile:
        case pkg.PackageDefinitionFile:

            pack, err := ReadDefinitionFile(currentpath)
            if err != nil {
@@ -102,9 +107,8 @@ func (r *CompilerRecipe) Load(path string) error {

            // Instead of rdeps, have a different tree for build deps.
            compileDefPath := pack.Rel(CompilerDefinitionFile)
            if helpers.Exists(compileDefPath) {

                dat, err := helpers.RenderFiles(compileDefPath, currentpath)
            if fileHelper.Exists(compileDefPath) {
                dat, err := helpers.RenderFiles(append(c, helpers.ChartFile(compileDefPath)...), currentpath)
                if err != nil {
                    return errors.Wrap(err,
                        "Error templating file "+CompilerDefinitionFile+" from "+
@@ -126,14 +130,14 @@ func (r *CompilerRecipe) Load(path string) error {
                return errors.Wrap(err, "Error creating package "+pack.GetName())
            }

        case CollectionFile:
        case pkg.PackageCollectionFile:

            dat, err := ioutil.ReadFile(currentpath)
            if err != nil {
                return errors.Wrap(err, "Error reading file "+currentpath)
            }

            packs, err := pkg.DefaultPackagesFromYaml(dat)
            packs, err := pkg.DefaultPackagesFromYAML(dat)
            if err != nil {
                return errors.Wrap(err, "Error reading yaml "+currentpath)
            }
@@ -149,14 +153,14 @@ func (r *CompilerRecipe) Load(path string) error {

            // Instead of rdeps, have a different tree for build deps.
            compileDefPath := pack.Rel(CompilerDefinitionFile)
            if helpers.Exists(compileDefPath) {
            if fileHelper.Exists(compileDefPath) {

                raw := packsRaw.Find(pack.GetName(), pack.GetCategory(), pack.GetVersion())
                buildyaml, err := ioutil.ReadFile(compileDefPath)
                if err != nil {
                    return errors.Wrap(err, "Error reading file "+currentpath)
                }
                dat, err := helpers.RenderHelm(string(buildyaml), raw, map[string]interface{}{})
                dat, err := helpers.RenderHelm(append(c, helpers.ChartFileB(buildyaml)...), raw, map[string]interface{}{})
                if err != nil {
                    return errors.Wrap(err,
                        "Error templating file "+CompilerDefinitionFile+" from "+
@@ -182,7 +186,7 @@ func (r *CompilerRecipe) Load(path string) error {
        return nil
    }

    err := filepath.Walk(path, ff)
    err = filepath.Walk(path, ff)
    if err != nil {
        return err
    }
@@ -26,8 +26,9 @@ import (
    "os"
    "path/filepath"

    "github.com/mudler/luet/pkg/helpers"
    fileHelper "github.com/mudler/luet/pkg/helpers/file"
    pkg "github.com/mudler/luet/pkg/package"

    "github.com/pkg/errors"
)

@@ -55,14 +56,14 @@ func (r *InstallerRecipe) Save(path string) error {
        if err != nil {
            return err
        }
        err = ioutil.WriteFile(filepath.Join(dir, DefinitionFile), data, 0644)
        err = ioutil.WriteFile(filepath.Join(dir, pkg.PackageDefinitionFile), data, 0644)
        if err != nil {
            return err
        }
        // Instead of rdeps, have a different tree for build deps.
        finalizerPath := p.Rel(FinalizerFile)
        if helpers.Exists(finalizerPath) { // copy finalizer file from the source tree
            helpers.CopyFile(finalizerPath, filepath.Join(dir, FinalizerFile))
        if fileHelper.Exists(finalizerPath) { // copy finalizer file from the source tree
            fileHelper.CopyFile(finalizerPath, filepath.Join(dir, FinalizerFile))
        }

    }
@@ -71,7 +72,7 @@ func (r *InstallerRecipe) Save(path string) error {

func (r *InstallerRecipe) Load(path string) error {

    if !helpers.Exists(path) {
    if !fileHelper.Exists(path) {
        return errors.New(fmt.Sprintf(
            "Path %s doesn't exit.", path,
        ))
@@ -85,7 +86,7 @@ func (r *InstallerRecipe) Load(path string) error {
    // the function that handles each file or dir
    var ff = func(currentpath string, info os.FileInfo, err error) error {

        if info.Name() != DefinitionFile && info.Name() != CollectionFile {
        if info.Name() != pkg.PackageDefinitionFile && info.Name() != pkg.PackageCollectionFile {
            return nil // Skip with no errors
        }

@@ -95,7 +96,7 @@ func (r *InstallerRecipe) Load(path string) error {
        }

        switch info.Name() {
        case DefinitionFile:
        case pkg.PackageDefinitionFile:
            pack, err := pkg.DefaultPackageFromYaml(dat)
            if err != nil {
                return errors.Wrap(err, "Error reading yaml "+currentpath)
@@ -108,8 +109,8 @@ func (r *InstallerRecipe) Load(path string) error {
                return errors.Wrap(err, "Error creating package "+pack.GetName())
            }

        case CollectionFile:
            packs, err := pkg.DefaultPackagesFromYaml(dat)
        case pkg.PackageCollectionFile:
            packs, err := pkg.DefaultPackagesFromYAML(dat)
            if err != nil {
                return errors.Wrap(err, "Error reading yaml "+currentpath)
            }
@@ -26,15 +26,11 @@ import (
    "os"
    "path/filepath"

    helpers "github.com/mudler/luet/pkg/helpers"
    fileHelper "github.com/mudler/luet/pkg/helpers/file"
    pkg "github.com/mudler/luet/pkg/package"
    spectooling "github.com/mudler/luet/pkg/spectooling"
    "github.com/pkg/errors"
)

const (
    DefinitionFile = "definition.yaml"
    CollectionFile = "collection.yaml"
    "github.com/pkg/errors"
)

func NewGeneralRecipe(db pkg.PackageDatabase) Builder { return &Recipe{Database: db} }
@@ -63,7 +59,7 @@ func (r *Recipe) Save(path string) error {
        dir := filepath.Join(path, p.GetCategory(), p.GetName(), p.GetVersion())
        os.MkdirAll(dir, os.ModePerm)

        err := WriteDefinitionFile(p, filepath.Join(dir, DefinitionFile))
        err := WriteDefinitionFile(p, filepath.Join(dir, pkg.PackageDefinitionFile))
        if err != nil {
            return err
        }
@@ -77,7 +73,7 @@ func (r *Recipe) Load(path string) error {
    // if err != nil {
    //  return err
    // }
    if !helpers.Exists(path) {
    if !fileHelper.Exists(path) {
        return errors.New(fmt.Sprintf(
            "Path %s doesn't exit.", path,
        ))
@@ -95,7 +91,7 @@ func (r *Recipe) Load(path string) error {
    // the function that handles each file or dir
    var ff = func(currentpath string, info os.FileInfo, err error) error {

        if info.Name() != DefinitionFile && info.Name() != CollectionFile {
        if info.Name() != pkg.PackageDefinitionFile && info.Name() != pkg.PackageCollectionFile {
            return nil // Skip with no errors
        }

@@ -105,7 +101,7 @@ func (r *Recipe) Load(path string) error {
        }

        switch info.Name() {
        case DefinitionFile:
        case pkg.PackageDefinitionFile:
            pack, err := pkg.DefaultPackageFromYaml(dat)
            if err != nil {
                return errors.Wrap(err, "Error reading yaml "+currentpath)
@@ -117,8 +113,8 @@ func (r *Recipe) Load(path string) error {
            if err != nil {
                return errors.Wrap(err, "Error creating package "+pack.GetName())
            }
        case CollectionFile:
            packs, err := pkg.DefaultPackagesFromYaml(dat)
        case pkg.PackageCollectionFile:
            packs, err := pkg.DefaultPackagesFromYAML(dat)
            if err != nil {
                return errors.Wrap(err, "Error reading yaml "+currentpath)
            }
tests/fixtures/copy/c/a/build.yaml (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
image: "alpine"

copy:
- package:
    name: "a"
    category: "test"
    version: ">=0"
  source: /test3
  destination: /test3
- image: "busybox"
  source: /bin/busybox
  destination: /busybox

steps:
- mkdir /bina
- cp /test3 /result
- cp -rf /busybox /bina/busybox
tests/fixtures/copy/c/a/definition.yaml (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
category: "test"
name: "c"
version: "1.2"
tests/fixtures/copy/cat/a/a/build.yaml (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4
tests/fixtures/copy/cat/a/a/definition.yaml (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
category: "test"
name: "a"
version: "1.2"
tests/fixtures/copy/cat/b-1.1/build.yaml (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
requires:
- category: "test"
  name: "a"
  version: ">=0"

prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /newc
- echo artifact6 > /newnewc
- chmod +x generate.sh
- ./generate.sh
tests/fixtures/copy/cat/b-1.1/definition.yaml (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.1"
tests/fixtures/copy/cat/b-1.1/generate.sh (vendored, new file, 1 line)
@@ -0,0 +1 @@
echo generated > /sonewc
tests/fixtures/fileconflicts/conflict1/build.yaml (vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
image: "alpine"
prelude:
- mkdir /foo
steps:
- echo conflict > /foo/test1
package_dir: /foo
tests/fixtures/fileconflicts/conflict1/collection.yaml (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
packages:
- category: "test1"
  name: "conflict"
  version: "1.0"
- category: "test2"
  name: "conflict"
  version: "1.0"
tests/fixtures/fileconflicts_upgrade/conflict1/build.yaml (vendored, new file, 6 lines)
@@ -0,0 +1,6 @@
image: "alpine"
prelude:
- mkdir /foo
steps:
- echo conflict > /foo/test1
package_dir: /foo
Some files were not shown because too many files have changed in this diff.