Compare commits

..

199 Commits

Author SHA1 Message Date
Ettore Di Giacinto
c58a462e79 🆕 Tag 0.20.8 2021-11-05 23:44:27 +01:00
Ettore Di Giacinto
1e78570c50 Allow pushing final images while compiling
Add --push-final-images, --push-final-images-repository and
--push-final-images-force to luet build.

--push-final-images enables pushing final artifacts during the build. Each
package built is packed and pushed to the final repository
specified with --push-final-images-repository. If pushing final images is
enabled but no final repository is specified, it defaults to the cache
repository.

--push-final-images-force force-pushes final images even if
they are already available on the specified registry
2021-11-05 23:03:29 +01:00
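
As an illustration of the flag semantics above, a minimal Go sketch (hypothetical helper, not luet's actual code) using go-containerregistry's crane: skip the push when the image is already on the registry, unless the force flag is set.

```go
package images

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/crane"
	v1 "github.com/google/go-containerregistry/pkg/v1"
)

// pushFinalImage pushes img to repo/name:tag unless it is already present on
// the registry, or unconditionally when force is true (--push-final-images-force).
func pushFinalImage(img v1.Image, repo, name, tag string, force bool) error {
	ref := fmt.Sprintf("%s/%s:%s", repo, name, tag)
	if !force {
		if _, err := crane.Head(ref); err == nil {
			log.Printf("image %s already present, skipping push", ref)
			return nil
		}
	}
	return crane.Push(img, ref)
}
```
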
Ettore Di Giacinto
0589bead99 Tag 0.20.7 2021-11-04 13:02:23 +01:00
Ettore Di Giacinto
fba420865a 🔧 Preserve suid,sgid and sticky bits when extracting images 2021-11-04 11:34:36 +01:00
Ettore Di Giacinto
9857bea5ff 🎨 Lazy progressbar start 2021-10-31 21:21:08 +01:00
Ettore Di Giacinto
100c313804 Tag 0.20.6 2021-10-31 12:21:15 +01:00
Ettore Di Giacinto
d43b8c4af0 Attach platform data when creating images from tars 2021-10-31 10:48:35 +01:00
Ettore Di Giacinto
384ae8e833 Tag 0.20.5 2021-10-29 11:34:28 +02:00
Ettore Di Giacinto
c7f9708f90 Add CreateTar to image API
Add an API call which uses go-containerregistry to create OCI images from
standard tar archives.

Consume the new API when generating final images instead of building them with
Docker, and adapt/add tests as necessary.

This change allows xattrs to be carried over to final images.

Fixes #266
2021-10-29 10:35:03 +02:00
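
A minimal sketch of creating an OCI image from a tar with go-containerregistry, in the spirit of the CreateTar call described above (function and parameter names are illustrative); it also sets the platform fields that the later "Attach platform data" commit mentions.

```go
package images

import (
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/empty"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

// createImageFromTar wraps tarPath into a single-layer image and records the
// target platform in the image config.
func createImageFromTar(tarPath, arch, osName string) (v1.Image, error) {
	layer, err := tarball.LayerFromFile(tarPath)
	if err != nil {
		return nil, err
	}
	img, err := mutate.AppendLayers(empty.Image, layer)
	if err != nil {
		return nil, err
	}
	cfg, err := img.ConfigFile()
	if err != nil {
		return nil, err
	}
	cfg.Architecture = arch
	cfg.OS = osName
	return mutate.ConfigFile(img, cfg)
}
```
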
Itxaka
1b35a674ea Print plugin success messages + print plugin location on load (#267)
* report plugin state if succeed

We have a state field in the plugin response that is not being used
for anything. This patch makes luet print the state reported by the
plugin, if it's not empty, as a way for plugins to report data on success
to users. If the field is empty it is ignored.

Signed-off-by: Itxaka <igarcia@suse.com>

* Print plugin path

This patch adds the plugin location to the printed plugin list for a
richer view of the loaded plugins

Signed-off-by: Itxaka <igarcia@suse.com>
2021-10-28 11:42:56 +02:00
Ettore Di Giacinto
5e8a9c75dc Tag 0.20.4 2021-10-27 12:05:03 +02:00
Ettore Di Giacinto
b5def989ac Drop unused code 2021-10-27 11:22:06 +02:00
Ettore Di Giacinto
fdb49ce70d cli: render table/lists only on terminal output 2021-10-27 11:17:38 +02:00
Ettore Di Giacinto
37cc186c0b delta: trim path when computing src files set
The path contains a trailing "/" which wouldn't match when we walk dst, as
it's not there.

That had the unpleasant effect of creating empty folders in the
destination
2021-10-27 11:00:10 +02:00
Ettore Di Giacinto
f2f85a2384 ci: Add back -race 2021-10-26 18:05:34 +02:00
Ettore Di Giacinto
9c17432ee9 Tag 0.20.3 2021-10-26 17:34:03 +02:00
Ettore Di Giacinto
9799b7c94b Add Image reference by pipe, refactor 2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
5a7e97d0fb Update vendor 2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
262d09dfbc Lower message levels 2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
b974f44095 Add cache to avoid RAM consumption
When we have huge file lists we can consume too much RAM, which would
cause OOMs on certain devices. Use a smart cache instead, which
automatically drops to disk when necessary.
2021-10-26 16:56:49 +02:00
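
A rough sketch of a cache that spills to disk, to illustrate the idea (hypothetical types, not luet's actual implementation): keep a bounded number of entries in memory and write the rest to files.

```go
package cache

import (
	"crypto/sha256"
	"encoding/hex"
	"os"
	"path/filepath"
)

// SpillCache keeps up to maxInMem entries in memory and spills the rest to
// files under dir.
type SpillCache struct {
	dir      string
	maxInMem int
	mem      map[string][]byte
}

func NewSpillCache(dir string, maxInMem int) *SpillCache {
	return &SpillCache{dir: dir, maxInMem: maxInMem, mem: map[string][]byte{}}
}

func (c *SpillCache) path(key string) string {
	sum := sha256.Sum256([]byte(key))
	return filepath.Join(c.dir, hex.EncodeToString(sum[:]))
}

// Set keeps the value in memory until the limit is hit, then writes it to disk.
func (c *SpillCache) Set(key string, value []byte) error {
	if len(c.mem) < c.maxInMem {
		c.mem[key] = value
		return nil
	}
	return os.WriteFile(c.path(key), value, 0o600)
}

// Get checks memory first and falls back to disk.
func (c *SpillCache) Get(key string) ([]byte, error) {
	if v, ok := c.mem[key]; ok {
		return v, nil
	}
	return os.ReadFile(c.path(key))
}
```
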
Ettore Di Giacinto
35fcd868ee Switch to ondisk also when unpacking FS
From benchmarks it seems to be still faster. Add a note for a future
improvement
2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
aea3cdff8d Use ondisk reference for deltas 2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
daa9eb98d2 Walk destination only once when computing delta
Avoid the double pass by constructing the list on the fly
2021-10-26 16:56:49 +02:00
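
Together with the earlier "trim path" fix, the delta computation boils down to: build the source file set once (with the trailing "/" trimmed so lookups match) and walk the destination a single time. A hedged Go sketch with assumed names:

```go
package delta

import (
	"io/fs"
	"path/filepath"
	"strings"
)

// Changes returns the relative paths present under dstRoot that are not part
// of the source file set.
func Changes(srcPaths []string, dstRoot string) ([]string, error) {
	src := map[string]struct{}{}
	for _, p := range srcPaths {
		// trim the trailing "/" so directory entries match what WalkDir yields
		src[strings.TrimSuffix(p, "/")] = struct{}{}
	}

	var added []string
	err := filepath.WalkDir(dstRoot, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(dstRoot, path)
		if err != nil {
			return err
		}
		if _, ok := src[rel]; !ok && rel != "." {
			added = append(added, rel)
		}
		return nil
	})
	return added, err
}
```
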
Itxaka
1f0324c452 Log debug before failing (#263)
If a plugin failed, we were skipping the debug info which is kind of
useful :=)

Signed-off-by: Itxaka <igarcia@suse.com>
2021-10-26 11:18:56 +02:00
Ettore Di Giacinto
e705c471eb Tag 0.20.2 2021-10-26 00:30:12 +02:00
Itxaka
7cd455fff4 Set proper error message on plugin failure
Currently we are formatting the error message as one long sentence with no
spaces, which is pretty ugly:

| FATA[0000] Pluginluet-cosignat/usr/local/bin/luet-cosignErrorerror while executing plugin: exit status 1

Signed-off-by: Itxaka <igarcia@suse.com>
2021-10-26 00:28:30 +02:00
Ettore Di Giacinto
144c409908 Disable buffer on docker remote
Otherwise the full tarball gets loaded into memory
2021-10-25 23:57:09 +02:00
Ettore Di Giacinto
f6bb7a9405 Make sure to pull images before generating artifacts
Fixes #262
2021-10-25 23:56:38 +02:00
Ettore Di Giacinto
9d3af649f1 Tag 0.20.1 2021-10-24 22:31:20 +02:00
Ettore Di Giacinto
1b1ab6225c Use table lookup for checking addition files 2021-10-24 21:55:42 +02:00
Ettore Di Giacinto
bdcf26401c Prepare for tagging 0.20.0 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
21247331e0 Update README 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
b77b71f6cd cmd: Create output build dir if it doesn't exist already 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
bb40b5d1b7 update vendor 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
c220eac061 Move bus to api/core 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
67a07e7c5a Drop link to moby fork 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
c897bffdfc Drop untar 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
52ad2b5cfa Fixup config protect 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
6ff22d923c Make default build dir over context temp 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
37a9a3ef55 use containerd to uncompress 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
4a45b5410d Introduce lock for installation
It is used to ensure integrity and that we install one package at a
time. This makes sure that we extract correctly and that we are not
too I/O intensive relative to the CPU
2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
6b7e77df65 test: drop --race 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
819271b9bd Fixup tests 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
063f704057 update vendor 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
ebbb3aad27 Use API also when pulling from helpers used in client 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
ad489c2157 tests: pull image before running 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
454a560f4c Take count of os separator in extraction 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
a0e7e9ba08 ci: split integration tests 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
acd685b927 Extract with new image API 2021-10-24 18:26:25 +02:00
Ettore Di Giacinto
ab251fefce update vendor 2021-10-24 18:26:25 +02:00
Ettore Di Giacinto
6a9f19941a Add crane-based methods for extraction
- create a new API package to encapsulate image manipulation
- use the new API method to calculate deltas

Fixes #258
Fixes #204
Fixes #90
2021-10-24 18:26:08 +02:00
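
A minimal sketch of the crane-based flow (hypothetical helper name): pull the image and export its flattened filesystem as a tar stream, with no daemon or extra privileges required.

```go
package images

import (
	"os"

	"github.com/google/go-containerregistry/pkg/crane"
)

// exportImage pulls ref and writes its flattened rootfs tar to outTar.
func exportImage(ref, outTar string) error {
	img, err := crane.Pull(ref)
	if err != nil {
		return err
	}
	f, err := os.Create(outTar)
	if err != nil {
		return err
	}
	defer f.Close()
	return crane.Export(img, f)
}
```
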
Ettore Di Giacinto
d44befe9ff tests: add context unit tests 2021-10-23 15:26:45 +02:00
Ettore Di Giacinto
73c6cff15b Tag 0.19.2 2021-10-23 12:40:55 +02:00
Ettore Di Giacinto
65892f9bfc ux: Display only success on green 2021-10-23 12:40:55 +02:00
Ettore Di Giacinto
315bfb5a54 Move http timeout to the general configuration
Fixes https://github.com/mudler/luet/issues/250 as now it is documented
in the cli --help too.
2021-10-23 11:41:15 +02:00
Ettore Di Giacinto
57c8236184 fixup: cache miss with docker client 2021-10-23 01:50:01 +02:00
Ettore Di Giacinto
4d60795fdc Tag 0.19.1 2021-10-22 18:44:42 +02:00
Ettore Di Giacinto
6b45b1d61c ux: rework displaying of success messages 2021-10-22 17:55:38 +02:00
Ettore Di Giacinto
5eb5a42bf7 Generate snapshot and push it along
Fixes #260
Fixes #223
2021-10-22 17:55:08 +02:00
Ettore Di Giacinto
0bacdc75f2 Allow clients to pick a specific referenceID
Related to #260
2021-10-22 12:39:36 +02:00
Ettore Di Giacinto
917d0935ad Show progressbar only if terminal is big enough
Fixes #259

The bug is caused by
4c725e56bf/progressbar_printer.go (L190)
which does not take into account when barCurrentLength is <= 0.

As a workaround, display it only if the terminal is wide enough.

The context now drives this kind of runtime state, so we keep everything
tight and inject only that into the workers
2021-10-22 12:39:07 +02:00
Ettore Di Giacinto
50dfc47bee Tag 0.19.0 2021-10-22 09:25:12 +02:00
Ettore Di Giacinto
8d34a6ebb1 Use the cache to store temporary download files 2021-10-21 23:58:45 +02:00
Ettore Di Giacinto
d58a563d52 uninstall: limit cleanup to s.Target
This is specific to when s.Target differs from /. The helpers creating
the paths were considering the full traversal; instead they should stop
at the target.

E.g. if our rootfs is in /foo/bar and we build the list of paths to
prune for /foo/bar/baz/bar, we should stop looking at /foo/bar.
2021-10-21 23:58:45 +02:00
Ettore Di Giacinto
7b56e915fa cleanup now takes folders into account 2021-10-21 23:58:45 +02:00
Ettore Di Giacinto
a1c669d3ae Define context for scoped operation across core types
It holds the necessary state plus additional information about the
context we are running in (e.g. whether we are in a terminal or not).
In the future we can also use it as a contextual logger to
provide smarter logging capabilities.

This also replaces the global configuration instance that
was previously shared between the core components.
2021-10-21 23:58:24 +02:00
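
A hedged sketch of what such a context might look like (field names are illustrative, not luet's actual types): configuration, a logger and runtime facts such as terminal detection travel together instead of through globals.

```go
package types

import (
	"log"
	"os"

	"golang.org/x/term"
)

// Config is a stand-in for luet's configuration type.
type Config struct {
	Debug bool
}

// Context bundles configuration, logging and runtime state so workers receive
// one value instead of reaching for globals.
type Context struct {
	Config     *Config
	Logger     *log.Logger
	IsTerminal bool
}

func NewContext(cfg *Config) *Context {
	return &Context{
		Config:     cfg,
		Logger:     log.New(os.Stderr, "luet ", log.LstdFlags),
		IsTerminal: term.IsTerminal(int(os.Stdout.Fd())),
	}
}
```
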
Ettore Di Giacinto
b9895c9e05 update vendor 2021-10-21 23:58:19 +02:00
Ettore Di Giacinto
fe14d56afe Massive UX rewrite
- Ditch multiple libraries for progressbar, spinner, colors and replace
  with pterm
- Detect when running on a terminal and automatically disable the spinner
- Add support for multiple progress bars
- Huge rewrite of the configuration part. No more crazy stuff with viper;
  CLI commands now correctly override the default config file as expected
- Limit the banner to relevant parts

Fixes #211
Fixes #105
Fixes #247
Fixes #233
2021-10-21 23:58:00 +02:00
Ettore Di Giacinto
d4edaa9de8 Adapt unit tests 2021-10-19 21:25:01 +02:00
Ettore Di Giacinto
4700d27f0e Adapt integration tests 2021-10-19 21:25:01 +02:00
Ettore Di Giacinto
a6b6909dc4 Update vendor 2021-10-19 21:25:01 +02:00
Ettore Di Giacinto
5b4e930fc3 Do implement a real cache
Instead of merely storing files on disk, use a real cache.

This finally also makes it possible to reference artifacts in the cache by the
package checksum, which solves the cache-hit checksum failures we had
previously.
2021-10-19 21:25:01 +02:00
Ettore Di Giacinto
2eeb464946 Make repositories arch-aware
Introduce an arch field that can be used to filter repositories based on
the local architecture.

If arch is provided and matches the current GOARCH, the
repository is enabled
2021-10-19 14:19:53 +02:00
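
The filtering rule reduces to a few lines; a sketch with assumed Repository fields:

```go
package config

import "runtime"

type Repository struct {
	Name string
	Arch string // optional, e.g. "amd64" or "arm64"
}

// filterByArch keeps repositories that either declare no arch or match ours.
func filterByArch(repos []Repository) []Repository {
	var enabled []Repository
	for _, r := range repos {
		if r.Arch == "" || r.Arch == runtime.GOARCH {
			enabled = append(enabled, r)
		}
	}
	return enabled
}
```
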
Ettore Di Giacinto
b00c2ff3cc ci: unit tests now need the luet executable 2021-10-15 21:30:40 +02:00
Ettore Di Giacinto
e764b1cd29 Add CLI helpers and BuildTree heuristic
The BuildTree implementation was in parallel-build-tools; it actually makes
sense to share it in the compiler, as it can be used as another way to trigger
builds.

The CLI helpers were split across several CLI
implementations. This moves to a new layout where we expose several
packages, as we will refactor things slowly.
2021-10-15 21:14:48 +02:00
Ettore Di Giacinto
619c9aeda0 update vendor 2021-10-14 22:06:48 +02:00
Ettore Di Giacinto
6ea05e59ea Drop imgworker client
Clean up code which allowed extracting images in a privileged way; the
containerd way supersedes it.
2021-10-14 22:06:01 +02:00
Ettore Di Giacinto
9c19a7ec35 ci: trigger image pipeline also when tagging 2021-10-14 15:36:39 +02:00
Ettore Di Giacinto
70866c3281 ci: build multi-arch images 2021-10-14 15:28:25 +02:00
Ettore Di Giacinto
c536aaa53c Tag 0.18.1 2021-10-10 21:27:35 +02:00
Ettore Di Giacinto
d5819bb4cb Fixup benchmark tests 2021-10-10 19:07:46 +02:00
Ettore Di Giacinto
6ba028f0ea Make sure we do compute the best fit
While install calls upgrade, which in turn calls a relaxed install on
its results, this doesn't make sure that the new results are at the best
available version. We now iterate here over the results to compute the
best set.

It also extends computeUpgrade with the possibility to selectively
choose which packages to upgrade and which not.
2021-10-10 19:04:55 +02:00
Ettore Di Giacinto
780b7aa610 Tag 0.18.0 2021-10-10 00:29:13 +02:00
Ettore Di Giacinto
9de6b22764 Make singlecore default solver 2021-10-10 00:29:13 +02:00
Ettore Di Giacinto
097e310d3a Drop join from compilation specs
Adapt also hash and integration tests
2021-10-10 00:28:54 +02:00
Ettore Di Giacinto
5a63bbd0d2 Deprecate parallel solver 2021-10-10 00:28:54 +02:00
Ettore Di Giacinto
e64f68d36b Introduce install --relaxed
It introduces a relaxed way to install packages with loose deps. Default
installation now prefers up-to-date packages during
selection.

Also:
- Upgrade is now used in install, so it has to return the full system view even when there is nothing to upgrade
- Avoid checking for upgrades upfront if relaxed is on
2021-10-10 00:27:58 +02:00
Daniele Rondina
77b4c9a972 Update vendor Sabayon/pkgs-checker@v0.8.4 (#255)
* Update vendor Sabayon/pkgs-checker@v0.8.4

* Drop golang.org/x/text

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2021-10-06 13:41:14 +02:00
Ettore Di Giacinto
585b72c3d0 Tag 0.17.13 2021-10-06 12:20:05 +02:00
Ettore Di Giacinto
f7aa6c3428 fix: always append templates from tree path
Otherwise shared templates from trees do not work when
--from-repositories is enabled.
2021-10-06 11:08:53 +02:00
Ettore Di Giacinto
2970d8e52e Tag 0.17.12 2021-09-22 09:46:17 +02:00
Ettore Di Giacinto
ff46bc7641 Don't use semver library for ordering by default
Use Debian-style parsing for versions and selections.
This covers ordering of versions like -0, -1 in sequence plus many others
not available with semver. It is backward compatible, as we support
the same feature set as before and tests are passing (actually adding more
to cover interesting cases)
2021-09-21 14:16:39 +02:00
Ettore Di Giacinto
e3063985b2 Tag bugfix release 0.17.11 2021-09-17 15:54:35 +02:00
Ettore Di Giacinto
a348fd4835 Replace Yaml with YAML in function, add debug output when extracting runtime data 2021-09-17 14:46:32 +02:00
Ettore Di Giacinto
fc45eae80a create-repo: annotate runtime definition in artifacts 2021-09-17 14:30:06 +02:00
Ettore Di Giacinto
b73ac21004 create-repo: don't inherit build requirements in runtime 2021-09-17 12:06:39 +02:00
Ettore Di Giacinto
bdd51fa221 Tag: Bump to 0.17.10 2021-09-16 14:46:30 +02:00
Ettore Di Giacinto
4039050449 ci: run in same concurrent group due to registry tests 2021-09-16 13:05:28 +02:00
Ettore Di Giacinto
14914f3c8e Update tests for including packages in tree from metadata
Also switches GenerateRepository to functional interface to allow
more parametrization
2021-09-16 12:59:54 +02:00
Ettore Di Giacinto
e4fff77d43 Source packages metadata to update repository tree
This allows to generate trees with `create-repo` by just having
the metadata files, making tree(s) an optional requirement.
2021-09-16 10:56:09 +02:00
Ettore Di Giacinto
972421ae81 Tag 0.17.9 2021-09-06 15:46:58 +02:00
Ettore Di Giacinto
0bd373be2b Merge pull request #252 from Itxaka/consider_provides_names_on_install
installer: Take into consideration provides names
2021-09-06 15:45:25 +02:00
Itxaka
aba89db204 installer: Take into consideration provides names
When installing a package that has provides values, the installer only
checks the original package against the resolved package. This leads to
the requires keyword not working as expected, as the solved final package
is never going to match the name of the old/superseded package.

This patch checks the asked-to-install package name against the list of
names that the matched package provides.

Signed-off-by: Itxaka <igarcia@suse.com>
2021-09-06 14:58:50 +02:00
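
An illustrative sketch of the check (simplified types, not the installer's real ones): a solved package satisfies a request either by its own name or by one of its provides entries.

```go
package installer

type Package struct {
	Name     string
	Provides []string
}

// satisfies reports whether solved fulfils a request for wanted, either by
// its own name or through one of its provides entries.
func satisfies(wanted string, solved Package) bool {
	if solved.Name == wanted {
		return true
	}
	for _, p := range solved.Provides {
		if p == wanted {
			return true
		}
	}
	return false
}
```
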
Itxaka
178690842f Show the package name on uninstall error (#251)
* Show the package name on uninstall error

Currently there is a generic error message when uninstalling if one
of the provided packages is not found on the system. This is quite
obnoxious if you are giving luet a big list to uninstall, as you don't
know which package is not installed and what is preventing you from
running the uninstall command.

This patch is a very simple change that prints the package name along
with the error to provide more info to the end user

Signed-off-by: Itxaka <igarcia@suse.com>

* Update pkg/installer/installer.go

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2021-08-26 14:45:19 +02:00
Ettore Di Giacinto
58f4997a0f Makefile: multiarch-build should only build 2021-08-19 16:41:27 +02:00
Ettore Di Giacinto
5bb65e5b30 Get goreleaser from GH actions 2021-08-19 16:22:20 +02:00
Ettore Di Giacinto
4e918e6bd1 ci: fixup getting goreleaser before build 2021-08-19 11:53:59 +02:00
Daniele Rondina
0f545952cd cmd/config: simplify code (#246) 2021-08-11 16:26:34 +02:00
Ettore Di Giacinto
3402641241 Tag 0.17.8 2021-08-11 14:33:30 +02:00
Ettore Di Giacinto
b81d33f182 Fixup luet tree pkglist/images while having shared templates 2021-08-11 12:52:16 +02:00
Daniele Rondina
0cc8930708 Finalizer envs (#242)
* Allow to define envs for finalizer

Fixes: #241

* tests: Add integration test for finalizer with envs
2021-08-11 11:18:16 +02:00
Ettore Di Giacinto
db784597d7 Move unpack where it belongs
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-11 10:36:41 +02:00
Ettore Di Giacinto
35eb63a31c Create dest-dir if it doesn't exist while unpacking without snapshotter
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-11 10:36:41 +02:00
Ettore Di Giacinto
16bb93e165 Don't check file conflicts while running installops
We already check at the beginning during install/upgrade. Also, it is better
not to fail for a check we already did once the upgrade has already started.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-11 10:36:41 +02:00
Itxaka
220f8700ce Update vendor and go.mod (#245)
Due to https://github.com/mudler/luet/pull/244 being rebased on top of
master instead of develop there is a mismatch in the dependencies.

This should fix it.

Signed-off-by: Itxaka <igarcia@suse.com>
2021-08-11 10:35:48 +02:00
Daniele Rondina
540e8151ad compiler: Speedup & troubleshooting debug messages (#240)
* compiler: Speedup stripFromRootfs and add debug log

* compiler: on stripFromRootfs show files removed with debug
2021-08-11 10:06:52 +02:00
Itxaka
4adc0dc9b9 Use goreleaser to build and release (#244)
Instead of using gox on one side and an action to release, we can merge
them together with goreleaser, which builds for extra targets (arm,
mips if needed in the future) and also takes care of creating
checksums, a source archive and a changelog, and of creating a release with
all the artifacts.

All binaries should respect the old naming convention, so any scripts
out there should still work.

Signed-off-by: Itxaka <igarcia@suse.com>
2021-08-11 08:30:55 +02:00
Ettore Di Giacinto
0a4fe57f33 Tag 0.17.7 2021-08-09 16:18:51 +02:00
Ettore Di Giacinto
ccf83b0d5f Avoid deadlocking by FindPackages calls 2021-08-09 15:13:53 +02:00
Ettore Di Giacinto
57cefc7d6b Fixup data race when updating revdeps 2021-08-09 14:09:10 +02:00
Ettore Di Giacinto
97ff647f07 Turn tmpdir in abs path if required
Fixes #239
2021-08-09 13:17:16 +02:00
Ettore Di Giacinto
b7ac1e03d5 Add test for file conflicts on upgrade of the same package
This makes certain that we don't raise a conflict error while
upgrading the same package
2021-08-09 13:02:08 +02:00
Ettore Di Giacinto
ff092db97d Increase http timeout to 120s by default 2021-08-09 12:58:01 +02:00
Ettore Di Giacinto
2789f59f53 Add --values flag also to luet tree images 2021-08-09 12:57:36 +02:00
Ettore Di Giacinto
10ae872a3e Simplify revdep code
Isolate common code into a function and also fix a subtle bug hiding in
that code.

We need to stash a copy of the package inside our PackageMap to avoid
having references pointing at the same value when iterating over
requires. (E.g. it happened in this case:
https://github.com/rancher-sandbox/cOS-toolkit/pull/467#issuecomment-895060115
)
2021-08-09 12:47:27 +02:00
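
The aliasing pitfall referred to above, in miniature (hypothetical types): when filling a map while iterating, store a copy of each element so every entry points at its own value.

```go
package revdeps

type Package struct {
	Name     string
	Requires []string
}

// buildMap stores a copy of each package before taking its address, so map
// entries never alias a shared loop variable.
func buildMap(pkgs []Package) map[string]*Package {
	m := map[string]*Package{}
	for _, p := range pkgs {
		p := p // copy the element; taking &p of the loop variable would alias it
		m[p.Name] = &p
	}
	return m
}
```
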
Ettore Di Giacinto
65a55e242e Tag 0.17.6 2021-08-07 18:04:48 +02:00
Ettore Di Giacinto
77c4bf1fd1 update vendor 2021-08-07 14:46:05 +02:00
Ettore Di Giacinto
4d6cccb2fa Give an explanation when formulas are unsat
Fixes #168

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-07 14:46:05 +02:00
Ettore Di Giacinto
ec7be63418 Tag 0.17.5 2021-08-05 09:02:35 +02:00
Ettore Di Giacinto
33b1c63815 Allow shared templates across packages
This changeset allows shared templates in a static "templates" folder
present in each luet tree. If the directory is present, it
gets scanned and templated accordingly on top of each package. This
allows using that folder to store custom blocks shared between
packages.

This is still experimental and subject to change; this is just a first-pass
version of the feature. It still needs to be refined, as it
would be more elegant to use the helm engine properly and map our
structure to the engine instead of adapting it roughly.

Fixes #224
2021-08-04 16:35:28 +02:00
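
A simplified sketch of the mechanism using text/template instead of the helm engine the commit mentions (folder layout and names are assumptions): shared templates are parsed first, so the package definition can call the blocks they define.

```go
package tpl

import (
	"bytes"
	"path/filepath"
	"text/template"
)

// render parses every shared template under sharedDir, then the package
// definition itself, and executes it with the given values.
func render(sharedDir, packageDefinition string, values map[string]interface{}) (string, error) {
	t := template.New("package")
	shared, err := filepath.Glob(filepath.Join(sharedDir, "*.tmpl"))
	if err != nil {
		return "", err
	}
	if len(shared) > 0 {
		if t, err = t.ParseFiles(shared...); err != nil {
			return "", err
		}
	}
	if t, err = t.Parse(packageDefinition); err != nil {
		return "", err
	}
	var out bytes.Buffer
	if err := t.Execute(&out, values); err != nil {
		return "", err
	}
	return out.String(), nil
}
```
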
Ettore Di Giacinto
86bd6c5fc0 Tag 0.17.4 2021-08-04 11:55:23 +02:00
Ettore Di Giacinto
658612fcf3 Check file conflicts also on packages that are going to be swapped out
Fixes #237
2021-08-04 10:43:12 +02:00
Ettore Di Giacinto
7128c88da6 Tag 0.17.3 2021-08-03 18:15:40 +02:00
Ettore Di Giacinto
74402fae81 Re-organize common CLI code
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-03 16:48:34 +02:00
Ettore Di Giacinto
9d1594c036 Check if the system-target path supplied is absolute
docker.Untar
(https://github.com/mudler/luet/blob/master/vendor/github.com/docker/docker/pkg/archive/archive.go#L942) requires absolute paths.
We didn't do any input validation before, assuming the paths passed in
were absolute since they were coming from YAML configuration files; now
that this is no longer the case we need to sanitize the input.

With this change we check whether the given path is absolute or relative; if
it's relative we compute the absolute path and use it instead.
2021-08-03 16:22:59 +02:00
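
The sanitization step amounts to a small helper; a sketch:

```go
package helpers

import "path/filepath"

// absTarget returns target unchanged when it is already absolute, otherwise
// it resolves it against the current working directory, since callers such as
// docker.Untar require absolute paths.
func absTarget(target string) (string, error) {
	if filepath.IsAbs(target) {
		return target, nil
	}
	return filepath.Abs(target)
}
```
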
Ettore Di Giacinto
75906c4198 Add missing check for error 2021-08-03 13:36:38 +02:00
Ettore Di Giacinto
cb032dc714 Allow manipulating requires/conflicts/provides
This allows manipulating requires, conflicts and provides with
templating at build time.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-03 13:27:44 +02:00
Ettore Di Giacinto
2c7e495fa1 Add http timeout to grab
grab.NewClient() doesn't set a specific timeout
(https://github.com/cavaliercoder/grab/blob/v2.0.0/client.go#L37), even though
the project advertises "default sane settings".

We default to 30 seconds, and allow overriding it with HTTP_TIMEOUT

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-03 11:57:44 +02:00
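
A sketch of the workaround, assuming grab v2 exposes its underlying *http.Client and using the HTTP_TIMEOUT variable from the commit message:

```go
package downloader

import (
	"net/http"
	"os"
	"strconv"
	"time"

	"github.com/cavaliercoder/grab"
)

// newGrabClient returns a grab client with an explicit HTTP timeout,
// defaulting to 30 seconds and honoring HTTP_TIMEOUT when set.
func newGrabClient() *grab.Client {
	timeout := 30
	if v := os.Getenv("HTTP_TIMEOUT"); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			timeout = n
		}
	}
	client := grab.NewClient()
	client.HTTPClient = &http.Client{Timeout: time.Duration(timeout) * time.Second}
	return client
}
```
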
Ettore Di Giacinto
db8bf2b85e Tag 0.17.2 2021-07-30 13:33:29 +02:00
Ettore Di Giacinto
5eb586ddb0 Speed up conflict check by caching files in memory 2021-07-30 12:40:55 +02:00
Ettore Di Giacinto
f9747cdf87 Tag 0.17.1 2021-07-29 15:07:32 +02:00
Ettore Di Giacinto
becac7d853 Add tests 2021-07-29 11:03:02 +02:00
Ettore Di Giacinto
5aa5bffb48 Add reinstall command
Fixes #51
2021-07-29 11:03:02 +02:00
Ettore Di Giacinto
9aa3159787 Check for file conflicts before install
Fixes #88
2021-07-29 11:03:02 +02:00
Ettore Di Giacinto
9cb6e65bb6 Don't display banner on cleanup 2021-07-29 11:03:02 +02:00
Ettore Di Giacinto
92b243d7aa Add git-chglog template and config 2021-07-12 10:26:22 +02:00
Ettore Di Giacinto
29ec19a8a1 Tag 0.17.0 2021-07-11 11:55:35 +02:00
Ettore Di Giacinto
440e07c418 Display version banner only on specific commands
Avoid cumbersome text in areas that can be parsed by machines

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-11 11:00:59 +02:00
Ettore Di Giacinto
acf74f5896 config_protect: don't protect files during uninstall by default
Otherwise during uninstall we would retain the files which are
protected. We introduced a specific flag to pass during
uninstall, but for now we choose simplicity and the expected default first.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-11 10:40:36 +02:00
Ettore Di Giacinto
1ee1894ffa ci: split integration/unit tests 2021-07-09 17:15:17 +02:00
Ettore Di Giacinto
c8573f9535 Adapt hash tests
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 17:15:17 +02:00
Ettore Di Giacinto
76b70ebeb4 Add dirhash to package signatures
While this breaks current hashing, it also ties the spec content to the
hash; this way, if we change something in the spec folder, the
package hash changes.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 12:37:38 +02:00
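
One way to fold the spec directory into a package hash, roughly in the spirit of this commit (a hedged sketch with golang.org/x/mod's dirhash, not necessarily the exact call luet makes):

```go
package hashes

import (
	"crypto/sha256"
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

// packageHash combines the package's own fingerprint with a hash of its spec
// directory, so editing anything in the spec folder changes the result.
func packageHash(fingerprint, specDir string) (string, error) {
	dh, err := dirhash.HashDir(specDir, "", dirhash.Hash1)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256([]byte(fingerprint + dh))
	return fmt.Sprintf("%x", sum), nil
}
```
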
Ettore Di Giacinto
2efb17a06c Add specific field to resolve final images
This commit starts deprecating the `join` keyword in favor of a
`requires_final_images` boolean in the compilation spec.

The change is driven by two reasons: syntax and guaranteeing unique hashes.

- when computing a hash, the hashtree analyzes the requires field of
  each spec, ignoring the join field
- the join field doesn't add much value. Having it separate suggests
  that a spec can contain both `requires` and `join`, but that's not
  actually true; we just act differently on the same list.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 12:37:38 +02:00
Ettore Di Giacinto
64ab3711ca Display version and license on each run
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 12:25:55 +02:00
Ettore Di Giacinto
eb5d7ba35b Merge branch 'master' into develop 2021-07-09 12:25:04 +02:00
Daniele Rondina
b6b91cfd7a compiler: Avoid generation of delta if there are only prelude steps (#232) 2021-07-09 12:23:32 +02:00
Daniele Rondina
4d8a9a544b cmd/database/create: Avoid creation of the same package multiple times (#222) 2021-06-25 13:09:32 +02:00
Ettore Di Giacinto
654b5b48cd Tag 0.16.7 2021-06-17 07:55:01 +02:00
Ettore Di Giacinto
92e18d5782 Support priv/unpriv image extraction
Optionally add back privileged extraction which can be enabled with
LUET_PRIVILEGED_EXTRACT=true

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-06-16 23:30:28 +02:00
Ettore Di Giacinto
8780e4f16f Unpack using our moby fork
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-06-16 19:16:02 +02:00
Ettore Di Giacinto
a7b4ae67c9 Tag 0.16.6 2021-06-04 16:23:51 +02:00
Ettore Di Giacinto
68edfd58e7 Keep emitting plugin events 2021-06-04 12:08:57 +02:00
Ettore Di Giacinto
0658020c60 Update go.mod and vendor 2021-06-04 11:15:10 +02:00
Ettore Di Giacinto
c3b552103f Return a specific struct for the image
And drop imgworker
2021-06-04 11:09:41 +02:00
Darren Shepherd
796967cc9d Allow luet install to work inside a non-privileged container
This switches from the containerd snapshotter to the go-containerregistry
library, which requires no additional privileges beyond root file system
access.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-06-04 10:29:57 +02:00
Ettore Di Giacinto
5cccc34f32 Tag 0.16.5 2021-06-01 22:54:10 +02:00
Ettore Di Giacinto
5ef1d04055 Create CONTRIBUTING.md 2021-06-01 17:13:23 +02:00
Itxaka
1bd4d520a4 Add 2 new events for image unpacking (#226)
* Reduce possibility of circular dependency

Just by adding an import for bus to anything in the helper dir, we would
run into a circular dependency due to how things are structured. That
means that we cannot emit any events for unpacking or for the docker helper
pulling an image.

This commit tries to work around this by doing several things:
 - Avoid full imports of the helper module by splitting some modules
   into their own submodule, like docker or match, so using a small match
   function doesn't pull in the whole module
 - Stop importing the full helper module for a simple check of whether a
   dir exists and instead write the function in place (5 lines)
 - Use logrus in the bus module instead of logger, which avoids a
   circular dependency

Signed-off-by: Itxaka <igarcia@suse.com>

* Add two new events for unpacking an image

Both pre and post unpacking an image

Signed-off-by: Itxaka <igarcia@suse.com>
2021-06-01 16:43:31 +02:00
Daniele Rondina
b12c7678d4 get_luet_root.sh: Fix installation on alpine/busybox env (#218)
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2021-05-27 17:17:49 +02:00
Ettore Di Giacinto
32a99a4a49 Update readme with badges and a small logo 2021-05-26 17:57:41 +02:00
Ettore Di Giacinto
56e9c6f82e Tag 0.16.4 2021-05-25 14:55:55 +02:00
Ettore Di Giacinto
92ea69a2b9 Don't always generate artifact
Distinguish whether we should generate artifacts for the deps of a
package or not, and ensure that we only force artifact generation for
targets when computing the join image.

Fixes #217
2021-05-25 13:28:14 +02:00
Ettore Di Giacinto
838899aa83 Respect create-repo --force-push with metadata files
Don't force-push package metadata with the docker repository.

We were pushing .metadata.yaml files regardless, even if packages were
not pushed, also when calling create-repo without --force-push.
2021-05-25 12:13:23 +02:00
Ettore Di Giacinto
76695b2fc8 docker: check during extraction if image exists and pull if needed 2021-05-25 11:04:06 +02:00
Ettore Di Giacinto
5c84e5b0a7 Tag 0.16.3 2021-05-24 22:23:57 +02:00
Ettore Di Giacinto
06fa8b1c87 fixup: jointag index 2021-05-24 22:13:27 +02:00
Ettore Di Giacinto
ff153f367f fixup: enhance output of copy tag when cycling over packages 2021-05-24 21:35:22 +02:00
Ettore Di Giacinto
459676397c Enhance multi-stage output 2021-05-24 19:40:39 +02:00
Ettore Di Giacinto
93057fbf6d Respect PackageTargetOnly during multi-stage copy image generation 2021-05-24 19:40:17 +02:00
Ettore Di Giacinto
5e1a7c50df fixup: display hash of join image 2021-05-23 18:22:14 +02:00
Ettore Di Giacinto
0ceaf09615 fixup: propagate outputpath 2021-05-23 17:36:09 +02:00
Ettore Di Giacinto
0dc78ebe41 Tag 0.16.2 2021-05-22 14:49:58 +02:00
Ettore Di Giacinto
27c2e3c51f fixup: make sure the outputdir exists 2021-05-21 22:25:18 +02:00
Ettore Di Giacinto
e83f600ed3 Tag 0.16.1 2021-05-21 21:28:33 +02:00
Ettore Di Giacinto
6344e47eb3 fixup: resolve multistage and join also when building deps 2021-05-21 19:47:23 +02:00
Ettore Di Giacinto
8b1c5558b2 Tag 0.16.0 2021-05-21 16:10:28 +02:00
Ettore Di Giacinto
c277ac0f94 Add join keyword to generate parent image from final artifacts
A new keyword `join` is introduced to generate the parent image. It
takes precedence over a `requires` or an `image` already defined in a
spec.

It generates all the artifacts from the listed packages and joins
them into a single image which is used as the parent for the package
build process.

This change invalidates previously generated hashes.

Fixes #173
2021-05-21 14:52:48 +02:00
Ettore Di Giacinto
d8c8c2194f Tag 0.15.0 2021-05-20 13:53:53 +02:00
Ettore Di Giacinto
4494385f5b Fixup test rewording match 2021-05-20 11:17:39 +02:00
Ettore Di Giacinto
85a7968ecc Check if we have the image locally and extract directly it 2021-05-20 10:29:41 +02:00
Ettore Di Giacinto
1ba987b0f1 cmd/tree: consume Hashtree to display image hashes 2021-05-19 17:43:14 +02:00
Ettore Di Giacinto
c72b5be364 Update vendor 2021-05-19 17:34:54 +02:00
Ettore Di Giacinto
1ef18ed2c5 Add salted method for assertion hashing
- Add the spec Hash as salt for image hashes
- Add tests and adapt existing ones
- Use a signature to build a spec hash

Fixes: #207
2021-05-19 17:34:01 +02:00
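
A sketch of the salting idea with illustrative names: each assertion hash mixes the image hash with the spec hash used as salt, so specs with otherwise identical image hashes produce distinct assertions.

```go
package hashes

import (
	"crypto/sha256"
	"fmt"
)

// saltedHash derives an assertion hash from an image hash and the spec hash
// used as salt.
func saltedHash(imageHash, specHash string) string {
	sum := sha256.Sum256([]byte(imageHash + "-" + specHash))
	return fmt.Sprintf("%x", sum)
}
```
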
Ettore Di Giacinto
4b1b711a5c Add single suite tests for spec and artifact
They went missing when the refactoring of the compiler occurred
2021-05-19 17:34:01 +02:00
Ettore Di Giacinto
7f047e4fc2 Multi-stage builds from hash images
Implement multi-stage copy from images that are part of the build cache of a
package.

Note: these are not the final images we are copying files from, but
the underlying build containers.

Skip the test on the img backend because it fails when pulling external
images during a multi-stage build...

Fixes #190
2021-05-18 12:16:35 +02:00
Ettore Di Giacinto
356350f724 Tag 0.14.7 2021-05-17 19:11:44 +02:00
Ettore Di Giacinto
9d2ee1b760 Add hashtree and extract hash logic from compiler
Add unit tests, consume imagehashtree in compiler and cleanup

Fixes: #203
2021-05-17 15:22:08 +02:00
Ettore Di Giacinto
fd12227d53 plugins: Make execution fail if loaded plugins are erroring, print debug output on emitted responses
Now plugins failing to answer make execution fail, adapt tests
2021-05-17 15:17:03 +02:00
Ettore Di Giacinto
1e617b0c67 Add copy field
Initial implementation that just works with standard docker image
references

Related: #190
2021-05-17 15:16:15 +02:00
3598 changed files with 215098 additions and 352729 deletions

.chglog/CHANGELOG.tpl.md (new file)

@@ -0,0 +1,49 @@
{{ if .Versions -}}
{{ if .Unreleased.CommitGroups -}}
{{ range .Unreleased.CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ else }}
{{ range .Unreleased.Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ end -}}
{{ range .Versions }}
<a name="{{ .Tag.Name }}"></a>
{{ if .CommitGroups -}}
{{ range .CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ else }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }} (https://github.com/mudler/luet/commit/{{.Hash.Short}})
{{ end }}
{{ end -}}
{{- if .NoteGroups -}}
{{ range .NoteGroups -}}
### {{ .Title }}
{{ range .Notes }}
{{ .Body }}
{{ end }}
{{ end -}}
{{ end -}}
{{ end -}}
{{- if .Versions }}
[Unreleased]: {{ .Info.RepositoryURL }}/compare/{{ $latest := index .Versions 0 }}{{ $latest.Tag.Name }}...HEAD
{{ range .Versions -}}
{{ if .Tag.Previous -}}
[{{ .Tag.Name }}]: {{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }}
{{ end -}}
{{ end -}}
{{ end -}}

.chglog/config.yml (new executable file)

@@ -0,0 +1,27 @@
style: github
template: CHANGELOG.tpl.md
info:
title: CHANGELOG
repository_url: https://github.com/mudler/luet
options:
commits:
# filters:
# Type:
# - feat
# - fix
# - perf
# - refactor
commit_groups:
title_maps:
feat: Features
fix: Bug Fixes
perf: Performance Improvements
refactor: Code Refactoring
ci: Continous Integration
header:
pattern: "(.*)"
pattern_maps:
- Subject
notes:
keywords:
- BREAKING CHANGE

.github/workflows/image.yml (new file)

@@ -0,0 +1,67 @@
---
name: 'build container images'
on:
push:
branches:
- master
tags:
- '*'
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=quay.io/luet/base
VERSION=latest
SHORTREF=${GITHUB_SHA::8}
# If this is git tag, use the tag name as a docker tag
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
fi
TAGS="${DOCKER_IMAGE}:${VERSION},${DOCKER_IMAGE}:${SHORTREF}"
# If the VERSION looks like a version number, assume that
# this is the most recent version of the image and also
# tag it 'latest'.
if [[ $VERSION =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
TAGS="$TAGS,${DOCKER_IMAGE}:latest"
fi
# Set output parameters.
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@master
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@master
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
registry: quay.io
username: ${{ secrets.DOCKER_RELEASE_USERNAME }}
password: ${{ secrets.DOCKER_RELEASE_PASSWORD }}
- name: Build
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64,linux/arm
push: true
tags: ${{ steps.prep.outputs.tags }}

.github/workflows/pr.yml (new file)

@@ -0,0 +1,77 @@
on: pull_request
name: Build and Test
jobs:
tests-integration-img:
strategy:
matrix:
go-version: [1.16.x]
platform: [ubuntu-latest]
runs-on: ${{ matrix.platform }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Tests with Img backend
run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
tests-integration:
strategy:
matrix:
go-version: [1.16.x]
platform: [ubuntu-latest]
runs-on: ${{ matrix.platform }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
- name: Tests
run: sudo -E env "PATH=$PATH" make test-integration
tests-unit:
strategy:
matrix:
go-version: [1.16.x]
platform: [ubuntu-latest]
runs-on: ${{ matrix.platform }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
make build
sudo cp -rf luet /usr/bin/luet
- name: Install GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
install-only: true
- name: Build
run: sudo -E env "PATH=$PATH" make multiarch-build-small
- name: Tests
run: sudo -E env "PATH=$PATH" make test-coverage

.github/workflows/push.yml (new file)

@@ -0,0 +1,87 @@
on: push
concurrency:
group: registries-tests
name: Build on push
jobs:
tests-integration-img:
name: Integration tests with img
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Login to quay with img
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Tests with Img backend
run: |
sudo -E env "PATH=$PATH" \
env "LUET_BACKEND=img" \
make test-integration
tests-integration:
name: Integration tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
- name: Tests
run: |
sudo -E \
env "PATH=$PATH" \
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-integration
tests-unit:
name: Unit tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
make build
sudo cp -rf luet /usr/bin/luet
- name: Install GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
install-only: true
- name: Build test
run: sudo -E env "PATH=$PATH" make multiarch-build-small
- name: Tests
run: |
sudo -E \
env "PATH=$PATH" \
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-coverage


@@ -1,14 +1,20 @@
on: push
name: Build and release on push
on:
push:
tags:
- '*' # only test and release when a tag is pushed
concurrency:
group: registries-tests
name: Test and Release on tag
jobs:
release:
name: Test and Release
tests-integration-img:
name: Integration tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.14.x
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
@@ -17,31 +23,89 @@ jobs:
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Build test
run: sudo -E env "PATH=$PATH" make multiarch-build-small
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Login to quay with img
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Tests with Img backend
run: |
sudo -E env "PATH=$PATH" \
env "LUET_BACKEND=img" \
make test-integration
sudo -E env "PATH=$PATH" \
env "LUET_BACKEND=img" \
make test-integration
tests-integration:
name: Integration tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
- name: Tests
run: |
sudo -E \
env "PATH=$PATH" \
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-integration test-coverage
- name: Build
run: sudo -E env "PATH=$PATH" make multiarch-build-small && sudo chmod -R 777 release/
- name: Release
uses: fnkr/github-action-ghr@v1
if: startsWith(github.ref, 'refs/tags/')
sudo -E \
env "PATH=$PATH" \
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-integration
tests-unit:
name: Unit tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
make build
sudo cp -rf luet /usr/bin/luet
- name: Tests
run: |
sudo -E \
env "PATH=$PATH" \
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-coverage
release:
name: Release
runs-on: ubuntu-latest
needs: [ "tests-integration-img", "tests-integration","tests-unit" ]
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
version: latest
args: release --rm-dist
env:
GHR_PATH: release/
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}


@@ -1,30 +0,0 @@
on: pull_request
name: Build and Test
jobs:
test:
strategy:
matrix:
go-version: [1.14.x]
platform: [ubuntu-latest]
runs-on: ${{ matrix.platform }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Build
run: sudo -E env "PATH=$PATH" make multiarch-build-small
- name: Tests with Img backend
run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
- name: Tests
run: sudo -E env "PATH=$PATH" make test-integration test-coverage

.gitignore (modified)

@@ -1,4 +1,5 @@
*.swp
luet
tests/integration/shunit2
tests/integration/bin
tests/integration/bin
release/

.goreleaser.yml (new file)

@@ -0,0 +1,43 @@
before:
hooks:
- go mod tidy
dist: release
source:
enabled: true
name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
checksum:
name_template: '{{ .ProjectName }}-{{ .Tag }}-checksums.txt'
builds:
-
env:
- CGO_ENABLED=0
ldflags:
- -s -w
- -X "github.com/mudler/luet/cmd.BuildTime={{ time "2006-01-02 15:04:05 MST" }}"
- -X "github.com/mudler/luet/cmd.BuildCommit={{ .FullCommit }}"
goos:
- linux
goarch:
- amd64
- arm
- arm64
- 386
goarm:
- 6
- 7
archives:
- format: binary # this removes the tar of the archives, leaving the binaries alone
name_template: luet-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
- '^Merge pull request'
release:
header: |
Luet is a multi-platform Package Manager based off from containers - it uses Docker (and others) to build packages.
It has zero dependencies and it is well suitable for "from scratch" environments.
It can also version entire rootfs and enables delivery of OTA-alike updates, making it a perfect fit for the Edge computing era and IoT embedded device

CONTRIBUTING.md (new file)

@@ -0,0 +1,53 @@
We love your input! We want to make contributing to this project as easy and transparent as possible, whether it's:
- Reporting a bug
- Discussing the current state of the code
- Submitting a fix
- Proposing new features
- Becoming a maintainer
## We Develop with Github
We use github to host code, to track issues and feature requests, as well as accept pull requests.
## Stay in touch
Join us in [slack](https://luet.slack.com/join/shared_invite/enQtOTQxMjcyNDQ0MDUxLWQ5ODVlNTI1MTYzNDRkYzkyYmM1YWE5YjM0NTliNDEzNmQwMTkxNDRhNDIzM2Y5NDBlOTZjZTYxYWQyNDE4YzY#/) and hang out with the community! It will be much easier to get started and do your first steps in contributing to the project.
## All Code Changes Happen Through Pull Requests
Pull requests are the best way to propose changes to the codebase. We actively welcome your pull requests:
1. Fork the repo you want to contribute to and create your branch from `develop`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the [documentation](https://github.com/Luet-lab/docs).
4. Ensure the test suite passes.
5. Make sure your code lints.
6. Issue that pull request!
## Any contributions you make will be under the Software License of the repository
In short, when you submit code changes, your submissions are understood to be under the same License that covers the project. Feel free to contact the maintainers if that's a concern.
## Report bugs using Github's [issues](https://github.com/mudler/luet/issues)
We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/mudler/luet/issues/new); it's that easy!
## Write bug reports with detail, background, and sample code
Try to be as more descriptive as possible. When opening a new issue you will be prompted to choose between a bug or a feature request, with a small template to fill details with. Be specific!
**Great Bug Reports** tend to have:
- A quick summary and/or background
- Steps to reproduce
- Be specific!
- Give sample code if you can.
- What you expected would happen
- What actually happens
- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work)
People *love* thorough bug reports.
## License
By contributing, you agree that your contributions will be licensed under the project Licenses.
## References
This document was adapted from the open-source contribution guidelines from https://gist.github.com/briandk/3d2e8b3ec8daf5a27a62


@@ -6,10 +6,6 @@ override LDFLAGS += -X "github.com/mudler/luet/cmd.BuildCommit=$(shell git rev-p
NAME ?= luet
PACKAGE_NAME ?= $(NAME)
PACKAGE_CONFLICT ?= $(PACKAGE_NAME)-beta
REVISION := $(shell git rev-parse --short HEAD || echo dev)
VERSION := $(shell git describe --tags || echo $(REVISION))
VERSION := $(shell echo $(VERSION) | sed -e 's/^v//g')
BUILD_PLATFORMS ?= -osarch="linux/amd64" -osarch="linux/386" -osarch="linux/arm"
ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
.PHONY: all
@@ -89,10 +85,10 @@ test-docker:
bash -c "make test"
multiarch-build:
CGO_ENABLED=0 gox $(BUILD_PLATFORMS) -ldflags '$(LDFLAGS)' -output="release/$(NAME)-$(VERSION)-{{.OS}}-{{.Arch}}"
goreleaser build --snapshot --rm-dist
multiarch-build-small:
@$(MAKE) LDFLAGS+="-s -w" multiarch-build
for file in $(ROOT_DIR)/release/* ; do \
@$(MAKE) multiarch-build
for file in $(ROOT_DIR)/release/**/* ; do \
upx --brute -1 $${file} ; \
done
done


@@ -1,23 +1,28 @@
<p align="center">
<img width=150 height=150 src="https://user-images.githubusercontent.com/2420543/119691600-0293d700-be4b-11eb-827f-49ff1174a07a.png">
</p>
# luet - Container-based Package manager
[![Docker Repository on Quay](https://quay.io/repository/luet/base/status "Docker Repository on Quay")](https://quay.io/repository/luet/base)
[![Go Report Card](https://goreportcard.com/badge/github.com/mudler/luet)](https://goreportcard.com/report/github.com/mudler/luet)
[![Build Status](https://travis-ci.org/mudler/luet.svg?branch=master)](https://travis-ci.org/mudler/luet)
[![Build and release on push](https://github.com/mudler/luet/actions/workflows/release.yml/badge.svg)](https://github.com/mudler/luet/actions/workflows/release.yml)
[![GoDoc](https://godoc.org/github.com/mudler/luet?status.svg)](https://godoc.org/github.com/mudler/luet)
[![codecov](https://codecov.io/gh/mudler/luet/branch/master/graph/badge.svg)](https://codecov.io/gh/mudler/luet)
[![asciicast](https://asciinema.org/a/388348.svg)](https://asciinema.org/a/388348)
Luet is a multi-platform Package Manager based off from containers - it uses Docker (and others) to build packages. It has zero dependencies and it is well suitable for "from scratch" environments. It can also version entire rootfs and enables delivery of OTA-alike updates, making it a perfect fit for the Edge computing era and IoT embedded devices.
It offers a simple [specfile format](https://luet-lab.github.io/docs/docs/concepts/packages/specfile/) in YAML notation to define both [packages](https://luet-lab.github.io/docs/docs/concepts/packages/) and [rootfs](https://luet-lab.github.io/docs/docs/concepts/packages/#package-layers). As it is based on containers, it can be also used to build stages for Linux From Scratch installations and it can build and track updates for those systems.
It is written entirely in Golang and where used as package manager, it can run in from scratch environment, with zero dependencies.
[![asciicast](https://asciinema.org/a/388348.svg)](https://asciinema.org/a/388348)
## In a glance
- Luet can reuse Gentoo's portage tree hierarchy, and it is heavily inspired from it.
- It builds, installs, uninstalls and perform upgrades on machines
- It builds from containers, but installs, uninstalls and perform upgrades on machines
- Installer doesn't depend on anything ( 0 dep installer !), statically built
- You can install it aside also with your current distro package manager, and start building and distributing your packages
- [Support for packages as "layers"](https://luet-lab.github.io/docs/docs/concepts/packages/specfile/#building-strategies)
@@ -25,6 +30,7 @@ It is written entirely in Golang and where used as package manager, it can run i
- Support for [collections](https://luet-lab.github.io/docs/docs/concepts/packages/collections/) and [templated package definitions](https://luet-lab.github.io/docs/docs/concepts/packages/templates/)
- [Can be extended with Plugins and Extensions](https://luet-lab.github.io/docs/docs/concepts/plugins-and-extensions/)
- [Can build packages in Kubernetes (experimental)](https://github.com/mudler/luet-k8s)
- Uses containerd/go-containerregistry to manipulate images - works also daemonless with the img backend
## Install


@@ -21,8 +21,8 @@ import (
b64 "encoding/base64"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/box"
. "github.com/mudler/luet/pkg/logger"
"github.com/spf13/cobra"
)
@@ -54,18 +54,18 @@ func NewBoxExecCommand() *cobra.Command {
args = ss
}
Info("Executing", args, "in", rootfs)
util.DefaultContext.Info("Executing", args, "in", rootfs)
b := box.NewBox(entrypoint, args, mounts, envs, rootfs, stdin, stdout, stderr)
err := b.Run()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
},
}
path, err := os.Getwd()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
ans.Flags().String("rootfs", path, "Rootfs path")
ans.Flags().Bool("stdin", false, "Attach to stdin")


@@ -21,15 +21,15 @@ import (
"github.com/ghodss/yaml"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types/artifact"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/types/artifact"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
fileHelpers "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
tree "github.com/mudler/luet/pkg/tree"
@@ -74,7 +74,7 @@ Build packages specifying multiple definition trees:
viper.BindPFlag("compression", cmd.Flags().Lookup("compression"))
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
viper.BindPFlag("values", cmd.Flags().Lookup("values"))
util.BindValuesFlags(cmd)
viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))
viper.BindPFlag("image-repository", cmd.Flags().Lookup("image-repository"))
@@ -83,26 +83,24 @@ Build packages specifying multiple definition trees:
viper.BindPFlag("wait", cmd.Flags().Lookup("wait"))
viper.BindPFlag("keep-images", cmd.Flags().Lookup("keep-images"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
LuetCfg.Viper.BindPFlag("general.show_build_output", cmd.Flags().Lookup("live-output"))
LuetCfg.Viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))
util.BindSolverFlags(cmd)
viper.BindPFlag("general.show_build_output", cmd.Flags().Lookup("live-output"))
viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))
},
Run: func(cmd *cobra.Command, args []string) {
treePaths := viper.GetStringSlice("tree")
dst := viper.GetString("destination")
concurrency := LuetCfg.GetGeneral().Concurrency
concurrency := util.DefaultContext.Config.GetGeneral().Concurrency
backendType := viper.GetString("backend")
privileged := viper.GetBool("privileged")
revdeps := viper.GetBool("revdeps")
all := viper.GetBool("all")
compressionType := viper.GetString("compression")
imageRepository := viper.GetString("image-repository")
values := viper.GetStringSlice("values")
values := util.ValuesFlags()
wait := viper.GetBool("wait")
push := viper.GetBool("push")
pull := viper.GetBool("pull")
@@ -112,14 +110,16 @@ Build packages specifying multiple definition trees:
onlyTarget, _ := cmd.Flags().GetBool("only-target-package")
full, _ := cmd.Flags().GetBool("full")
rebuild, _ := cmd.Flags().GetBool("rebuild")
pushFinalImages, _ := cmd.Flags().GetBool("push-final-images")
pushFinalImagesRepository, _ := cmd.Flags().GetString("push-final-images-repository")
pushFinalImagesForce, _ := cmd.Flags().GetBool("push-final-images-force")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
var results Results
backendArgs := viper.GetStringSlice("backend-args")
out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
LuetCfg.GetLogging().SetLogLevel("error")
util.DefaultContext.Config.GetLogging().SetLogLevel("error")
}
pretend, _ := cmd.Flags().GetBool("pretend")
fromRepo, _ := cmd.Flags().GetBool("from-repositories")
@@ -127,7 +127,7 @@ Build packages specifying multiple definition trees:
compilerSpecs := compilerspec.NewLuetCompilationspecs()
var db pkg.PackageDatabase
compilerBackend, err := compiler.NewBackend(backendType)
compilerBackend, err := compiler.NewBackend(util.DefaultContext, backendType)
helpers.CheckErr(err)
db = pkg.NewInMemoryDatabase(false)
@@ -136,67 +136,72 @@ Build packages specifying multiple definition trees:
generalRecipe := tree.NewCompilerRecipe(db)
if fromRepo {
if err := installer.LoadBuildTree(generalRecipe, db, LuetCfg); err != nil {
Warning("errors while loading trees from repositories", err.Error())
if err := installer.LoadBuildTree(generalRecipe, db, util.DefaultContext); err != nil {
util.DefaultContext.Warning("errors while loading trees from repositories", err.Error())
}
}
for _, src := range treePaths {
Info("Loading tree", src)
util.DefaultContext.Info("Loading tree", src)
helpers.CheckErr(generalRecipe.Load(src))
}
Info("Building in", dst)
util.DefaultContext.Info("Building in", dst)
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
if !fileHelpers.Exists(dst) {
os.MkdirAll(dst, 0600)
util.DefaultContext.Debug("Creating destination folder", dst)
}
opts := util.SetSolverConfig(util.DefaultContext)
pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")
LuetCfg.GetGeneral().ShowBuildOutput = LuetCfg.Viper.GetBool("general.show_build_output")
util.DefaultContext.Config.GetGeneral().ShowBuildOutput = viper.GetBool("general.show_build_output")
opts := &LuetSolverOptions{
Type: stype,
LearnRate: float32(rate),
Discount: float32(discount),
MaxAttempts: attempts,
}
util.DefaultContext.Debug("Solver", opts.CompactString())
Debug("Solver", opts.CompactString())
opts.Options = solver.Options{Type: solver.SingleCoreSimple, Concurrency: concurrency}
if concurrent {
opts.Options = solver.Options{Type: solver.ParallelSimple, Concurrency: concurrency}
} else {
opts.Options = solver.Options{Type: solver.SingleCoreSimple, Concurrency: concurrency}
}
luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(),
options.NoDeps(nodeps),
compileropts := []options.Option{options.NoDeps(nodeps),
options.WithBackendType(backendType),
options.PushImages(push),
options.WithBuildValues(values),
options.WithPullRepositories(pullRepo),
options.WithPushRepository(imageRepository),
options.Rebuild(rebuild),
options.WithTemplateFolder(util.TemplateFolders(util.DefaultContext, fromRepo, treePaths)),
options.WithSolverOptions(*opts),
options.Wait(wait),
options.OnlyTarget(onlyTarget),
options.PullFirst(pull),
options.KeepImg(keepImages),
options.OnlyDeps(onlydeps),
options.WithContext(util.DefaultContext),
options.BackendArgs(backendArgs),
options.Concurrency(concurrency),
options.WithCompressionType(compression.Implementation(compressionType)),
)
options.WithCompressionType(compression.Implementation(compressionType))}
if pushFinalImages {
compileropts = append(compileropts, options.EnablePushFinalImages)
if pushFinalImagesForce {
compileropts = append(compileropts, options.ForcePushFinalImages)
}
if pushFinalImagesRepository != "" {
compileropts = append(compileropts, options.WithFinalRepository(pushFinalImagesRepository))
} else if imageRepository != "" {
compileropts = append(compileropts, options.WithFinalRepository(imageRepository))
}
}
luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(), compileropts...)
if full {
specs, err := luetCompiler.FromDatabase(generalRecipe.GetDatabase(), true, dst)
if err != nil {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
}
for _, spec := range specs {
Info(":package: Selecting ", spec.GetPackage().GetName(), spec.GetPackage().GetVersion())
util.DefaultContext.Info(":package: Selecting ", spec.GetPackage().GetName(), spec.GetPackage().GetVersion())
compilerSpecs.Add(spec)
}
@@ -204,12 +209,12 @@ Build packages specifying multiple definition trees:
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
spec, err := luetCompiler.FromPackage(pack)
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
spec.SetOutputPath(dst)
@@ -221,9 +226,9 @@ Build packages specifying multiple definition trees:
for _, p := range w {
spec, err := luetCompiler.FromPackage(p)
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
Info(":package: Selecting ", p.GetName(), p.GetVersion())
util.DefaultContext.Info(":package: Selecting ", p.GetName(), p.GetVersion())
spec.SetOutputPath(dst)
compilerSpecs.Add(spec)
}
@@ -247,11 +252,12 @@ Build packages specifying multiple definition trees:
}
for _, sp := range toCalculate {
packs, err := luetCompiler.ComputeDepTree(sp)
ht := compiler.NewHashTree(generalRecipe.GetDatabase())
hashTree, err := ht.Query(luetCompiler, sp)
if err != nil {
errs = append(errs, err)
}
for _, p := range packs {
for _, p := range hashTree.Dependencies {
results.Packages = append(results.Packages,
PackageResult{
Name: p.Package.GetName(),
@@ -281,7 +287,7 @@ Build packages specifying multiple definition trees:
fmt.Println(string(j2))
case "terminal":
for _, p := range results.Packages {
Info(p.String())
util.DefaultContext.Info(p.String())
}
}
} else {
@@ -290,12 +296,12 @@ Build packages specifying multiple definition trees:
}
if len(errs) != 0 {
for _, e := range errs {
Error("Error: " + e.Error())
util.DefaultContext.Error("Error: " + e.Error())
}
Fatal("Bailing out")
util.DefaultContext.Fatal("Bailing out")
}
for _, a := range artifact {
Info("Artifact generated:", a.Path)
util.DefaultContext.Info("Artifact generated:", a.Path)
}
},
}
@@ -303,7 +309,7 @@ Build packages specifying multiple definition trees:
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
buildCmd.Flags().StringSliceP("tree", "t", []string{path}, "Path of the tree to use.")
@@ -311,6 +317,11 @@ func init() {
buildCmd.Flags().Bool("privileged", true, "Privileged (Keep permissions)")
buildCmd.Flags().Bool("revdeps", false, "Build with revdeps")
buildCmd.Flags().Bool("all", false, "Build all specfiles in the tree")
buildCmd.Flags().Bool("push-final-images", false, "Push final images while building")
buildCmd.Flags().Bool("push-final-images-force", false, "Override existing images")
buildCmd.Flags().String("push-final-images-repository", "", "Repository where to push final images to")
buildCmd.Flags().Bool("full", false, "Build all packages (optimized)")
buildCmd.Flags().StringSlice("values", []string{}, "Build values file to interpolate with each package")
buildCmd.Flags().StringSliceP("backend-args", "a", []string{}, "Backend args")
@@ -330,7 +341,7 @@ func init() {
buildCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
buildCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
buildCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
buildCmd.Flags().Bool("live-output", LuetCfg.GetGeneral().ShowBuildOutput, "Enable live output of the build phase.")
buildCmd.Flags().Bool("live-output", util.DefaultContext.Config.GetGeneral().ShowBuildOutput, "Enable live output of the build phase.")
buildCmd.Flags().Bool("from-repositories", false, "Consume the user-defined repositories to pull specfiles from")
buildCmd.Flags().Bool("rebuild", false, "To combine with --pull. Allows to rebuild the target package even if an image is available, against a local values file")
buildCmd.Flags().Bool("pretend", false, "Just print what packages will be compiled")

View File

@@ -16,14 +16,13 @@
package cmd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
. "github.com/mudler/luet/pkg/config"
config "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/cmd/util"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
"github.com/spf13/cobra"
)
@@ -33,46 +32,33 @@ var cleanupCmd = &cobra.Command{
Short: "Clean packages cache.",
Long: `remove downloaded packages tarballs and clean cache directory`,
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
util.BindSystemFlags(cmd)
},
Run: func(cmd *cobra.Command, args []string) {
var cleaned int = 0
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := config.LuetCfg.Viper.GetString("system.rootfs")
engine := config.LuetCfg.Viper.GetString("system.database_engine")
LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
util.SetSystemConfig(util.DefaultContext)
// Check if cache dir exists
if helpers.Exists(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath()) {
if fileHelper.Exists(util.DefaultContext.Config.GetSystem().GetSystemPkgsCacheDirPath()) {
files, err := ioutil.ReadDir(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath())
files, err := ioutil.ReadDir(util.DefaultContext.Config.GetSystem().GetSystemPkgsCacheDirPath())
if err != nil {
Fatal("Error on read cachedir ", err.Error())
util.DefaultContext.Fatal("Error on read cachedir ", err.Error())
}
for _, file := range files {
if file.IsDir() {
continue
}
if LuetCfg.GetGeneral().Debug {
Info("Removing ", file.Name())
}
util.DefaultContext.Debug("Removing ", file.Name())
err := os.RemoveAll(
filepath.Join(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), file.Name()))
filepath.Join(util.DefaultContext.Config.GetSystem().GetSystemPkgsCacheDirPath(), file.Name()))
if err != nil {
Fatal("Error on removing", file.Name())
util.DefaultContext.Fatal("Error on removing", file.Name())
}
cleaned++
}
}
Info("Cleaned: ", cleaned, "packages.")
util.DefaultContext.Info(fmt.Sprintf("Cleaned: %d files from %s", cleaned, util.DefaultContext.Config.GetSystem().GetSystemPkgsCacheDirPath()))
},
}

View File

@@ -18,8 +18,7 @@ package cmd
import (
"fmt"
config "github.com/mudler/luet/pkg/config"
installer "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/cmd/util"
"github.com/spf13/cobra"
)
@@ -30,46 +29,12 @@ var configCmd = &cobra.Command{
Long: `Show luet configuration`,
Aliases: []string{"c"},
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(config.LuetCfg.GetLogging())
fmt.Println(config.LuetCfg.GetGeneral())
fmt.Println(config.LuetCfg.GetSystem())
if len(config.LuetCfg.CacheRepositories) > 0 {
fmt.Println("repetitors:")
for _, r := range config.LuetCfg.CacheRepositories {
fmt.Println(" - ", r.String())
}
}
if len(config.LuetCfg.SystemRepositories) > 0 {
fmt.Println("repositories:")
for _, r := range config.LuetCfg.SystemRepositories {
fmt.Println(" - ", r.String())
}
data, err := util.DefaultContext.Config.YAML()
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
if len(config.LuetCfg.RepositoriesConfDir) > 0 {
fmt.Println("repos_confdir:")
for _, dir := range config.LuetCfg.RepositoriesConfDir {
fmt.Println(" - ", dir)
}
}
if len(config.LuetCfg.ConfigProtectConfDir) > 0 {
// Load config protect configs
installer.LoadConfigProtectConfs(config.LuetCfg)
fmt.Println("config_protect_confdir:")
for _, dir := range config.LuetCfg.ConfigProtectConfDir {
fmt.Println(" - ", dir)
}
if len(config.LuetCfg.GetConfigProtectConfFiles()) > 0 {
fmt.Println("protect_conf_files:")
for _, file := range config.LuetCfg.GetConfigProtectConfFiles() {
fmt.Println(" - ", file.String())
}
}
}
fmt.Println(string(data))
},
}

View File

@@ -19,9 +19,9 @@ import (
"path/filepath"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/types/compression"
. "github.com/mudler/luet/pkg/config"
installer "github.com/mudler/luet/pkg/installer"
// . "github.com/mudler/luet/pkg/logger"
@@ -69,7 +69,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
viper.BindPFlag("meta-filename", cmd.Flags().Lookup("meta-filename"))
viper.BindPFlag("reset-revision", cmd.Flags().Lookup("reset-revision"))
viper.BindPFlag("repo", cmd.Flags().Lookup("repo"))
viper.BindPFlag("from-metadata", cmd.Flags().Lookup("from-metadata"))
viper.BindPFlag("force-push", cmd.Flags().Lookup("force-push"))
viper.BindPFlag("push-images", cmd.Flags().Lookup("push-images"))
@@ -80,7 +80,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
treePaths := viper.GetStringSlice("tree")
dst := viper.GetString("output")
packages := viper.GetString("packages")
name := viper.GetString("name")
descr := viper.GetString("descr")
urls := viper.GetStringSlice("urls")
@@ -96,14 +96,26 @@ Create a repository from the metadata description defined in the luet.yaml confi
treeFile := installer.NewDefaultTreeRepositoryFile()
metaFile := installer.NewDefaultMetaRepositoryFile()
compilerBackend, err := compiler.NewBackend(backendType)
compilerBackend, err := compiler.NewBackend(util.DefaultContext, backendType)
helpers.CheckErr(err)
force := viper.GetBool("force-push")
imagePush := viper.GetBool("push-images")
opts := []installer.RepositoryOption{
installer.WithSource(viper.GetString("packages")),
installer.WithPushImages(imagePush),
installer.WithForce(force),
installer.FromRepository(fromRepo),
installer.WithImagePrefix(dst),
installer.WithDatabase(pkg.NewInMemoryDatabase(false)),
installer.WithCompilerBackend(compilerBackend),
installer.FromMetadata(viper.GetBool("from-metadata")),
installer.WithContext(util.DefaultContext),
}
if source_repo != "" {
// Search for system repository
lrepo, err := LuetCfg.GetSystemRepository(source_repo)
lrepo, err := util.DefaultContext.Config.GetSystemRepository(source_repo)
helpers.CheckErr(err)
if len(treePaths) <= 0 {
@@ -114,27 +126,28 @@ Create a repository from the metadata description defined in the luet.yaml confi
t = lrepo.Type
}
repo, err = installer.GenerateRepository(lrepo.Name,
lrepo.Description, t,
lrepo.Urls,
lrepo.Priority,
packages,
treePaths,
pkg.NewInMemoryDatabase(false),
compilerBackend,
dst,
imagePush,
force,
fromRepo,
LuetCfg)
helpers.CheckErr(err)
opts = append(opts,
installer.WithName(lrepo.Name),
installer.WithDescription(lrepo.Description),
installer.WithType(t),
installer.WithUrls(lrepo.Urls...),
installer.WithPriority(lrepo.Priority),
installer.WithTree(treePaths...),
)
} else {
repo, err = installer.GenerateRepository(name, descr, t, urls, 1, packages,
treePaths, pkg.NewInMemoryDatabase(false), compilerBackend, dst, imagePush, force, fromRepo, LuetCfg)
helpers.CheckErr(err)
opts = append(opts,
installer.WithName(name),
installer.WithDescription(descr),
installer.WithType(t),
installer.WithUrls(urls...),
installer.WithTree(treePaths...),
)
}
repo, err = installer.GenerateRepository(opts...)
helpers.CheckErr(err)
if treetype != "" {
treeFile.SetCompressionType(compression.Implementation(treetype))
}
@@ -154,7 +167,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
repo.SetRepositoryFile(installer.REPOFILE_TREE_KEY, treeFile)
repo.SetRepositoryFile(installer.REPOFILE_META_KEY, metaFile)
err = repo.Write(dst, reset, true)
err = repo.Write(util.DefaultContext, dst, reset, true)
helpers.CheckErr(err)
},
@@ -177,6 +190,7 @@ func init() {
createrepoCmd.Flags().Bool("force-push", false, "Force overwrite of docker images if already present online")
createrepoCmd.Flags().Bool("push-images", false, "Enable/Disable docker image push for docker repositories")
createrepoCmd.Flags().Bool("from-metadata", false, "Consider metadata files from the packages folder while indexing the new tree")
createrepoCmd.Flags().String("tree-compression", "gzip", "Compression alg: none, gzip, zstd")
createrepoCmd.Flags().String("tree-filename", installer.TREE_TARBALL, "Repository tree filename")

View File

@@ -18,13 +18,11 @@ package cmd_database
import (
"io/ioutil"
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
"github.com/mudler/luet/cmd/util"
artifact "github.com/mudler/luet/pkg/api/core/types/artifact"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
. "github.com/mudler/luet/pkg/config"
"github.com/spf13/cobra"
)
@@ -45,43 +43,39 @@ The yaml must contain the package definition, and the file list at least.
For reference, inspect a "metadata.yaml" file generated while running "luet build"`,
Args: cobra.OnlyValidArgs,
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
util.BindSystemFlags(cmd)
},
Run: func(cmd *cobra.Command, args []string) {
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
systemDB := LuetCfg.GetSystemDB()
util.SetSystemConfig(util.DefaultContext)
systemDB := util.DefaultContext.Config.GetSystemDB()
for _, a := range args {
dat, err := ioutil.ReadFile(a)
if err != nil {
Fatal("Failed reading ", a, ": ", err.Error())
util.DefaultContext.Fatal("Failed reading ", a, ": ", err.Error())
}
art, err := artifact.NewPackageArtifactFromYaml(dat)
if err != nil {
Fatal("Failed reading yaml ", a, ": ", err.Error())
util.DefaultContext.Fatal("Failed reading yaml ", a, ": ", err.Error())
}
files := art.Files
if _, err := systemDB.CreatePackage(art.CompileSpec.GetPackage()); err != nil {
Fatal("Failed to create ", a, ": ", err.Error())
}
if err := systemDB.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: art.CompileSpec.GetPackage().GetFingerPrint(), Files: files}); err != nil {
Fatal("Failed setting package files for ", a, ": ", err.Error())
// Check if the package is already present
if p, err := systemDB.FindPackage(art.CompileSpec.GetPackage()); err == nil && p.GetName() != "" {
util.DefaultContext.Fatal("Package", art.CompileSpec.GetPackage().HumanReadableString(),
" already present.")
}
Info(art.CompileSpec.GetPackage().HumanReadableString(), " created")
if _, err := systemDB.CreatePackage(art.CompileSpec.GetPackage()); err != nil {
util.DefaultContext.Fatal("Failed to create ", a, ": ", err.Error())
}
if err := systemDB.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: art.CompileSpec.GetPackage().GetFingerPrint(), Files: files}); err != nil {
util.DefaultContext.Fatal("Failed setting package files for ", a, ": ", err.Error())
}
util.DefaultContext.Info(art.CompileSpec.GetPackage().HumanReadableString(), " created")
}
},

View File

@@ -19,10 +19,9 @@ import (
"fmt"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"gopkg.in/yaml.v2"
. "github.com/mudler/luet/pkg/config"
"github.com/spf13/cobra"
)
@@ -38,22 +37,13 @@ To return also files:
$ luet database get --files system/foo`,
Args: cobra.OnlyValidArgs,
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
util.BindSystemFlags(cmd)
},
Run: func(cmd *cobra.Command, args []string) {
showFiles, _ := cmd.Flags().GetBool("files")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
util.SetSystemConfig(util.DefaultContext)
LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
systemDB := LuetCfg.GetSystemDB()
systemDB := util.DefaultContext.Config.GetSystemDB()
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)

View File

@@ -16,10 +16,8 @@
package cmd_database
import (
. "github.com/mudler/luet/pkg/logger"
helpers "github.com/mudler/luet/cmd/helpers"
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/cmd/util"
"github.com/spf13/cobra"
)
@@ -36,34 +34,25 @@ This commands takes multiple packages as arguments and prunes their entries from
`,
Args: cobra.OnlyValidArgs,
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
util.BindSystemFlags(cmd)
},
Run: func(cmd *cobra.Command, args []string) {
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
util.SetSystemConfig(util.DefaultContext)
LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
systemDB := LuetCfg.GetSystemDB()
systemDB := util.DefaultContext.Config.GetSystemDB()
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
if err := systemDB.RemovePackage(pack); err != nil {
Fatal("Failed removing ", a, ": ", err.Error())
util.DefaultContext.Fatal("Failed removing ", a, ": ", err.Error())
}
if err := systemDB.RemovePackageFiles(pack); err != nil {
Fatal("Failed removing files for ", a, ": ", err.Error())
util.DefaultContext.Fatal("Failed removing files for ", a, ": ", err.Error())
}
}

View File

@@ -20,8 +20,8 @@ import (
b64 "encoding/base64"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/box"
. "github.com/mudler/luet/pkg/logger"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@@ -55,12 +55,12 @@ var execCmd = &cobra.Command{
args = ss
}
Info("Executing", args, "in", rootfs)
util.DefaultContext.Info("Executing", args, "in", rootfs)
b := box.NewBox(entrypoint, args, mounts, envs, rootfs, stdin, stdout, stderr)
err := b.Exec()
if err != nil {
Fatal(errors.Wrap(err, fmt.Sprintf("entrypoint: %s rootfs: %s", entrypoint, rootfs)))
util.DefaultContext.Fatal(errors.Wrap(err, fmt.Sprintf("entrypoint: %s rootfs: %s", entrypoint, rootfs)))
}
},
}
@@ -68,7 +68,7 @@ var execCmd = &cobra.Command{
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
execCmd.Hidden = true
execCmd.Flags().String("rootfs", path, "Rootfs path")

View File

@@ -22,11 +22,9 @@ import (
"regexp"
"strings"
. "github.com/mudler/luet/pkg/logger"
_gentoo "github.com/Sabayon/pkgs-checker/pkg/gentoo"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
version "github.com/mudler/luet/pkg/versioner"
)
func CreateRegexArray(rgx []string) ([]*regexp.Regexp, error) {
@@ -56,67 +54,77 @@ func packageData(p string) (string, string) {
}
return cat, name
}
func ParsePackageStr(p string) (*pkg.DefaultPackage, error) {
if !(strings.HasPrefix(p, "=") || strings.HasPrefix(p, ">") ||
strings.HasPrefix(p, "<")) {
ver := ">=0"
cat := ""
name := ""
func packageHasGentooSelector(v string) bool {
return (strings.HasPrefix(v, "=") || strings.HasPrefix(v, ">") ||
strings.HasPrefix(v, "<"))
}
if strings.Contains(p, "@") {
packageinfo := strings.Split(p, "@")
ver = packageinfo[1]
cat, name = packageData(packageinfo[0])
} else {
cat, name = packageData(p)
}
func gentooVersion(gp *_gentoo.GentooPackage) string {
return &pkg.DefaultPackage{
Name: name,
Category: cat,
Version: ver,
Uri: make([]string, 0),
}, nil
condition := gp.Condition.String()
if condition == "=" {
condition = ""
}
gp, err := _gentoo.ParsePackageStr(p)
if err != nil {
return nil, err
}
if gp.Version == "" {
gp.Version = "0"
gp.Condition = _gentoo.PkgCondGreaterEqual
}
pkgVersion := ""
pkgVersion := fmt.Sprintf("%s%s%s",
condition,
gp.Version,
gp.VersionSuffix,
)
if gp.VersionBuild != "" {
pkgVersion = fmt.Sprintf("%s%s%s+%s",
version.PkgSelectorConditionFromInt(gp.Condition.Int()).String(),
condition,
gp.Version,
gp.VersionSuffix,
gp.VersionBuild,
)
}
return pkgVersion
}
func ParsePackageStr(p string) (*pkg.DefaultPackage, error) {
if packageHasGentooSelector(p) {
gp, err := _gentoo.ParsePackageStr(p)
if err != nil {
return nil, err
}
if gp.Version == "" {
gp.Version = "0"
gp.Condition = _gentoo.PkgCondGreaterEqual
}
return &pkg.DefaultPackage{
Name: gp.Name,
Category: gp.Category,
Version: gentooVersion(gp),
Uri: make([]string, 0),
}, nil
}
ver := ">=0"
cat := ""
name := ""
if strings.Contains(p, "@") {
packageinfo := strings.Split(p, "@")
ver = packageinfo[1]
cat, name = packageData(packageinfo[0])
} else {
pkgVersion = fmt.Sprintf("%s%s%s",
version.PkgSelectorConditionFromInt(gp.Condition.Int()).String(),
gp.Version,
gp.VersionSuffix,
)
cat, name = packageData(p)
}
pack := &pkg.DefaultPackage{
Name: gp.Name,
Category: gp.Category,
Version: pkgVersion,
return &pkg.DefaultPackage{
Name: name,
Category: cat,
Version: ver,
Uri: make([]string, 0),
}
return pack, nil
}, nil
}
func CheckErr(err error) {
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
}
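
For readers following the ParsePackageStr refactor above: when no gentoo-style selector (=, >, <) is present, the string is split on "@" for the version selector and on "/" for the category, with ">=0" as the default. A self-contained sketch of that fallback path, with illustrative names only:

```go
package main

import (
	"fmt"
	"strings"
)

// splitPackage mirrors the fallback path shown above: "@" separates the
// version selector, "/" separates category from name, ">=0" is the default.
func splitPackage(p string) (cat, name, ver string) {
	ver = ">=0"
	if i := strings.Index(p, "@"); i >= 0 {
		ver = p[i+1:]
		p = p[:i]
	}
	if j := strings.Index(p, "/"); j >= 0 {
		cat, name = p[:j], p[j+1:]
	} else {
		name = p
	}
	return
}

func main() {
	cat, name, ver := splitPackage("system/busybox@>=1.33")
	fmt.Println(cat, name, ver) // system busybox >=1.33
}
```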

View File

@@ -18,15 +18,11 @@ package cmd_helpers_test
import (
"testing"
. "github.com/mudler/luet/cmd"
config "github.com/mudler/luet/pkg/config"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestSolver(t *testing.T) {
RegisterFailHandler(Fail)
LoadConfig(config.LuetCfg)
RunSpecs(t, "CLI helpers test Suite")
}

View File

@@ -15,15 +15,16 @@
package cmd
import (
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/solver"
helpers "github.com/mudler/luet/cmd/helpers"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var installCmd = &cobra.Command{
@@ -47,17 +48,12 @@ To force install a package:
`,
Aliases: []string{"i"},
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
LuetCfg.Viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Run: func(cmd *cobra.Command, args []string) {
var toInstall pkg.Packages
@@ -65,63 +61,53 @@ To force install a package:
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toInstall = append(toInstall, pack)
}
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
force := LuetCfg.Viper.GetBool("force")
nodeps := LuetCfg.Viper.GetBool("nodeps")
onlydeps := LuetCfg.Viper.GetBool("onlydeps")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
yes := LuetCfg.Viper.GetBool("yes")
force := viper.GetBool("force")
nodeps := viper.GetBool("nodeps")
onlydeps := viper.GetBool("onlydeps")
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
finalizerEnvs, _ := cmd.Flags().GetStringArray("finalizer-env")
relax, _ := cmd.Flags().GetBool("relax")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
util.SetSystemConfig(util.DefaultContext)
util.SetSolverConfig(util.DefaultContext)
LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
if concurrent {
LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
} else {
LuetCfg.GetSolverOptions().Implementation = solver.SingleCoreSimple
}
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
repos := installer.SystemRepositories(LuetCfg)
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
// Load config protect configs
installer.LoadConfigProtectConfs(LuetCfg)
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
// Load finalizer runtime environments
err := util.SetCliFinalizerEnvs(util.DefaultContext, finalizerEnvs)
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
NoDeps: nodeps,
Force: force,
OnlyDeps: onlydeps,
PreserveSystemEssentialData: true,
DownloadOnly: downloadOnly,
Ask: !yes,
Relaxed: relax,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
})
inst.Repositories(repos)
system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}
err := inst.Install(toInstall, system)
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
err = inst.Install(toInstall, system)
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
@@ -131,16 +117,20 @@ func init() {
installCmd.Flags().String("system-target", "", "System rootpath")
installCmd.Flags().String("system-engine", "", "System DB engine")
installCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
installCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
installCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
installCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
installCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
installCmd.Flags().Bool("nodeps", false, "Don't consider package dependencies (harmful!)")
installCmd.Flags().Bool("relax", false, "Relax installation constraints")
installCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
installCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
installCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
installCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
installCmd.Flags().Bool("download-only", false, "Download only")
installCmd.Flags().StringArray("finalizer-env", []string{},
"Set finalizer environment in the format key=value.")
RootCmd.AddCommand(installCmd)
}
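
The recurring change in this and the surrounding files is the move from package-level globals (LuetCfg, the dot-imported logger) to an explicit util.DefaultContext that is passed into commands and the installer. A generic sketch of that pattern, with illustrative names that are not the actual luet types:

```go
package main

import "fmt"

// Config and Context stand in for the real luet types; illustrative only.
type Config struct{ Concurrency int }

type Context struct {
	Config *Config
}

// Info logs through the context instead of a package-level logger.
func (c *Context) Info(args ...interface{}) {
	fmt.Println(append([]interface{}{"INFO:"}, args...)...)
}

// Commands receive the context explicitly instead of reaching for globals.
func runInstall(ctx *Context, pkgs []string) {
	ctx.Info("installing", pkgs, "with concurrency", ctx.Config.Concurrency)
}

func main() {
	ctx := &Context{Config: &Config{Concurrency: 4}}
	runInstall(ctx, []string{"system/busybox"})
}
```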

View File

@@ -20,11 +20,10 @@ import (
"time"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/pkg/compiler/types/artifact"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types/artifact"
"github.com/mudler/luet/pkg/compiler/types/compression"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@@ -53,17 +52,17 @@ Afterwards, you can use the content generated and associate it with a tree and a
dst := viper.GetString("destination")
compressionType := viper.GetString("compression")
concurrency := LuetCfg.GetGeneral().Concurrency
concurrency := util.DefaultContext.Config.GetGeneral().Concurrency
if len(args) != 1 {
Fatal("You must specify a package name")
util.DefaultContext.Fatal("You must specify a package name")
}
packageName := args[0]
p, err := helpers.ParsePackageStr(packageName)
if err != nil {
Fatal("Invalid package string ", packageName, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", packageName, ": ", err.Error())
}
spec := &compilerspec.LuetCompilationSpec{Package: p}
@@ -71,18 +70,18 @@ Afterwards, you can use the content generated and associate it with a tree and a
a.CompressionType = compression.Implementation(compressionType)
err = a.Compress(sourcePath, concurrency)
if err != nil {
Fatal("failed compressing ", packageName, ": ", err.Error())
util.DefaultContext.Fatal("failed compressing ", packageName, ": ", err.Error())
}
a.CompileSpec = spec
filelist, err := a.FileList()
if err != nil {
Fatal("failed generating file list for ", packageName, ": ", err.Error())
util.DefaultContext.Fatal("failed generating file list for ", packageName, ": ", err.Error())
}
a.Files = filelist
a.CompileSpec.GetPackage().SetBuildTimestamp(time.Now().String())
err = a.WriteYaml(dst)
err = a.WriteYAML(dst)
if err != nil {
Fatal("failed writing metadata yaml file for ", packageName, ": ", err.Error())
util.DefaultContext.Fatal("failed writing metadata yaml file for ", packageName, ": ", err.Error())
}
},
}
@@ -90,7 +89,7 @@ Afterwards, you can use the content generated and associate it with a tree and a
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
packCmd.Flags().String("source", path, "Source folder")
packCmd.Flags().String("destination", path, "Destination folder")

View File

@@ -15,22 +15,19 @@
package cmd
import (
"github.com/mudler/luet/cmd/util"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var reclaimCmd = &cobra.Command{
Use: "reclaim",
Short: "Reclaim packages to Luet database from available repositories",
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
util.BindSystemFlags(cmd)
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
},
Long: `Reclaim tries to find association between packages in the online repositories and the system one.
@@ -39,39 +36,24 @@ var reclaimCmd = &cobra.Command{
It scans the target file system, and if finds a match with a package available in the repositories, it marks as installed in the system database.
`,
Run: func(cmd *cobra.Command, args []string) {
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
util.SetSystemConfig(util.DefaultContext)
LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
force := viper.GetBool("force")
// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
repos := installer.Repositories{}
for _, repo := range LuetCfg.SystemRepositories {
if !repo.Enable {
continue
}
r := installer.NewSystemRepository(repo)
repos = append(repos, r)
}
force := LuetCfg.Viper.GetBool("force")
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
Force: force,
PreserveSystemEssentialData: true,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
})
inst.Repositories(repos)
system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
err := inst.Reclaim(system)
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}

cmd/reinstall.go (new file, 114 lines)
View File

@@ -0,0 +1,114 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/solver"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var reinstallCmd = &cobra.Command{
Use: "reinstall <pkg1> <pkg2> <pkg3>",
Short: "reinstall a set of packages",
Long: `Reinstall a group of packages in the system:
$ luet reinstall -y system/busybox shells/bash system/coreutils ...
`,
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("for", cmd.Flags().Lookup("for"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Run: func(cmd *cobra.Command, args []string) {
var toUninstall pkg.Packages
var toAdd pkg.Packages
force := viper.GetBool("force")
onlydeps := viper.GetBool("onlydeps")
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
util.SetSystemConfig(util.DefaultContext)
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toUninstall = append(toUninstall, pack)
toAdd = append(toAdd, pack)
}
util.SetSolverConfig(util.DefaultContext)
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
// Load config protect configs
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
NoDeps: true,
Force: force,
OnlyDeps: onlydeps,
PreserveSystemEssentialData: true,
Ask: !yes,
DownloadOnly: downloadOnly,
Context: util.DefaultContext,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
})
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
err := inst.Swap(toUninstall, toAdd, system)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
func init() {
reinstallCmd.Flags().String("system-dbpath", "", "System db path")
reinstallCmd.Flags().String("system-target", "", "System rootpath")
reinstallCmd.Flags().String("system-engine", "", "System DB engine")
reinstallCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
reinstallCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
reinstallCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
reinstallCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
reinstallCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
reinstallCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
reinstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
reinstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
reinstallCmd.Flags().Bool("download-only", false, "Download only")
RootCmd.AddCommand(reinstallCmd)
}

View File

@@ -15,15 +15,17 @@
package cmd
import (
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/solver"
helpers "github.com/mudler/luet/cmd/helpers"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var replaceCmd = &cobra.Command{
@@ -35,47 +37,32 @@ var replaceCmd = &cobra.Command{
$ luet replace -y system/busybox ... --for shells/bash --for system/coreutils ...
`,
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
LuetCfg.Viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
LuetCfg.Viper.BindPFlag("for", cmd.Flags().Lookup("for"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("for", cmd.Flags().Lookup("for"))
LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Run: func(cmd *cobra.Command, args []string) {
var toUninstall pkg.Packages
var toAdd pkg.Packages
f := LuetCfg.Viper.GetStringSlice("for")
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
force := LuetCfg.Viper.GetBool("force")
nodeps := LuetCfg.Viper.GetBool("nodeps")
onlydeps := LuetCfg.Viper.GetBool("onlydeps")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
yes := LuetCfg.Viper.GetBool("yes")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
f := viper.GetStringSlice("for")
force := viper.GetBool("force")
nodeps := viper.GetBool("nodeps")
onlydeps := viper.GetBool("onlydeps")
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
util.SetSystemConfig(util.DefaultContext)
util.SetSolverConfig(util.DefaultContext)
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toUninstall = append(toUninstall, pack)
}
@@ -83,53 +70,35 @@ var replaceCmd = &cobra.Command{
for _, a := range f {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toAdd = append(toAdd, pack)
}
// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
repos := installer.Repositories{}
for _, repo := range LuetCfg.SystemRepositories {
if !repo.Enable {
continue
}
r := installer.NewSystemRepository(repo)
repos = append(repos, r)
}
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
if concurrent {
LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
} else {
LuetCfg.GetSolverOptions().Implementation = solver.SingleCoreSimple
}
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
// Load config protect configs
installer.LoadConfigProtectConfs(LuetCfg)
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
NoDeps: nodeps,
Force: force,
OnlyDeps: onlydeps,
PreserveSystemEssentialData: true,
Ask: !yes,
DownloadOnly: downloadOnly,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
})
inst.Repositories(repos)
system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
err := inst.Swap(toUninstall, toAdd, system)
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
@@ -140,7 +109,7 @@ func init() {
replaceCmd.Flags().String("system-target", "", "System rootpath")
replaceCmd.Flags().String("system-engine", "", "System DB engine")
replaceCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
replaceCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
replaceCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
replaceCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
replaceCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")

View File

@@ -22,10 +22,10 @@ import (
"strconv"
"time"
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/cmd/util"
installer "github.com/mudler/luet/pkg/installer"
"github.com/pterm/pterm"
. "github.com/logrusorgru/aurora"
"github.com/spf13/cobra"
)
@@ -43,7 +43,7 @@ func NewRepoListCommand() *cobra.Command {
quiet, _ := cmd.Flags().GetBool("quiet")
repoType, _ := cmd.Flags().GetString("type")
for _, repo := range LuetCfg.SystemRepositories {
for _, repo := range util.DefaultContext.Config.SystemRepositories {
if enable && !repo.Enable {
continue
}
@@ -58,17 +58,17 @@ func NewRepoListCommand() *cobra.Command {
fmt.Println(repo.Name)
} else {
if repo.Enable {
repoColor = Bold(BrightGreen(repo.Name)).String()
repoColor = pterm.LightGreen(repo.Name)
} else {
repoColor = Bold(BrightRed(repo.Name)).String()
repoColor = pterm.LightRed(repo.Name)
}
if repo.Description != "" {
repoText = Yellow(repo.Description).String()
repoText = pterm.LightYellow(repo.Description)
} else {
repoText = Yellow(repo.Urls[0]).String()
repoText = pterm.LightYellow(repo.Urls[0])
}
repobasedir := LuetCfg.GetSystem().GetRepoDatabaseDirPath(repo.Name)
repobasedir := util.DefaultContext.Config.GetSystem().GetRepoDatabaseDirPath(repo.Name)
if repo.Cached {
r := installer.NewSystemRepository(repo)
@@ -76,8 +76,8 @@ func NewRepoListCommand() *cobra.Command {
installer.REPOSITORY_SPECFILE))
if localRepo != nil {
tsec, _ := strconv.ParseInt(localRepo.GetLastUpdate(), 10, 64)
repoRevision = Bold(Red(localRepo.GetRevision())).String() +
" - " + Bold(Green(time.Unix(tsec, 0).String())).String()
repoRevision = pterm.LightRed(localRepo.GetRevision()) +
" - " + pterm.LightGreen(time.Unix(tsec, 0).String())
}
}

View File

@@ -17,9 +17,8 @@
package cmd_repo
import (
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/cmd/util"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
"github.com/spf13/cobra"
)
@@ -45,32 +44,28 @@ $> luet repo update repo1 repo2
if len(args) > 0 {
for _, rname := range args {
repo, err := LuetCfg.GetSystemRepository(rname)
repo, err := util.DefaultContext.Config.GetSystemRepository(rname)
if err != nil && !ignore {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
} else if err != nil {
continue
}
r := installer.NewSystemRepository(*repo)
Spinner(32)
_, err = r.Sync(force)
_, err = r.Sync(util.DefaultContext, force)
if err != nil && !ignore {
Fatal("Error on sync repository " + rname + ": " + err.Error())
util.DefaultContext.Fatal("Error on sync repository " + rname + ": " + err.Error())
}
SpinnerStop()
}
} else {
for _, repo := range LuetCfg.SystemRepositories {
for _, repo := range util.DefaultContext.Config.SystemRepositories {
if repo.Cached && repo.Enable {
r := installer.NewSystemRepository(repo)
Spinner(32)
_, err := r.Sync(force)
_, err := r.Sync(util.DefaultContext, force)
if err != nil && !ignore {
Fatal("Error on sync repository " + r.GetName() + ": " + err.Error())
util.DefaultContext.Fatal("Error on sync repository " + r.GetName() + ": " + err.Error())
}
SpinnerStop()
}
}
}

View File

@@ -18,32 +18,28 @@ package cmd
import (
"fmt"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"github.com/marcsauter/single"
bus "github.com/mudler/luet/pkg/bus"
"github.com/mudler/luet/cmd/util"
bus "github.com/mudler/luet/pkg/api/core/bus"
extensions "github.com/mudler/cobra-extensions"
config "github.com/mudler/luet/pkg/config"
helpers "github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
repo "github.com/mudler/luet/pkg/repository"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var cfgFile string
var Verbose bool
var LockedCommands = []string{"install", "uninstall", "upgrade"}
const (
LuetCLIVersion = "0.14.6"
LuetCLIVersion = "0.20.8"
LuetEnvPrefix = "LUET"
)
var license = []string{
"Luet Copyright (C) 2019-2021 Ettore Di Giacinto",
"This program comes with ABSOLUTELY NO WARRANTY.",
"This is free software, and you are welcome to redistribute it under certain conditions.",
}
// Build time and commit information.
//
// ⚠️ WARNING: should only be set by "-ldflags".
@@ -52,6 +48,10 @@ var (
BuildCommit string
)
func version() string {
return fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime)
}
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "luet",
@@ -79,98 +79,48 @@ To build a package, from a tree definition:
$ luet build --tree tree/path package
`,
Version: fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime),
Version: version(),
PersistentPreRun: func(cmd *cobra.Command, args []string) {
err := LoadConfig(config.LuetCfg)
err := util.InitContext(util.DefaultContext)
if err != nil {
Fatal("failed to load configuration:", err.Error())
util.DefaultContext.Error("failed to load configuration:", err.Error())
}
util.DisplayVersionBanner(util.DefaultContext, util.IntroScreen, version, license)
// Initialize tmpdir prefix. TODO: Move this with LoadConfig
// directly on sub command to ensure the creation only when it's
// needed.
err = config.LuetCfg.GetSystem().InitTmpDir()
err = util.DefaultContext.Config.GetSystem().InitTmpDir()
if err != nil {
Fatal("failed on init tmp basedir:", err.Error())
util.DefaultContext.Fatal("failed on init tmp basedir:", err.Error())
}
viper.BindPFlag("plugin", cmd.Flags().Lookup("plugin"))
plugin := viper.GetStringSlice("plugin")
bus.Manager.Load(plugin...).Register()
bus.Manager.Initialize(util.DefaultContext, plugin...)
if len(bus.Manager.Plugins) != 0 {
Info(":lollipop:Enabled plugins:")
util.DefaultContext.Info(":lollipop:Enabled plugins:")
for _, p := range bus.Manager.Plugins {
Info("\t:arrow_right:", p.Name)
util.DefaultContext.Info(fmt.Sprintf("\t:arrow_right: %s (at %s)", p.Name, p.Executable))
}
}
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
// Cleanup all tmp directories used by luet
err := config.LuetCfg.GetSystem().CleanupTmpDir()
err := util.DefaultContext.Config.GetSystem().CleanupTmpDir()
if err != nil {
Warning("failed on cleanup tmpdir:", err.Error())
util.DefaultContext.Warning("failed on cleanup tmpdir:", err.Error())
}
},
SilenceErrors: true,
}
func LoadConfig(c *config.LuetConfig) error {
// If a config file is found, read it in.
c.Viper.ReadInConfig()
err := c.Viper.Unmarshal(&config.LuetCfg)
if err != nil {
return err
}
noSpinner := c.Viper.GetBool("no_spinner")
InitAurora()
if !noSpinner {
NewSpinner()
}
Debug("Using config file:", c.Viper.ConfigFileUsed())
if c.GetLogging().EnableLogFile && c.GetLogging().Path != "" {
// Init zap logger
err = ZapLogger()
if err != nil {
return err
}
}
// Load repositories
err = repo.LoadRepositories(c)
if err != nil {
return err
}
return nil
}
// Execute adds all child commands to the root command sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
if os.Getenv("LUET_NOLOCK") != "true" {
if len(os.Args) > 1 {
for _, lockedCmd := range LockedCommands {
if os.Args[1] == lockedCmd {
s := single.New("luet")
if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
Fatal("another instance of the app is already running, exiting")
} else if err != nil {
// Another error occurred, might be worth handling it as well
Fatal("failed to acquire exclusive app lock:", err.Error())
}
defer s.TryUnlock()
break
}
}
}
}
util.HandleLock(util.DefaultContext)
if err := RootCmd.Execute(); err != nil {
fmt.Println(err)
@@ -179,95 +129,5 @@ func Execute() {
}
func init() {
cobra.OnInitialize(initConfig)
pflags := RootCmd.PersistentFlags()
pflags.StringVar(&cfgFile, "config", "", "config file (default is $HOME/.luet.yaml)")
pflags.BoolP("debug", "d", false, "verbose output")
pflags.Bool("fatal", false, "Enables Warnings to exit")
pflags.Bool("enable-logfile", false, "Enable log to file")
pflags.Bool("no-spinner", false, "Disable spinner.")
pflags.Bool("color", config.LuetCfg.GetLogging().Color, "Enable/Disable color.")
pflags.Bool("emoji", config.LuetCfg.GetLogging().EnableEmoji, "Enable/Disable emoji.")
pflags.Bool("skip-config-protect", config.LuetCfg.ConfigProtectSkip,
"Disable config protect analysis.")
pflags.StringP("logfile", "l", config.LuetCfg.GetLogging().Path,
"Logfile path. Empty value disable log to file.")
pflags.StringSlice("plugin", []string{}, "A list of runtime plugins to load")
// os/user doesn't work in from-scratch environments.
// Check whether we can retrieve user information.
_, err := user.Current()
if err != nil {
Warning("failed to retrieve user identity:", err.Error())
}
pflags.Bool("same-owner", config.LuetCfg.GetGeneral().SameOwner, "Maintain same owner on uncompress.")
pflags.Int("concurrency", runtime.NumCPU(), "Concurrency")
config.LuetCfg.Viper.BindPFlag("logging.color", pflags.Lookup("color"))
config.LuetCfg.Viper.BindPFlag("logging.enable_emoji", pflags.Lookup("emoji"))
config.LuetCfg.Viper.BindPFlag("logging.enable_logfile", pflags.Lookup("enable-logfile"))
config.LuetCfg.Viper.BindPFlag("logging.path", pflags.Lookup("logfile"))
config.LuetCfg.Viper.BindPFlag("general.concurrency", pflags.Lookup("concurrency"))
config.LuetCfg.Viper.BindPFlag("general.debug", pflags.Lookup("debug"))
config.LuetCfg.Viper.BindPFlag("general.fatal_warnings", pflags.Lookup("fatal"))
config.LuetCfg.Viper.BindPFlag("general.same_owner", pflags.Lookup("same-owner"))
config.LuetCfg.Viper.BindPFlag("plugin", pflags.Lookup("plugin"))
// Currently this is maintained only from the CLI.
config.LuetCfg.Viper.BindPFlag("no_spinner", pflags.Lookup("no-spinner"))
config.LuetCfg.Viper.BindPFlag("config_protect_skip", pflags.Lookup("skip-config-protect"))
// Extensions must be binaries with the "luet-" prefix to be shown in the help.
// We also accept extensions in the "extensions/" path relative to where luet is started.
exts := extensions.Discover("luet", "extensions")
for _, ex := range exts {
cobraCmd := ex.CobraCommand()
RootCmd.AddCommand(cobraCmd)
}
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
// Luet support these priorities on read configuration file:
// - command line option (if available)
// - $PWD/.luet.yaml
// - $HOME/.luet.yaml
// - /etc/luet/luet.yaml
//
// Note: currently a single viper instance supports only one config name.
viper.SetEnvPrefix(LuetEnvPrefix)
viper.SetConfigType("yaml")
if cfgFile != "" { // enable ability to specify config file via flag
viper.SetConfigFile(cfgFile)
} else {
// Retrieve pwd directory
pwdDir, err := os.Getwd()
if err != nil {
Error(err)
os.Exit(1)
}
homeDir := helpers.GetHomeDir()
if helpers.Exists(filepath.Join(pwdDir, ".luet.yaml")) || (homeDir != "" && helpers.Exists(filepath.Join(homeDir, ".luet.yaml"))) {
viper.AddConfigPath(".")
if homeDir != "" {
viper.AddConfigPath(homeDir)
}
viper.SetConfigName(".luet")
} else {
viper.SetConfigName("luet")
viper.AddConfigPath("/etc/luet")
}
}
viper.AutomaticEnv() // read in environment variables that match
// Create EnvKey Replacer to handle complex structures
replacer := strings.NewReplacer(".", "__")
viper.SetEnvKeyReplacer(replacer)
viper.SetTypeByDefaultValue(true)
util.InitViper(util.DefaultContext, RootCmd)
}


@@ -19,13 +19,13 @@ import (
"strings"
"github.com/ghodss/yaml"
"github.com/jedib0t/go-pretty/table"
"github.com/jedib0t/go-pretty/v6/list"
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
type PackageResult struct {
@@ -46,28 +46,47 @@ func (r PackageResult) String() string {
return fmt.Sprintf("%s/%s-%s required for %s", r.Category, r.Name, r.Version, r.Target)
}
var rows table.Row = table.Row{"Package", "Category", "Name", "Version", "Repository", "Description", "License", "URI"}
var rows []string = []string{"Package", "Category", "Name", "Version", "Repository", "License"}
func packageToRow(repo string, p pkg.Package) table.Row {
return table.Row{p.HumanReadableString(), p.GetCategory(), p.GetName(), p.GetVersion(), repo, p.GetDescription(), p.GetLicense(), strings.Join(p.GetURI(), "\n")}
func packageToRow(repo string, p pkg.Package) []string {
return []string{p.HumanReadableString(), p.GetCategory(), p.GetName(), p.GetVersion(), repo, p.GetLicense()}
}
func packageToList(l list.Writer, repo string, p pkg.Package) {
l.AppendItem(p.HumanReadableString())
l.Indent()
l.AppendItem(fmt.Sprintf("Category: %s", p.GetCategory()))
l.AppendItem(fmt.Sprintf("Name: %s", p.GetName()))
l.AppendItem(fmt.Sprintf("Version: %s", p.GetVersion()))
l.AppendItem(fmt.Sprintf("Description: %s", p.GetDescription()))
l.AppendItem(fmt.Sprintf("Repository: %s ", repo))
l.AppendItem(fmt.Sprintf("Uri: %s ", strings.Join(p.GetURI(), "\n")))
l.UnIndent()
func packageToList(l *util.ListWriter, repo string, p pkg.Package) {
l.AppendItem(pterm.BulletListItem{
Level: 0, Text: p.HumanReadableString(),
TextStyle: pterm.NewStyle(pterm.FgCyan), Bullet: ">", BulletStyle: pterm.NewStyle(pterm.FgYellow),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Category: %s", p.GetCategory()),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Name: %s", p.GetName()),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Version: %s", p.GetVersion()),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Description: %s", p.GetDescription()),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Repository: %s ", repo),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Uri: %s ", strings.Join(p.GetURI(), " ")),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
}
func searchLocally(term string, l list.Writer, t table.Writer, label, labelMatch, revdeps, hidden bool) Results {
func searchLocally(term string, l *util.ListWriter, t *util.TableWriter, label, labelMatch, revdeps, hidden bool) Results {
var results Results
system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
var err error
iMatches := pkg.Packages{}
@@ -80,7 +99,7 @@ func searchLocally(term string, l list.Writer, t table.Writer, label, labelMatch
}
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
for _, pack := range iMatches {
@@ -124,31 +143,23 @@ func searchLocally(term string, l list.Writer, t table.Writer, label, labelMatch
return results
}
func searchOnline(term string, l list.Writer, t table.Writer, label, labelMatch, revdeps, hidden bool) Results {
func searchOnline(term string, l *util.ListWriter, t *util.TableWriter, label, labelMatch, revdeps, hidden bool) Results {
var results Results
repos := installer.Repositories{}
for _, repo := range LuetCfg.SystemRepositories {
if !repo.Enable {
continue
}
r := installer.NewSystemRepository(repo)
repos = append(repos, r)
}
inst := installer.NewLuetInstaller(
installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
},
)
inst.Repositories(repos)
synced, err := inst.SyncRepositories(false)
synced, err := inst.SyncRepositories()
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
Info("--- Search results (" + term + "): ---")
util.DefaultContext.Info("--- Search results (" + term + "): ---")
matches := []installer.PackageMatch{}
if label {
@@ -199,15 +210,15 @@ func searchOnline(term string, l list.Writer, t table.Writer, label, labelMatch,
}
return results
}
func searchLocalFiles(term string, l list.Writer, t table.Writer) Results {
func searchLocalFiles(term string, l *util.ListWriter, t *util.TableWriter) Results {
var results Results
Info("--- Search results (" + term + "): ---")
util.DefaultContext.Info("--- Search results (" + term + "): ---")
matches, _ := LuetCfg.GetSystemDB().FindPackageByFile(term)
matches, _ := util.DefaultContext.Config.GetSystemDB().FindPackageByFile(term)
for _, pack := range matches {
t.AppendRow(packageToRow("system", pack))
packageToList(l, "system", pack)
f, _ := LuetCfg.GetSystemDB().GetPackageFiles(pack)
f, _ := util.DefaultContext.Config.GetSystemDB().GetPackageFiles(pack)
results.Packages = append(results.Packages,
PackageResult{
Name: pack.GetName(),
@@ -222,31 +233,23 @@ func searchLocalFiles(term string, l list.Writer, t table.Writer) Results {
return results
}
func searchFiles(term string, l list.Writer, t table.Writer) Results {
func searchFiles(term string, l *util.ListWriter, t *util.TableWriter) Results {
var results Results
repos := installer.Repositories{}
for _, repo := range LuetCfg.SystemRepositories {
if !repo.Enable {
continue
}
r := installer.NewSystemRepository(repo)
repos = append(repos, r)
}
inst := installer.NewLuetInstaller(
installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
},
)
inst.Repositories(repos)
synced, err := inst.SyncRepositories(false)
synced, err := inst.SyncRepositories()
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
Info("--- Search results (" + term + "): ---")
util.DefaultContext.Info("--- Search results (" + term + "): ---")
matches := []installer.PackageMatch{}
@@ -306,55 +309,38 @@ Search can also return results in the terminal in different ways: as terminal ou
`,
Aliases: []string{"s"},
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
},
Run: func(cmd *cobra.Command, args []string) {
var results Results
if len(args) > 1 {
Fatal("Wrong number of arguments (expected 1)")
util.DefaultContext.Fatal("Wrong number of arguments (expected 1)")
} else if len(args) == 0 {
args = []string{"."}
}
hidden, _ := cmd.Flags().GetBool("hidden")
installed := LuetCfg.Viper.GetBool("installed")
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
installed := viper.GetBool("installed")
searchWithLabel, _ := cmd.Flags().GetBool("by-label")
searchWithLabelMatch, _ := cmd.Flags().GetBool("by-label-regex")
revdeps, _ := cmd.Flags().GetBool("revdeps")
tableMode, _ := cmd.Flags().GetBool("table")
files, _ := cmd.Flags().GetBool("files")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
util.SetSystemConfig(util.DefaultContext)
util.SetSolverConfig(util.DefaultContext)
out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
LuetCfg.GetLogging().SetLogLevel("error")
util.DefaultContext.Config.GetLogging().SetLogLevel(types.FatalLevel)
}
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
l := list.NewWriter()
t := table.NewWriter()
t.AppendHeader(rows)
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
l := &util.ListWriter{}
t := &util.TableWriter{}
t.AppendRow(rows)
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
switch {
case files && installed:
@@ -367,16 +353,6 @@ Search can also return results in the terminal in different ways: as terminal ou
results = searchLocally(args[0], l, t, searchWithLabel, searchWithLabelMatch, revdeps, hidden)
}
t.AppendFooter(rows)
t.SetStyle(table.StyleColoredBright)
l.SetStyle(list.StyleConnectedRounded)
if tableMode {
Info(t.Render())
} else {
Info(l.Render())
}
y, err := yaml.Marshal(results)
if err != nil {
fmt.Printf("err: %v\n", err)
@@ -392,8 +368,13 @@ Search can also return results in the terminal in different ways: as terminal ou
return
}
fmt.Println(string(j2))
default:
if tableMode {
t.Render()
} else {
l.Render()
}
}
},
}
@@ -403,7 +384,7 @@ func init() {
searchCmd.Flags().String("system-engine", "", "System DB engine")
searchCmd.Flags().Bool("installed", false, "Search between system packages")
searchCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
searchCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
searchCmd.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
searchCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
searchCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
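
Pulled together from the hunks above, a minimal sketch of the new terminal rendering path. renderSearchResults is a hypothetical helper (not in this changeset); it lives in the same cmd package and reuses the rows header, packageToRow and packageToList defined above, plus the TableWriter/ListWriter added in cmd/util/search.go further below:

package cmd

import (
	"github.com/mudler/luet/cmd/util"
	pkg "github.com/mudler/luet/pkg/package"
)

// renderSearchResults consolidates how the search variants feed the writers:
// every match becomes one table row and one indented bullet-list entry,
// and only terminal output is rendered (json/yaml are marshalled from Results).
func renderSearchResults(matches pkg.Packages, tableMode bool) {
	l := &util.ListWriter{}
	t := &util.TableWriter{}
	t.AppendRow(rows) // header: Package, Category, Name, Version, Repository, License

	for _, p := range matches {
		t.AppendRow(packageToRow("system", p))
		packageToList(l, "system", p)
	}

	if tableMode {
		t.Render()
	} else {
		l.Render()
	}
}
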


@@ -18,7 +18,7 @@ import (
"net/http"
"os"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/cmd/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@@ -41,15 +41,15 @@ var serverepoCmd = &cobra.Command{
http.Handle("/", http.FileServer(http.Dir(dir)))
Info("Serving ", dir, " on HTTP port: ", port)
Fatal(http.ListenAndServe(address+":"+port, nil))
util.DefaultContext.Info("Serving ", dir, " on HTTP port: ", port)
util.DefaultContext.Fatal(http.ListenAndServe(address+":"+port, nil))
},
}
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
serverepoCmd.Flags().String("dir", path, "Packages folder (output from build)")
serverepoCmd.Flags().String("port", "9090", "Listening port")


@@ -19,7 +19,7 @@ package cmd_tree
import (
"fmt"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/cmd/util"
spectooling "github.com/mudler/luet/pkg/spectooling"
tree "github.com/mudler/luet/pkg/tree"
version "github.com/mudler/luet/pkg/versioner"
@@ -36,7 +36,7 @@ func NewTreeBumpCommand() *cobra.Command {
PreRun: func(cmd *cobra.Command, args []string) {
df, _ := cmd.Flags().GetString("definition-file")
if df == "" {
Fatal("Mandatory definition.yaml path missing.")
util.DefaultContext.Fatal("Mandatory definition.yaml path missing.")
}
},
Run: func(cmd *cobra.Command, args []string) {
@@ -45,34 +45,34 @@ func NewTreeBumpCommand() *cobra.Command {
pkgVersion, _ := cmd.Flags().GetString("pkg-version")
pack, err := tree.ReadDefinitionFile(spec)
if err != nil {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
}
if pkgVersion != "" {
validator := &version.WrappedVersioner{}
err := validator.Validate(pkgVersion)
if err != nil {
Fatal("Invalid version string: " + err.Error())
util.DefaultContext.Fatal("Invalid version string: " + err.Error())
}
pack.SetVersion(pkgVersion)
} else {
// Retrieve version build section with Gentoo parser
err = pack.BumpBuildVersion()
if err != nil {
Fatal("Error on increment build version: " + err.Error())
util.DefaultContext.Fatal("Error on increment build version: " + err.Error())
}
}
if toStdout {
data, err := spectooling.NewDefaultPackageSanitized(&pack).Yaml()
if err != nil {
Fatal("Error on yaml conversion: " + err.Error())
util.DefaultContext.Fatal("Error on yaml conversion: " + err.Error())
}
fmt.Println(string(data))
} else {
err = tree.WriteDefinitionFile(&pack, spec)
if err != nil {
Fatal("Error on write definition file: " + err.Error())
util.DefaultContext.Fatal("Error on write definition file: " + err.Error())
}
fmt.Printf("Bumped package %s/%s-%s.\n", pack.Category, pack.Name, pack.Version)


@@ -20,14 +20,13 @@ import (
"fmt"
"os"
//. "github.com/mudler/luet/pkg/config"
"github.com/ghodss/yaml"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/options"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
tree "github.com/mudler/luet/pkg/tree"
@@ -44,12 +43,13 @@ func NewTreeImageCommand() *cobra.Command {
PreRun: func(cmd *cobra.Command, args []string) {
t, _ := cmd.Flags().GetStringArray("tree")
if len(t) == 0 {
Fatal("Mandatory tree param missing.")
util.DefaultContext.Fatal("Mandatory tree param missing.")
}
if len(args) != 1 {
Fatal("Expects one package as parameter")
util.DefaultContext.Fatal("Expects one package as parameter")
}
util.BindValuesFlags(cmd)
viper.BindPFlag("image-repository", cmd.Flags().Lookup("image-repository"))
},
@@ -59,10 +59,11 @@ func NewTreeImageCommand() *cobra.Command {
treePath, _ := cmd.Flags().GetStringArray("tree")
imageRepository := viper.GetString("image-repository")
pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")
values := util.ValuesFlags()
out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
LuetCfg.GetLogging().SetLogLevel("error")
util.DefaultContext.Config.GetLogging().SetLogLevel("error")
}
reciper := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
@@ -70,18 +71,21 @@ func NewTreeImageCommand() *cobra.Command {
for _, t := range treePath {
err := reciper.Load(t)
if err != nil {
Fatal("Error on load tree ", err)
util.DefaultContext.Fatal("Error on load tree ", err)
}
}
compilerBackend := backend.NewSimpleDockerBackend()
compilerBackend := backend.NewSimpleDockerBackend(util.DefaultContext)
opts := *LuetCfg.GetSolverOptions()
opts := *util.DefaultContext.Config.GetSolverOptions()
opts.Options = solver.Options{Type: solver.SingleCoreSimple, Concurrency: 1}
luetCompiler := compiler.NewLuetCompiler(
compilerBackend,
reciper.GetDatabase(),
options.WithBuildValues(values),
options.WithContext(util.DefaultContext),
options.WithPushRepository(imageRepository),
options.WithPullRepositories(pullRepo),
options.WithTemplateFolder(util.TemplateFolders(util.DefaultContext, false, treePath)),
options.WithSolverOptions(opts),
)
@@ -89,16 +93,21 @@ func NewTreeImageCommand() *cobra.Command {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
spec, err := luetCompiler.FromPackage(pack)
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
asserts, err := luetCompiler.ComputeDepTree(spec)
for _, assertion := range asserts { //highly dependent on the order
ht := compiler.NewHashTree(reciper.GetDatabase())
hashtree, err := ht.Query(luetCompiler, spec)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
for _, assertion := range hashtree.Solution { //highly dependent on the order
//buildImageHash := imageRepository + ":" + assertion.Hash.BuildHash
currentPackageImageHash := imageRepository + ":" + assertion.Hash.PackageHash
@@ -135,7 +144,7 @@ func NewTreeImageCommand() *cobra.Command {
}
path, err := os.Getwd()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
ans.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
ans.Flags().StringArrayP("tree", "t", []string{path}, "Path of the tree to use.")


@@ -21,11 +21,9 @@ import (
"os"
"sort"
//. "github.com/mudler/luet/pkg/config"
"github.com/ghodss/yaml"
helpers "github.com/mudler/luet/cmd/helpers"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
tree "github.com/mudler/luet/pkg/tree"
@@ -75,13 +73,13 @@ func NewTreePkglistCommand() *cobra.Command {
PreRun: func(cmd *cobra.Command, args []string) {
t, _ := cmd.Flags().GetStringArray("tree")
if len(t) == 0 {
Fatal("Mandatory tree param missing.")
util.DefaultContext.Fatal("Mandatory tree param missing.")
}
revdeps, _ := cmd.Flags().GetBool("revdeps")
deps, _ := cmd.Flags().GetBool("deps")
if revdeps && deps {
Fatal("Both revdeps and deps option used. Choice only one.")
util.DefaultContext.Fatal("Both revdeps and deps option used. Choice only one.")
}
},
@@ -98,7 +96,7 @@ func NewTreePkglistCommand() *cobra.Command {
out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
LuetCfg.GetLogging().SetLogLevel("error")
util.DefaultContext.Config.GetLogging().SetLogLevel("error")
}
var reciper tree.Builder
@@ -111,7 +109,7 @@ func NewTreePkglistCommand() *cobra.Command {
for _, t := range treePath {
err := reciper.Load(t)
if err != nil {
Fatal("Error on load tree ", err)
util.DefaultContext.Fatal("Error on load tree ", err)
}
}
@@ -126,11 +124,11 @@ func NewTreePkglistCommand() *cobra.Command {
regExcludes, err := helpers.CreateRegexArray(excludes)
if err != nil {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
}
regMatches, err := helpers.CreateRegexArray(matches)
if err != nil {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
}
plist := make([]string, 0)
@@ -167,78 +165,79 @@ func NewTreePkglistCommand() *cobra.Command {
}
}
if addPkg {
if revdeps {
packs, _ := reciper.GetDatabase().GetRevdeps(p)
for _, revdep := range packs {
if !addPkg {
continue
}
if revdeps {
packs, _ := reciper.GetDatabase().GetRevdeps(p)
for i := range packs {
revdep := packs[i]
if full {
pkgstr = pkgDetail(revdep)
} else if verbose {
pkgstr = revdep.HumanReadableString()
} else {
pkgstr = fmt.Sprintf("%s/%s", revdep.GetCategory(), revdep.GetName())
}
plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Path: revdep.GetPath(),
})
}
} else if deps {
solution, err := depSolver.Install(pkg.Packages{p})
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
ass := solution.SearchByName(p.GetPackageName())
solution, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
for _, pa := range solution {
if pa.Value {
// Exclude itself
if pa.Package.GetName() == p.GetName() && pa.Package.GetCategory() == p.GetCategory() {
continue
}
if full {
pkgstr = pkgDetail(revdep)
pkgstr = pkgDetail(pa.Package)
} else if verbose {
pkgstr = revdep.HumanReadableString()
pkgstr = pa.Package.HumanReadableString()
} else {
pkgstr = fmt.Sprintf("%s/%s", revdep.GetCategory(), revdep.GetName())
pkgstr = fmt.Sprintf("%s/%s", pa.Package.GetCategory(), pa.Package.GetName())
}
plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Path: revdep.GetPath(),
Name: pa.Package.GetName(),
Version: pa.Package.GetVersion(),
Category: pa.Package.GetCategory(),
Path: pa.Package.GetPath(),
})
}
} else if deps {
Spinner(32)
solution, err := depSolver.Install(pkg.Packages{p})
if err != nil {
Fatal(err.Error())
}
ass := solution.SearchByName(p.GetPackageName())
solution, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
if err != nil {
Fatal(err.Error())
}
SpinnerStop()
for _, pa := range solution {
if pa.Value {
// Exclude itself
if pa.Package.GetName() == p.GetName() && pa.Package.GetCategory() == p.GetCategory() {
continue
}
if full {
pkgstr = pkgDetail(pa.Package)
} else if verbose {
pkgstr = pa.Package.HumanReadableString()
} else {
pkgstr = fmt.Sprintf("%s/%s", pa.Package.GetCategory(), pa.Package.GetName())
}
plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: pa.Package.GetName(),
Version: pa.Package.GetVersion(),
Category: pa.Package.GetCategory(),
Path: pa.Package.GetPath(),
})
}
}
} else {
plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: p.GetName(),
Version: p.GetVersion(),
Category: p.GetCategory(),
Path: p.GetPath(),
})
}
} else {
plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: p.GetName(),
Version: p.GetVersion(),
Category: p.GetCategory(),
Path: p.GetPath(),
})
}
}
y, err := yaml.Marshal(results)
@@ -269,7 +268,7 @@ func NewTreePkglistCommand() *cobra.Command {
}
path, err := os.Getwd()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
ans.Flags().BoolP("buildtime", "b", false, "Build time match")
ans.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")


@@ -26,8 +26,8 @@ import (
"sync"
helpers "github.com/mudler/luet/cmd/helpers"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
tree "github.com/mudler/luet/pkg/tree"
@@ -104,7 +104,7 @@ func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, recipe
} else {
errstr = "No packages"
}
Error(fmt.Sprintf("[%9s] %s/%s-%s: Broken. No versions could be found by database %s",
util.DefaultContext.Error(fmt.Sprintf("[%9s] %s/%s-%s: Broken. No versions could be found by database %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
errstr,
@@ -134,7 +134,7 @@ func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, recipe
p.GetCategory(), p.GetName(), p.GetVersion(),
err.Error(),
)
Error(errstr)
util.DefaultContext.Error(errstr)
return errors.New(errstr)
}
@@ -173,7 +173,7 @@ func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, recipe
}
}
Info(fmt.Sprintf("[%9s] Checking package ", checkType)+
util.DefaultContext.Info(fmt.Sprintf("[%9s] Checking package ", checkType)+
fmt.Sprintf("%s/%s-%s", p.GetCategory(), p.GetName(), p.GetVersion()),
"with", len(p.GetRequires()), "dependencies and", len(p.GetConflicts()), "conflicts.")
@@ -201,7 +201,7 @@ func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, recipe
} else {
errstr = "No packages"
}
Error(fmt.Sprintf("[%9s] %s/%s-%s: Broken Dep %s/%s-%s - %s",
util.DefaultContext.Error(fmt.Sprintf("[%9s] %s/%s-%s: Broken Dep %s/%s-%s - %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
@@ -221,12 +221,12 @@ func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, recipe
} else {
Debug(fmt.Sprintf("[%9s] Find packages for dep", checkType),
util.DefaultContext.Debug(fmt.Sprintf("[%9s] Find packages for dep", checkType),
fmt.Sprintf("%s/%s-%s", r.GetCategory(), r.GetName(), r.GetVersion()))
if opts.WithSolver {
Info(fmt.Sprintf("[%9s] :soap: [%2d/%2d] %s/%s-%s: %s/%s-%s",
util.DefaultContext.Info(fmt.Sprintf("[%9s] :soap: [%2d/%2d] %s/%s-%s: %s/%s-%s",
checkType,
idx+1, len(all),
p.GetCategory(), p.GetName(), p.GetVersion(),
@@ -236,15 +236,15 @@ func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, recipe
// Check if the solver has already been run for this dep
_, err := cacheDeps.Get(r.HashFingerprint(""))
if err == nil {
Debug(fmt.Sprintf("[%9s] :direct_hit: Cache Hit for dep", checkType),
util.DefaultContext.Debug(fmt.Sprintf("[%9s] :direct_hit: Cache Hit for dep", checkType),
fmt.Sprintf("%s/%s-%s", r.GetCategory(), r.GetName(), r.GetVersion()))
continue
}
Spinner(32)
util.DefaultContext.Spinner()
solution, err := depSolver.Install(pkg.Packages{r})
ass := solution.SearchByName(r.GetPackageName())
SpinnerStop()
util.DefaultContext.SpinnerStop()
if err == nil {
if ass == nil {
@@ -255,7 +255,7 @@ func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, recipe
r.GetCategory(), r.GetName(), r.GetVersion(),
))
if LuetCfg.GetGeneral().Debug {
if util.DefaultContext.Config.GetGeneral().Debug {
for idx, pa := range solution {
fmt.Println(fmt.Sprintf("[%9s] %s/%s-%s: solution %d: %s",
checkType,
@@ -264,7 +264,7 @@ func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, recipe
}
}
Error(ans.Error())
util.DefaultContext.Error(ans.Error())
opts.IncrBrokenDeps()
validpkg = false
} else {
@@ -274,7 +274,7 @@ func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, recipe
if err != nil {
Error(fmt.Sprintf("[%9s] %s/%s-%s: solver broken for dep %s/%s-%s - %s",
util.DefaultContext.Error(fmt.Sprintf("[%9s] %s/%s-%s: solver broken for dep %s/%s-%s - %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
@@ -376,28 +376,28 @@ func initOpts(opts *ValidateOpts, onlyRuntime, onlyBuildtime, withSolver bool, t
opts.BuildtimeCacheDeps = pkg.NewInMemoryDatabase(false).(*pkg.InMemoryDatabase)
for _, treePath := range treePaths {
Info(fmt.Sprintf("Loading :deciduous_tree: %s...", treePath))
util.DefaultContext.Info(fmt.Sprintf("Loading :deciduous_tree: %s...", treePath))
if opts.BuildtimeReciper != nil {
err = opts.BuildtimeReciper.Load(treePath)
if err != nil {
Fatal("Error on load tree ", err)
util.DefaultContext.Fatal("Error on load tree ", err)
}
}
if opts.RuntimeReciper != nil {
err = opts.RuntimeReciper.Load(treePath)
if err != nil {
Fatal("Error on load tree ", err)
util.DefaultContext.Fatal("Error on load tree ", err)
}
}
}
opts.RegExcludes, err = helpers.CreateRegexArray(opts.Excludes)
if err != nil {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
}
opts.RegMatches, err = helpers.CreateRegexArray(opts.Matches)
if err != nil {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
}
}
@@ -417,16 +417,16 @@ func NewTreeValidateCommand() *cobra.Command {
onlyBuildtime, _ := cmd.Flags().GetBool("only-buildtime")
if len(treePaths) < 1 {
Fatal("Mandatory tree param missing.")
util.DefaultContext.Fatal("Mandatory tree param missing.")
}
if onlyRuntime && onlyBuildtime {
Fatal("Both --only-runtime and --only-buildtime options are not possibile.")
util.DefaultContext.Fatal("Both --only-runtime and --only-buildtime options are not possibile.")
}
},
Run: func(cmd *cobra.Command, args []string) {
var reciper tree.Builder
concurrency := LuetCfg.GetGeneral().Concurrency
concurrency := util.DefaultContext.Config.GetGeneral().Concurrency
withSolver, _ := cmd.Flags().GetBool("with-solver")
onlyRuntime, _ := cmd.Flags().GetBool("only-runtime")
@@ -472,18 +472,18 @@ func NewTreeValidateCommand() *cobra.Command {
// fmt.Println("Broken packages:", brokenPkgs, "(", brokenDeps, "deps ).")
if len(stringerrs) != 0 {
Error(fmt.Sprintf("Found %d broken packages and %d broken deps.",
util.DefaultContext.Error(fmt.Sprintf("Found %d broken packages and %d broken deps.",
opts.BrokenPkgs, opts.BrokenDeps))
Fatal("Errors: " + strconv.Itoa(len(stringerrs)))
util.DefaultContext.Fatal("Errors: " + strconv.Itoa(len(stringerrs)))
} else {
Info("All good! :white_check_mark:")
util.DefaultContext.Info("All good! :white_check_mark:")
os.Exit(0)
}
},
}
path, err := os.Getwd()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
ans.Flags().Bool("only-runtime", false, "Check only runtime dependencies.")
ans.Flags().Bool("only-buildtime", false, "Check only buildtime dependencies.")


@@ -16,13 +16,14 @@ package cmd
import (
helpers "github.com/mudler/luet/cmd/helpers"
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var uninstallCmd = &cobra.Command{
@@ -31,16 +32,11 @@ var uninstallCmd = &cobra.Command{
Long: `Uninstall packages`,
Aliases: []string{"rm", "un"},
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Run: func(cmd *cobra.Command, args []string) {
toRemove := []pkg.Package{}
@@ -48,47 +44,34 @@ var uninstallCmd = &cobra.Command{
pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toRemove = append(toRemove, pack)
}
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
force := LuetCfg.Viper.GetBool("force")
force := viper.GetBool("force")
nodeps, _ := cmd.Flags().GetBool("nodeps")
full, _ := cmd.Flags().GetBool("full")
checkconflicts, _ := cmd.Flags().GetBool("conflictscheck")
fullClean, _ := cmd.Flags().GetBool("full-clean")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
yes := LuetCfg.Viper.GetBool("yes")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
yes := viper.GetBool("yes")
keepProtected, _ := cmd.Flags().GetBool("keep-protected-files")
LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
util.SetSystemConfig(util.DefaultContext)
util.SetSolverConfig(util.DefaultContext)
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
if concurrent {
LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
} else {
LuetCfg.GetSolverOptions().Implementation = solver.SingleCoreSimple
}
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
util.DefaultContext.Config.ConfigProtectSkip = !keepProtected
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
// Load config protect configs
installer.LoadConfigProtectConfs(LuetCfg)
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
NoDeps: nodeps,
Force: force,
FullUninstall: full,
@@ -96,12 +79,13 @@ var uninstallCmd = &cobra.Command{
CheckConflicts: checkconflicts,
Ask: !yes,
PreserveSystemEssentialData: true,
Context: util.DefaultContext,
})
system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
if err := inst.Uninstall(system, toRemove...); err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
@@ -112,7 +96,7 @@ func init() {
uninstallCmd.Flags().String("system-target", "", "System rootpath")
uninstallCmd.Flags().String("system-engine", "", "System DB engine")
uninstallCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
uninstallCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
uninstallCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
uninstallCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
uninstallCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
@@ -123,6 +107,7 @@ func init() {
uninstallCmd.Flags().Bool("full-clean", false, "(experimental) Uninstall packages and all the other deps/revdeps of it.")
uninstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
uninstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
uninstallCmd.Flags().BoolP("keep-protected-files", "k", false, "Keep package protected files around")
RootCmd.AddCommand(uninstallCmd)
}


@@ -15,12 +15,13 @@
package cmd
import (
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/pkg/solver"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var upgradeCmd = &cobra.Command{
@@ -28,67 +29,36 @@ var upgradeCmd = &cobra.Command{
Short: "Upgrades the system",
Aliases: []string{"u"},
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", installCmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", installCmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Long: `Upgrades packages in parallel`,
Run: func(cmd *cobra.Command, args []string) {
repos := installer.Repositories{}
for _, repo := range LuetCfg.SystemRepositories {
if !repo.Enable {
continue
}
r := installer.NewSystemRepository(repo)
repos = append(repos, r)
}
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
force := LuetCfg.Viper.GetBool("force")
force := viper.GetBool("force")
nodeps, _ := cmd.Flags().GetBool("nodeps")
full, _ := cmd.Flags().GetBool("full")
universe, _ := cmd.Flags().GetBool("universe")
clean, _ := cmd.Flags().GetBool("clean")
sync, _ := cmd.Flags().GetBool("sync")
concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
yes := LuetCfg.Viper.GetBool("yes")
dbpath := LuetCfg.Viper.GetString("system.database_path")
rootfs := LuetCfg.Viper.GetString("system.rootfs")
engine := LuetCfg.Viper.GetString("system.database_engine")
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
LuetCfg.System.DatabaseEngine = engine
LuetCfg.System.DatabasePath = dbpath
LuetCfg.System.Rootfs = rootfs
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
if concurrent {
LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
} else {
LuetCfg.GetSolverOptions().Implementation = solver.SingleCoreSimple
}
util.SetSystemConfig(util.DefaultContext)
opts := util.SetSolverConfig(util.DefaultContext)
Debug("Solver", LuetCfg.GetSolverOptions().String())
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
util.DefaultContext.Debug("Solver", opts.CompactString())
// Load config protect configs
installer.LoadConfigProtectConfs(LuetCfg)
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
Force: force,
FullUninstall: full,
NoDeps: nodeps,
@@ -98,12 +68,13 @@ var upgradeCmd = &cobra.Command{
PreserveSystemEssentialData: true,
Ask: !yes,
DownloadOnly: downloadOnly,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
})
inst.Repositories(repos)
system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
if err := inst.Upgrade(system); err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
@@ -113,7 +84,7 @@ func init() {
upgradeCmd.Flags().String("system-target", "", "System rootpath")
upgradeCmd.Flags().String("system-engine", "", "System DB engine")
upgradeCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
upgradeCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
upgradeCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
upgradeCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
upgradeCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
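
The same shape recurs in the install/uninstall/upgrade hunks above: the hand-rolled repositories loop and the inst.Repositories(repos) call are gone, and the enabled system repositories plus the context are passed straight into the installer options. Reassembled from the upgrade hunk for readability (illustrative; the command-specific fields such as Force and NoDeps are omitted here):

util.SetSystemConfig(util.DefaultContext)
opts := util.SetSolverConfig(util.DefaultContext)
util.DefaultContext.Debug("Solver", opts.CompactString())

// Config protect settings are now loaded through the context.
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)

inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
	Concurrency:         util.DefaultContext.Config.GetGeneral().Concurrency,
	SolverOptions:       *util.DefaultContext.Config.GetSolverOptions(),
	PackageRepositories: util.DefaultContext.Config.SystemRepositories,
	Context:             util.DefaultContext,
})

system := &installer.System{
	Database: util.DefaultContext.Config.GetSystemDB(),
	Target:   util.DefaultContext.Config.GetSystem().Rootfs,
}
if err := inst.Upgrade(system); err != nil {
	util.DefaultContext.Fatal("Error: " + err.Error())
}
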


@@ -1,5 +1,4 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
// Copyright © 2020-2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
@@ -17,11 +16,82 @@
package cmd
import (
. "github.com/mudler/luet/cmd/util"
"fmt"
"os"
"path/filepath"
"github.com/docker/docker/api/types"
"github.com/docker/go-units"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/helpers/docker"
"github.com/spf13/cobra"
)
func NewUnpackCommand() *cobra.Command {
c := &cobra.Command{
Use: "unpack image path",
Short: "Unpack a docker image natively",
Long: `unpack doesn't need the docker daemon to run, and unpacks a docker image in the specified directory:
luet util unpack golang:alpine /alpine
`,
PreRun: func(cmd *cobra.Command, args []string) {
if len(args) != 2 {
util.DefaultContext.Fatal("Expects an image and a path")
}
},
Run: func(cmd *cobra.Command, args []string) {
image := args[0]
destination, err := filepath.Abs(args[1])
if err != nil {
util.DefaultContext.Error("Invalid path %s", destination)
os.Exit(1)
}
verify, _ := cmd.Flags().GetBool("verify")
user, _ := cmd.Flags().GetString("auth-username")
pass, _ := cmd.Flags().GetString("auth-password")
authType, _ := cmd.Flags().GetString("auth-type")
server, _ := cmd.Flags().GetString("auth-server-address")
identity, _ := cmd.Flags().GetString("auth-identity-token")
registryToken, _ := cmd.Flags().GetString("auth-registry-token")
util.DefaultContext.Info("Downloading", image, "to", destination)
auth := &types.AuthConfig{
Username: user,
Password: pass,
ServerAddress: server,
Auth: authType,
IdentityToken: identity,
RegistryToken: registryToken,
}
info, err := docker.DownloadAndExtractDockerImage(util.DefaultContext, image, destination, auth, verify)
if err != nil {
util.DefaultContext.Error(err.Error())
os.Exit(1)
}
util.DefaultContext.Info(fmt.Sprintf("Pulled: %s %s", info.Target.Digest, info.Name))
util.DefaultContext.Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.Target.Size))))
},
}
c.Flags().String("auth-username", "", "Username to authenticate to registry/notary")
c.Flags().String("auth-password", "", "Password to authenticate to registry")
c.Flags().String("auth-type", "", "Auth type")
c.Flags().String("auth-server-address", "", "Authentication server address")
c.Flags().String("auth-identity-token", "", "Authentication identity token")
c.Flags().String("auth-registry-token", "", "Authentication registry token")
c.Flags().Bool("verify", false, "Verify signed images to notary before to pull")
return c
}
var utilGroup = &cobra.Command{
Use: "util [command] [OPTIONS]",
Short: "General luet internal utilities exposed",

cmd/util/cli.go Normal file

@@ -0,0 +1,166 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package util
import (
"errors"
"os"
"path/filepath"
"strings"
"github.com/marcsauter/single"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/installer"
)
var DefaultContext = types.NewContext()
var lockedCommands = []string{"install", "uninstall", "upgrade"}
var bannerCommands = []string{"install", "build", "uninstall", "upgrade"}
func BindSystemFlags(cmd *cobra.Command) {
viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
}
func BindSolverFlags(cmd *cobra.Command) {
viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
}
func BindValuesFlags(cmd *cobra.Command) {
viper.BindPFlag("values", cmd.Flags().Lookup("values"))
}
func ValuesFlags() []string {
return viper.GetStringSlice("values")
}
func SetSystemConfig(ctx *types.Context) {
dbpath := viper.GetString("system.database_path")
rootfs := viper.GetString("system.rootfs")
engine := viper.GetString("system.database_engine")
ctx.Config.System.DatabaseEngine = engine
ctx.Config.System.DatabasePath = dbpath
ctx.Config.System.SetRootFS(rootfs)
}
func SetSolverConfig(ctx *types.Context) (c *types.LuetSolverOptions) {
stype := viper.GetString("solver.type")
discount := viper.GetFloat64("solver.discount")
rate := viper.GetFloat64("solver.rate")
attempts := viper.GetInt("solver.max_attempts")
ctx.Config.GetSolverOptions().Type = stype
ctx.Config.GetSolverOptions().LearnRate = float32(rate)
ctx.Config.GetSolverOptions().Discount = float32(discount)
ctx.Config.GetSolverOptions().MaxAttempts = attempts
return &types.LuetSolverOptions{
Type: stype,
LearnRate: float32(rate),
Discount: float32(discount),
MaxAttempts: attempts,
}
}
func SetCliFinalizerEnvs(ctx *types.Context, finalizerEnvs []string) error {
if len(finalizerEnvs) > 0 {
for _, v := range finalizerEnvs {
idx := strings.Index(v, "=")
if idx < 0 {
return errors.New("Found invalid runtime finalizer environment: " + v)
}
ctx.Config.SetFinalizerEnv(v[0:idx], v[idx+1:])
}
}
return nil
}
// TemplateFolders returns the default folders which hold shared templates between packages in a given tree path
func TemplateFolders(ctx *types.Context, fromRepo bool, treePaths []string) []string {
templateFolders := []string{}
for _, t := range treePaths {
templateFolders = append(templateFolders, filepath.Join(t, "templates"))
}
if fromRepo {
for _, s := range installer.SystemRepositories(ctx.Config.SystemRepositories) {
templateFolders = append(templateFolders, filepath.Join(s.TreePath, "templates"))
}
}
return templateFolders
}
func IntroScreen() {
luetLogo, _ := pterm.DefaultBigText.WithLetters(
pterm.NewLettersFromStringWithStyle("LU", pterm.NewStyle(pterm.FgLightMagenta)),
pterm.NewLettersFromStringWithStyle("ET", pterm.NewStyle(pterm.FgLightBlue))).
Srender()
pterm.DefaultCenter.Print(luetLogo)
pterm.DefaultCenter.Print(pterm.DefaultHeader.WithFullWidth().WithBackgroundStyle(pterm.NewStyle(pterm.BgLightBlue)).WithMargin(10).Sprint("Luet - 0-deps container-based package manager"))
}
func HandleLock(c *types.Context) {
if os.Getenv("LUET_NOLOCK") != "true" {
if len(os.Args) > 1 {
for _, lockedCmd := range lockedCommands {
if os.Args[1] == lockedCmd {
s := single.New("luet")
if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
c.Fatal("another instance of the app is already running, exiting")
} else if err != nil {
// Another error occurred, might be worth handling it as well
c.Fatal("failed to acquire exclusive app lock:", err.Error())
}
defer s.TryUnlock()
break
}
}
}
}
}
func DisplayVersionBanner(c *types.Context, banner func(), version func() string, license []string) {
display := false
if len(os.Args) > 1 {
for _, c := range bannerCommands {
if os.Args[1] == c {
display = true
}
}
}
if display {
banner()
pterm.DefaultCenter.Print(version())
for _, l := range license {
pterm.DefaultCenter.Print(l)
}
}
}
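
A sketch of how a subcommand is expected to consume the helpers above, following the pattern visible in the search/uninstall/upgrade hunks: bind the per-command flags to viper in PreRun, then copy the values into the shared context in Run. exampleCmd is hypothetical and assumes the command defines the system-*/solver-* flags that BindSystemFlags and BindSolverFlags look up:

package cmd

import (
	"github.com/mudler/luet/cmd/util"
	"github.com/spf13/cobra"
)

var exampleCmd = &cobra.Command{
	Use: "example",
	PreRun: func(cmd *cobra.Command, args []string) {
		util.BindSystemFlags(cmd) // system.database_path, system.rootfs, system.database_engine
		util.BindSolverFlags(cmd) // solver.type, solver.discount, solver.rate, solver.max_attempts
	},
	Run: func(cmd *cobra.Command, args []string) {
		// Copy the bound values into the context configuration.
		util.SetSystemConfig(util.DefaultContext)
		opts := util.SetSolverConfig(util.DefaultContext)
		util.DefaultContext.Debug("Solver", opts.CompactString())
	},
}
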

cmd/util/config.go Normal file

@@ -0,0 +1,207 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package util
import (
"fmt"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
extensions "github.com/mudler/cobra-extensions"
"github.com/mudler/luet/pkg/api/core/types"
helpers "github.com/mudler/luet/pkg/helpers"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
LuetEnvPrefix = "LUET"
)
var cfgFile string
// initConfig reads in config file and ENV variables if set.
func initConfig() {
setDefaults(viper.GetViper())
// Luet support these priorities on read configuration file:
// - command line option (if available)
// - $PWD/.luet.yaml
// - $HOME/.luet.yaml
// - /etc/luet/luet.yaml
//
// Note: currently a single viper instance supports only one config name.
viper.SetEnvPrefix(LuetEnvPrefix)
viper.SetConfigType("yaml")
if cfgFile != "" { // enable ability to specify config file via flag
viper.SetConfigFile(cfgFile)
} else {
// Retrieve pwd directory
pwdDir, err := os.Getwd()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
homeDir := helpers.GetHomeDir()
if fileHelper.Exists(filepath.Join(pwdDir, ".luet.yaml")) || (homeDir != "" && fileHelper.Exists(filepath.Join(homeDir, ".luet.yaml"))) {
viper.AddConfigPath(".")
if homeDir != "" {
viper.AddConfigPath(homeDir)
}
viper.SetConfigName(".luet")
} else {
viper.SetConfigName("luet")
viper.AddConfigPath("/etc/luet")
}
}
viper.AutomaticEnv() // read in environment variables that match
// Create EnvKey Replacer to handle complex structures
replacer := strings.NewReplacer(".", "__")
viper.SetEnvKeyReplacer(replacer)
viper.SetTypeByDefaultValue(true)
// If a config file is found, read it in.
viper.ReadInConfig()
}
// InitContext inits the context by parsing the configurations from viper
// this is meant to be run before each command to be able to parse any override from
// the CLI/ENV
func InitContext(ctx *types.Context) (err error) {
err = viper.Unmarshal(&ctx.Config)
if err != nil {
return
}
// Inits the context with the configurations loaded
// It reads system repositories, sets logging, and all the
// context which is required to perform luet actions
err = ctx.Init()
if err != nil {
return
}
// no_spinner is not mapped in our configs
ctx.NoSpinner = viper.GetBool("no_spinner")
return
}
func setDefaults(viper *viper.Viper) {
viper.SetDefault("logging.level", "info")
viper.SetDefault("logging.enable_logfile", false)
viper.SetDefault("logging.path", "/var/log/luet.log")
viper.SetDefault("logging.json_format", false)
viper.SetDefault("logging.enable_emoji", true)
viper.SetDefault("logging.color", true)
viper.SetDefault("general.concurrency", runtime.NumCPU())
viper.SetDefault("general.debug", false)
viper.SetDefault("general.show_build_output", false)
viper.SetDefault("general.fatal_warnings", false)
viper.SetDefault("general.http_timeout", 360)
u, err := user.Current()
// os/user doesn't work in from scratch environments
if err != nil || (u != nil && u.Uid == "0") {
viper.SetDefault("general.same_owner", true)
} else {
viper.SetDefault("general.same_owner", false)
}
viper.SetDefault("system.database_engine", "boltdb")
viper.SetDefault("system.database_path", "/var/cache/luet")
viper.SetDefault("system.rootfs", "/")
viper.SetDefault("system.tmpdir_base", filepath.Join(os.TempDir(), "tmpluet"))
viper.SetDefault("system.pkgs_cache_path", "packages")
viper.SetDefault("repos_confdir", []string{"/etc/luet/repos.conf.d"})
viper.SetDefault("config_protect_confdir", []string{"/etc/luet/config.protect.d"})
viper.SetDefault("config_protect_skip", false)
// TODO: Set default to false when we are ready for migration.
viper.SetDefault("config_from_host", true)
viper.SetDefault("cache_repositories", []string{})
viper.SetDefault("system_repositories", []string{})
viper.SetDefault("finalizer_envs", make(map[string]string))
viper.SetDefault("solver.type", "")
viper.SetDefault("solver.rate", 0.7)
viper.SetDefault("solver.discount", 1.0)
viper.SetDefault("solver.max_attempts", 9000)
}
// InitViper inits a new viper
// this is meant to be run just once at startup to set up the root command
func InitViper(ctx *types.Context, RootCmd *cobra.Command) {
cobra.OnInitialize(initConfig)
pflags := RootCmd.PersistentFlags()
pflags.StringVar(&cfgFile, "config", "", "config file (default is $HOME/.luet.yaml)")
pflags.BoolP("debug", "d", false, "verbose output")
pflags.Bool("fatal", false, "Enables Warnings to exit")
pflags.Bool("enable-logfile", false, "Enable log to file")
pflags.Bool("no-spinner", false, "Disable spinner.")
pflags.Bool("color", ctx.Config.GetLogging().Color, "Enable/Disable color.")
pflags.Bool("emoji", ctx.Config.GetLogging().EnableEmoji, "Enable/Disable emoji.")
pflags.Bool("skip-config-protect", ctx.Config.ConfigProtectSkip,
"Disable config protect analysis.")
pflags.StringP("logfile", "l", ctx.Config.GetLogging().Path,
"Logfile path. Empty value disable log to file.")
pflags.StringSlice("plugin", []string{}, "A list of runtime plugins to load")
// os/user doesn't work in from-scratch environments.
// Check if we can retrieve user information.
_, err := user.Current()
if err != nil {
ctx.Warning("failed to retrieve user identity:", err.Error())
}
pflags.Bool("same-owner", ctx.Config.GetGeneral().SameOwner, "Maintain same owner on uncompress.")
pflags.Int("concurrency", runtime.NumCPU(), "Concurrency")
pflags.Int("http-timeout", ctx.Config.General.HTTPTimeout, "Default timeout for http(s) requests")
viper.BindPFlag("logging.color", pflags.Lookup("color"))
viper.BindPFlag("logging.enable_emoji", pflags.Lookup("emoji"))
viper.BindPFlag("logging.enable_logfile", pflags.Lookup("enable-logfile"))
viper.BindPFlag("logging.path", pflags.Lookup("logfile"))
viper.BindPFlag("general.concurrency", pflags.Lookup("concurrency"))
viper.BindPFlag("general.debug", pflags.Lookup("debug"))
viper.BindPFlag("general.fatal_warnings", pflags.Lookup("fatal"))
viper.BindPFlag("general.same_owner", pflags.Lookup("same-owner"))
viper.BindPFlag("plugin", pflags.Lookup("plugin"))
viper.BindPFlag("general.http_timeout", pflags.Lookup("http-timeout"))
// Currently this is maintained only from the CLI.
viper.BindPFlag("no_spinner", pflags.Lookup("no-spinner"))
viper.BindPFlag("config_protect_skip", pflags.Lookup("skip-config-protect"))
// Extensions must be binaries with the "luet-" prefix to be shown in the help.
// We also accept extensions in the "extensions/" path relative to where luet is started.
exts := extensions.Discover("luet", "extensions")
for _, ex := range exts {
cobraCmd := ex.CobraCommand()
RootCmd.AddCommand(cobraCmd)
}
}
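As a rough sketch of how these helpers fit together (the import path, command name, and the PersistentPreRun wiring are assumptions for illustration, not the actual root command), a caller could run InitViper once at startup and InitContext before every command:

package main

import (
    "log"

    "github.com/spf13/cobra"

    "github.com/mudler/luet/cmd/util" // assumed import path for the helpers above
    "github.com/mudler/luet/pkg/api/core/types"
)

func main() {
    ctx := types.NewContext()

    rootCmd := &cobra.Command{
        Use: "luet",
        // Re-parse the configuration before each command so CLI/ENV overrides are applied.
        PersistentPreRun: func(cmd *cobra.Command, args []string) {
            if err := util.InitContext(ctx); err != nil {
                log.Fatal(err)
            }
        },
    }

    // Registers persistent flags, binds them to viper, and discovers "luet-*" extensions.
    util.InitViper(ctx, rootCmd)

    if err := rootCmd.Execute(); err != nil {
        log.Fatal(err)
    }
}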

cmd/util/search.go (new file, 42 lines)

@@ -0,0 +1,42 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package util
import "github.com/pterm/pterm"
type TableWriter struct {
td pterm.TableData
}
func (l *TableWriter) AppendRow(item []string) {
l.td = append(l.td, item)
}
func (l *TableWriter) Render() {
pterm.DefaultTable.WithHasHeader().WithData(l.td).Render()
}
type ListWriter struct {
bb []pterm.BulletListItem
}
func (l *ListWriter) AppendItem(item pterm.BulletListItem) {
l.bb = append(l.bb, item)
}
func (l *ListWriter) Render() {
pterm.DefaultBulletList.WithItems(l.bb).Render()
}
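A minimal usage sketch for the writers above (the import path and the sample rows are assumptions; pterm is the dependency already listed in go.mod):

package main

import (
    "github.com/pterm/pterm"

    "github.com/mudler/luet/cmd/util" // assumed import path for TableWriter/ListWriter
)

func main() {
    // Table output: the first appended row acts as the header (WithHasHeader).
    t := &util.TableWriter{}
    t.AppendRow([]string{"Package", "Version"})
    t.AppendRow([]string{"seed/alpine", "1.0"})
    t.Render()

    // Bullet list output.
    l := &util.ListWriter{}
    l.AppendItem(pterm.BulletListItem{Level: 0, Text: "seed/alpine@1.0"})
    l.Render()
}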


@@ -1,98 +0,0 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package util
import (
"fmt"
"os"
"path/filepath"
"github.com/docker/docker/api/types"
"github.com/docker/go-units"
"github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
"github.com/spf13/cobra"
)
func NewUnpackCommand() *cobra.Command {
c := &cobra.Command{
Use: "unpack image path",
Short: "Unpack a docker image natively",
Long: `unpack doesn't need the docker daemon to run, and unpacks a docker image in the specified directory:
luet util unpack golang:alpine /alpine
`,
PreRun: func(cmd *cobra.Command, args []string) {
if len(args) != 2 {
Fatal("Expects an image and a path")
}
},
Run: func(cmd *cobra.Command, args []string) {
image := args[0]
destination, err := filepath.Abs(args[1])
if err != nil {
Error("Invalid path %s", destination)
os.Exit(1)
}
verify, _ := cmd.Flags().GetBool("verify")
user, _ := cmd.Flags().GetString("auth-username")
pass, _ := cmd.Flags().GetString("auth-password")
authType, _ := cmd.Flags().GetString("auth-type")
server, _ := cmd.Flags().GetString("auth-server-address")
identity, _ := cmd.Flags().GetString("auth-identity-token")
registryToken, _ := cmd.Flags().GetString("auth-registry-token")
temp, err := config.LuetCfg.GetSystem().TempDir("contentstore")
if err != nil {
Fatal("Cannot create a tempdir", err.Error())
}
Info("Downloading", image, "to", destination)
auth := &types.AuthConfig{
Username: user,
Password: pass,
ServerAddress: server,
Auth: authType,
IdentityToken: identity,
RegistryToken: registryToken,
}
info, err := helpers.DownloadAndExtractDockerImage(temp, image, destination, auth, verify)
if err != nil {
Error(err.Error())
os.Exit(1)
}
Info(fmt.Sprintf("Pulled: %s %s", info.Target.Digest, info.Name))
Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.ContentSize))))
},
}
c.Flags().String("auth-username", "", "Username to authenticate to registry/notary")
c.Flags().String("auth-password", "", "Password to authenticate to registry")
c.Flags().String("auth-type", "", "Auth type")
c.Flags().String("auth-server-address", "", "Authentication server address")
c.Flags().String("auth-identity-token", "", "Authentication identity token")
c.Flags().String("auth-registry-token", "", "Authentication registry token")
c.Flags().Bool("verify", false, "Verify signed images to notary before to pull")
return c
}


@@ -7,7 +7,7 @@ fi
set -ex
export LUET_NOLOCK=true
LUET_VERSION=$(curl -s https://api.github.com/repos/mudler/luet/releases/latest | ( grep -oP '"tag_name": "\K(.*)(?=")' || echo "0.9.24" ))
LUET_VERSION=$(curl -s https://api.github.com/repos/mudler/luet/releases/latest | grep tag_name | awk '{ print $2 }' | sed -e 's/\"//g' -e 's/,//g' || echo "0.9.24" )
LUET_ROOTFS=${LUET_ROOTFS:-/}
LUET_DATABASE_PATH=${LUET_DATABASE_PATH:-/var/luet/db}
LUET_DATABASE_ENGINE=${LUET_DATABASE_ENGINE:-boltdb}


@@ -99,6 +99,14 @@
# If set to false rootfs path is used as prefix.
# config_from_host: true
#
#
# ------------------------------------------------
# Finalizer Environment Variables
# -----------------------------------------------
# finalizer_envs:
# - key: "BUILD_ISO"
# value: "1"
#
# System repositories
# ---------------------------------------------
# In alternative to define repositories files

go.mod (88 changed lines)

@@ -1,71 +1,77 @@
module github.com/mudler/luet
go 1.14
go 1.16
require (
github.com/DataDog/zstd v1.4.4 // indirect
github.com/Sabayon/pkgs-checker v0.8.1
github.com/DataDog/zstd v1.4.5 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1 // indirect
github.com/Sabayon/pkgs-checker v0.8.4
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef
github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154
github.com/briandowns/spinner v1.12.1-0.20201220203425-e201aaea0a31
github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765
github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24
github.com/containerd/containerd v1.5.2
github.com/crillab/gophersat v1.3.2-0.20210701121804-72b19f5b6b38
github.com/docker/cli v20.10.7+incompatible
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
github.com/docker/docker v20.10.7+incompatible
github.com/docker/go-units v0.4.0
github.com/ecooper/qlearning v0.0.0-20160612200101-3075011a69fd
github.com/genuinetools/img v0.5.11
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/ghodss/yaml v1.0.0
github.com/google/go-containerregistry v0.2.1
github.com/go-sql-driver/mysql v1.6.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/go-containerregistry v0.6.0
github.com/google/renameio v1.0.0
github.com/google/uuid v1.3.0 // indirect
github.com/gookit/color v1.5.0
github.com/gorilla/mux v1.7.4 // indirect
github.com/hashicorp/go-multierror v1.0.0
github.com/hashicorp/go-version v1.2.1
github.com/imdario/mergo v0.3.8
github.com/jedib0t/go-pretty v4.3.0+incompatible
github.com/jedib0t/go-pretty/v6 v6.0.5
github.com/hashicorp/go-version v1.3.0
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
github.com/klauspost/compress v1.8.3
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.13.0
github.com/klauspost/pgzip v1.2.5
github.com/knqyf263/go-deb-version v0.0.0-20190517075300-09fca494f03d
github.com/kyokomi/emoji v2.1.0+incompatible
github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e
github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0
github.com/moby/buildkit v0.7.2
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.1
github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/moby/moby v20.10.9+incompatible
github.com/moby/sys/mount v0.2.0 // indirect
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
github.com/mudler/docker-companion v0.4.6-0.20200418093252-41846f112d87
github.com/mudler/go-pluggable v0.0.0-20210513155700-54c6443073af
github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290
github.com/onsi/ginkgo v1.14.2
github.com/onsi/gomega v1.10.3
github.com/onsi/ginkgo v1.16.4
github.com/onsi/gomega v1.16.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible
github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
github.com/pkg/errors v0.9.1
github.com/schollz/progressbar/v3 v3.7.1
github.com/sirupsen/logrus v1.7.0
github.com/spf13/cobra v1.1.1
github.com/spf13/viper v1.7.1
github.com/pterm/pterm v0.12.32-0.20211002183613-ada9ef6790c3
github.com/rancher-sandbox/gofilecache v0.0.0-20210330135715-becdeff5df15
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.8.1
github.com/theupdateframework/notary v0.7.0
go.etcd.io/bbolt v1.3.5
go.uber.org/atomic v1.5.1 // indirect
go.uber.org/multierr v1.4.0
go.uber.org/zap v1.13.0
google.golang.org/grpc v1.29.1
gopkg.in/yaml.v2 v2.3.0
go.uber.org/multierr v1.6.0
go.uber.org/zap v1.17.0
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
golang.org/x/mod v0.4.2
golang.org/x/oauth2 v0.0.0-20210810183815-faf39c7919d5 // indirect
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
google.golang.org/genproto v0.0.0-20210811021853-ddbe55d93216 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/ini.v1 v1.63.2 // indirect
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.3.4
)
replace github.com/docker/docker => github.com/Luet-lab/moby v17.12.0-ce-rc1.0.20200605210607-749178b8f80d+incompatible
replace github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55
replace github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
replace github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4

go.sum (1049 changed lines): file diff suppressed because it is too large


@@ -0,0 +1,28 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package client_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestClient(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Client API Suite")
}

pkg/api/client/search.go (new file, 131 lines)

@@ -0,0 +1,131 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package client
import (
"encoding/json"
"fmt"
"strings"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/mudler/luet/pkg/api/client/utils"
)
func TreePackages(treedir string) (searchResult SearchResult, err error) {
var res []byte
res, err = utils.RunSHOUT("tree", fmt.Sprintf("luet tree pkglist --tree %s --output json", treedir))
if err != nil {
fmt.Println(string(res))
return
}
json.Unmarshal(res, &searchResult)
return
}
func imageAvailable(image string) bool {
_, err := crane.Digest(image)
return err == nil
}
type SearchResult struct {
Packages []Package
}
type Package struct {
Name, Category, Version, Path string
}
func (p Package) String() string {
return fmt.Sprintf("%s/%s@%s", p.Category, p.Name, p.Version)
}
func (p Package) Image(repository string) string {
return fmt.Sprintf("%s:%s-%s-%s", repository, p.Name, p.Category, strings.ReplaceAll(p.Version, "+", "-"))
}
func (p Package) ImageTag() string {
// ${name}-${category}-${version//+/-}
return fmt.Sprintf("%s-%s-%s", p.Name, p.Category, strings.ReplaceAll(p.Version, "+", "-"))
}
func (p Package) ImageMetadata(repository string) string {
return fmt.Sprintf("%s.metadata.yaml", p.Image(repository))
}
func (p Package) ImageAvailable(repository string) bool {
return imageAvailable(p.Image(repository))
}
func (p Package) Equal(pp Package) bool {
if p.Name == pp.Name && p.Category == pp.Category && p.Version == pp.Version {
return true
}
return false
}
func (p Package) EqualS(s string) bool {
if s == fmt.Sprintf("%s/%s", p.Category, p.Name) {
return true
}
return false
}
func (p Package) EqualSV(s string) bool {
if s == fmt.Sprintf("%s/%s@%s", p.Category, p.Name, p.Version) {
return true
}
return false
}
func (p Package) EqualNoV(pp Package) bool {
if p.Name == pp.Name && p.Category == pp.Category {
return true
}
return false
}
func (s SearchResult) FilterByCategory(cat string) SearchResult {
new := SearchResult{Packages: []Package{}}
for _, r := range s.Packages {
if r.Category == cat {
new.Packages = append(new.Packages, r)
}
}
return new
}
func (s SearchResult) FilterByName(name string) SearchResult {
new := SearchResult{Packages: []Package{}}
for _, r := range s.Packages {
if !strings.Contains(r.Name, name) {
new.Packages = append(new.Packages, r)
}
}
return new
}
type Packages []Package
func (p Packages) Exist(pp Package) bool {
for _, pi := range p {
if pp.Equal(pi) {
return true
}
}
return false
}
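A sketch of how the client API above can be consumed (the tree path and repository name are placeholders; ImageAvailable does a remote digest lookup through crane, so it needs network access):

package main

import (
    "fmt"
    "log"

    client "github.com/mudler/luet/pkg/api/client"
)

func main() {
    // Parse the package list of a tree by shelling out to `luet tree pkglist`.
    res, err := client.TreePackages("./packages-tree") // placeholder path
    if err != nil {
        log.Fatal(err)
    }

    repo := "quay.io/example/repo" // placeholder repository
    for _, p := range res.Packages {
        fmt.Println(p.String(), "->", p.Image(repo))
        if p.ImageAvailable(repo) {
            fmt.Println("already built:", p.ImageTag())
        }
    }
}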


@@ -0,0 +1,47 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package client_test
import (
. "github.com/mudler/luet/pkg/api/client"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Client CLI API", func() {
Context("Reads a package tree from the luet CLI", func() {
It("Correctly detect packages", func() {
t, err := TreePackages("../../../tests/fixtures/alpine")
Expect(err).ToNot(HaveOccurred())
Expect(t).ToNot(BeNil())
Expect(len(t.Packages)).To(Equal(1))
Expect(t.Packages[0].Name).To(Equal("alpine"))
Expect(t.Packages[0].Category).To(Equal("seed"))
Expect(t.Packages[0].Version).To(Equal("1.0"))
Expect(t.Packages[0].ImageAvailable("foo")).To(BeFalse())
Expect(t.Packages[0].Equal(t.Packages[0])).To(BeTrue())
Expect(t.Packages[0].Equal(Package{})).To(BeFalse())
Expect(t.Packages[0].EqualNoV(Package{Name: "alpine", Category: "seed"})).To(BeTrue())
Expect(t.Packages[0].EqualS("seed/alpine")).To(BeTrue())
Expect(t.Packages[0].EqualS("seed/alpinev")).To(BeFalse())
Expect(t.Packages[0].EqualSV("seed/alpine@1.0")).To(BeTrue())
Expect(t.Packages[0].Image("foo")).To(Equal("foo:alpine-seed-1.0"))
Expect(Packages(t.Packages).Exist(t.Packages[0])).To(BeTrue())
Expect(Packages(t.Packages).Exist(Package{})).To(BeFalse())
})
})
})


@@ -0,0 +1,37 @@
package utils
import (
"log"
"os"
"os/exec"
"strings"
)
func RunSHOUT(stepName, bashFragment string) ([]byte, error) {
cmd := exec.Command("sh", "-s")
cmd.Stdin = strings.NewReader(bashWrap(bashFragment))
cmd.Env = os.Environ()
// log.Printf("Running in background: %v", stepName)
return cmd.CombinedOutput()
}
func RunSH(stepName, bashFragment string) error {
cmd := exec.Command("sh", "-s")
cmd.Stdin = strings.NewReader(bashWrap(bashFragment))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Env = os.Environ()
log.Printf("Running: %v (%v)", stepName, bashFragment)
return cmd.Run()
}
func bashWrap(cmd string) string {
return `
set -o errexit
set -o nounset
` + cmd + `
`
}
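A short sketch of the two shell helpers (the commands are placeholders): RunSH streams output to the caller's stdout/stderr, while RunSHOUT captures the combined output for later parsing:

package main

import (
    "fmt"
    "log"

    "github.com/mudler/luet/pkg/api/client/utils"
)

func main() {
    // Streams output directly and logs the step name.
    if err := utils.RunSH("list", "ls -la /tmp"); err != nil {
        log.Fatal(err)
    }

    // Captures combined stdout/stderr.
    out, err := utils.RunSHOUT("tree", "luet tree pkglist --tree ./packages-tree --output json")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(string(out))
}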

pkg/api/core/bus/events.go (new file, 111 lines)

@@ -0,0 +1,111 @@
package bus
import (
"fmt"
"github.com/mudler/go-pluggable"
"github.com/mudler/luet/pkg/api/core/types"
)
var (
// Package events
// EventPackageInstall is the event fired when a new package is being installed
EventPackageInstall pluggable.EventType = "package.install"
// EventPackageUnInstall is the event fired when a new package is being uninstalled
EventPackageUnInstall pluggable.EventType = "package.uninstall"
// Package build
// EventPackagePreBuild is the event fired before a package is being built
EventPackagePreBuild pluggable.EventType = "package.pre.build"
// EventPackagePreBuildArtifact is the event fired before a package tarball is being generated
EventPackagePreBuildArtifact pluggable.EventType = "package.pre.build_artifact"
// EventPackagePostBuildArtifact is the event fired after a package tarball is generated
EventPackagePostBuildArtifact pluggable.EventType = "package.post.build_artifact"
// EventPackagePostBuild is the event fired after a package was built
EventPackagePostBuild pluggable.EventType = "package.post.build"
// Image build
// EventImagePreBuild is the event fired before an image is being built
EventImagePreBuild pluggable.EventType = "image.pre.build"
// EventImagePrePull is the event fired before an image is being pulled
EventImagePrePull pluggable.EventType = "image.pre.pull"
// EventImagePrePush is the event fired before an image is being pushed
EventImagePrePush pluggable.EventType = "image.pre.push"
// EventImagePostBuild is the event fired after an image is being built
EventImagePostBuild pluggable.EventType = "image.post.build"
// EventImagePostPull is the event fired after an image is being pulled
EventImagePostPull pluggable.EventType = "image.post.pull"
// EventImagePostPush is the event fired after an image is being pushed
EventImagePostPush pluggable.EventType = "image.post.push"
// Repository events
// EventRepositoryPreBuild is the event fired before a repository is being built
EventRepositoryPreBuild pluggable.EventType = "repository.pre.build"
// EventRepositoryPostBuild is the event fired after a repository was built
EventRepositoryPostBuild pluggable.EventType = "repository.post.build"
// Image unpack
// EventImagePreUnPack is the event fired before unpacking an image to a local dir
EventImagePreUnPack pluggable.EventType = "image.pre.unpack"
// EventImagePostUnPack is the event fired after unpacking an image to a local dir
EventImagePostUnPack pluggable.EventType = "image.post.unpack"
)
// Manager is the bus instance manager, which subscribes plugins to events emitted by Luet
var Manager *Bus = &Bus{
Manager: pluggable.NewManager(
[]pluggable.EventType{
EventPackageInstall,
EventPackageUnInstall,
EventPackagePreBuild,
EventPackagePreBuildArtifact,
EventPackagePostBuildArtifact,
EventPackagePostBuild,
EventRepositoryPreBuild,
EventRepositoryPostBuild,
EventImagePreBuild,
EventImagePrePull,
EventImagePrePush,
EventImagePostBuild,
EventImagePostPull,
EventImagePostPush,
EventImagePreUnPack,
EventImagePostUnPack,
},
),
}
type Bus struct {
*pluggable.Manager
}
func (b *Bus) Initialize(ctx *types.Context, plugin ...string) {
b.Manager.Load(plugin...).Register()
for _, e := range b.Manager.Events {
b.Manager.Response(e, func(p *pluggable.Plugin, r *pluggable.EventResponse) {
ctx.Debug(
"plugin_event",
"received from",
p.Name,
"at",
p.Executable,
r,
)
if r.Errored() {
err := fmt.Sprintf("Plugin %s at %s had an error: %s", p.Name, p.Executable, r.Error)
ctx.Fatal(err)
} else {
if r.State != "" {
message := fmt.Sprintf(":lollipop: Plugin %s at %s succeeded, state reported:", p.Name, p.Executable)
ctx.Success(message)
ctx.Info(r.State)
}
}
})
}
}
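A sketch of initializing the bus (the plugin name is a placeholder; the exact discovery rules for plugin executables are an assumption and depend on go-pluggable):

package main

import (
    "github.com/mudler/luet/pkg/api/core/bus"
    "github.com/mudler/luet/pkg/api/core/types"
)

func main() {
    ctx := types.NewContext()

    // Load the given plugin (placeholder name) and subscribe it to all the
    // events declared above; plugin responses are logged through the context.
    bus.Manager.Initialize(ctx, "myplugin")
}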


@@ -0,0 +1,108 @@
// Copyright © 2019-2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package config
import (
"fmt"
"path/filepath"
"strings"
)
type ConfigProtectConfFile struct {
Filename string
Name string `mapstructure:"name" yaml:"name" json:"name"`
Directories []string `mapstructure:"dirs" yaml:"dirs" json:"dirs"`
}
func NewConfigProtectConfFile(filename string) *ConfigProtectConfFile {
return &ConfigProtectConfFile{
Filename: filename,
Name: "",
Directories: []string{},
}
}
func (c *ConfigProtectConfFile) String() string {
return fmt.Sprintf("[%s] filename: %s, dirs: %s", c.Name, c.Filename,
c.Directories)
}
type ConfigProtect struct {
AnnotationDir string
MapProtected map[string]bool
}
func NewConfigProtect(annotationDir string) *ConfigProtect {
if len(annotationDir) > 0 && annotationDir[0:1] != "/" {
annotationDir = "/" + annotationDir
}
return &ConfigProtect{
AnnotationDir: annotationDir,
MapProtected: make(map[string]bool),
}
}
func (c *ConfigProtect) Map(files []string, protected []ConfigProtectConfFile) {
for _, file := range files {
if file[0:1] != "/" {
file = "/" + file
}
if len(protected) > 0 {
for _, conf := range protected {
for _, dir := range conf.Directories {
// Note: on unpack, file paths come without a leading /
if strings.HasPrefix(file, filepath.Clean(dir)) {
// The docker archive modifier works with paths without a leading /.
c.MapProtected[file] = true
goto nextFile
}
}
}
}
if c.AnnotationDir != "" && strings.HasPrefix(file, filepath.Clean(c.AnnotationDir)) {
c.MapProtected[file] = true
}
nextFile:
}
}
func (c *ConfigProtect) Protected(file string) bool {
if file[0:1] != "/" {
file = "/" + file
}
_, ans := c.MapProtected[file]
return ans
}
func (c *ConfigProtect) GetProtectFiles(withSlash bool) []string {
ans := []string{}
for key := range c.MapProtected {
if withSlash {
ans = append(ans, key)
} else {
ans = append(ans, key[1:])
}
}
return ans
}
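A usage sketch for the config-protect helpers above (the file paths and the protection entry are invented for illustration):

package main

import (
    "fmt"

    config "github.com/mudler/luet/pkg/api/core/config"
)

func main() {
    // Protect everything under the annotation dir /etc plus the directories
    // coming from config.protect.d entries (a hand-built one here).
    cp := config.NewConfigProtect("/etc")
    protected := []config.ConfigProtectConfFile{
        {Name: "ssh", Directories: []string{"/etc/ssh"}},
    }

    cp.Map([]string{"etc/ssh/sshd_config", "usr/bin/foo"}, protected)

    fmt.Println(cp.Protected("etc/ssh/sshd_config")) // true
    fmt.Println(cp.Protected("usr/bin/foo"))         // false
    fmt.Println(cp.GetProtectFiles(false))           // paths without the leading /
}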


@@ -17,7 +17,8 @@
package config_test
import (
config "github.com/mudler/luet/pkg/config"
config "github.com/mudler/luet/pkg/api/core/config"
"github.com/mudler/luet/pkg/api/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -28,7 +29,7 @@ var _ = Describe("Config", func() {
Context("Test config protect", func() {
It("Protect1", func() {
ctx := types.NewContext()
files := []string{
"etc/foo/my.conf",
"usr/bin/foo",
@@ -36,7 +37,7 @@ var _ = Describe("Config", func() {
}
cp := config.NewConfigProtect("/etc")
cp.Map(files)
cp.Map(files, ctx.Config.ConfigProtectConfFiles)
Expect(cp.Protected("etc/foo/my.conf")).To(BeTrue())
Expect(cp.Protected("/etc/foo/my.conf")).To(BeTrue())
@@ -58,6 +59,7 @@ var _ = Describe("Config", func() {
})
It("Protect2", func() {
ctx := types.NewContext()
files := []string{
"etc/foo/my.conf",
@@ -66,7 +68,7 @@ var _ = Describe("Config", func() {
}
cp := config.NewConfigProtect("")
cp.Map(files)
cp.Map(files, ctx.Config.ConfigProtectConfFiles)
Expect(cp.Protected("etc/foo/my.conf")).To(BeFalse())
Expect(cp.Protected("/etc/foo/my.conf")).To(BeFalse())
@@ -84,6 +86,7 @@ var _ = Describe("Config", func() {
})
It("Protect3: Annotation dir without initial slash", func() {
ctx := types.NewContext()
files := []string{
"etc/foo/my.conf",
@@ -92,7 +95,7 @@ var _ = Describe("Config", func() {
}
cp := config.NewConfigProtect("etc")
cp.Map(files)
cp.Map(files, ctx.Config.ConfigProtectConfFiles)
Expect(cp.Protected("etc/foo/my.conf")).To(BeTrue())
Expect(cp.Protected("/etc/foo/my.conf")).To(BeTrue())


@@ -0,0 +1,29 @@
// Copyright © 2019-2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package config_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestSolver(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Config Suite")
}

pkg/api/core/image/cache.go (new file, 168 lines)

@@ -0,0 +1,168 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image
import (
"encoding/json"
"os"
"strings"
"github.com/peterbourgon/diskv"
)
// Cache represents a key-value store which is capable of upgrading to disk when it
// reaches a pre-defined threshold.
type Cache struct {
store *diskv.Diskv
memory map[string]string
dir string
onDisk bool
maxmemorySize, maxItemSize int
}
// NewCache creates a new key-value cache.
// The cache acts in memory as long as maxItemsize is not reached.
// Once the threshold is met, the cache is offloaded to disk automatically,
// keeping a buffer of maxmemorySize in memory.
func NewCache(path string, maxmemorySize, maxItemsize int) *Cache {
disk := diskv.New(diskv.Options{
BasePath: path,
CacheSizeMax: uint64(maxmemorySize), // in-memory buffer for the on-disk store
})
return &Cache{
memory: make(map[string]string),
store: disk,
dir: path,
maxmemorySize: maxmemorySize,
maxItemSize: maxItemsize,
}
}
// This is needed because the disk cache stores each entry as a separate file,
// so key names must not contain the path separator.
// XXX: This is inconvenient because, while iterating over results, we can no
// longer rely on the original key name.
// We don't do any hashing to avoid any performance impact
func cleanKey(s string) string {
return strings.ReplaceAll(s, string(os.PathSeparator), "_")
}
// Count returns the number of items in the cache.
// If the cache is on disk this might be an expensive call.
func (c *Cache) Count() int {
if !c.onDisk {
return len(c.memory)
}
count := 0
for range c.store.Keys(nil) {
count++
}
return count
}
// Get attempts to retrieve a value for a key
func (c *Cache) Get(key string) (value string, found bool) {
if !c.onDisk {
v, ok := c.memory[key]
return v, ok
}
v, err := c.store.Read(cleanKey(key))
if err == nil {
found = true
}
value = string(v)
return
}
func (c *Cache) flushToDisk() {
for k, v := range c.memory {
c.store.Write(cleanKey(k), []byte(v))
}
c.memory = make(map[string]string)
c.onDisk = true
}
// Set updates or inserts a new value
func (c *Cache) Set(key, value string) error {
if !c.onDisk && c.Count() >= c.maxItemSize && c.maxItemSize != 0 {
c.flushToDisk()
}
if c.onDisk {
return c.store.Write(cleanKey(key), []byte(value))
}
c.memory[key] = value
return nil
}
// SetValue updates or inserts a new value by marshalling it into JSON.
func (c *Cache) SetValue(key string, value interface{}) error {
dat, err := json.Marshal(value)
if err != nil {
return err
}
return c.Set(cleanKey(key), string(dat))
}
// CacheResult represents the key/value pair returned when
// iterating over the cache
type CacheResult struct {
key, value string
}
// Value returns the underlying value
func (c CacheResult) Value() string {
return c.value
}
// Key returns the cache result key
func (c CacheResult) Key() string {
return c.key
}
// Unmarshal the result into the interface. Use it to retrieve data
// set with SetValue
func (c CacheResult) Unmarshal(i interface{}) error {
return json.Unmarshal([]byte(c.Value()), i)
}
// All iterates over the cache by key
func (c *Cache) All(fn func(CacheResult)) {
if !c.onDisk {
for k, v := range c.memory {
fn(CacheResult{key: k, value: v})
}
return
}
for key := range c.store.Keys(nil) {
val, _ := c.store.Read(key)
fn(CacheResult{key: key, value: string(val)})
}
}
// Clean resets the in-memory state and removes any on-disk data
func (c *Cache) Clean() {
c.memory = make(map[string]string)
c.onDisk = false
os.RemoveAll(c.dir)
}
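A minimal sketch of the cache above, assuming a scratch directory (the real callers obtain one via the context's TempDir); with maxItemsize set to 2 the third Set transparently moves the store to disk:

package main

import (
    "fmt"
    "io/ioutil"
    "os"

    "github.com/mudler/luet/pkg/api/core/image"
)

func main() {
    dir, _ := ioutil.TempDir("", "cache") // placeholder scratch dir
    defer os.RemoveAll(dir)

    // Keep at most 2 items in memory; afterwards the store is offloaded to
    // disk with a 50MB in-memory buffer.
    c := image.NewCache(dir, 50*1024*1024, 2)
    defer c.Clean()

    c.Set("foo", "bar")
    c.SetValue("meta", map[string]string{"hello": "world"})
    c.Set("baz", "qux") // this one triggers the switch to disk

    if v, ok := c.Get("foo"); ok {
        fmt.Println("foo =", v)
    }
    c.All(func(r image.CacheResult) {
        fmt.Println(r.Key(), "->", r.Value())
    })
    fmt.Println("items:", c.Count())
}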


@@ -0,0 +1,98 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image_test
import (
"path/filepath"
. "github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Cache", func() {
ctx := types.NewContext()
Context("used as k/v store", func() {
cache := &Cache{}
var dir string
BeforeEach(func() {
ctx = types.NewContext()
var err error
dir, err = ctx.Config.System.TempDir("foo")
Expect(err).ToNot(HaveOccurred())
cache = NewCache(dir, 10*1024*1024, 1) // 10MB Cache when upgrading to files. Max volatile memory of 1 row.
})
AfterEach(func() {
cache.Clean()
})
It("does handle automatically memory upgrade", func() {
cache.Set("foo", "bar")
v, found := cache.Get("foo")
Expect(found).To(BeTrue())
Expect(v).To(Equal("bar"))
Expect(file.Exists(filepath.Join(dir, "foo"))).To(BeFalse())
cache.Set("baz", "bar")
Expect(file.Exists(filepath.Join(dir, "foo"))).To(BeTrue())
Expect(file.Exists(filepath.Join(dir, "baz"))).To(BeTrue())
v, found = cache.Get("foo")
Expect(found).To(BeTrue())
Expect(v).To(Equal("bar"))
Expect(cache.Count()).To(Equal(2))
})
It("does CRUD", func() {
cache.Set("foo", "bar")
v, found := cache.Get("foo")
Expect(found).To(BeTrue())
Expect(v).To(Equal("bar"))
hit := false
cache.All(func(c CacheResult) {
hit = true
Expect(c.Key()).To(Equal("foo"))
Expect(c.Value()).To(Equal("bar"))
})
Expect(hit).To(BeTrue())
})
It("Unmarshals values", func() {
type testStruct struct {
Test string
}
cache.SetValue("foo", &testStruct{Test: "baz"})
n := &testStruct{}
cache.All(func(cr CacheResult) {
err := cr.Unmarshal(n)
Expect(err).ToNot(HaveOccurred())
})
Expect(n.Test).To(Equal("baz"))
})
})
})


@@ -0,0 +1,94 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image
import (
"io"
"os"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/pkg/errors"
)
func imageFromTar(imagename, architecture, OS string, r io.Reader) (name.Reference, v1.Image, error) {
newRef, err := name.ParseReference(imagename)
if err != nil {
return nil, nil, err
}
layer, err := tarball.LayerFromReader(r)
if err != nil {
return nil, nil, err
}
baseImage := empty.Image
cfg, err := baseImage.ConfigFile()
if err != nil {
return nil, nil, err
}
cfg.Architecture = architecture
cfg.OS = OS
baseImage, err = mutate.ConfigFile(baseImage, cfg)
if err != nil {
return nil, nil, err
}
img, err := mutate.Append(baseImage, mutate.Addendum{
Layer: layer,
History: v1.History{
CreatedBy: "luet",
Comment: "Custom image",
},
})
if err != nil {
return nil, nil, err
}
return newRef, img, nil
}
// CreateTar creates an image tarball from a standard tarball
func CreateTar(srctar, dstimageTar, imagename, architecture, OS string) error {
f, err := os.Open(srctar)
if err != nil {
return errors.Wrap(err, "Cannot open "+srctar)
}
defer f.Close()
return CreateTarReader(f, dstimageTar, imagename, architecture, OS)
}
// CreateTarReader creates an image tarball from a standard tarball reader
func CreateTarReader(r io.Reader, dstimageTar, imagename, architecture, OS string) error {
dstFile, err := os.Create(dstimageTar)
if err != nil {
return errors.Wrap(err, "Cannot create "+dstimageTar)
}
defer dstFile.Close()
newRef, img, err := imageFromTar(imagename, architecture, OS, r)
if err != nil {
return err
}
// NOTE: We might also stream that back to the daemon with daemon.Write(tag, img)
return tarball.Write(newRef, img, dstFile)
}
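A sketch of turning a plain tarball into a loadable OCI image tarball with the API above (all paths and the image name are placeholders):

package main

import (
    "log"
    "runtime"

    "github.com/mudler/luet/pkg/api/core/image"
)

func main() {
    // Wrap an existing rootfs tar into a single-layer OCI image tarball,
    // which can then be loaded into a daemon or pushed to a registry.
    err := image.CreateTar(
        "/tmp/rootfs.tar",        // placeholder source tar
        "/tmp/testimage.oci.tar", // placeholder destination image tarball
        "example.org/test/image:latest",
        runtime.GOARCH, runtime.GOOS,
    )
    if err != nil {
        log.Fatal(err)
    }
}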


@@ -0,0 +1,80 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image_test
import (
"os"
"path/filepath"
"runtime"
. "github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/api/core/types/artifact"
"github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Create", func() {
Context("Creates an OCI image from a standard tar", func() {
It("creates an image which is loadable", func() {
ctx := types.NewContext()
dst, err := ctx.Config.System.TempFile("dst")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(dst.Name())
srcTar, err := ctx.Config.System.TempFile("srcTar")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(srcTar.Name())
b := backend.NewSimpleDockerBackend(ctx)
b.DownloadImage(backend.Options{ImageName: "alpine"})
img, err := b.ImageReference("alpine", false)
Expect(err).ToNot(HaveOccurred())
_, dir, err := Extract(ctx, img, false, nil)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(dir)
Expect(file.Touch(filepath.Join(dir, "test"))).ToNot(HaveOccurred())
Expect(file.Exists(filepath.Join(dir, "bin"))).To(BeTrue())
a := artifact.NewPackageArtifact(srcTar.Name())
a.Compress(dir, 1)
// Unfortunately there is no other easy way to test this
err = CreateTar(srcTar.Name(), dst.Name(), "testimage", runtime.GOARCH, runtime.GOOS)
Expect(err).ToNot(HaveOccurred())
b.LoadImage(dst.Name())
Expect(b.ImageExists("testimage")).To(BeTrue())
img, err = b.ImageReference("testimage", false)
Expect(err).ToNot(HaveOccurred())
_, dir, err = Extract(ctx, img, false, nil)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(dir)
Expect(file.Exists(filepath.Join(dir, "bin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(dir, "test"))).To(BeTrue())
})
})
})

pkg/api/core/image/delta.go (new file, 111 lines)

@@ -0,0 +1,111 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image
import (
"archive/tar"
"io"
"regexp"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/mutate"
)
func compileRegexes(regexes []string) []*regexp.Regexp {
var result []*regexp.Regexp
for _, i := range regexes {
r, e := regexp.Compile(i)
if e != nil {
continue
}
result = append(result, r)
}
return result
}
type ImageDiffNode struct {
Name string `json:"Name"`
Size int `json:"Size"`
}
type ImageDiff struct {
Additions []ImageDiffNode `json:"Adds"`
Deletions []ImageDiffNode `json:"Dels"`
Changes []ImageDiffNode `json:"Mods"`
}
func Delta(srcimg, dstimg v1.Image) (res ImageDiff, err error) {
srcReader := mutate.Extract(srcimg)
defer srcReader.Close()
dstReader := mutate.Extract(dstimg)
defer dstReader.Close()
filesSrc, filesDst := map[string]int64{}, map[string]int64{}
srcTar := tar.NewReader(srcReader)
dstTar := tar.NewReader(dstReader)
for {
var hdr *tar.Header
hdr, err = srcTar.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return
}
filesSrc[hdr.Name] = hdr.Size
}
for {
var hdr *tar.Header
hdr, err = dstTar.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return
}
filesDst[hdr.Name] = hdr.Size
}
err = nil
for f, size := range filesDst {
if size2, exist := filesSrc[f]; exist && size2 != size {
res.Changes = append(res.Changes, ImageDiffNode{
Name: f,
Size: int(size),
})
} else if !exist {
res.Additions = append(res.Additions, ImageDiffNode{
Name: f,
Size: int(size),
})
}
}
for f, size := range filesSrc {
if _, exist := filesDst[f]; !exist {
res.Deletions = append(res.Deletions, ImageDiffNode{
Name: f,
Size: int(size),
})
}
}
return
}
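A sketch of computing a delta between two images pulled from the local daemon (the image names are examples; a running Docker daemon with both images available is assumed):

package main

import (
    "encoding/json"
    "fmt"
    "log"

    "github.com/google/go-containerregistry/pkg/name"
    "github.com/google/go-containerregistry/pkg/v1/daemon"

    "github.com/mudler/luet/pkg/api/core/image"
)

func main() {
    srcRef, _ := name.ParseReference("alpine")
    dstRef, _ := name.ParseReference("golang:alpine")

    src, err := daemon.Image(srcRef)
    if err != nil {
        log.Fatal(err)
    }
    dst, err := daemon.Image(dstRef)
    if err != nil {
        log.Fatal(err)
    }

    diff, err := image.Delta(src, dst)
    if err != nil {
        log.Fatal(err)
    }

    // Additions, deletions and changed files, as JSON.
    out, _ := json.MarshalIndent(diff, "", "  ")
    fmt.Println(string(out))
}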


@@ -0,0 +1,138 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image_test
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
daemon "github.com/google/go-containerregistry/pkg/v1/daemon"
. "github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Delta", func() {
Context("Generates deltas of images", func() {
It("computes delta", func() {
ref, err := name.ParseReference("alpine")
Expect(err).ToNot(HaveOccurred())
img, err := daemon.Image(ref)
Expect(err).ToNot(HaveOccurred())
layers, err := Delta(img, img)
Expect(err).ToNot(HaveOccurred())
Expect(len(layers.Changes)).To(Equal(0))
Expect(len(layers.Additions)).To(Equal(0))
Expect(len(layers.Deletions)).To(Equal(0))
})
Context("ExtractDeltaFiles", func() {
ctx := types.NewContext()
var tmpfile *os.File
var ref, ref2 name.Reference
var img, img2 v1.Image
var err error
ref, _ = name.ParseReference("alpine")
ref2, _ = name.ParseReference("golang:alpine")
img, _ = daemon.Image(ref)
img2, _ = daemon.Image(ref2)
BeforeEach(func() {
ctx = types.NewContext()
tmpfile, err = ioutil.TempFile("", "delta")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpfile.Name()) // clean up
})
It("Extract all deltas", func() {
f, err := ExtractDeltaAdditionsFiles(ctx, img, []string{}, []string{})
Expect(err).ToNot(HaveOccurred())
_, tmpdir, err := Extract(
ctx,
img2,
true,
f,
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "home"))).To(BeFalse())
Expect(file.Exists(filepath.Join(tmpdir, "root", ".cache"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "bin", "sh"))).To(BeFalse())
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go", "bin"))).To(BeTrue())
})
It("Extract deltas and excludes /usr/local/go", func() {
f, err := ExtractDeltaAdditionsFiles(ctx, img, []string{}, []string{"usr/local/go"})
Expect(err).ToNot(HaveOccurred())
Expect(err).ToNot(HaveOccurred())
_, tmpdir, err := Extract(
ctx,
img2,
true,
f,
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go"))).To(BeFalse())
})
It("Extract deltas and excludes /usr/local/go/bin, but includes /usr/local/go", func() {
f, err := ExtractDeltaAdditionsFiles(ctx, img, []string{"usr/local/go"}, []string{"usr/local/go/bin"})
Expect(err).ToNot(HaveOccurred())
_, tmpdir, err := Extract(
ctx,
img2,
true,
f,
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go", "bin"))).To(BeFalse())
})
It("Extract deltas and includes /usr/local/go", func() {
f, err := ExtractDeltaAdditionsFiles(ctx, img, []string{"usr/local/go"}, []string{})
Expect(err).ToNot(HaveOccurred())
_, tmpdir, err := Extract(
ctx,
img2,
true,
f,
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "root", ".cache"))).To(BeFalse())
})
})
})
})


@@ -0,0 +1,286 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image
import (
"archive/tar"
"context"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"syscall"
containerdarchive "github.com/containerd/containerd/archive"
"github.com/docker/docker/pkg/system"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/pkg/errors"
)
// ExtractDeltaAdditionsFiles returns a filter which selects only the files
// that were added on top of the given source image. The selection is further
// narrowed by the regexes in the includes and excludes lists.
func ExtractDeltaAdditionsFiles(
ctx *types.Context,
srcimg v1.Image,
includes []string, excludes []string,
) (func(h *tar.Header) (bool, error), error) {
includeRegexp := compileRegexes(includes)
excludeRegexp := compileRegexes(excludes)
srcfilesd, err := ctx.Config.System.TempDir("srcfiles")
if err != nil {
return nil, err
}
filesSrc := NewCache(srcfilesd, 50*1024*1024, 10000)
srcReader := mutate.Extract(srcimg)
defer srcReader.Close()
srcTar := tar.NewReader(srcReader)
for {
var hdr *tar.Header
hdr, err := srcTar.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return nil, err
}
switch hdr.Typeflag {
case tar.TypeDir:
filesSrc.Set(filepath.Dir(hdr.Name), "")
default:
filesSrc.Set(hdr.Name, "")
}
}
return func(h *tar.Header) (bool, error) {
fileName := filepath.Join(string(os.PathSeparator), h.Name)
_, exists := filesSrc.Get(h.Name)
if exists {
return false, nil
}
switch {
case len(includes) == 0 && len(excludes) != 0:
for _, i := range excludeRegexp {
if i.MatchString(filepath.Join(string(os.PathSeparator), h.Name)) &&
fileName == filepath.Join(string(os.PathSeparator), h.Name) {
return false, nil
}
}
ctx.Debug("Adding name", fileName)
return true, nil
case len(includes) > 0 && len(excludes) == 0:
for _, i := range includeRegexp {
if i.MatchString(filepath.Join(string(os.PathSeparator), h.Name)) && fileName == filepath.Join(string(os.PathSeparator), h.Name) {
ctx.Debug("Adding name", fileName)
return true, nil
}
}
return false, nil
case len(includes) != 0 && len(excludes) != 0:
for _, i := range includeRegexp {
if i.MatchString(filepath.Join(string(os.PathSeparator), h.Name)) && fileName == filepath.Join(string(os.PathSeparator), h.Name) {
for _, e := range excludeRegexp {
if e.MatchString(fileName) {
return false, nil
}
}
ctx.Debug("Adding name", fileName)
return true, nil
}
}
return false, nil
default:
ctx.Debug("Adding name", fileName)
return true, nil
}
}, nil
}
// ExtractFiles returns a filter that extracts files from the given path (if not empty).
// It then filters files by an include and an exclude list.
// The lists can contain regexes.
func ExtractFiles(
ctx *types.Context,
prefixPath string,
includes []string, excludes []string,
) func(h *tar.Header) (bool, error) {
includeRegexp := compileRegexes(includes)
excludeRegexp := compileRegexes(excludes)
return func(h *tar.Header) (bool, error) {
fileName := filepath.Join(string(os.PathSeparator), h.Name)
switch {
case len(includes) == 0 && len(excludes) != 0:
for _, i := range excludeRegexp {
if i.MatchString(filepath.Join(prefixPath, fileName)) {
return false, nil
}
}
if prefixPath != "" {
return strings.HasPrefix(fileName, prefixPath), nil
}
ctx.Debug("Adding name", fileName)
return true, nil
case len(includes) > 0 && len(excludes) == 0:
for _, i := range includeRegexp {
if i.MatchString(filepath.Join(prefixPath, fileName)) {
if prefixPath != "" {
return strings.HasPrefix(fileName, prefixPath), nil
}
ctx.Debug("Adding name", fileName)
return true, nil
}
}
return false, nil
case len(includes) != 0 && len(excludes) != 0:
for _, i := range includeRegexp {
if i.MatchString(filepath.Join(prefixPath, fileName)) {
for _, e := range excludeRegexp {
if e.MatchString(filepath.Join(prefixPath, fileName)) {
return false, nil
}
}
if prefixPath != "" {
return strings.HasPrefix(fileName, prefixPath), nil
}
ctx.Debug("Adding name", fileName)
return true, nil
}
}
return false, nil
default:
if prefixPath != "" {
return strings.HasPrefix(fileName, prefixPath), nil
}
return true, nil
}
}
}
// ExtractReader performs the extraction over the io.ReadCloser,
// extracting the files into output. It accepts a filter as an option
// and additional containerd ApplyOpts.
func ExtractReader(ctx *types.Context, reader io.ReadCloser, output string, keepPerms bool, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
defer reader.Close()
// If no filter is specified, grab all.
if filter == nil {
filter = func(h *tar.Header) (bool, error) { return true, nil }
}
// Keep records of permissions as we walk the tar
type permData struct {
PAX, Xattrs map[string]string
Uid, Gid int
Name string
FileMode fs.FileMode
}
permstore, err := ctx.Config.System.TempDir("permstore")
if err != nil {
return 0, "", err
}
perms := NewCache(permstore, 50*1024*1024, 10000)
f := func(h *tar.Header) (bool, error) {
res, err := filter(h)
if res {
perms.SetValue(h.Name, permData{
PAX: h.PAXRecords,
Uid: h.Uid, Gid: h.Gid,
Xattrs: h.Xattrs,
Name: h.Name,
FileMode: h.FileInfo().Mode(),
})
//perms = append(perms, })
}
return res, err
}
opts = append(opts, containerdarchive.WithFilter(f))
// Handle the extraction
c, err := containerdarchive.Apply(context.Background(), output, reader, opts...)
if err != nil {
return 0, "", err
}
// Reconstruct permissions
if keepPerms {
ctx.Debug("Reconstructing permissions")
perms.All(func(cr CacheResult) {
p := &permData{}
cr.Unmarshal(p)
ff := filepath.Join(output, p.Name)
if _, err := os.Lstat(ff); err == nil {
if err := os.Lchown(ff, p.Uid, p.Gid); err != nil {
ctx.Warning(err, "failed chowning file")
}
ctx.Debug("Set", p.Name, p.FileMode)
if err := os.Chmod(ff, p.FileMode); err != nil {
ctx.Warning(err, "failed chmod file")
}
}
for _, attrs := range []map[string]string{p.Xattrs, p.PAX} {
for k, attr := range attrs {
if err := system.Lsetxattr(ff, k, []byte(attr), 0); err != nil {
if errors.Is(err, syscall.ENOTSUP) {
ctx.Debug("ignored xattr %s in archive", ff)
}
}
}
}
})
}
return c, output, nil
}
// Extract is just syntax sugar around ExtractReader. It extracts an image into a temporary directory
func Extract(ctx *types.Context, img v1.Image, keepPerms bool, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
tmpdiffs, err := ctx.Config.GetSystem().TempDir("extraction")
if err != nil {
return 0, "", errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
return ExtractReader(ctx, mutate.Extract(img), tmpdiffs, keepPerms, filter, opts...)
}
// ExtractTo is just syntax sugar around ExtractReader
func ExtractTo(ctx *types.Context, img v1.Image, output string, keepPerms bool, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
return ExtractReader(ctx, mutate.Extract(img), output, keepPerms, filter, opts...)
}
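A sketch of extracting a subset of an image with the filters above (the image name is an example and a local daemon is assumed, mirroring the tests that follow):

package main

import (
    "fmt"
    "log"

    "github.com/google/go-containerregistry/pkg/name"
    "github.com/google/go-containerregistry/pkg/v1/daemon"

    "github.com/mudler/luet/pkg/api/core/image"
    "github.com/mudler/luet/pkg/api/core/types"
)

func main() {
    ctx := types.NewContext()

    ref, _ := name.ParseReference("alpine")
    img, err := daemon.Image(ref)
    if err != nil {
        log.Fatal(err)
    }

    // Extract only /usr, preserving ownership, modes and xattrs, into a
    // temporary directory chosen by the context.
    size, dir, err := image.Extract(ctx, img, true,
        image.ExtractFiles(ctx, "/usr", []string{}, []string{}))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("extracted", size, "bytes into", dir)
}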


@@ -0,0 +1,115 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image_test
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
daemon "github.com/google/go-containerregistry/pkg/v1/daemon"
. "github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Extract", func() {
Context("extract files from images", func() {
Context("ExtractFiles", func() {
ctx := types.NewContext()
var tmpfile *os.File
var ref name.Reference
var img v1.Image
var err error
BeforeEach(func() {
ctx = types.NewContext()
tmpfile, err = ioutil.TempFile("", "extract")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpfile.Name()) // clean up
ref, err = name.ParseReference("alpine")
Expect(err).ToNot(HaveOccurred())
img, err = daemon.Image(ref)
Expect(err).ToNot(HaveOccurred())
})
It("Extract all files", func() {
_, tmpdir, err := Extract(
ctx,
img,
true,
ExtractFiles(ctx, "", []string{}, []string{}),
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "bin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "bin", "sh"))).To(BeTrue())
})
It("Extract specific dir", func() {
_, tmpdir, err := Extract(
ctx,
img,
true,
ExtractFiles(ctx, "/usr", []string{}, []string{}),
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "sbin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "usr", "bin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "bin", "sh"))).To(BeFalse())
})
It("Extract a dir with includes/excludes", func() {
_, tmpdir, err := Extract(
ctx,
img,
true,
ExtractFiles(ctx, "/usr", []string{"bin"}, []string{"sbin"}),
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "bin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "bin", "sh"))).To(BeFalse())
Expect(file.Exists(filepath.Join(tmpdir, "usr", "sbin"))).To(BeFalse())
})
It("Extract with includes/excludes", func() {
_, tmpdir, err := Extract(
ctx,
img,
true,
ExtractFiles(ctx, "", []string{"/usr|/usr/bin"}, []string{"^/bin"}),
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "bin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "bin", "sh"))).To(BeFalse())
})
})
})
})

View File

@@ -0,0 +1,34 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image_test
import (
"testing"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler/backend"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestMutator(t *testing.T) {
RegisterFailHandler(Fail)
b := backend.NewSimpleDockerBackend(types.NewContext())
b.DownloadImage(backend.Options{ImageName: "alpine"})
b.DownloadImage(backend.Options{ImageName: "golang:alpine"})
RunSpecs(t, "Mutator API Suite")
}

View File

@@ -0,0 +1,614 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact
import (
"archive/tar"
"bufio"
"bytes"
"crypto/sha1"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"github.com/docker/docker/pkg/pools"
v1 "github.com/google/go-containerregistry/pkg/v1"
zstd "github.com/klauspost/compress/zstd"
gzip "github.com/klauspost/pgzip"
//"strconv"
"strings"
containerdCompression "github.com/containerd/containerd/archive/compression"
bus "github.com/mudler/luet/pkg/api/core/bus"
config "github.com/mudler/luet/pkg/api/core/config"
"github.com/mudler/luet/pkg/api/core/image"
types "github.com/mudler/luet/pkg/api/core/types"
backend "github.com/mudler/luet/pkg/compiler/backend"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/mudler/luet/pkg/helpers"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
)
// When compiling, we also write a <fingerprint>.metadata.yaml file describing the PackageArtifact. This allows a separate command to create the repository,
// which then consists of just a repository.yaml with the repository structure plus the list of package artifacts.
// A generic client can therefore fetch the packages and, after unpacking the tree, run queries to install packages.
type PackageArtifact struct {
Path string `json:"path"`
Dependencies []*PackageArtifact `json:"dependencies"`
CompileSpec *compilerspec.LuetCompilationSpec `json:"compilationspec"`
Checksums Checksums `json:"checksums"`
SourceAssertion solver.PackagesAssertions `json:"-"`
CompressionType compression.Implementation `json:"compressiontype"`
Files []string `json:"files"`
PackageCacheImage string `json:"package_cacheimage"`
Runtime *pkg.DefaultPackage `json:"runtime,omitempty"`
}
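// readMetadataSketch is a minimal sketch of the client side described above (the
// path argument is a hypothetical <fingerprint>.metadata.yaml produced at compile
// time): it rebuilds the PackageArtifact description from the metadata file.
func readMetadataSketch(path string) (*PackageArtifact, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, errors.Wrap(err, "reading artifact metadata")
	}
	return NewPackageArtifactFromYaml(data)
}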
func ImageToArtifact(ctx *types.Context, img v1.Image, t compression.Implementation, output string, keepPerms bool, filter func(h *tar.Header) (bool, error)) (*PackageArtifact, error) {
_, tmpdiffs, err := image.Extract(ctx, img, keepPerms, filter)
if err != nil {
return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(tmpdiffs) // clean up
a := NewPackageArtifact(output)
a.CompressionType = t
err = a.Compress(tmpdiffs, 1)
if err != nil {
return nil, errors.Wrap(err, "Error met while creating package archive")
}
return a, nil
}
func (p *PackageArtifact) ShallowCopy() *PackageArtifact {
copy := *p
return &copy
}
func NewPackageArtifact(path string) *PackageArtifact {
return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: compression.None}
}
func NewPackageArtifactFromYaml(data []byte) (*PackageArtifact, error) {
p := &PackageArtifact{Checksums: Checksums{}}
err := yaml.Unmarshal(data, &p)
if err != nil {
return p, err
}
return p, err
}
func (a *PackageArtifact) Hash() error {
return a.Checksums.Generate(a)
}
func (a *PackageArtifact) Verify() error {
sum := Checksums{}
if err := sum.Generate(a); err != nil {
return err
}
if err := sum.Compare(a.Checksums); err != nil {
return err
}
return nil
}
func (a *PackageArtifact) WriteYAML(dst string) error {
// First compute the checksum of the artifact. When we write the yaml we want to write up-to-date information.
err := a.Hash()
if err != nil {
return errors.Wrap(err, "Failed generating checksums for artifact")
}
// Update runtime package information
if a.CompileSpec != nil && a.CompileSpec.Package != nil {
runtime, err := a.CompileSpec.Package.GetRuntimePackage()
if err != nil {
return errors.Wrapf(err, "getting runtime package for '%s'", a.CompileSpec.Package.HumanReadableString())
}
a.Runtime = runtime
}
data, err := yaml.Marshal(a)
if err != nil {
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
}
bus.Manager.Publish(bus.EventPackagePreBuildArtifact, a)
mangle, err := NewPackageArtifactFromYaml(data)
if err != nil {
return errors.Wrap(err, "Generated invalid artifact")
}
//p := a.CompileSpec.GetPackage().GetPath()
mangle.CompileSpec.GetPackage().SetPath("")
for _, ass := range mangle.CompileSpec.GetSourceAssertion() {
ass.Package.SetPath("")
}
data, err = yaml.Marshal(mangle)
if err != nil {
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
}
err = ioutil.WriteFile(filepath.Join(dst, a.CompileSpec.GetPackage().GetMetadataFilePath()), data, os.ModePerm)
if err != nil {
return errors.Wrap(err, "While writing PackageArtifact YAML")
}
//a.CompileSpec.GetPackage().SetPath(p)
bus.Manager.Publish(bus.EventPackagePostBuildArtifact, a)
return nil
}
func (a *PackageArtifact) GetFileName() string {
return path.Base(a.Path)
}
// CreateArtifactForFile creates a new artifact from the given file
func CreateArtifactForFile(ctx *types.Context, s string, opts ...func(*PackageArtifact)) (*PackageArtifact, error) {
if _, err := os.Stat(s); os.IsNotExist(err) {
return nil, errors.Wrap(err, "artifact path doesn't exist")
}
fileName := path.Base(s)
archive, err := ctx.Config.GetSystem().TempDir("archive")
if err != nil {
return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
}
defer os.RemoveAll(archive) // clean up
dst := filepath.Join(archive, fileName)
if err := fileHelper.CopyFile(s, dst); err != nil {
return nil, errors.Wrapf(err, "error while copying %s to %s", s, dst)
}
artifact, err := ctx.Config.GetSystem().TempDir("artifact")
if err != nil {
return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
}
a := &PackageArtifact{Path: filepath.Join(artifact, fileName)}
for _, o := range opts {
o(a)
}
return a, a.Compress(archive, 1)
}
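// createCompressedFileSketch is a minimal usage sketch (src is any existing file
// supplied by the caller): it wraps a single file into a zstd-compressed
// PackageArtifact by passing an option that sets the compression type before the
// archive is created.
func createCompressedFileSketch(ctx *types.Context, src string) (*PackageArtifact, error) {
	return CreateArtifactForFile(ctx, src, func(a *PackageArtifact) {
		a.CompressionType = compression.Zstandard
	})
}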
type ImageBuilder interface {
BuildImage(backend.Options) error
LoadImage(path string) error
}
// GenerateFinalImage takes an artifact and builds a Docker image with its content
func (a *PackageArtifact) GenerateFinalImage(ctx *types.Context, imageName string, b ImageBuilder, keepPerms bool) error {
archiveFile, err := os.Open(a.Path)
if err != nil {
return errors.Wrap(err, "Cannot open "+a.Path)
}
defer archiveFile.Close()
decompressed, err := containerdCompression.DecompressStream(archiveFile)
if err != nil {
return errors.Wrap(err, "Cannot open "+a.Path)
}
tempimage, err := ctx.Config.GetSystem().TempFile("tempimage")
if err != nil {
return errors.Wrap(err, "error met while creating tempdir for "+a.Path)
}
defer os.RemoveAll(tempimage.Name()) // clean up
if err := image.CreateTarReader(decompressed, tempimage.Name(), imageName, runtime.GOARCH, runtime.GOOS); err != nil {
return errors.Wrap(err, "could not create image from tar")
}
if err := b.LoadImage(tempimage.Name()); err != nil {
return errors.Wrap(err, "while loading image")
}
return nil
}
// Compress archives and compresses the given source path into the artifact Path.
// It accepts a source path, which is the content to be archived/compressed,
// and a concurrency parameter.
func (a *PackageArtifact) Compress(src string, concurrency int) error {
switch a.CompressionType {
case compression.Zstandard:
err := helpers.Tar(src, a.Path)
if err != nil {
return err
}
original, err := os.Open(a.Path)
if err != nil {
return err
}
defer original.Close()
zstdFile := a.getCompressedName()
bufferedReader := bufio.NewReader(original)
// Open a file for writing.
dst, err := os.Create(zstdFile)
if err != nil {
return err
}
enc, err := zstd.NewWriter(dst)
if err != nil {
return err
}
_, err = io.Copy(enc, bufferedReader)
if err != nil {
enc.Close()
return err
}
if err := enc.Close(); err != nil {
return err
}
os.RemoveAll(a.Path) // Remove original
// Debug("Removed artifact", a.Path)
a.Path = zstdFile
return nil
case compression.GZip:
err := helpers.Tar(src, a.Path)
if err != nil {
return err
}
original, err := os.Open(a.Path)
if err != nil {
return err
}
defer original.Close()
gzipfile := a.getCompressedName()
bufferedReader := bufio.NewReader(original)
// Open a file for writing.
dst, err := os.Create(gzipfile)
if err != nil {
return err
}
// Create gzip writer.
w := gzip.NewWriter(dst)
w.SetConcurrency(1<<20, concurrency)
defer w.Close()
defer dst.Close()
_, err = io.Copy(w, bufferedReader)
if err != nil {
return err
}
w.Close()
os.RemoveAll(a.Path) // Remove original
// Debug("Removed artifact", a.Path)
// a.CompressedPath = gzipfile
a.Path = gzipfile
return nil
//a.Path = gzipfile
// Defaults to tar only (covers when "none" is supplied)
default:
return helpers.Tar(src, a.getCompressedName())
}
}
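// packAndImageSketch is a minimal sketch tying Compress and GenerateFinalImage
// together (rootfsDir and the image name "repo/foo-1.0" are hypothetical): the
// rootfs is archived into the artifact path and the resulting archive is then
// loaded into the backend as an image.
func packAndImageSketch(ctx *types.Context, a *PackageArtifact, b ImageBuilder, rootfsDir string) error {
	if err := a.Compress(rootfsDir, 1); err != nil {
		return errors.Wrap(err, "compressing rootfs")
	}
	return a.GenerateFinalImage(ctx, "repo/foo-1.0", b, true)
}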
func (a *PackageArtifact) getCompressedName() string {
switch a.CompressionType {
case compression.Zstandard:
return a.Path + ".zst"
case compression.GZip:
return a.Path + ".gz"
}
return a.Path
}
// GetUncompressedName returns the artifact path without the compression suffix (e.g. .gz or .zst)
func (a *PackageArtifact) GetUncompressedName() string {
switch a.CompressionType {
case compression.Zstandard, compression.GZip:
return strings.TrimSuffix(a.Path, filepath.Ext(a.Path))
}
return a.Path
}
func hashContent(bv []byte) string {
hasher := sha1.New()
hasher.Write(bv)
sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
return sha
}
func hashFileContent(path string) (string, error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
defer f.Close()
h := sha1.New()
if _, err := io.Copy(h, f); err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}
func replaceFileTarWrapper(dst string, inputTarStream io.ReadCloser, mods []string, fn func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)) io.ReadCloser {
pipeReader, pipeWriter := io.Pipe()
go func() {
tarReader := tar.NewReader(inputTarStream)
tarWriter := tar.NewWriter(pipeWriter)
defer inputTarStream.Close()
defer tarWriter.Close()
modify := func(name string, original *tar.Header, tarReader io.Reader) error {
header, data, err := fn(dst, name, original, tarReader)
switch {
case err != nil:
return err
case header == nil:
return nil
}
if header.Name == "" {
header.Name = name
}
header.Size = int64(len(data))
if err := tarWriter.WriteHeader(header); err != nil {
return err
}
if len(data) != 0 {
if _, err := tarWriter.Write(data); err != nil {
return err
}
}
return nil
}
// var remaining []string
var err error
var originalHeader *tar.Header
for {
originalHeader, err = tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
pipeWriter.CloseWithError(err)
return
}
if !helpers.Contains(mods, originalHeader.Name) {
// No modifiers for this file, copy the header and data
if err := tarWriter.WriteHeader(originalHeader); err != nil {
pipeWriter.CloseWithError(err)
return
}
if _, err := pools.Copy(tarWriter, tarReader); err != nil {
pipeWriter.CloseWithError(err)
return
}
continue
}
if err := modify(originalHeader.Name, originalHeader, tarReader); err != nil {
pipeWriter.CloseWithError(err)
return
}
}
// All archive entries have been processed; close the pipe to signal EOF to the reader side
pipeWriter.Close()
}()
return pipeReader
}
func tarModifierWrapperFunc(ctx *types.Context) func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
return func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
// If the destination file already exists and differs, the entry is renamed with a ._cfgXXXX_ prefix (config protection).
var destPath string
// Read data. TODO: change the archive callback so that it can return a Reader instead of buffering
buffer := bytes.Buffer{}
if content != nil {
if _, err := buffer.ReadFrom(content); err != nil {
return nil, nil, err
}
}
tarHash := hashContent(buffer.Bytes())
// The callback is also invoked for files listed in mods but not present in the archive,
// so guard against a nil header.
if header != nil {
switch header.Typeflag {
case tar.TypeReg:
destPath = filepath.Join(dst, path)
default:
// Nothing to do; return the original header and content unchanged
return header, buffer.Bytes(), nil
}
existingHash := ""
f, err := os.Lstat(destPath)
if err == nil {
ctx.Debug("File exists already, computing hash for", destPath)
hash, herr := hashFileContent(destPath)
if herr == nil {
existingHash = hash
}
}
ctx.Debug(destPath, "- existing file hash: ", existingHash, "Tar file hashsum: ", tarHash)
if fileHelper.Exists(destPath) {
ctx.Debug(destPath, "already exists")
}
// Protect the file only if the content hashes differ OR the file sizes differ
differs := (existingHash != "" && existingHash != tarHash) || (err == nil && f != nil && header.Size != f.Size())
// Check if exists
if fileHelper.Exists(destPath) && differs {
ctx.Debug(destPath, "already exists and differs")
for i := 1; i < 1000; i++ {
name := filepath.Join(filepath.Dir(path),
fmt.Sprintf("._cfg%04d_%s", i, filepath.Base(path)))
if fileHelper.Exists(name) {
continue
}
ctx.Info(fmt.Sprintf("Found protected file %s. Creating %s.", destPath,
filepath.Join(dst, name)))
return &tar.Header{
Mode: header.Mode,
Typeflag: header.Typeflag,
PAXRecords: header.PAXRecords,
Name: name,
}, buffer.Bytes(), nil
}
}
}
return header, buffer.Bytes(), nil
}
}
func (a *PackageArtifact) GetProtectFiles(ctx *types.Context) (res []string) {
annotationDir := ""
if !ctx.Config.ConfigProtectSkip {
// a.CompileSpec could be nil when artifact.Unpack is used for tree tarball
if a.CompileSpec != nil &&
a.CompileSpec.GetPackage().HasAnnotation(string(pkg.ConfigProtectAnnnotation)) {
dir, ok := a.CompileSpec.GetPackage().GetAnnotations()[string(pkg.ConfigProtectAnnnotation)]
if ok {
annotationDir = dir
}
}
// TODO: decide whether to skip this when a.CompileSpec is nil
cp := config.NewConfigProtect(annotationDir)
cp.Map(a.Files, ctx.Config.GetConfigProtectConfFiles())
// NOTE: for unpack we need files path without initial /
res = cp.GetProtectFiles(false)
}
return
}
// Unpack untars and decompresses the artifact to the given path
func (a *PackageArtifact) Unpack(ctx *types.Context, dst string, keepPerms bool) error {
if !strings.HasPrefix(dst, string(os.PathSeparator)) {
return errors.New("destination must be an absolute path")
}
// Compute the list of config-protected files before unpacking
protectedFiles := a.GetProtectFiles(ctx)
mod := tarModifierWrapperFunc(ctx)
//tarModifier := helpers.NewTarModifierWrapper(dst, mod)
archiveFile, err := os.Open(a.Path)
if err != nil {
return errors.Wrap(err, "Cannot open "+a.Path)
}
defer archiveFile.Close()
decompressed, err := containerdCompression.DecompressStream(archiveFile)
if err != nil {
return errors.Wrap(err, "Cannot open "+a.Path)
}
replacerArchive := replaceFileTarWrapper(dst, decompressed, protectedFiles, mod)
// or with filter?
// func(header *tar.Header) (bool, error) {
// if helpers.Contains(protectedFiles, header.Name) {
// newHead, _, err := mod(dst, header.Name, header, decompressed)
// if err != nil {
// return false, err
// }
// header.Name = newHead.Name
// // Override target path
// //target = filepath.Join(dest, header.Name)
// }
// // tarModifier.Modifier()
// return true, nil
// },
_, _, err = image.ExtractReader(ctx, replacerArchive, dst, ctx.Config.GetGeneral().SameOwner, nil)
return err
}
// FileList generates the list of files of a package from the local archive
func (a *PackageArtifact) FileList() ([]string, error) {
var files []string
archiveFile, err := os.Open(a.Path)
if err != nil {
return files, errors.Wrap(err, "Cannot open "+a.Path)
}
defer archiveFile.Close()
decompressed, err := containerdCompression.DecompressStream(archiveFile)
if err != nil {
return files, errors.Wrap(err, "Cannot open "+a.Path)
}
defer decompressed.Close()
tr := tar.NewReader(decompressed)
// untar each segment
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return []string{}, err
}
// determine proper file path info
finfo := hdr.FileInfo()
fileName := hdr.Name
// skip directories; only regular entries end up in the file list
if finfo.Mode().IsDir() {
continue
}
files = append(files, fileName)
}
return files, nil
}
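// inspectAndUnpackSketch is a minimal sketch combining the two methods above
// ("/tmp/rootfs" is a hypothetical absolute destination, which Unpack requires):
// it lists the files shipped in the archive and then unpacks it, letting the
// config-protect logic rename protected files that already exist in the destination.
func inspectAndUnpackSketch(ctx *types.Context, a *PackageArtifact) ([]string, error) {
	files, err := a.FileList()
	if err != nil {
		return nil, err
	}
	return files, a.Unpack(ctx, "/tmp/rootfs", true)
}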

View File

@@ -0,0 +1,28 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestArtifact(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Artifact Suite")
}

View File

@@ -0,0 +1,235 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact_test
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
. "github.com/mudler/luet/pkg/api/core/types/artifact"
. "github.com/mudler/luet/pkg/compiler/backend"
backend "github.com/mudler/luet/pkg/compiler/backend"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
. "github.com/mudler/luet/pkg/compiler"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/tree"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Artifact", func() {
Context("Simple package build definition", func() {
ctx := types.NewContext()
It("Generates a verified delta", func() {
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../../../../tests/fixtures/buildtree")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
cc := NewLuetCompiler(nil, generalRecipe.GetDatabase(), options.WithContext(types.NewContext()))
lspec, err := cc.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
Expect(err).ToNot(HaveOccurred())
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
Expect(lspec.Image).To(Equal("luet/base"))
Expect(lspec.Seed).To(Equal("alpine"))
tmpdir, err := ioutil.TempDir(os.TempDir(), "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpdir2, err := ioutil.TempDir(os.TempDir(), "tree2")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir2) // clean up
unpacked, err := ioutil.TempDir(os.TempDir(), "unpacked")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(unpacked) // clean up
rootfs, err := ioutil.TempDir(os.TempDir(), "rootfs")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(rootfs) // clean up
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM alpine
COPY . /luetbuild
WORKDIR /luetbuild
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin`))
b := NewSimpleDockerBackend(ctx)
opts := backend.Options{
ImageName: "luet/base",
SourcePath: tmpdir,
DockerFileName: "Dockerfile",
Destination: filepath.Join(tmpdir2, "output1.tar"),
}
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM luet/base
COPY . /luetbuild
WORKDIR /luetbuild
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin
RUN echo foo > /test
RUN echo bar > /test2`))
opts2 := backend.Options{
ImageName: "test",
SourcePath: tmpdir,
DockerFileName: "LuetDockerfile",
Destination: filepath.Join(tmpdir, "output2.tar"),
}
Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
})
It("Generates packages images", func() {
b := NewSimpleDockerBackend(ctx)
imageprefix := "foo/"
testString := []byte(`funky test data`)
tmpdir, err := ioutil.TempDir(os.TempDir(), "artifact")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpWork, err := ioutil.TempDir(os.TempDir(), "artifact2")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpWork) // clean up
Expect(os.MkdirAll(filepath.Join(tmpdir, "foo", "bar"), os.ModePerm)).ToNot(HaveOccurred())
err = ioutil.WriteFile(filepath.Join(tmpdir, "test"), testString, 0644)
Expect(err).ToNot(HaveOccurred())
err = ioutil.WriteFile(filepath.Join(tmpdir, "foo", "bar", "test"), testString, 0644)
Expect(err).ToNot(HaveOccurred())
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}}
err = a.Compress(tmpdir, 1)
Expect(err).ToNot(HaveOccurred())
resultingImage := imageprefix + "foo--1.0"
err = a.GenerateFinalImage(ctx, resultingImage, b, false)
Expect(err).ToNot(HaveOccurred())
Expect(b.ImageExists(resultingImage)).To(BeTrue())
result, err := ioutil.TempDir(os.TempDir(), "result")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(result) // clean up
img, err := b.ImageReference(resultingImage, true)
Expect(err).ToNot(HaveOccurred())
_, _, err = image.ExtractTo(
ctx,
img,
result,
false,
nil,
)
Expect(err).ToNot(HaveOccurred())
content, err := ioutil.ReadFile(filepath.Join(result, "test"))
Expect(err).ToNot(HaveOccurred())
Expect(content).To(Equal(testString))
content, err = ioutil.ReadFile(filepath.Join(result, "foo", "bar", "test"))
Expect(err).ToNot(HaveOccurred())
Expect(content).To(Equal(testString))
})
It("Generates empty packages images", func() {
b := NewSimpleDockerBackend(ctx)
imageprefix := "foo/"
tmpdir, err := ioutil.TempDir(os.TempDir(), "artifact")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpWork, err := ioutil.TempDir(os.TempDir(), "artifact2")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpWork) // clean up
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}}
err = a.Compress(tmpdir, 1)
Expect(err).ToNot(HaveOccurred())
resultingImage := imageprefix + "foo--1.0"
err = a.GenerateFinalImage(ctx, resultingImage, b, false)
Expect(err).ToNot(HaveOccurred())
Expect(b.ImageExists(resultingImage)).To(BeTrue())
result, err := ioutil.TempDir(os.TempDir(), "result")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(result) // clean up
img, err := b.ImageReference(resultingImage, false)
Expect(err).ToNot(HaveOccurred())
_, _, err = image.ExtractTo(
ctx,
img,
result,
false,
nil,
)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.DirectoryIsEmpty(result)).To(BeTrue())
})
It("Retrieves uncompressed name", func() {
a := NewPackageArtifact("foo.tar.gz")
a.CompressionType = (compression.GZip)
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
a = NewPackageArtifact("foo.tar.zst")
a.CompressionType = compression.Zstandard
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
a = NewPackageArtifact("foo.tar")
a.CompressionType = compression.None
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
})
})
})

View File

@@ -0,0 +1,63 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact
import (
"crypto/sha512"
"fmt"
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/rancher-sandbox/gofilecache"
)
type ArtifactCache struct {
gofilecache.Cache
}
func NewCache(dir string) *ArtifactCache {
return &ArtifactCache{Cache: *gofilecache.InitCache(dir)}
}
func (c *ArtifactCache) cacheID(a *PackageArtifact) [64]byte {
fingerprint := filepath.Base(a.Path)
if a.CompileSpec != nil && a.CompileSpec.Package != nil {
fingerprint = a.CompileSpec.Package.GetFingerPrint()
}
if len(a.Checksums) > 0 {
for _, cs := range a.Checksums.List() {
t := cs[0]
result := cs[1]
fingerprint += fmt.Sprintf("+%s:%s", t, result)
}
}
return sha512.Sum512([]byte(fingerprint))
}
func (c *ArtifactCache) Get(a *PackageArtifact) (string, error) {
fileName, _, err := c.Cache.GetFile(c.cacheID(a))
return fileName, err
}
func (c *ArtifactCache) Put(a *PackageArtifact) (gofilecache.OutputID, int64, error) {
file, err := os.Open(a.Path)
if err != nil {
return [64]byte{}, 0, errors.Wrapf(err, "failed opening %s", a.Path)
}
defer file.Close()
return c.Cache.Put(c.cacheID(a), file)
}
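// cacheRoundTripSketch is a minimal usage sketch (cacheDir is a hypothetical
// directory): it stores an artifact in the on-disk cache, keyed by its fingerprint
// and checksums as computed by cacheID, and resolves it back to a file path later.
func cacheRoundTripSketch(a *PackageArtifact, cacheDir string) (string, error) {
	cache := NewCache(cacheDir)
	if _, _, err := cache.Put(a); err != nil {
		return "", err
	}
	return cache.Get(a)
}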

View File

@@ -0,0 +1,90 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact_test
import (
"io/ioutil"
"os"
"path/filepath"
types "github.com/mudler/luet/pkg/api/core/types"
. "github.com/mudler/luet/pkg/api/core/types/artifact"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Cache", func() {
Context("CacheID", func() {
It("Get and retrieve files", func() {
tmpdir, err := ioutil.TempDir(os.TempDir(), "test")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpdirartifact, err := ioutil.TempDir(os.TempDir(), "testartifact")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdirartifact) // clean up
err = ioutil.WriteFile(filepath.Join(tmpdirartifact, "foo"), []byte(string("foo")), os.ModePerm)
Expect(err).ToNot(HaveOccurred())
a := NewPackageArtifact(filepath.Join(tmpdir, "foo.tar.gz"))
err = a.Compress(tmpdirartifact, 1)
Expect(err).ToNot(HaveOccurred())
cache := NewCache(tmpdir)
// Put an artifact in the cache and retrieve it later.
// The artifact is NOT hashed, so it is referenced in the cache just by its path.
_, _, err = cache.Put(a)
Expect(err).ToNot(HaveOccurred())
path, err := cache.Get(a)
Expect(err).ToNot(HaveOccurred())
b := NewPackageArtifact(path)
ctx := types.NewContext()
err = b.Unpack(ctx, tmpdir, false)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(tmpdir, "foo"))).To(BeTrue())
bb, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo"))
Expect(err).ToNot(HaveOccurred())
Expect(string(bb)).To(Equal("foo"))
// After the artifact is hashed, its fingerprint changes, so the cache no longer returns a hit:
// the lookup that succeeded above now fails, as expected.
a.Hash()
_, err = cache.Get(a)
Expect(err).To(HaveOccurred())
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "bar"}}
_, _, err = cache.Put(a)
Expect(err).ToNot(HaveOccurred())
c := NewPackageArtifact(filepath.Join(tmpdir, "foo.tar.gz"))
c.Hash()
c.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "bar"}}
_, err = cache.Get(c)
Expect(err).ToNot(HaveOccurred())
})
})
})

View File

@@ -0,0 +1,91 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact
import (
//"strconv"
"crypto/sha256"
"fmt"
"hash"
"io"
"os"
"sort"
// . "github.com/mudler/luet/pkg/logger"
"github.com/pkg/errors"
)
type HashImplementation string
const (
SHA256 HashImplementation = "sha256"
)
type Checksums map[string]string
type HashOptions struct {
Hasher hash.Hash
Type HashImplementation
}
func (c Checksums) List() (res [][]string) {
keys := make([]string, 0)
for k := range c {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
res = append(res, []string{k, c[k]})
}
return
}
// Generate generates all Checksums supported for the artifact
func (c *Checksums) Generate(a *PackageArtifact) error {
return c.generateSHA256(a)
}
func (c Checksums) Compare(d Checksums) error {
for t, sum := range d {
if v, ok := c[t]; ok && v != sum {
return errors.New("Checksum mismsatch")
}
}
return nil
}
func (c *Checksums) generateSHA256(a *PackageArtifact) error {
return c.generateSum(a, HashOptions{Hasher: sha256.New(), Type: SHA256})
}
func (c *Checksums) generateSum(a *PackageArtifact, opts HashOptions) error {
f, err := os.Open(a.Path)
if err != nil {
return err
}
defer f.Close()
if _, err := io.Copy(opts.Hasher, f); err != nil {
return err
}
sum := fmt.Sprintf("%x", opts.Hasher.Sum(nil))
(*c)[string(opts.Type)] = sum
return nil
}
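// verifyChecksumSketch is a minimal sketch of the verification flow built on the
// helpers above: it recomputes the checksums of a downloaded artifact and compares
// them with the checksums advertised in its metadata (a variant of what Verify does
// with the artifact's own Checksums field).
func verifyChecksumSketch(downloaded *PackageArtifact, expected Checksums) error {
	sums := Checksums{}
	if err := sums.Generate(downloaded); err != nil {
		return err
	}
	return sums.Compare(expected)
}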

View File

@@ -19,7 +19,7 @@ import (
"io/ioutil"
"os"
. "github.com/mudler/luet/pkg/compiler/types/artifact"
. "github.com/mudler/luet/pkg/api/core/types/artifact"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -40,13 +40,13 @@ var _ = Describe("Checksum", func() {
Expect(len(definitionsum)).To(Equal(0))
Expect(len(definitionsum2)).To(Equal(0))
err = buildsum.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/build.yaml"))
err = buildsum.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/build.yaml"))
Expect(err).ToNot(HaveOccurred())
err = definitionsum.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/definition.yaml"))
err = definitionsum.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/definition.yaml"))
Expect(err).ToNot(HaveOccurred())
-err = definitionsum2.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/definition.yaml"))
+err = definitionsum2.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/definition.yaml"))
Expect(err).ToNot(HaveOccurred())
Expect(len(buildsum)).To(Equal(1))

View File

@@ -0,0 +1,472 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
// 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/mudler/luet/pkg/api/core/config"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
solver "github.com/mudler/luet/pkg/solver"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
)
var AvailableResolvers = strings.Join([]string{solver.QLearningResolverType}, " ")
type LuetLoggingConfig struct {
// Path of the logfile
Path string `mapstructure:"path"`
// Enable/Disable logging to file
EnableLogFile bool `mapstructure:"enable_logfile"`
// Enable JSON format logging in file
JsonFormat bool `mapstructure:"json_format"`
// Log level
Level LogLevel `mapstructure:"level"`
// Enable emoji
EnableEmoji bool `mapstructure:"enable_emoji"`
// Enable/Disable color in logging
Color bool `mapstructure:"color"`
}
type LuetGeneralConfig struct {
SameOwner bool `yaml:"same_owner,omitempty" mapstructure:"same_owner"`
Concurrency int `yaml:"concurrency,omitempty" mapstructure:"concurrency"`
Debug bool `yaml:"debug,omitempty" mapstructure:"debug"`
ShowBuildOutput bool `yaml:"show_build_output,omitempty" mapstructure:"show_build_output"`
FatalWarns bool `yaml:"fatal_warnings,omitempty" mapstructure:"fatal_warnings"`
HTTPTimeout int `yaml:"http_timeout,omitempty" mapstructure:"http_timeout"`
}
type LuetSolverOptions struct {
solver.Options `yaml:"options,omitempty"`
Type string `yaml:"type,omitempty" mapstructure:"type"`
LearnRate float32 `yaml:"rate,omitempty" mapstructure:"rate"`
Discount float32 `yaml:"discount,omitempty" mapstructure:"discount"`
MaxAttempts int `yaml:"max_attempts,omitempty" mapstructure:"max_attempts"`
Implementation solver.SolverType `yaml:"implementation,omitempty" mapstructure:"implementation"`
}
func (opts LuetSolverOptions) ResolverIsSet() bool {
switch opts.Type {
case solver.QLearningResolverType:
return true
default:
return false
}
}
func (opts LuetSolverOptions) Resolver() solver.PackageResolver {
switch opts.Type {
case solver.QLearningResolverType:
if opts.LearnRate != 0.0 {
return solver.NewQLearningResolver(opts.LearnRate, opts.Discount, opts.MaxAttempts, 999999)
}
return solver.SimpleQLearningSolver()
}
return &solver.Explainer{}
}
func (opts *LuetSolverOptions) CompactString() string {
return fmt.Sprintf("type: %s rate: %f, discount: %f, attempts: %d, initialobserved: %d",
opts.Type, opts.LearnRate, opts.Discount, opts.MaxAttempts, 999999)
}
type LuetSystemConfig struct {
DatabaseEngine string `yaml:"database_engine" mapstructure:"database_engine"`
DatabasePath string `yaml:"database_path" mapstructure:"database_path"`
Rootfs string `yaml:"rootfs" mapstructure:"rootfs"`
PkgsCachePath string `yaml:"pkgs_cache_path" mapstructure:"pkgs_cache_path"`
TmpDirBase string `yaml:"tmpdir_base" mapstructure:"tmpdir_base"`
}
func (s *LuetSystemConfig) SetRootFS(path string) error {
p, err := fileHelper.Rel2Abs(path)
if err != nil {
return err
}
s.Rootfs = p
return nil
}
func (sc *LuetSystemConfig) GetRepoDatabaseDirPath(name string) string {
dbpath := filepath.Join(sc.Rootfs, sc.DatabasePath)
dbpath = filepath.Join(dbpath, "repos/"+name)
err := os.MkdirAll(dbpath, os.ModePerm)
if err != nil {
panic(err)
}
return dbpath
}
func (sc *LuetSystemConfig) GetSystemRepoDatabaseDirPath() string {
dbpath := filepath.Join(sc.Rootfs,
sc.DatabasePath)
err := os.MkdirAll(dbpath, os.ModePerm)
if err != nil {
panic(err)
}
return dbpath
}
func (sc *LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) {
var cachepath string
if sc.PkgsCachePath != "" {
cachepath = sc.PkgsCachePath
} else {
// Create dynamic cache for test suites
cachepath, _ = ioutil.TempDir(os.TempDir(), "cachepkgs")
}
if filepath.IsAbs(cachepath) {
ans = cachepath
} else {
ans = filepath.Join(sc.GetSystemRepoDatabaseDirPath(), cachepath)
}
return
}
func (sc *LuetSystemConfig) GetRootFsAbs() (string, error) {
return filepath.Abs(sc.Rootfs)
}
type LuetKV struct {
Key string `json:"key" yaml:"key" mapstructure:"key"`
Value string `json:"value" yaml:"value" mapstructure:"value"`
}
type LuetConfig struct {
Logging LuetLoggingConfig `yaml:"logging,omitempty" mapstructure:"logging"`
General LuetGeneralConfig `yaml:"general,omitempty" mapstructure:"general"`
System LuetSystemConfig `yaml:"system" mapstructure:"system"`
Solver LuetSolverOptions `yaml:"solver,omitempty" mapstructure:"solver"`
RepositoriesConfDir []string `yaml:"repos_confdir,omitempty" mapstructure:"repos_confdir"`
ConfigProtectConfDir []string `yaml:"config_protect_confdir,omitempty" mapstructure:"config_protect_confdir"`
ConfigProtectSkip bool `yaml:"config_protect_skip,omitempty" mapstructure:"config_protect_skip"`
ConfigFromHost bool `yaml:"config_from_host,omitempty" mapstructure:"config_from_host"`
SystemRepositories LuetRepositories `yaml:"repositories,omitempty" mapstructure:"repositories"`
FinalizerEnvs []LuetKV `json:"finalizer_envs,omitempty" yaml:"finalizer_envs,omitempty" mapstructure:"finalizer_envs,omitempty"`
ConfigProtectConfFiles []config.ConfigProtectConfFile `yaml:"-" mapstructure:"-"`
}
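// loadConfigSketch is a minimal sketch (the document below is a hypothetical example,
// unmarshalled directly with yaml here for illustration rather than through the CLI
// configuration loading): it shows how the tags above map a user configuration file
// onto LuetConfig.
func loadConfigSketch() (*LuetConfig, error) {
	raw := []byte(`
general:
  debug: true
  concurrency: 2
system:
  rootfs: /
  database_path: /var/db/packages
  tmpdir_base: /tmp/luet
repos_confdir:
  - /etc/luet/repos.conf.d
`)
	c := &LuetConfig{}
	if err := yaml.Unmarshal(raw, c); err != nil {
		return nil, err
	}
	return c, nil
}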
func (c *LuetConfig) GetSystemDB() pkg.PackageDatabase {
switch c.GetSystem().DatabaseEngine {
case "boltdb":
return pkg.NewBoltDatabase(
filepath.Join(c.GetSystem().GetSystemRepoDatabaseDirPath(), "luet.db"))
default:
return pkg.NewInMemoryDatabase(true)
}
}
func (c *LuetConfig) AddSystemRepository(r LuetRepository) {
c.SystemRepositories = append(c.SystemRepositories, r)
}
func (c *LuetConfig) GetFinalizerEnvsMap() map[string]string {
ans := make(map[string]string)
for _, kv := range c.FinalizerEnvs {
ans[kv.Key] = kv.Value
}
return ans
}
func (c *LuetConfig) SetFinalizerEnv(k, v string) {
keyPresent := false
envs := []LuetKV{}
for _, kv := range c.FinalizerEnvs {
if kv.Key == k {
keyPresent = true
envs = append(envs, LuetKV{Key: kv.Key, Value: v})
} else {
envs = append(envs, kv)
}
}
if !keyPresent {
envs = append(envs, LuetKV{Key: k, Value: v})
}
c.FinalizerEnvs = envs
}
func (c *LuetConfig) GetFinalizerEnvs() []string {
ans := []string{}
for _, kv := range c.FinalizerEnvs {
ans = append(ans, fmt.Sprintf("%s=%s", kv.Key, kv.Value))
}
return ans
}
func (c *LuetConfig) GetFinalizerEnv(k string) (string, error) {
keyNotPresent := true
ans := ""
for _, kv := range c.FinalizerEnvs {
if kv.Key == k {
keyNotPresent = false
ans = kv.Value
}
}
if keyNotPresent {
return "", errors.New("Finalizer key " + k + " not found")
}
return ans, nil
}
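// finalizerEnvSketch is a minimal sketch of the helpers above (the key/value pair is
// a hypothetical example): it sets a finalizer environment variable and returns the
// flattened KEY=VALUE list that is handed to package finalizers.
func finalizerEnvSketch(c *LuetConfig) []string {
	c.SetFinalizerEnv("EXAMPLE_KEY", "example-value")
	return c.GetFinalizerEnvs()
}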
func (c *LuetConfig) GetLogging() *LuetLoggingConfig {
return &c.Logging
}
func (c *LuetConfig) GetGeneral() *LuetGeneralConfig {
return &c.General
}
func (c *LuetConfig) GetSystem() *LuetSystemConfig {
return &c.System
}
func (c *LuetConfig) GetSolverOptions() *LuetSolverOptions {
return &c.Solver
}
func (c *LuetConfig) YAML() ([]byte, error) {
return yaml.Marshal(c)
}
func (c *LuetConfig) GetConfigProtectConfFiles() []config.ConfigProtectConfFile {
return c.ConfigProtectConfFiles
}
func (c *LuetConfig) AddConfigProtectConfFile(file *config.ConfigProtectConfFile) {
if c.ConfigProtectConfFiles == nil {
c.ConfigProtectConfFiles = []config.ConfigProtectConfFile{*file}
} else {
c.ConfigProtectConfFiles = append(c.ConfigProtectConfFiles, *file)
}
}
func (c *LuetConfig) LoadRepositories(ctx *Context) error {
var regexRepo = regexp.MustCompile(`.yml$|.yaml$`)
var err error
rootfs := ""
// Respect the rootfs param when reading repositories
if !c.ConfigFromHost {
rootfs, err = c.GetSystem().GetRootFsAbs()
if err != nil {
return err
}
}
for _, rdir := range c.RepositoriesConfDir {
rdir = filepath.Join(rootfs, rdir)
ctx.Debug("Parsing Repository Directory", rdir, "...")
files, err := ioutil.ReadDir(rdir)
if err != nil {
ctx.Debug("Skip dir", rdir, ":", err.Error())
continue
}
for _, file := range files {
if file.IsDir() {
continue
}
if !regexRepo.MatchString(file.Name()) {
ctx.Debug("File", file.Name(), "skipped.")
continue
}
content, err := ioutil.ReadFile(path.Join(rdir, file.Name()))
if err != nil {
ctx.Warning("On read file", file.Name(), ":", err.Error())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
r, err := LoadRepository(content)
if err != nil {
ctx.Warning("On parse file", file.Name(), ":", err.Error())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
if r.Name == "" || len(r.Urls) == 0 || r.Type == "" {
ctx.Warning("Invalid repository ", file.Name())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
c.AddSystemRepository(*r)
}
}
return nil
}
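// exampleRepoSketch is a minimal sketch of a repository definition that
// LoadRepositories above would pick up from repos_confdir (all values are
// hypothetical; name, type and urls are the fields validated before the repository
// is added, mirroring the fixtures used by the config tests).
const exampleRepoSketch = `
name: "example"
type: "disk"
priority: 999
urls:
  - "tests/repos/example"
`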
func (c *LuetConfig) GetSystemRepository(name string) (*LuetRepository, error) {
var ans *LuetRepository = nil
for idx, repo := range c.SystemRepositories {
if repo.Name == name {
ans = &c.SystemRepositories[idx]
break
}
}
if ans == nil {
return nil, errors.New("Repository " + name + " not found")
}
return ans, nil
}
func (c *LuetConfig) LoadConfigProtect(ctx *Context) error {
var regexConfs = regexp.MustCompile(`.yml$`)
var err error
rootfs := ""
// Respect the rootfs param when reading config protect files
if !c.ConfigFromHost {
rootfs, err = c.GetSystem().GetRootFsAbs()
if err != nil {
return err
}
}
for _, cdir := range c.ConfigProtectConfDir {
cdir = filepath.Join(rootfs, cdir)
ctx.Debug("Parsing Config Protect Directory", cdir, "...")
files, err := ioutil.ReadDir(cdir)
if err != nil {
ctx.Debug("Skip dir", cdir, ":", err.Error())
continue
}
for _, file := range files {
if file.IsDir() {
continue
}
if !regexConfs.MatchString(file.Name()) {
ctx.Debug("File", file.Name(), "skipped.")
continue
}
content, err := ioutil.ReadFile(path.Join(cdir, file.Name()))
if err != nil {
ctx.Warning("On read file", file.Name(), ":", err.Error())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
r, err := loadConfigProtectConFile(file.Name(), content)
if err != nil {
ctx.Warning("On parse file", file.Name(), ":", err.Error())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
if r.Name == "" || len(r.Directories) == 0 {
ctx.Warning("Invalid config protect file", file.Name())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
c.AddConfigProtectConfFile(r)
}
}
return nil
}
func loadConfigProtectConFile(filename string, data []byte) (*config.ConfigProtectConfFile, error) {
ans := config.NewConfigProtectConfFile(filename)
err := yaml.Unmarshal(data, &ans)
if err != nil {
return nil, err
}
return ans, nil
}
func (c *LuetLoggingConfig) SetLogLevel(s LogLevel) {
c.Level = s
}
func (c *LuetSystemConfig) InitTmpDir() error {
if !filepath.IsAbs(c.TmpDirBase) {
abs, err := fileHelper.Rel2Abs(c.TmpDirBase)
if err != nil {
return errors.Wrap(err, "while converting relative path to absolute path")
}
c.TmpDirBase = abs
}
if _, err := os.Stat(c.TmpDirBase); err != nil {
if os.IsNotExist(err) {
err = os.MkdirAll(c.TmpDirBase, os.ModePerm)
if err != nil {
return err
}
}
}
return nil
}
func (c *LuetSystemConfig) CleanupTmpDir() error {
return os.RemoveAll(c.TmpDirBase)
}
func (c *LuetSystemConfig) TempDir(pattern string) (string, error) {
err := c.InitTmpDir()
if err != nil {
return "", err
}
return ioutil.TempDir(c.TmpDirBase, pattern)
}
func (c *LuetSystemConfig) TempFile(pattern string) (*os.File, error) {
err := c.InitTmpDir()
if err != nil {
return nil, err
}
return ioutil.TempFile(c.TmpDirBase, pattern)
}

View File

@@ -0,0 +1,28 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestAPITypes(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Types Suite")
}

View File

@@ -0,0 +1,89 @@
// Copyright © 2019-2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types_test
import (
"os"
"path/filepath"
"strings"
types "github.com/mudler/luet/pkg/api/core/types"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Config", func() {
Context("Load Repository1", func() {
ctx := types.NewContext()
ctx.Config.RepositoriesConfDir = []string{
"../../../../tests/fixtures/repos.conf.d",
}
err := ctx.Config.LoadRepositories(ctx)
It("Check Load Repository 1", func() {
Expect(err).Should(BeNil())
Expect(len(ctx.Config.SystemRepositories)).Should(Equal(2))
Expect(ctx.Config.SystemRepositories[0].Name).Should(Equal("test1"))
Expect(ctx.Config.SystemRepositories[0].Priority).Should(Equal(999))
Expect(ctx.Config.SystemRepositories[0].Type).Should(Equal("disk"))
Expect(len(ctx.Config.SystemRepositories[0].Urls)).Should(Equal(1))
Expect(ctx.Config.SystemRepositories[0].Urls[0]).Should(Equal("tests/repos/test1"))
})
It("Chec Load Repository 2", func() {
Expect(err).Should(BeNil())
Expect(len(ctx.Config.SystemRepositories)).Should(Equal(2))
Expect(ctx.Config.SystemRepositories[1].Name).Should(Equal("test2"))
Expect(ctx.Config.SystemRepositories[1].Priority).Should(Equal(1000))
Expect(ctx.Config.SystemRepositories[1].Type).Should(Equal("disk"))
Expect(len(ctx.Config.SystemRepositories[1].Urls)).Should(Equal(1))
Expect(ctx.Config.SystemRepositories[1].Urls[0]).Should(Equal("tests/repos/test2"))
})
})
Context("Simple temporary directory creation", func() {
It("Create Temporary directory", func() {
ctx := types.NewContext()
ctx.Config.GetSystem().TmpDirBase = os.TempDir() + "/tmpluet"
tmpDir, err := ctx.Config.GetSystem().TempDir("test1")
Expect(err).ToNot(HaveOccurred())
Expect(strings.HasPrefix(tmpDir, filepath.Join(os.TempDir(), "tmpluet"))).To(BeTrue())
Expect(fileHelper.Exists(tmpDir)).To(BeTrue())
defer os.RemoveAll(tmpDir)
})
It("Create Temporary file", func() {
ctx := types.NewContext()
ctx.Config.GetSystem().TmpDirBase = os.TempDir() + "/tmpluet"
tmpFile, err := ctx.Config.GetSystem().TempFile("testfile1")
Expect(err).ToNot(HaveOccurred())
Expect(strings.HasPrefix(tmpFile.Name(), filepath.Join(os.TempDir(), "tmpluet"))).To(BeTrue())
Expect(fileHelper.Exists(tmpFile.Name())).To(BeTrue())
defer os.Remove(tmpFile.Name())
})
})
})

View File

@@ -0,0 +1,415 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types
import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"github.com/kyokomi/emoji"
"github.com/mudler/luet/pkg/helpers/terminal"
"github.com/pkg/errors"
"github.com/pterm/pterm"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/term"
)
const (
ErrorLevel LogLevel = "error"
WarningLevel LogLevel = "warning"
InfoLevel LogLevel = "info"
SuccessLevel LogLevel = "success"
FatalLevel LogLevel = "fatal"
)
type Context struct {
context.Context
Config *LuetConfig
IsTerminal bool
NoSpinner bool
s *pterm.SpinnerPrinter
spinnerLock *sync.Mutex
z *zap.Logger
ProgressBar *pterm.ProgressbarPrinter
}
func NewContext() *Context {
return &Context{
spinnerLock: &sync.Mutex{},
IsTerminal: terminal.IsTerminal(os.Stdout),
Config: &LuetConfig{
ConfigFromHost: true,
Logging: LuetLoggingConfig{},
General: LuetGeneralConfig{},
System: LuetSystemConfig{
DatabasePath: filepath.Join("var", "db", "packages"),
TmpDirBase: filepath.Join(os.TempDir(), "tmpluet")},
Solver: LuetSolverOptions{},
},
s: pterm.DefaultSpinner.WithShowTimer(false).WithRemoveWhenDone(true),
}
}
func (c *Context) Copy() *Context {
configCopy := *c.Config
configCopy.System = *c.Config.GetSystem()
configCopy.General = *c.Config.GetGeneral()
configCopy.Logging = *c.Config.GetLogging()
ctx := *c
ctxCopy := &ctx
ctxCopy.Config = &configCopy
return ctxCopy
}
// GetTerminalSize returns the width and the height of the active terminal.
func (c *Context) GetTerminalSize() (width, height int, err error) {
w, h, err := term.GetSize(int(os.Stdout.Fd()))
if w <= 0 {
w = 0
}
if h <= 0 {
h = 0
}
if err != nil {
err = errors.New("size not detectable")
}
return w, h, err
}
func (c *Context) Init() (err error) {
if c.IsTerminal {
if !c.Config.Logging.Color {
c.Debug("Disabling colors")
c.NoColor()
}
} else {
c.Debug("Not a terminal, disabling colors")
c.NoColor()
}
c.Debug("Colors", c.Config.GetLogging().Color)
c.Debug("Logging level", c.Config.GetLogging().Level)
c.Debug("Debug mode", c.Config.GetGeneral().Debug)
if c.Config.GetLogging().EnableLogFile && c.Config.GetLogging().Path != "" {
// Init zap logger
err = c.InitZap()
if err != nil {
return
}
}
// Load repositories
err = c.Config.LoadRepositories(c)
if err != nil {
return
}
return
}
func (c *Context) NoColor() {
pterm.DisableColor()
}
func (c *Context) Ask() bool {
var input string
c.Info("Do you want to continue with this operation? [y/N]: ")
_, err := fmt.Scanln(&input)
if err != nil {
return false
}
input = strings.ToLower(input)
if input == "y" || input == "yes" {
return true
}
return false
}
func (c *Context) InitZap() error {
var err error
if c.z == nil {
// TODO: check permissions before opening the logfile.
cfg := zap.NewProductionConfig()
cfg.OutputPaths = []string{c.Config.GetLogging().Path}
cfg.Level = c.Config.GetLogging().Level.ZapLevel()
cfg.ErrorOutputPaths = []string{}
if c.Config.GetLogging().JsonFormat {
cfg.Encoding = "json"
} else {
cfg.Encoding = "console"
}
cfg.DisableCaller = true
cfg.DisableStacktrace = true
cfg.EncoderConfig.TimeKey = "time"
cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
c.z, err = cfg.Build()
if err != nil {
fmt.Fprint(os.Stderr, "Error on initialize file logger: "+err.Error()+"\n")
return err
}
}
return nil
}
// Spinner starts the spinner
func (c *Context) Spinner() {
if !c.IsTerminal || c.NoSpinner {
return
}
c.spinnerLock.Lock()
defer c.spinnerLock.Unlock()
var confLevel int
if c.Config.GetGeneral().Debug {
confLevel = 3
} else {
confLevel = c.Config.GetLogging().Level.ToNumber()
}
if confLevel < 2 {
return
}
if !c.s.IsActive {
c.s, _ = c.s.Start()
}
}
func (c *Context) Screen(text string) {
pterm.DefaultHeader.WithBackgroundStyle(pterm.NewStyle(pterm.BgLightBlue)).WithMargin(2).Println(text)
//pterm.DefaultCenter.Print(pterm.DefaultHeader.WithFullWidth().WithBackgroundStyle(pterm.NewStyle(pterm.BgLightBlue)).WithMargin(10).Sprint(text))
}
func (c *Context) SpinnerText(suffix, prefix string) {
if !c.IsTerminal || c.NoSpinner {
return
}
c.spinnerLock.Lock()
defer c.spinnerLock.Unlock()
if c.Config.GetGeneral().Debug {
fmt.Printf("%s %s\n",
suffix, prefix,
)
} else {
c.s.UpdateText(suffix + prefix)
}
}
func (c *Context) SpinnerStop() {
if !c.IsTerminal {
return
}
c.spinnerLock.Lock()
defer c.spinnerLock.Unlock()
var confLevel int
if c.Config.GetGeneral().Debug {
confLevel = 3
} else {
confLevel = c.Config.GetLogging().Level.ToNumber()
}
if confLevel < 2 {
return
}
if c.s != nil {
c.s.Success()
}
}
func (c *Context) log2File(level LogLevel, msg string) {
switch level {
case FatalLevel:
c.z.Fatal(msg)
case ErrorLevel:
c.z.Error(msg)
case WarningLevel:
c.z.Warn(msg)
case InfoLevel, SuccessLevel:
c.z.Info(msg)
default:
c.z.Debug(msg)
}
}
func (c *Context) Msg(level LogLevel, ln bool, msg ...interface{}) {
var message string
var confLevel, msgLevel int
if c.Config.GetGeneral().Debug {
confLevel = 3
pterm.EnableDebugMessages()
} else {
confLevel = c.Config.GetLogging().Level.ToNumber()
}
msgLevel = level.ToNumber()
if msgLevel > confLevel {
return
}
for _, m := range msg {
message += " " + fmt.Sprintf("%v", m)
}
// Color message
levelMsg := message
if c.Config.GetLogging().Color {
switch level {
case WarningLevel:
levelMsg = pterm.LightYellow(":construction: warning" + message)
case InfoLevel:
levelMsg = message
case SuccessLevel:
levelMsg = pterm.LightGreen(message)
case ErrorLevel:
levelMsg = pterm.Red(message)
default:
levelMsg = pterm.Blue(message)
}
}
// Render emoji codes when enabled on a terminal, otherwise strip them
if c.Config.GetLogging().EnableEmoji && c.IsTerminal {
levelMsg = emoji.Sprint(levelMsg)
} else {
re := regexp.MustCompile(`[:][\w]+[:]`)
levelMsg = re.ReplaceAllString(levelMsg, "")
}
if c.z != nil {
c.log2File(level, message)
}
// Print the message based on the level
switch level {
case SuccessLevel:
if ln {
pterm.Success.Println(levelMsg)
} else {
pterm.Success.Print(levelMsg)
}
case InfoLevel:
if ln {
pterm.Info.Println(levelMsg)
} else {
pterm.Info.Print(levelMsg)
}
case WarningLevel:
if ln {
pterm.Warning.Println(levelMsg)
} else {
pterm.Warning.Print(levelMsg)
}
case ErrorLevel:
if ln {
pterm.Error.Println(levelMsg)
} else {
pterm.Error.Print(levelMsg)
}
case FatalLevel:
if ln {
pterm.Fatal.Println(levelMsg)
} else {
pterm.Fatal.Print(levelMsg)
}
default:
if ln {
pterm.Debug.Println(levelMsg)
} else {
pterm.Debug.Print(levelMsg)
}
}
}
func (c *Context) Warning(mess ...interface{}) {
c.Msg("warning", true, mess...)
if c.Config.GetGeneral().FatalWarns {
os.Exit(2)
}
}
func (c *Context) Debug(mess ...interface{}) {
pc, file, line, ok := runtime.Caller(1)
if ok {
mess = append([]interface{}{fmt.Sprintf("(%s:#%d:%v)",
path.Base(file), line, runtime.FuncForPC(pc).Name())}, mess...)
}
c.Msg("debug", true, mess...)
}
func (c *Context) Info(mess ...interface{}) {
c.Msg("info", true, mess...)
}
func (c *Context) Success(mess ...interface{}) {
c.Msg("success", true, mess...)
}
func (c *Context) Error(mess ...interface{}) {
c.Msg("error", true, mess...)
}
func (c *Context) Fatal(mess ...interface{}) {
c.Error(mess...)
os.Exit(1)
}
type LogLevel string
func (level LogLevel) ToNumber() int {
switch level {
case ErrorLevel, FatalLevel:
return 0
case WarningLevel:
return 1
case InfoLevel, SuccessLevel:
return 2
default: // debug
return 3
}
}
func (level LogLevel) ZapLevel() zap.AtomicLevel {
switch level {
case FatalLevel:
return zap.NewAtomicLevelAt(zap.FatalLevel)
case ErrorLevel:
return zap.NewAtomicLevelAt(zap.ErrorLevel)
case WarningLevel:
return zap.NewAtomicLevelAt(zap.WarnLevel)
case InfoLevel, SuccessLevel:
return zap.NewAtomicLevelAt(zap.InfoLevel)
default:
return zap.NewAtomicLevelAt(zap.DebugLevel)
}
}
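For quick reference, this is how the level gating in Msg behaves from a caller's point of view. A minimal, hypothetical sketch (not part of the change set), assuming the defaults that NewContext sets, which the test file below relies on as well:

package main

import (
	types "github.com/mudler/luet/pkg/api/core/types"
)

func main() {
	ctx := types.NewContext()
	// With the default configuration, info (level 2) passes the gate...
	ctx.Info("visible: info is within the configured level")
	// ...while debug (level 3) is filtered out, as in the "respects loglevel" test.
	ctx.Debug("hidden: debug exceeds the configured level")
	// Turning on debug mode raises the threshold to 3, so everything prints.
	ctx.Config.GetGeneral().Debug = true
	ctx.Debug("now visible")
}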

View File

@@ -0,0 +1,125 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types_test
import (
"io"
"io/ioutil"
"os"
"github.com/gookit/color"
types "github.com/mudler/luet/pkg/api/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func captureStdout(f func(w io.Writer)) string {
originalStdout := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
color.SetOutput(w)
f(w)
_ = w.Close()
out, _ := ioutil.ReadAll(r)
os.Stdout = originalStdout
color.SetOutput(os.Stdout)
_ = r.Close()
return string(out)
}
var _ = Describe("Context and logging", func() {
ctx := types.NewContext()
BeforeEach(func() {
ctx = types.NewContext()
})
Context("LogLevel", func() {
It("converts it correctly to number and zaplog", func() {
Expect(types.ErrorLevel.ToNumber()).To(Equal(0))
Expect(types.InfoLevel.ToNumber()).To(Equal(2))
Expect(types.WarningLevel.ToNumber()).To(Equal(1))
Expect(types.LogLevel("foo").ToNumber()).To(Equal(3))
Expect(types.WarningLevel.ZapLevel().String()).To(Equal("warn"))
Expect(types.InfoLevel.ZapLevel().String()).To(Equal("info"))
Expect(types.ErrorLevel.ZapLevel().String()).To(Equal("error"))
Expect(types.FatalLevel.ZapLevel().String()).To(Equal("fatal"))
Expect(types.LogLevel("foo").ZapLevel().String()).To(Equal("debug"))
})
})
Context("Context", func() {
It("detect if is a terminal", func() {
Expect(captureStdout(func(w io.Writer) {
_, _, err := ctx.GetTerminalSize()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("size not detectable"))
os.Stdout.Write([]byte(err.Error()))
})).To(ContainSubstring("size not detectable"))
})
It("respects loglevel", func() {
ctx.Config.GetGeneral().Debug = false
Expect(captureStdout(func(w io.Writer) {
ctx.Debug("")
})).To(Equal(""))
ctx.Config.GetGeneral().Debug = true
Expect(captureStdout(func(w io.Writer) {
ctx.Debug("foo")
})).To(ContainSubstring("foo"))
})
It("logs to file", func() {
ctx.NoColor()
t, err := ioutil.TempFile("", "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(t.Name()) // clean up
ctx.Config.GetLogging().EnableLogFile = true
ctx.Config.GetLogging().Path = t.Name()
ctx.Init()
Expect(captureStdout(func(w io.Writer) {
ctx.Info("foot")
})).To(And(ContainSubstring("INFO"), ContainSubstring("foot")))
Expect(captureStdout(func(w io.Writer) {
ctx.Success("test")
})).To(And(ContainSubstring("SUCCESS"), ContainSubstring("test")))
Expect(captureStdout(func(w io.Writer) {
ctx.Error("foobar")
})).To(And(ContainSubstring("ERROR"), ContainSubstring("foobar")))
Expect(captureStdout(func(w io.Writer) {
ctx.Warning("foowarn")
})).To(And(ContainSubstring("WARNING"), ContainSubstring("foowarn")))
l, err := ioutil.ReadFile(t.Name())
Expect(err).ToNot(HaveOccurred())
logs := string(l)
Expect(logs).To(ContainSubstring("foot"))
Expect(logs).To(ContainSubstring("test"))
Expect(logs).To(ContainSubstring("foowarn"))
Expect(logs).To(ContainSubstring("foobar"))
})
})
})

View File

@@ -0,0 +1,96 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types
import (
"fmt"
"runtime"
"gopkg.in/yaml.v2"
)
type LuetRepository struct {
Name string `json:"name" yaml:"name" mapstructure:"name"`
Description string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description"`
Urls []string `json:"urls" yaml:"urls" mapstructure:"urls"`
Type string `json:"type" yaml:"type" mapstructure:"type"`
Mode string `json:"mode,omitempty" yaml:"mode,omitempty" mapstructure:"mode,omitempty"`
Priority int `json:"priority,omitempty" yaml:"priority,omitempty" mapstructure:"priority"`
Enable bool `json:"enable" yaml:"enable" mapstructure:"enable"`
Cached bool `json:"cached,omitempty" yaml:"cached,omitempty" mapstructure:"cached,omitempty"`
Authentication map[string]string `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"`
TreePath string `json:"treepath,omitempty" yaml:"treepath,omitempty" mapstructure:"treepath"`
MetaPath string `json:"metapath,omitempty" yaml:"metapath,omitempty" mapstructure:"metapath"`
Verify bool `json:"verify,omitempty" yaml:"verify,omitempty" mapstructure:"verify"`
Arch string `json:"arch,omitempty" yaml:"arch,omitempty" mapstructure:"arch"`
ReferenceID string `json:"reference,omitempty" yaml:"reference,omitempty" mapstructure:"reference"`
// Incremented value that identifies the revision of the repository in a user-friendly way.
Revision int `json:"revision,omitempty" yaml:"-" mapstructure:"-"`
// Epoch time in seconds
LastUpdate string `json:"last_update,omitempty" yaml:"-" mapstructure:"-"`
}
func (r *LuetRepository) String() string {
return fmt.Sprintf("[%s] prio: %d, type: %s, enable: %t, cached: %t",
r.Name, r.Priority, r.Type, r.Enable, r.Cached)
}
// Enabled returns true when the repository is explicitly enabled, or when its Arch matches the running architecture (runtime.GOARCH) even if Enable is not set.
func (r *LuetRepository) Enabled() bool {
return r.Arch != "" && r.Arch == runtime.GOARCH && !r.Enable || r.Enable
}
type LuetRepositories []LuetRepository
func (l LuetRepositories) Enabled() (res LuetRepositories) {
for _, r := range l {
if r.Enabled() {
res = append(res, r)
}
}
return
}
func NewLuetRepository(name, t, descr string, urls []string, priority int, enable, cached bool) *LuetRepository {
return &LuetRepository{
Name: name,
Description: descr,
Urls: urls,
Type: t,
Priority: priority,
Enable: enable,
Cached: cached,
Authentication: make(map[string]string),
}
}
func NewEmptyLuetRepository() *LuetRepository {
return &LuetRepository{
Priority: 9999,
Authentication: make(map[string]string),
}
}
func LoadRepository(data []byte) (*LuetRepository, error) {
ans := NewEmptyLuetRepository()
err := yaml.Unmarshal(data, &ans)
if err != nil {
return nil, err
}
return ans, nil
}
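As a usage note, LoadRepository simply unmarshals YAML into the struct above, so a repository definition only needs to follow the yaml tags. A minimal sketch; the repository name and URL are made up for illustration:

package main

import (
	"fmt"

	types "github.com/mudler/luet/pkg/api/core/types"
)

func main() {
	data := []byte(`
name: "example"
description: "An example repository"
type: "docker"
urls:
- "quay.io/example/repo"
enable: true
cached: true
priority: 1
`)
	repo, err := types.LoadRepository(data)
	if err != nil {
		panic(err)
	}
	// String() reports name, priority, type, enable and cached.
	fmt.Println(repo)
}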

View File

@@ -0,0 +1,49 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types_test
import (
"runtime"
types "github.com/mudler/luet/pkg/api/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Types", func() {
Context("Repository detects underlying arch", func() {
It("is enabled if arch is matching", func() {
r := types.LuetRepository{Arch: runtime.GOARCH}
Expect(r.Enabled()).To(BeTrue())
})
It("is disabled if arch is NOT matching", func() {
r := types.LuetRepository{Arch: "foo"}
Expect(r.Enabled()).To(BeFalse())
})
It("is enabled if arch is NOT matching and enabled is true", func() {
r := types.LuetRepository{Arch: "foo", Enable: true}
Expect(r.Enabled()).To(BeTrue())
})
It("enabled is true", func() {
r := types.LuetRepository{Enable: true}
Expect(r.Enabled()).To(BeTrue())
})
It("enabled is false", func() {
r := types.LuetRepository{Enable: false}
Expect(r.Enabled()).To(BeFalse())
})
})
})

View File

@@ -23,9 +23,9 @@ import (
"strings"
"syscall"
"github.com/pkg/errors"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
helpers "github.com/mudler/luet/pkg/helpers"
"github.com/pkg/errors"
)
type Box interface {
@@ -107,7 +107,7 @@ func (b *DefaultBox) Exec() error {
func (b *DefaultBox) Run() error {
if !helpers.Exists(b.Root) {
if !fileHelper.Exists(b.Root) {
return errors.New(b.Root + " does not exist")
}

View File

@@ -1,67 +0,0 @@
package bus
import (
"github.com/mudler/go-pluggable"
)
var (
// Package events
// EventPackageInstall is the event fired when a new package is being installed
EventPackageInstall pluggable.EventType = "package.install"
// EventPackageUnInstall is the event fired when a new package is being uninstalled
EventPackageUnInstall pluggable.EventType = "package.uninstall"
// Package build
// EventPackagePreBuild is the event fired before a package is being built
EventPackagePreBuild pluggable.EventType = "package.pre.build"
// EventPackagePreBuildArtifact is the event fired before a package tarball is being generated
EventPackagePreBuildArtifact pluggable.EventType = "package.pre.build_artifact"
// EventPackagePostBuildArtifact is the event fired after a package tarball is generated
EventPackagePostBuildArtifact pluggable.EventType = "package.post.build_artifact"
// EventPackagePostBuild is the event fired after a package was built
EventPackagePostBuild pluggable.EventType = "package.post.build"
// Image build
// EventImagePreBuild is the event fired before an image is built
EventImagePreBuild pluggable.EventType = "image.pre.build"
// EventImagePrePull is the event fired before an image is pulled
EventImagePrePull pluggable.EventType = "image.pre.pull"
// EventImagePrePush is the event fired before an image is pushed
EventImagePrePush pluggable.EventType = "image.pre.push"
// EventImagePostBuild is the event fired after an image has been built
EventImagePostBuild pluggable.EventType = "image.post.build"
// EventImagePostPull is the event fired after an image has been pulled
EventImagePostPull pluggable.EventType = "image.post.pull"
// EventImagePostPush is the event fired after an image has been pushed
EventImagePostPush pluggable.EventType = "image.post.push"
// Repository events
// EventRepositoryPreBuild is the event fired before a repository is being built
EventRepositoryPreBuild pluggable.EventType = "repository.pre.build"
// EventRepositoryPostBuild is the event fired after a repository was built
EventRepositoryPostBuild pluggable.EventType = "repository.post.build"
)
// Manager is the bus instance manager, which subscribes plugins to events emitted by Luet
var Manager *pluggable.Manager = pluggable.NewManager(
[]pluggable.EventType{
EventPackageInstall,
EventPackageUnInstall,
EventPackagePreBuild,
EventPackagePreBuildArtifact,
EventPackagePostBuildArtifact,
EventPackagePostBuild,
EventRepositoryPreBuild,
EventRepositoryPostBuild,
EventImagePreBuild,
EventImagePrePull,
EventImagePrePush,
EventImagePostBuild,
EventImagePostPull,
EventImagePostPush,
},
)
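This file is removed here; the same event bus reappears under pkg/api/core/bus, which the backends below import. For context, a hedged sketch of how a backend publishes to it, mirroring the Publish calls in SimpleDocker.BuildImage further down (the options struct here is a stand-in; the real callers pass backend.Options):

package main

import (
	bus "github.com/mudler/luet/pkg/api/core/bus"
)

// buildOptions is a hypothetical payload; real callers publish backend.Options.
type buildOptions struct {
	ImageName  string
	SourcePath string
}

func main() {
	opts := buildOptions{ImageName: "example/image", SourcePath: "."}
	// Subscribed plugins receive the payload before the build starts...
	bus.Manager.Publish(bus.EventImagePreBuild, opts)
	// ...the image build would run here...
	// ...and again once it completes.
	bus.Manager.Publish(bus.EventImagePostBuild, opts)
}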

View File

@@ -1,29 +1,21 @@
package compiler
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/config"
"github.com/pkg/errors"
. "github.com/mudler/luet/pkg/logger"
)
func NewBackend(s string) (CompilerBackend, error) {
func NewBackend(ctx *types.Context, s string) (CompilerBackend, error) {
var compilerBackend CompilerBackend
switch s {
case backend.ImgBackend:
compilerBackend = backend.NewSimpleImgBackend()
compilerBackend = backend.NewSimpleImgBackend(ctx)
case backend.DockerBackend:
compilerBackend = backend.NewSimpleDockerBackend()
compilerBackend = backend.NewSimpleDockerBackend(ctx)
default:
return nil, errors.New("invalid backend. Unsupported")
}
@@ -34,9 +26,9 @@ func NewBackend(s string) (CompilerBackend, error) {
type CompilerBackend interface {
BuildImage(backend.Options) error
ExportImage(backend.Options) error
LoadImage(string) error
RemoveImage(backend.Options) error
ImageDefinitionToTar(backend.Options) error
ExtractRootfs(opts backend.Options, keepPerms bool) error
CopyImage(string, string) error
DownloadImage(opts backend.Options) error
@@ -44,204 +36,6 @@ type CompilerBackend interface {
Push(opts backend.Options) error
ImageAvailable(string) bool
ImageReference(img1 string, ondisk bool) (v1.Image, error)
ImageExists(string) bool
}
// GenerateChanges generates changes between two images using a backend by leveraging export/extractrootfs methods
// Example of the returned JSON: [
// {
// "Image1": "luet/base",
// "Image2": "alpine",
// "DiffType": "File",
// "Diff": {
// "Adds": null,
// "Dels": [
// {
// "Name": "/luetbuild",
// "Size": 5830706
// },
// {
// "Name": "/luetbuild/Dockerfile",
// "Size": 50
// },
// {
// "Name": "/luetbuild/output1",
// "Size": 5830656
// }
// ],
// "Mods": null
// }
// }
// ]
func GenerateChanges(b CompilerBackend, fromImage, toImage backend.Options) ([]artifact.ArtifactLayer, error) {
res := artifact.ArtifactLayer{FromImage: fromImage.ImageName, ToImage: toImage.ImageName}
tmpdiffs, err := config.LuetCfg.GetSystem().TempDir("extraction")
if err != nil {
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(tmpdiffs) // clean up
srcRootFS, err := ioutil.TempDir(tmpdiffs, "src")
if err != nil {
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(srcRootFS) // clean up
dstRootFS, err := ioutil.TempDir(tmpdiffs, "dst")
if err != nil {
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(dstRootFS) // clean up
srcImageExtract := backend.Options{
ImageName: fromImage.ImageName,
Destination: srcRootFS,
}
Debug("Extracting source image", fromImage.ImageName)
err = b.ExtractRootfs(srcImageExtract, false) // No need to keep permissions as we just collect file diffs
if err != nil {
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking src image "+fromImage.ImageName)
}
dstImageExtract := backend.Options{
ImageName: toImage.ImageName,
Destination: dstRootFS,
}
Debug("Extracting destination image", toImage.ImageName)
err = b.ExtractRootfs(dstImageExtract, false)
if err != nil {
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking dst image "+toImage.ImageName)
}
// Get Additions/Changes. dst -> src
err = filepath.Walk(dstRootFS, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
realpath := strings.Replace(path, dstRootFS, "", -1)
fileInfo, err := os.Lstat(filepath.Join(srcRootFS, realpath))
if err == nil {
var sizeA, sizeB int64
sizeA = fileInfo.Size()
if s, err := os.Lstat(filepath.Join(dstRootFS, realpath)); err == nil {
sizeB = s.Size()
}
if sizeA != sizeB {
// fmt.Println("File changed", path, filepath.Join(srcRootFS, realpath))
res.Diffs.Changes = append(res.Diffs.Changes, artifact.ArtifactNode{
Name: filepath.Join("/", realpath),
Size: int(sizeB),
})
} else {
// fmt.Println("File already exists", path, filepath.Join(srcRootFS, realpath))
}
} else {
var sizeB int64
if s, err := os.Lstat(filepath.Join(dstRootFS, realpath)); err == nil {
sizeB = s.Size()
}
res.Diffs.Additions = append(res.Diffs.Additions, artifact.ArtifactNode{
Name: filepath.Join("/", realpath),
Size: int(sizeB),
})
// fmt.Println("File created", path, filepath.Join(srcRootFS, realpath))
}
return nil
})
if err != nil {
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image destination")
}
// Get deletions. src -> dst
err = filepath.Walk(srcRootFS, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}
realpath := strings.Replace(path, srcRootFS, "", -1)
if _, err = os.Lstat(filepath.Join(dstRootFS, realpath)); err != nil {
// fmt.Println("File deleted", path, filepath.Join(srcRootFS, realpath))
res.Diffs.Deletions = append(res.Diffs.Deletions, artifact.ArtifactNode{
Name: filepath.Join("/", realpath),
})
}
return nil
})
if err != nil {
return []artifact.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image source")
}
diffs := []artifact.ArtifactLayer{res}
if config.LuetCfg.GetGeneral().Debug {
summary := ComputeArtifactLayerSummary(diffs)
for _, l := range summary.Layers {
Debug(fmt.Sprintf("Diff %s -> %s: add %d (%d bytes), del %d (%d bytes), change %d (%d bytes)",
l.FromImage, l.ToImage,
l.AddFiles, l.AddSizes,
l.DelFiles, l.DelSizes,
l.ChangeFiles, l.ChangeSizes))
}
}
return diffs, nil
}
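For context on the helper removed above, this is roughly how it was exercised before this change, mirroring the (also removed) "Docker image diffs" test later in this compare. A sketch against the pre-change API only; the old constructor took no Context and the function relied on the global config for temporary directories:

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/compiler"
	"github.com/mudler/luet/pkg/compiler/backend"
)

func main() {
	// Pre-change constructor: no Context argument yet.
	b := backend.NewSimpleDockerBackend()

	opts := backend.Options{ImageName: "alpine:latest"}
	if err := b.DownloadImage(opts); err != nil {
		panic(err)
	}

	// Diffing an image against itself yields one layer entry with no
	// additions, changes or deletions.
	layers, err := compiler.GenerateChanges(b, opts, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(layers[0].Diffs.Additions), len(layers[0].Diffs.Changes), len(layers[0].Diffs.Deletions))
}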
type ArtifactLayerSummary struct {
FromImage string `json:"image1"`
ToImage string `json:"image2"`
AddFiles int `json:"add_files"`
AddSizes int64 `json:"add_sizes"`
DelFiles int `json:"del_files"`
DelSizes int64 `json:"del_sizes"`
ChangeFiles int `json:"change_files"`
ChangeSizes int64 `json:"change_sizes"`
}
type ArtifactLayersSummary struct {
Layers []ArtifactLayerSummary `json:"summary"`
}
func ComputeArtifactLayerSummary(diffs []artifact.ArtifactLayer) ArtifactLayersSummary {
ans := ArtifactLayersSummary{
Layers: make([]ArtifactLayerSummary, 0),
}
for _, layer := range diffs {
sum := ArtifactLayerSummary{
FromImage: layer.FromImage,
ToImage: layer.ToImage,
AddFiles: 0,
AddSizes: 0,
DelFiles: 0,
DelSizes: 0,
ChangeFiles: 0,
ChangeSizes: 0,
}
for _, a := range layer.Diffs.Additions {
sum.AddFiles++
sum.AddSizes += int64(a.Size)
}
for _, d := range layer.Diffs.Deletions {
sum.DelFiles++
sum.DelSizes += int64(d.Size)
}
for _, c := range layer.Diffs.Changes {
sum.ChangeFiles++
sum.ChangeSizes += int64(c.Size)
}
ans.Layers = append(ans.Layers, sum)
}
return ans
}

View File

@@ -18,15 +18,11 @@ package backend_test
import (
"testing"
. "github.com/mudler/luet/cmd"
config "github.com/mudler/luet/pkg/config"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestSolver(t *testing.T) {
RegisterFailHandler(Fail)
LoadConfig(config.LuetCfg)
RunSpecs(t, "Backend Suite")
}

View File

@@ -18,8 +18,7 @@ package backend
import (
"os/exec"
"github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/pkg/errors"
@@ -44,17 +43,17 @@ type Options struct {
BackendArgs []string
}
func runCommand(cmd *exec.Cmd) error {
func runCommand(ctx *types.Context, cmd *exec.Cmd) error {
output := ""
buffered := !config.LuetCfg.GetGeneral().ShowBuildOutput
writer := NewBackendWriter(buffered)
buffered := !ctx.Config.GetGeneral().ShowBuildOutput
writer := NewBackendWriter(buffered, ctx)
cmd.Stdout = writer
cmd.Stderr = writer
if buffered {
Spinner(22)
defer SpinnerStop()
ctx.Spinner()
defer ctx.SpinnerStop()
}
err := cmd.Start()

View File

@@ -1,4 +1,4 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
// Copyright © 2019-2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
@@ -16,70 +16,81 @@
package backend
import (
"encoding/json"
"io/ioutil"
"os"
"io"
"os/exec"
"path/filepath"
"strings"
bus "github.com/mudler/luet/pkg/bus"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/daemon"
"github.com/google/go-containerregistry/pkg/v1/tarball"
bus "github.com/mudler/luet/pkg/api/core/bus"
"github.com/mudler/luet/pkg/api/core/types"
capi "github.com/mudler/docker-companion/api"
"github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/pkg/errors"
)
type SimpleDocker struct{}
type SimpleDocker struct {
ctx *types.Context
}
func NewSimpleDockerBackend() *SimpleDocker {
return &SimpleDocker{}
func NewSimpleDockerBackend(ctx *types.Context) *SimpleDocker {
return &SimpleDocker{ctx: ctx}
}
// TODO: Still missing: labels and build-args expansion
func (*SimpleDocker) BuildImage(opts Options) error {
func (s *SimpleDocker) BuildImage(opts Options) error {
name := opts.ImageName
bus.Manager.Publish(bus.EventImagePreBuild, opts)
buildarg := genBuildCommand(opts)
Info(":whale2: Building image " + name)
s.ctx.Info(":whale2: Building image " + name)
cmd := exec.Command("docker", buildarg...)
cmd.Dir = opts.SourcePath
err := runCommand(cmd)
err := runCommand(s.ctx, cmd)
if err != nil {
return err
}
Info(":whale: Building image " + name + " done")
s.ctx.Success(":whale: Building image " + name + " done")
bus.Manager.Publish(bus.EventImagePostBuild, opts)
return nil
}
func (*SimpleDocker) CopyImage(src, dst string) error {
Debug(":whale: Tagging image:", src, "->", dst)
func (s *SimpleDocker) CopyImage(src, dst string) error {
s.ctx.Debug(":whale: Tagging image:", src, "->", dst)
cmd := exec.Command("docker", "tag", src, dst)
out, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed tagging image: "+string(out))
}
Info(":whale: Tagged image:", src, "->", dst)
s.ctx.Success(":whale: Tagged image:", src, "->", dst)
return nil
}
func (*SimpleDocker) DownloadImage(opts Options) error {
func (s *SimpleDocker) LoadImage(path string) error {
s.ctx.Debug(":whale: Loading image:", path)
cmd := exec.Command("docker", "load", "-i", path)
out, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed loading image: "+string(out))
}
s.ctx.Success(":whale: Loaded image:", path)
return nil
}
func (s *SimpleDocker) DownloadImage(opts Options) error {
name := opts.ImageName
bus.Manager.Publish(bus.EventImagePrePull, opts)
buildarg := []string{"pull", name}
Debug(":whale: Downloading image " + name)
s.ctx.Debug(":whale: Downloading image " + name)
Spinner(22)
defer SpinnerStop()
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
cmd := exec.Command("docker", buildarg...)
out, err := cmd.CombinedOutput()
@@ -87,20 +98,20 @@ func (*SimpleDocker) DownloadImage(opts Options) error {
return errors.Wrap(err, "Failed pulling image: "+string(out))
}
Info(":whale: Downloaded image:", name)
s.ctx.Success(":whale: Downloaded image:", name)
bus.Manager.Publish(bus.EventImagePostPull, opts)
return nil
}
func (*SimpleDocker) ImageExists(imagename string) bool {
func (s *SimpleDocker) ImageExists(imagename string) bool {
buildarg := []string{"inspect", "--type=image", imagename}
Debug(":whale: Checking existance of docker image: " + imagename)
s.ctx.Debug(":whale: Checking existance of docker image: " + imagename)
cmd := exec.Command("docker", buildarg...)
out, err := cmd.CombinedOutput()
if err != nil {
Debug("Image not present")
Debug(string(out))
s.ctx.Debug("Image not present")
s.ctx.Debug(string(out))
return false
}
return true
@@ -110,37 +121,99 @@ func (*SimpleDocker) ImageAvailable(imagename string) bool {
return imageAvailable(imagename)
}
func (*SimpleDocker) RemoveImage(opts Options) error {
func (s *SimpleDocker) RemoveImage(opts Options) error {
name := opts.ImageName
buildarg := []string{"rmi", name}
out, err := exec.Command("docker", buildarg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed removing image: "+string(out))
}
Info(":whale: Removed image:", name)
s.ctx.Success(":whale: Removed image:", name)
//Info(string(out))
return nil
}
func (*SimpleDocker) Push(opts Options) error {
func (s *SimpleDocker) Push(opts Options) error {
name := opts.ImageName
pusharg := []string{"push", name}
bus.Manager.Publish(bus.EventImagePrePush, opts)
Spinner(22)
defer SpinnerStop()
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("docker", pusharg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed pushing image: "+string(out))
}
Info(":whale: Pushed image:", name)
s.ctx.Success(":whale: Pushed image:", name)
bus.Manager.Publish(bus.EventImagePostPush, opts)
//Info(string(out))
return nil
}
func (s *SimpleDocker) imagefromDaemon(a string) (v1.Image, error) {
ref, err := name.ParseReference(a)
if err != nil {
return nil, err
}
img, err := daemon.Image(ref, daemon.WithUnbufferedOpener())
if err != nil {
return nil, err
}
return img, nil
}
// TODO: Make it possible to optionally use this?
// It might be less safe, as it relies on the pipe.
// imageFromCLIPipe returns a new image from a tarball by providing a reader from the docker stdout pipe.
// See also the daemon.Image implementation below for an example (which returns the tarball stream
// from the HTTP API endpoint instead).
func (s *SimpleDocker) imageFromCLIPipe(a string) (v1.Image, error) {
return tarball.Image(func() (io.ReadCloser, error) {
buildarg := []string{"save", a}
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
c := exec.Command("docker", buildarg...)
p, err := c.StdoutPipe()
if err != nil {
return nil, err
}
err = c.Start()
if err != nil {
return nil, err
}
go func() { c.Wait() }()
return p, nil
}, nil)
}
func (s *SimpleDocker) imageFromDisk(a string) (v1.Image, error) {
f, err := s.ctx.Config.GetSystem().TempFile("snapshot")
if err != nil {
return nil, err
}
buildarg := []string{"save", a, "-o", f.Name()}
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("docker", buildarg...).CombinedOutput()
if err != nil {
return nil, errors.Wrap(err, "Failed saving image: "+string(out))
}
return crane.Load(f.Name())
}
func (s *SimpleDocker) ImageReference(a string, ondisk bool) (v1.Image, error) {
if ondisk {
return s.imageFromDisk(a)
}
return s.imagefromDaemon(a)
}
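A short usage sketch for ImageReference: the ondisk flag decides whether the image is snapshotted with `docker save` and loaded via crane, or streamed straight from the daemon. This is illustrative only and assumes a reachable Docker daemon with the image already present locally; the image name is arbitrary:

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/api/core/types"
	"github.com/mudler/luet/pkg/compiler/backend"
)

func main() {
	ctx := types.NewContext()
	b := backend.NewSimpleDockerBackend(ctx)

	// ondisk=true: docker save to a temp file, then crane.Load.
	// ondisk=false: read the image through the daemon API instead.
	img, err := b.ImageReference("alpine:latest", true)
	if err != nil {
		panic(err)
	}
	digest, err := img.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println("digest:", digest.String())
}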
func (s *SimpleDocker) ImageDefinitionToTar(opts Options) error {
if err := s.BuildImage(opts); err != nil {
return errors.Wrap(err, "Failed building image")
@@ -154,120 +227,25 @@ func (s *SimpleDocker) ImageDefinitionToTar(opts Options) error {
return nil
}
func (*SimpleDocker) ExportImage(opts Options) error {
func (s *SimpleDocker) ExportImage(opts Options) error {
name := opts.ImageName
path := opts.Destination
buildarg := []string{"save", name, "-o", path}
Debug(":whale: Saving image " + name)
s.ctx.Debug(":whale: Saving image " + name)
Spinner(22)
defer SpinnerStop()
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("docker", buildarg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed exporting image: "+string(out))
}
Debug(":whale: Exported image:", name)
s.ctx.Debug(":whale: Exported image:", name)
return nil
}
type ManifestEntry struct {
Layers []string `json:"Layers"`
}
func (b *SimpleDocker) ExtractRootfs(opts Options, keepPerms bool) error {
name := opts.ImageName
dst := opts.Destination
tempexport, err := ioutil.TempDir(dst, "tmprootfs")
if err != nil {
return errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(tempexport) // clean up
imageExport := filepath.Join(tempexport, "image.tar")
Spinner(22)
defer SpinnerStop()
if err := b.ExportImage(Options{ImageName: name, Destination: imageExport}); err != nil {
return errors.Wrap(err, "failed while extracting rootfs for "+name)
}
SpinnerStop()
src := imageExport
if src == "" && opts.ImageName != "" {
tempUnpack, err := ioutil.TempDir(dst, "tempUnpack")
if err != nil {
return errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(tempUnpack) // clean up
imageExport := filepath.Join(tempUnpack, "image.tar")
if err := b.ExportImage(Options{ImageName: opts.ImageName, Destination: imageExport}); err != nil {
return errors.Wrap(err, "while exporting image before extraction")
}
src = imageExport
}
rootfs, err := ioutil.TempDir(dst, "tmprootfs")
if err != nil {
return errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(rootfs) // clean up
// TODO: Following as option if archive as output?
// archive, err := ioutil.TempDir(os.TempDir(), "archive")
// if err != nil {
// return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
// }
// defer os.RemoveAll(archive) // clean up
err = helpers.Untar(src, rootfs, keepPerms)
if err != nil {
return errors.Wrap(err, "Error met while unpacking rootfs")
}
manifest, err := helpers.Read(filepath.Join(rootfs, "manifest.json"))
if err != nil {
return errors.Wrap(err, "Error met while reading image manifest")
}
// Unpack all layers
var manifestData []ManifestEntry
if err := json.Unmarshal([]byte(manifest), &manifestData); err != nil {
return errors.Wrap(err, "Error met while unmarshalling manifest")
}
layers_sha := []string{}
for _, data := range manifestData {
for _, l := range data.Layers {
if strings.Contains(l, "layer.tar") {
layers_sha = append(layers_sha, strings.Replace(l, "/layer.tar", "", -1))
}
}
}
// TODO: Drop capi in favor of the img approach already used in pkg/installer/repository
export, err := capi.CreateExport(rootfs)
if err != nil {
return err
}
err = export.UnPackLayers(layers_sha, dst, "containerd")
if err != nil {
return err
}
// err = helpers.Tar(archive, dst)
// if err != nil {
// return nil, errors.Wrap(err, "Error met while creating package archive")
// }
return nil
}

View File

@@ -16,17 +16,16 @@
package backend_test
import (
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/api/core/types"
. "github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/backend"
. "github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/artifact"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
"io/ioutil"
"os"
"path/filepath"
helpers "github.com/mudler/luet/pkg/helpers"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/tree"
. "github.com/onsi/ginkgo"
@@ -35,6 +34,7 @@ import (
var _ = Describe("Docker backend", func() {
Context("Simple Docker backend satisfies main interface functionalities", func() {
ctx := types.NewContext()
It("Builds and generate tars", func() {
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
@@ -60,7 +60,7 @@ var _ = Describe("Docker backend", func() {
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM alpine
@@ -69,7 +69,7 @@ WORKDIR /luetbuild
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin`))
b := NewSimpleDockerBackend()
b := NewSimpleDockerBackend(ctx)
opts := backend.Options{
ImageName: "luet/base",
SourcePath: tmpdir,
@@ -79,11 +79,11 @@ ENV PACKAGE_CATEGORY=app-admin`))
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err = helpers.Read(filepath.Join(tmpdir, "LuetDockerfile"))
dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM luet/base
@@ -103,41 +103,12 @@ RUN echo bar > /test2`))
Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
artifacts := []artifact.ArtifactNode{{
Name: "/luetbuild/LuetDockerfile",
Size: 175,
}}
if os.Getenv("DOCKER_BUILDKIT") == "1" {
artifacts = append(artifacts, artifact.ArtifactNode{Name: "/etc/resolv.conf", Size: 0})
}
artifacts = append(artifacts, artifact.ArtifactNode{Name: "/test", Size: 4})
artifacts = append(artifacts, artifact.ArtifactNode{Name: "/test2", Size: 4})
Expect(compiler.GenerateChanges(b, opts, opts2)).To(Equal(
[]artifact.ArtifactLayer{{
FromImage: "luet/base",
ToImage: "test",
Diffs: artifact.ArtifactDiffs{
Additions: artifacts,
},
}}))
opts2 = backend.Options{
ImageName: "test",
SourcePath: tmpdir,
DockerFileName: "LuetDockerfile",
Destination: filepath.Join(tmpdir, "output3.tar"),
}
Expect(b.ImageDefinitionToTar(opts2)).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir, "output3.tar"))).To(BeTrue())
Expect(b.ImageExists(opts2.ImageName)).To(BeFalse())
})
It("Detects available images", func() {
b := NewSimpleDockerBackend()
b := NewSimpleDockerBackend(ctx)
Expect(b.ImageAvailable("quay.io/mocaccino/extra")).To(BeTrue())
Expect(b.ImageAvailable("ubuntu:20.10")).To(BeTrue())
Expect(b.ImageAvailable("igjo5ijgo25nho52")).To(BeFalse())

View File

@@ -16,68 +16,97 @@
package backend
import (
"os"
"os/exec"
"strings"
bus "github.com/mudler/luet/pkg/bus"
. "github.com/mudler/luet/pkg/logger"
"github.com/google/go-containerregistry/pkg/crane"
v1 "github.com/google/go-containerregistry/pkg/v1"
bus "github.com/mudler/luet/pkg/api/core/bus"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/pkg/errors"
)
type SimpleImg struct{}
type SimpleImg struct {
ctx *types.Context
}
func NewSimpleImgBackend() *SimpleImg {
return &SimpleImg{}
func NewSimpleImgBackend(ctx *types.Context) *SimpleImg {
return &SimpleImg{ctx: ctx}
}
func (s *SimpleImg) LoadImage(string) error {
return errors.New("Not supported")
}
// TODO: Still missing: labels and build-args expansion
func (*SimpleImg) BuildImage(opts Options) error {
func (s *SimpleImg) BuildImage(opts Options) error {
name := opts.ImageName
bus.Manager.Publish(bus.EventImagePreBuild, opts)
buildarg := genBuildCommand(opts)
Info(":tea: Building image " + name)
s.ctx.Info(":tea: Building image " + name)
cmd := exec.Command("img", buildarg...)
cmd.Dir = opts.SourcePath
err := runCommand(cmd)
err := runCommand(s.ctx, cmd)
if err != nil {
return err
}
bus.Manager.Publish(bus.EventImagePostBuild, opts)
Info(":tea: Building image " + name + " done")
s.ctx.Info(":tea: Building image " + name + " done")
return nil
}
func (*SimpleImg) RemoveImage(opts Options) error {
func (s *SimpleImg) RemoveImage(opts Options) error {
name := opts.ImageName
buildarg := []string{"rm", name}
Spinner(22)
defer SpinnerStop()
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("img", buildarg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed removing image: "+string(out))
}
Info(":tea: Image " + name + " removed")
s.ctx.Info(":tea: Image " + name + " removed")
return nil
}
func (*SimpleImg) DownloadImage(opts Options) error {
func (s *SimpleImg) ImageReference(a string, ondisk bool) (v1.Image, error) {
f, err := s.ctx.Config.GetSystem().TempFile("snapshot")
if err != nil {
return nil, err
}
buildarg := []string{"save", a, "-o", f.Name()}
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("img", buildarg...).CombinedOutput()
if err != nil {
return nil, errors.Wrap(err, "Failed saving image: "+string(out))
}
img, err := crane.Load(f.Name())
if err != nil {
return nil, err
}
return img, nil
}
func (s *SimpleImg) DownloadImage(opts Options) error {
name := opts.ImageName
bus.Manager.Publish(bus.EventImagePrePull, opts)
buildarg := []string{"pull", name}
Debug(":tea: Downloading image " + name)
s.ctx.Debug(":tea: Downloading image " + name)
Spinner(22)
defer SpinnerStop()
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
cmd := exec.Command("img", buildarg...)
out, err := cmd.CombinedOutput()
@@ -85,27 +114,27 @@ func (*SimpleImg) DownloadImage(opts Options) error {
return errors.Wrap(err, "Failed downloading image: "+string(out))
}
Info(":tea: Image " + name + " downloaded")
s.ctx.Info(":tea: Image " + name + " downloaded")
bus.Manager.Publish(bus.EventImagePostPull, opts)
return nil
}
func (*SimpleImg) CopyImage(src, dst string) error {
Spinner(22)
defer SpinnerStop()
func (s *SimpleImg) CopyImage(src, dst string) error {
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
Debug(":tea: Tagging image", src, dst)
s.ctx.Debug(":tea: Tagging image", src, dst)
cmd := exec.Command("img", "tag", src, dst)
out, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed tagging image: "+string(out))
}
Info(":tea: Image " + dst + " tagged")
s.ctx.Info(":tea: Image " + dst + " tagged")
return nil
}
func (*SimpleImg) ImageAvailable(imagename string) bool {
func (s *SimpleImg) ImageAvailable(imagename string) bool {
return imageAvailable(imagename)
}
@@ -135,51 +164,24 @@ func (s *SimpleImg) ImageDefinitionToTar(opts Options) error {
return nil
}
func (*SimpleImg) ExportImage(opts Options) error {
func (s *SimpleImg) ExportImage(opts Options) error {
name := opts.ImageName
path := opts.Destination
buildarg := []string{"save", "-o", path, name}
Debug(":tea: Saving image " + name)
s.ctx.Debug(":tea: Saving image " + name)
Spinner(22)
defer SpinnerStop()
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("img", buildarg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed exporting image: "+string(out))
}
Info(":tea: Image " + name + " saved")
s.ctx.Info(":tea: Image " + name + " saved")
return nil
}
// ExtractRootfs extracts the docker image content inside the destination
func (s *SimpleImg) ExtractRootfs(opts Options, keepPerms bool) error {
name := opts.ImageName
path := opts.Destination
if !s.ImageExists(name) {
if err := s.DownloadImage(opts); err != nil {
return errors.Wrap(err, "failed pulling image "+name+" during extraction")
}
}
os.RemoveAll(path)
buildarg := []string{"unpack", "-o", path, name}
Debug(":tea: Extracting image " + name)
Spinner(22)
defer SpinnerStop()
out, err := exec.Command("img", buildarg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed extracting image: "+string(out))
}
Debug(":tea: Image " + name + " extracted")
return nil
}
func (*SimpleImg) Push(opts Options) error {
func (s *SimpleImg) Push(opts Options) error {
name := opts.ImageName
bus.Manager.Publish(bus.EventImagePrePush, opts)
@@ -188,9 +190,9 @@ func (*SimpleImg) Push(opts Options) error {
if err != nil {
return errors.Wrap(err, "Failed pushing image: "+string(out))
}
Info(":tea: Pushed image:", name)
s.ctx.Info(":tea: Pushed image:", name)
bus.Manager.Publish(bus.EventImagePostPush, opts)
//Info(string(out))
//s.ctx.Info(string(out))
return nil
}

View File

@@ -19,18 +19,20 @@ package backend
import (
"bytes"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/pkg/api/core/types"
)
type BackendWriter struct {
BufferedOutput bool
Buffer *bytes.Buffer
ctx *types.Context
}
func NewBackendWriter(buffered bool) *BackendWriter {
func NewBackendWriter(buffered bool, ctx *types.Context) *BackendWriter {
return &BackendWriter{
BufferedOutput: buffered,
Buffer: &bytes.Buffer{},
ctx: ctx,
}
}
@@ -39,7 +41,7 @@ func (b *BackendWriter) Write(p []byte) (int, error) {
return b.Buffer.Write(p)
}
Msg("info", false, false, (string(p)))
b.ctx.Msg("info", false, (string(p)))
return len(p), nil
}

View File

@@ -1,78 +0,0 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler_test
import (
. "github.com/mudler/luet/pkg/compiler"
. "github.com/mudler/luet/pkg/compiler/backend"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Docker image diffs", func() {
var b CompilerBackend
BeforeEach(func() {
b = NewSimpleDockerBackend()
})
Context("Generate diffs from docker images", func() {
It("Detect no changes", func() {
opts := Options{
ImageName: "alpine:latest",
}
err := b.DownloadImage(opts)
Expect(err).ToNot(HaveOccurred())
layers, err := GenerateChanges(b, opts, opts)
Expect(err).ToNot(HaveOccurred())
Expect(len(layers)).To(Equal(1))
Expect(len(layers[0].Diffs.Additions)).To(Equal(0))
Expect(len(layers[0].Diffs.Changes)).To(Equal(0))
Expect(len(layers[0].Diffs.Deletions)).To(Equal(0))
})
It("Detects additions and changed files", func() {
err := b.DownloadImage(Options{
ImageName: "quay.io/mocaccino/micro",
})
Expect(err).ToNot(HaveOccurred())
err = b.DownloadImage(Options{
ImageName: "quay.io/mocaccino/extra",
})
Expect(err).ToNot(HaveOccurred())
layers, err := GenerateChanges(b, Options{
ImageName: "quay.io/mocaccino/micro",
}, Options{
ImageName: "quay.io/mocaccino/extra",
})
Expect(err).ToNot(HaveOccurred())
Expect(len(layers)).To(Equal(1))
Expect(len(layers[0].Diffs.Changes) > 0).To(BeTrue())
Expect(len(layers[0].Diffs.Changes[0].Name) > 0).To(BeTrue())
Expect(layers[0].Diffs.Changes[0].Size > 0).To(BeTrue())
Expect(len(layers[0].Diffs.Additions) > 0).To(BeTrue())
Expect(len(layers[0].Diffs.Additions[0].Name) > 0).To(BeTrue())
Expect(layers[0].Diffs.Additions[0].Size > 0).To(BeTrue())
Expect(len(layers[0].Diffs.Deletions)).To(Equal(0))
})
})
})

pkg/compiler/buildtree.go
View File

@@ -0,0 +1,140 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler
import (
"encoding/json"
"sort"
)
func rankMapStringInt(values map[string]int) []string {
type kv struct {
Key string
Value int
}
var ss []kv
for k, v := range values {
ss = append(ss, kv{k, v})
}
sort.Slice(ss, func(i, j int) bool {
return ss[i].Value > ss[j].Value
})
ranked := make([]string, len(values))
for i, kv := range ss {
ranked[i] = kv.Key
}
return ranked
}
type BuildTree struct {
order map[string]int
}
func (bt *BuildTree) Increase(s string) {
if bt.order == nil {
bt.order = make(map[string]int)
}
if _, ok := bt.order[s]; !ok {
bt.order[s] = 0
}
bt.order[s]++
}
func (bt *BuildTree) Reset(s string) {
if bt.order == nil {
bt.order = make(map[string]int)
}
bt.order[s] = 0
}
func (bt *BuildTree) Level(s string) int {
return bt.order[s]
}
func ints(input []int) []int {
u := make([]int, 0, len(input))
m := make(map[int]bool)
for _, val := range input {
if _, ok := m[val]; !ok {
m[val] = true
u = append(u, val)
}
}
return u
}
func (bt *BuildTree) AllInLevel(l int) []string {
var all []string
for k, v := range bt.order {
if v == l {
all = append(all, k)
}
}
return all
}
func (bt *BuildTree) Order(compilationTree map[string]map[string]interface{}) {
sentinel := false
for !sentinel {
sentinel = true
LEVEL:
for _, l := range bt.AllLevels() {
for _, j := range bt.AllInLevel(l) {
for _, k := range bt.AllInLevel(l) {
if j == k {
continue
}
if _, ok := compilationTree[j][k]; ok {
if bt.Level(j) == bt.Level(k) {
bt.Increase(j)
sentinel = false
break LEVEL
}
}
}
}
}
}
}
func (bt *BuildTree) AllLevels() []int {
var all []int
for _, v := range bt.order {
all = append(all, v)
}
sort.Sort(sort.IntSlice(all))
return ints(all)
}
func (bt *BuildTree) JSON() (string, error) {
type buildjob struct {
Jobs []string `json:"packages"`
}
result := []buildjob{}
for _, l := range bt.AllLevels() {
result = append(result, buildjob{Jobs: bt.AllInLevel(l)})
}
dat, err := json.Marshal(&result)
return string(dat), err
}
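To make the leveling concrete, here is a hedged sketch of BuildTree on a toy dependency map; the real caller lives in the compiler file whose diff is suppressed below. It assumes the map associates each package with the set of packages it has to be built after (transitive requirements included), and that packages are seeded into the tree with Reset before Order is called, since Order only reshuffles entries that are already tracked:

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/compiler"
)

func main() {
	// Hypothetical compilation tree: b is built after a, c after a and b.
	tree := map[string]map[string]interface{}{
		"a": {},
		"b": {"a": nil},
		"c": {"a": nil, "b": nil},
	}

	bt := &compiler.BuildTree{}
	for p := range tree {
		bt.Reset(p) // seed every package at level zero
	}
	bt.Order(tree)

	out, err := bt.JSON()
	if err != nil {
		panic(err)
	}
	// Expected shape: [{"packages":["a"]},{"packages":["b"]},{"packages":["c"]}]
	fmt.Println(out)
}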

File diff suppressed because it is too large

View File

@@ -18,15 +18,11 @@ package compiler_test
import (
"testing"
. "github.com/mudler/luet/cmd"
config "github.com/mudler/luet/pkg/config"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestSolver(t *testing.T) {
RegisterFailHandler(Fail)
LoadConfig(config.LuetCfg)
RunSpecs(t, "Compiler Suite")
}

View File

@@ -16,15 +16,23 @@
package compiler_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
helpers "github.com/mudler/luet/tests/helpers"
"github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/api/core/types/artifact"
. "github.com/mudler/luet/pkg/compiler"
sd "github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
helpers "github.com/mudler/luet/pkg/helpers"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/tree"
. "github.com/onsi/ginkgo"
@@ -32,6 +40,8 @@ import (
)
var _ = Describe("Compiler", func() {
ctx := types.NewContext()
Context("Simple package build definition", func() {
It("Compiles it correctly", func() {
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
@@ -41,7 +51,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2), options.WithContext(types.NewContext()))
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -59,15 +69,15 @@ var _ = Describe("Compiler", func() {
artifact, err := compiler.Compile(false, spec)
Expect(err).ToNot(HaveOccurred())
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
content1, err := helpers.Read(spec.Rel("test5"))
content1, err := fileHelper.Read(spec.Rel("test5"))
Expect(err).ToNot(HaveOccurred())
content2, err := helpers.Read(spec.Rel("test6"))
content2, err := fileHelper.Read(spec.Rel("test6"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("artifact5\n"))
Expect(content2).To(Equal("artifact6\n"))
@@ -75,6 +85,68 @@ var _ = Describe("Compiler", func() {
})
})
Context("Copy and Join", func() {
It("Compiles it correctly with Copy", func() {
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../tests/fixtures/copy")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2), options.WithContext(types.NewContext()))
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.2"})
Expect(err).ToNot(HaveOccurred())
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
tmpdir, err := ioutil.TempDir("", "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
spec.SetOutputPath(tmpdir)
artifact, err := compiler.Compile(false, spec)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(spec.Rel("result"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("bina/busybox"))).To(BeTrue())
})
It("Compiles it correctly with Join", func() {
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../tests/fixtures/join")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2), options.WithContext(types.NewContext()))
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.2"})
Expect(err).ToNot(HaveOccurred())
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
tmpdir, err := ioutil.TempDir("", "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
spec.SetOutputPath(tmpdir)
artifact, err := compiler.Compile(false, spec)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(spec.Rel("newc"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
})
})
Context("Simple package build definition", func() {
It("Compiles it in parallel", func() {
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
@@ -84,7 +156,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(1))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(1), options.WithContext(types.NewContext()))
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -102,8 +174,8 @@ var _ = Describe("Compiler", func() {
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2))
Expect(errs).To(BeNil())
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
})
@@ -118,7 +190,7 @@ var _ = Describe("Compiler", func() {
err = generalRecipe.Load("../../tests/fixtures/templates")
Expect(err).ToNot(HaveOccurred())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.WithContext(types.NewContext()))
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
pkg, err := generalRecipe.GetDatabase().FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
@@ -142,7 +214,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2), options.WithContext(types.NewContext()))
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -165,23 +237,23 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(3))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Exists(spec.Rel("test3"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test4"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
content1, err := helpers.Read(spec.Rel("c"))
content1, err := fileHelper.Read(spec.Rel("c"))
Expect(err).ToNot(HaveOccurred())
content2, err := helpers.Read(spec.Rel("cd"))
content2, err := fileHelper.Read(spec.Rel("cd"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("c\n"))
Expect(content2).To(Equal("c\n"))
content1, err = helpers.Read(spec.Rel("d"))
content1, err = fileHelper.Read(spec.Rel("d"))
Expect(err).ToNot(HaveOccurred())
content2, err = helpers.Read(spec.Rel("dd"))
content2, err = fileHelper.Read(spec.Rel("dd"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("s\n"))
Expect(content2).To(Equal("dd\n"))
@@ -198,7 +270,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(1))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(1), options.WithContext(types.NewContext()))
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "extra", Category: "layer", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -215,17 +287,17 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts2)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
for _, artifact := range artifacts2 {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Exists(spec.Rel("etc/hosts"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test1"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("etc/hosts"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test1"))).To(BeTrue())
})
It("Compiles and includes ony wanted files", func() {
@@ -239,7 +311,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -252,14 +324,13 @@ var _ = Describe("Compiler", func() {
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).ToNot(BeTrue())
})
It("Compiles and excludes files", func() {
@@ -273,7 +344,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -288,13 +359,13 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
})
It("Compiles includes and excludes files", func() {
@@ -308,7 +379,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -323,13 +394,13 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).ToNot(BeTrue())
})
It("Compiles and excludes ony wanted files also from unpacked packages", func() {
@@ -343,7 +414,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -357,12 +428,12 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
})
It("Compiles includes and excludes ony wanted files also from unpacked packages", func() {
@@ -376,7 +447,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -390,12 +461,12 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
})
It("Compiles and includes ony wanted files also from unpacked packages", func() {
@@ -409,7 +480,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -423,16 +494,16 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Exists(spec.Rel("var/lib/udhcpd"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test5"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel("test"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel("test2"))).ToNot(BeTrue())
Expect(helpers.Exists(spec.Rel("lib/firmware"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("var/lib/udhcpd"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("marvin"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test2"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("lib/firmware"))).ToNot(BeTrue())
})
It("Compiles a more complex tree", func() {
@@ -446,7 +517,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "pkgs-checker", Category: "package", Version: "9999"})
Expect(err).ToNot(HaveOccurred())
@@ -461,18 +532,18 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Untar(spec.Rel("extra-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
Expect(artifact.NewPackageArtifact(spec.Rel("extra-layer-0.1.package.tar")).Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
Expect(helpers.Exists(spec.Rel("extra-layer"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("extra-layer"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("base-layer-0.1.metadata.yaml"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.metadata.yaml"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("base-layer-0.1.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("extra-layer-0.1.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
})
It("Compiles with provides support", func() {
@@ -486,7 +557,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "d", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -502,19 +573,19 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Untar(spec.Rel("c-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
Expect(artifact.NewPackageArtifact(spec.Rel("c-test-1.0.package.tar")).Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
Expect(helpers.Exists(spec.Rel("d"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("dd"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("c"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("cd"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("d"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("dd"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("c"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("cd"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
})
It("Compiles with provides and selectors support", func() {
@@ -529,7 +600,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "d", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -545,19 +616,19 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Untar(spec.Rel("c-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
Expect(artifact.NewPackageArtifact(spec.Rel("c-test-1.0.package.tar")).Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
Expect(helpers.Exists(spec.Rel("d"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("dd"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("c"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("cd"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("d"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("dd"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("c"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("cd"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("d-test-1.0.metadata.yaml"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("c-test-1.0.metadata.yaml"))).To(BeTrue())
})
It("Compiles revdeps", func() {
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
@@ -570,7 +641,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "extra", Category: "layer", Version: "0.1"})
Expect(err).ToNot(HaveOccurred())
@@ -585,16 +656,40 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(2))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Untar(spec.Rel("extra-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
Expect(artifact.NewPackageArtifact(spec.Rel("extra-layer-0.1.package.tar")).Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
Expect(helpers.Exists(spec.Rel("extra-layer"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("extra-layer"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
})
It("Generates a correct buildtree", func() {
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../tests/fixtures/complex/selection")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(10))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "vhba", Category: "sys-fs-5.4.2", Version: "20190410"})
Expect(err).ToNot(HaveOccurred())
bt, err := compiler.BuildTree(compilerspec.LuetCompilationspecs{*spec})
Expect(err).ToNot(HaveOccurred())
Expect(bt.AllLevels()).To(Equal([]int{0, 1, 2, 3, 4, 5}))
Expect(bt.AllInLevel(0)).To(Equal([]string{"layer/build"}))
Expect(bt.AllInLevel(1)).To(Equal([]string{"layer/sabayon-build-portage"}))
Expect(bt.AllInLevel(2)).To(Equal([]string{"layer/build-sabayon-overlay"}))
Expect(bt.AllInLevel(3)).To(Equal([]string{"layer/build-sabayon-overlays"}))
Expect(bt.AllInLevel(4)).To(ContainElements("sys-kernel/linux-sabayon", "sys-kernel/sabayon-sources"))
Expect(bt.AllInLevel(5)).To(Equal([]string{"sys-fs-5.4.2/vhba"}))
})
It("Compiles complex dependencies trees with best matches", func() {
@@ -608,7 +703,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(10))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "vhba", Category: "sys-fs-5.4.2", Version: "20190410"})
Expect(err).ToNot(HaveOccurred())
@@ -623,17 +718,17 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(1))
Expect(len(artifacts[0].Dependencies)).To(Equal(6))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
Expect(helpers.Untar(spec.Rel("vhba-sys-fs-5.4.2-20190410.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
Expect(helpers.Exists(spec.Rel("sabayon-build-portage-layer-0.20191126.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("build-layer-0.1.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("build-sabayon-overlay-layer-0.20191212.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("build-sabayon-overlays-layer-0.1.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("linux-sabayon-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("sabayon-sources-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("vhba"))).To(BeTrue())
Expect(artifact.NewPackageArtifact(spec.Rel("vhba-sys-fs-5.4.2-20190410.package.tar")).Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(spec.Rel("sabayon-build-portage-layer-0.20191126.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("build-layer-0.1.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("build-sabayon-overlay-layer-0.20191212.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("build-sabayon-overlays-layer-0.1.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("linux-sabayon-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("sabayon-sources-sys-kernel-5.4.2.package.tar"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("vhba"))).To(BeTrue())
})
It("Compiles revdeps with seeds", func() {
@@ -647,7 +742,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
@@ -658,31 +753,31 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(4))
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
Expect(artifact.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
// A depends on B, so A artifacts are here:
Expect(helpers.Exists(spec.Rel("test3"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test4"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
// B
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("artifact42"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("artifact42"))).To(BeTrue())
// C depends on B, so B is here
content1, err := helpers.Read(spec.Rel("c"))
content1, err := fileHelper.Read(spec.Rel("c"))
Expect(err).ToNot(HaveOccurred())
content2, err := helpers.Read(spec.Rel("cd"))
content2, err := fileHelper.Read(spec.Rel("cd"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("c\n"))
Expect(content2).To(Equal("c\n"))
// D is here as it requires C, and C was recompiled
content1, err = helpers.Read(spec.Rel("d"))
content1, err = fileHelper.Read(spec.Rel("d"))
Expect(err).ToNot(HaveOccurred())
content2, err = helpers.Read(spec.Rel("dd"))
content2, err = fileHelper.Read(spec.Rel("dd"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("s\n"))
Expect(content2).To(Equal("dd\n"))
@@ -699,7 +794,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), options.Concurrency(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2), options.WithContext(types.NewContext()))
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
@@ -714,20 +809,20 @@ var _ = Describe("Compiler", func() {
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil())
for _, artifact := range artifacts {
Expect(helpers.Exists(artifact.Path)).To(BeTrue())
Expect(helpers.Untar(artifact.Path, tmpdir, false)).ToNot(HaveOccurred())
for _, a := range artifacts {
Expect(fileHelper.Exists(a.Path)).To(BeTrue())
Expect(a.Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
for _, d := range artifact.Dependencies {
Expect(helpers.Exists(d.Path)).To(BeTrue())
Expect(helpers.Untar(d.Path, tmpdir, false)).ToNot(HaveOccurred())
for _, d := range a.Dependencies {
Expect(fileHelper.Exists(d.Path)).To(BeTrue())
Expect(artifact.NewPackageArtifact(d.Path).Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
}
}
Expect(helpers.Exists(spec.Rel("test3"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test4"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test3"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test4"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test6"))).To(BeTrue())
})
})
@@ -741,7 +836,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
Expect(err).ToNot(HaveOccurred())
@@ -758,9 +853,71 @@ var _ = Describe("Compiler", func() {
Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1))
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
Expect(helpers.Untar(spec.Rel("runtime-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
Expect(helpers.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("var"))).ToNot(BeTrue())
Expect(artifact.NewPackageArtifact(filepath.Join(tmpdir, "runtime-layer-0.1.package.tar")).Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("var"))).ToNot(BeTrue())
})
It("Pushes final images along", func() {
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
randString := strings.ToLower(helpers.String(10))
imageName := fmt.Sprintf("ttl.sh/%s", randString)
b := sd.NewSimpleDockerBackend(ctx)
err := generalRecipe.Load("../../tests/fixtures/packagelayers")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
compiler := NewLuetCompiler(b, generalRecipe.GetDatabase(),
options.EnablePushFinalImages, options.ForcePushFinalImages, options.WithFinalRepository(imageName))
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
Expect(err).ToNot(HaveOccurred())
spec2, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "build", Category: "layer", Version: "0.1"})
Expect(err).ToNot(HaveOccurred())
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
tmpdir, err := ioutil.TempDir("", "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2))
Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(2))
//Expect(len(artifacts[0].Dependencies)).To(Equal(1))
Expect(b.ImageAvailable(fmt.Sprintf("%s:%s", imageName, artifacts[0].Runtime.ImageID()))).To(BeTrue())
Expect(b.ImageAvailable(fmt.Sprintf("%s:%s", imageName, artifacts[0].Runtime.GetMetadataFilePath()))).To(BeTrue())
Expect(b.ImageAvailable(fmt.Sprintf("%s:%s", imageName, artifacts[1].Runtime.ImageID()))).To(BeTrue())
Expect(b.ImageAvailable(fmt.Sprintf("%s:%s", imageName, artifacts[1].Runtime.GetMetadataFilePath()))).To(BeTrue())
img, err := b.ImageReference(fmt.Sprintf("%s:%s", imageName, artifacts[0].Runtime.ImageID()), true)
Expect(err).ToNot(HaveOccurred())
_, path, err := image.Extract(ctx, img, false, nil)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(path) // clean up
Expect(fileHelper.Exists(filepath.Join(path, "bin/busybox"))).To(BeTrue())
img, err = b.ImageReference(fmt.Sprintf("%s:%s", imageName, artifacts[1].Runtime.GetMetadataFilePath()), true)
Expect(err).ToNot(HaveOccurred())
_, path, err = image.Extract(ctx, img, false, nil)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(path) // clean up
meta := filepath.Join(path, artifacts[1].Runtime.GetMetadataFilePath())
Expect(fileHelper.Exists(meta)).To(BeTrue())
d, err := ioutil.ReadFile(meta)
Expect(err).ToNot(HaveOccurred())
Expect(string(d)).To(ContainSubstring(artifacts[1].CompileSpec.GetPackage().GetName()))
})
})
@@ -773,7 +930,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{
Name: "dironly",
@@ -806,14 +963,14 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts)).To(Equal(2))
Expect(len(artifacts[0].Dependencies)).To(Equal(0))
Expect(helpers.Untar(spec.Rel("dironly-test-1.0.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
Expect(helpers.Exists(spec.Rel("test1"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("test2"))).To(BeTrue())
Expect(artifact.NewPackageArtifact(filepath.Join(tmpdir, "dironly-test-1.0.package.tar")).Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(spec.Rel("test1"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("test2"))).To(BeTrue())
Expect(helpers.Untar(spec2.Rel("dironly_filter-test-1.0.package.tar"), tmpdir2, false)).ToNot(HaveOccurred())
Expect(helpers.Exists(spec2.Rel("test5"))).To(BeTrue())
Expect(helpers.Exists(spec2.Rel("test6"))).ToNot(BeTrue())
Expect(helpers.Exists(spec2.Rel("artifact42"))).ToNot(BeTrue())
Expect(artifact.NewPackageArtifact(filepath.Join(tmpdir2, "dironly_filter-test-1.0.package.tar")).Unpack(ctx, tmpdir2, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(spec2.Rel("test5"))).To(BeTrue())
Expect(fileHelper.Exists(spec2.Rel("test6"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec2.Rel("artifact42"))).ToNot(BeTrue())
})
})
@@ -826,7 +983,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.WithContext(types.NewContext()))
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
Expect(err).ToNot(HaveOccurred())
@@ -843,12 +1000,12 @@ var _ = Describe("Compiler", func() {
Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1))
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.package.tar.gz"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.package.tar"))).To(BeFalse())
Expect(artifacts[0].Unpack(tmpdir, false)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(spec.Rel("runtime-layer-0.1.package.tar.gz"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("runtime-layer-0.1.package.tar"))).To(BeFalse())
Expect(artifacts[0].Unpack(ctx, tmpdir, false)).ToNot(HaveOccurred())
// Expect(helpers.Untar(spec.Rel("runtime-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
Expect(helpers.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
Expect(helpers.Exists(spec.Rel("var"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("var"))).ToNot(BeTrue())
})
})
@@ -861,7 +1018,7 @@ var _ = Describe("Compiler", func() {
err := generalRecipe.Load("../../tests/fixtures/includeimage")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.WithContext(types.NewContext()))
specs, err := compiler.FromDatabase(generalRecipe.GetDatabase(), true, "")
Expect(err).ToNot(HaveOccurred())
@@ -880,7 +1037,7 @@ var _ = Describe("Compiler", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.WithContext(types.NewContext()))
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
Expect(err).ToNot(HaveOccurred())
@@ -899,7 +1056,7 @@ var _ = Describe("Compiler", func() {
Expect(len(artifacts[0].Dependencies)).To(Equal(1))
Expect(artifacts[0].Files).To(ContainElement("bin/busybox"))
Expect(helpers.Exists(spec.Rel("runtime-layer-0.1.metadata.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel("runtime-layer-0.1.metadata.yaml"))).To(BeTrue())
art, err := LoadArtifactFromYaml(spec)
Expect(err).ToNot(HaveOccurred())


@@ -0,0 +1,161 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler
import (
"fmt"
"github.com/mudler/luet/pkg/api/core/types"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
"github.com/pkg/errors"
)
// ImageHashTree holds the Database
// and the options to resolve PackageImageHashTrees
// for a given specfile.
// It is responsible for returning a concrete result
// which identifies a Package in a HashTree.
type ImageHashTree struct {
Database pkg.PackageDatabase
SolverOptions types.LuetSolverOptions
}
// PackageImageHashTree represents the Package in a given image hash tree.
// The hash tree is constructed from a set of images representing
// the package during its build stage. A Hash is assigned to each image
// from the package fingerprint, plus the SAT solver assertion result (which is hashed as well)
// and the specfile signatures. This guarantees that each image of the build stage
// is unique and can be identified later on.
type PackageImageHashTree struct {
Target *solver.PackageAssert
Dependencies solver.PackagesAssertions
Solution solver.PackagesAssertions
dependencyBuilderImageHashes map[string]string
SourceHash string
BuilderImageHash string
}
func NewHashTree(db pkg.PackageDatabase) *ImageHashTree {
return &ImageHashTree{
Database: db,
}
}
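// A minimal usage sketch, assuming db is a pkg.PackageDatabase, luetCompiler is a
// *LuetCompiler built on top of that database, and spec is a
// *compilerspec.LuetCompilationSpec obtained via luetCompiler.FromPackage:
//
//	ht := NewHashTree(db)
//	packageHash, err := ht.Query(luetCompiler, spec)
//	if err != nil {
//		return err
//	}
//	// packageHash.Target, packageHash.Dependencies and packageHash.BuilderImageHash
//	// now identify the build-stage images of the spec.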
func (ht *PackageImageHashTree) DependencyBuildImage(p pkg.Package) (string, error) {
found, ok := ht.dependencyBuilderImageHashes[p.GetFingerPrint()]
if !ok {
return "", errors.New("package hash not found")
}
return found, nil
}
func (ht *PackageImageHashTree) String() string {
return fmt.Sprintf(
"Target buildhash: %s\nTarget packagehash: %s\nBuilder Imagehash: %s\nSource Imagehash: %s\n",
ht.Target.Hash.BuildHash,
ht.Target.Hash.PackageHash,
ht.BuilderImageHash,
ht.SourceHash,
)
}
// Query takes a compiler and a compilation spec and returns a PackageImageHashTree tied to it.
// The PackageImageHashTree contains all the information needed to resolve the spec build images,
// so that images can be reproducibly re-built from packages.
func (ht *ImageHashTree) Query(cs *LuetCompiler, p *compilerspec.LuetCompilationSpec) (*PackageImageHashTree, error) {
assertions, err := ht.resolve(cs, p)
if err != nil {
return nil, err
}
targetAssertion := assertions.Search(p.GetPackage().GetFingerPrint())
dependencies := assertions.Drop(p.GetPackage())
var sourceHash string
imageHashes := map[string]string{}
for _, assertion := range dependencies {
var depbuildImageTag string
compileSpec, err := cs.FromPackage(assertion.Package)
if err != nil {
return nil, errors.Wrap(err, "Error while generating compilespec for "+assertion.Package.GetName())
}
if compileSpec.GetImage() != "" {
depbuildImageTag = assertion.Hash.BuildHash
} else {
depbuildImageTag = ht.genBuilderImageTag(compileSpec, targetAssertion.Hash.PackageHash)
}
imageHashes[assertion.Package.GetFingerPrint()] = depbuildImageTag
sourceHash = assertion.Hash.PackageHash
}
return &PackageImageHashTree{
Dependencies: dependencies,
Target: targetAssertion,
SourceHash: sourceHash,
BuilderImageHash: ht.genBuilderImageTag(p, targetAssertion.Hash.PackageHash),
dependencyBuilderImageHashes: imageHashes,
Solution: assertions,
}, nil
}
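// A short sketch of consuming the result of Query above, assuming packageHash is the
// returned *PackageImageHashTree and dep is a pkg.Package that appears among the
// resolved dependencies:
//
//	image, err := packageHash.DependencyBuildImage(dep)
//	if err != nil {
//		return err // "package hash not found" if dep is not part of the solution
//	}
//	fmt.Println("builder image hash for", dep.GetName(), "is", image)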
func (ht *ImageHashTree) genBuilderImageTag(p *compilerspec.LuetCompilationSpec, packageImage string) string {
// Use packageImage as a salt for the fingerprint being used,
// so the hash stays unique even in cases where
// some package dependencies have completely different
// depgraphs.
return fmt.Sprintf("builder-%s", p.GetPackage().HashFingerprint(packageImage))
}
// resolve computes the dependency tree of a compilation spec and returns solver assertions
// in order to be able to compile the spec.
func (ht *ImageHashTree) resolve(cs *LuetCompiler, p *compilerspec.LuetCompilationSpec) (solver.PackagesAssertions, error) {
dependencies, err := cs.ComputeDepTree(p)
if err != nil {
return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().HumanReadableString())
}
// Get hash from buildspecs
salts := map[string]string{}
for _, assertion := range dependencies { //highly dependent on the order
if assertion.Value {
spec, err := cs.FromPackage(assertion.Package)
if err != nil {
return nil, errors.Wrap(err, "while computing hash buildspecs")
}
hash, err := spec.Hash()
if err != nil {
return nil, errors.Wrap(err, "failed computing hash")
}
salts[assertion.Package.GetFingerPrint()] = hash
}
}
assertions := solver.PackagesAssertions{}
for _, assertion := range dependencies { //highly dependent on the order
if assertion.Value {
nthsolution := dependencies.Cut(assertion.Package)
assertion.Hash = solver.PackageHash{
BuildHash: nthsolution.SaltedHashFrom(assertion.Package, salts),
PackageHash: nthsolution.SaltedAssertionHash(salts),
}
assertion.Package.SetTreeDir(p.Package.GetTreeDir())
assertions = append(assertions, assertion)
}
}
return assertions, nil
}


@@ -0,0 +1,151 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler_test
import (
"github.com/mudler/luet/pkg/api/core/types"
. "github.com/mudler/luet/pkg/compiler"
sd "github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/options"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/tree"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("ImageHashTree", func() {
ctx := types.NewContext()
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2))
hashtree := NewHashTree(generalRecipe.GetDatabase())
Context("Simple package definition", func() {
BeforeEach(func() {
generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../tests/fixtures/buildable")
Expect(err).ToNot(HaveOccurred())
compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2))
hashtree = NewHashTree(generalRecipe.GetDatabase())
})
It("Calculates the hash correctly", func() {
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
packageHash, err := hashtree.Query(compiler, spec)
Expect(err).ToNot(HaveOccurred())
Expect(packageHash.Target.Hash.BuildHash).To(Equal("895697a8bb51b219b78ed081fa1b778801e81505bb03f56acafcf3c476620fc1"))
Expect(packageHash.Target.Hash.PackageHash).To(Equal("2a6c3dc0dd7af2902fd8823a24402d89b2030cfbea6e63fe81afb34af8b1a005"))
Expect(packageHash.BuilderImageHash).To(Equal("builder-3a28d240f505d69123735a567beaf80e"))
})
})
expectedPackageHash := "f3f42a7435293225e92a51da8416f90b7c0ccd5958cd5c72276c39ece408c01f"
Context("complex package definition", func() {
BeforeEach(func() {
generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../tests/fixtures/upgrade_old_repo_revision")
Expect(err).ToNot(HaveOccurred())
compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2))
hashtree = NewHashTree(generalRecipe.GetDatabase())
})
It("Calculates the hash correctly", func() {
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
packageHash, err := hashtree.Query(compiler, spec)
Expect(err).ToNot(HaveOccurred())
Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(expectedPackageHash))
Expect(packageHash.SourceHash).To(Equal(expectedPackageHash))
Expect(packageHash.BuilderImageHash).To(Equal("builder-977129605c0d7e974cc8a431a563cec1"))
//Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
Expect(packageHash.Target.Hash.PackageHash).To(Equal("9112e2c97bf8ca998c1df303a9ebc4957b685930c882e9aa556eab4507220079"))
a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}
hash, err := packageHash.DependencyBuildImage(a)
Expect(err).ToNot(HaveOccurred())
Expect(hash).To(Equal("b4b61939260263582da1dfa5289182a0a7570ef8658f3b01b1997fe5d8a95e49"))
assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())
Expect(assertionA.Hash.PackageHash).To(Equal(expectedPackageHash))
b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())
Expect(assertionB.Hash.PackageHash).To(Equal("b4b61939260263582da1dfa5289182a0a7570ef8658f3b01b1997fe5d8a95e49"))
hashB, err := packageHash.DependencyBuildImage(b)
Expect(err).ToNot(HaveOccurred())
Expect(hashB).To(Equal("fc6fdd4bd62d51fc06c2c22e8bc56543727a2340220972594e28c623ea3a9c6c"))
})
})
Context("complex package definition, with small change in build.yaml", func() {
BeforeEach(func() {
generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
//Definition of A here is slightly changed in the steps build.yaml file (1 character only)
err := generalRecipe.Load("../../tests/fixtures/upgrade_old_repo_revision_content_changed")
Expect(err).ToNot(HaveOccurred())
compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2))
hashtree = NewHashTree(generalRecipe.GetDatabase())
})
It("Calculates the hash correctly", func() {
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
packageHash, err := hashtree.Query(compiler, spec)
Expect(err).ToNot(HaveOccurred())
Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).ToNot(Equal(expectedPackageHash))
sourceHash := "f0d96e79a0021b065674c58203158a02ed702ff29cc2ee6605e1f16f1fae9b89"
Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(sourceHash))
Expect(packageHash.SourceHash).To(Equal(sourceHash))
Expect(packageHash.SourceHash).ToNot(Equal(expectedPackageHash))
Expect(packageHash.BuilderImageHash).To(Equal("builder-05506adb3777ed74825254e8b46da5e9"))
//Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
Expect(packageHash.Target.Hash.PackageHash).To(Equal("489ece9825e18eba2de91e93293d5c61578d492d42bbfe9b6ae2e2c3c11bc842"))
a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}
hash, err := packageHash.DependencyBuildImage(a)
Expect(err).ToNot(HaveOccurred())
Expect(hash).To(Equal("b4b61939260263582da1dfa5289182a0a7570ef8658f3b01b1997fe5d8a95e49"))
assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())
Expect(assertionA.Hash.PackageHash).To(Equal("f0d96e79a0021b065674c58203158a02ed702ff29cc2ee6605e1f16f1fae9b89"))
Expect(assertionA.Hash.PackageHash).ToNot(Equal(expectedPackageHash))
b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())
Expect(assertionB.Hash.PackageHash).To(Equal("b4b61939260263582da1dfa5289182a0a7570ef8658f3b01b1997fe5d8a95e49"))
hashB, err := packageHash.DependencyBuildImage(b)
Expect(err).ToNot(HaveOccurred())
Expect(hashB).To(Equal("fc6fdd4bd62d51fc06c2c22e8bc56543727a2340220972594e28c623ea3a9c6c"))
})
})
})


@@ -1,795 +0,0 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact
import (
"archive/tar"
"bufio"
"bytes"
"crypto/sha1"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
zstd "github.com/klauspost/compress/zstd"
gzip "github.com/klauspost/pgzip"
//"strconv"
"strings"
"sync"
bus "github.com/mudler/luet/pkg/bus"
backend "github.com/mudler/luet/pkg/compiler/backend"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
)
// When compiling, we also write a fingerprint.metadata.yaml file with the PackageArtifact. In this way we can have another command to create the repository,
// which then consists of just a repository.yaml describing the repository structure with the list of package artifacts.
// In this way a generic client can fetch the packages and, after unpacking the tree, perform queries to install packages.
type PackageArtifact struct {
Path string `json:"path"`
Dependencies []*PackageArtifact `json:"dependencies"`
CompileSpec *compilerspec.LuetCompilationSpec `json:"compilationspec"`
Checksums Checksums `json:"checksums"`
SourceAssertion solver.PackagesAssertions `json:"-"`
CompressionType compression.Implementation `json:"compressiontype"`
Files []string `json:"files"`
}
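// A minimal sketch of the metadata flow described above, assuming spec is the
// *compilerspec.LuetCompilationSpec of an already compiled package and dst is the
// output directory that should receive its metadata file:
//
//	a := NewPackageArtifact("/path/to/foo-category-1.0.package.tar")
//	a.CompileSpec = spec
//	// WriteYaml computes the checksums and writes the package metadata YAML into dst.
//	if err := a.WriteYaml(dst); err != nil {
//		return err
//	}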
func (p *PackageArtifact) ShallowCopy() *PackageArtifact {
copy := *p
return &copy
}
func NewPackageArtifact(path string) *PackageArtifact {
return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: compression.None}
}
func NewPackageArtifactFromYaml(data []byte) (*PackageArtifact, error) {
p := &PackageArtifact{Checksums: Checksums{}}
err := yaml.Unmarshal(data, &p)
if err != nil {
return p, err
}
return p, err
}
func (a *PackageArtifact) Hash() error {
return a.Checksums.Generate(a)
}
func (a *PackageArtifact) Verify() error {
sum := Checksums{}
if err := sum.Generate(a); err != nil {
return err
}
if err := sum.Compare(a.Checksums); err != nil {
return err
}
return nil
}
func (a *PackageArtifact) WriteYaml(dst string) error {
// First compute the checksum of the artifact. When we write the yaml we want to write up-to-date information.
err := a.Hash()
if err != nil {
return errors.Wrap(err, "Failed generating checksums for artifact")
}
//p := a.CompileSpec.GetPackage().GetPath()
//a.CompileSpec.GetPackage().SetPath("")
// for _, ass := range a.CompileSpec.GetSourceAssertion() {
// ass.Package.SetPath("")
// }
data, err := yaml.Marshal(a)
if err != nil {
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
}
bus.Manager.Publish(bus.EventPackagePreBuildArtifact, a)
mangle, err := NewPackageArtifactFromYaml(data)
if err != nil {
return errors.Wrap(err, "Generated invalid artifact")
}
//p := a.CompileSpec.GetPackage().GetPath()
mangle.CompileSpec.GetPackage().SetPath("")
for _, ass := range mangle.CompileSpec.GetSourceAssertion() {
ass.Package.SetPath("")
}
data, err = yaml.Marshal(mangle)
if err != nil {
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
}
err = ioutil.WriteFile(filepath.Join(dst, a.CompileSpec.GetPackage().GetMetadataFilePath()), data, os.ModePerm)
if err != nil {
return errors.Wrap(err, "While writing PackageArtifact YAML")
}
//a.CompileSpec.GetPackage().SetPath(p)
bus.Manager.Publish(bus.EventPackagePostBuildArtifact, a)
return nil
}
func (a *PackageArtifact) GetFileName() string {
return path.Base(a.Path)
}
func (a *PackageArtifact) genDockerfile() string {
return `
FROM scratch
COPY . /`
}
// CreateArtifactForFile creates a new artifact from the given file
func CreateArtifactForFile(s string, opts ...func(*PackageArtifact)) (*PackageArtifact, error) {
if _, err := os.Stat(s); os.IsNotExist(err) {
return nil, errors.Wrap(err, "artifact path doesn't exist")
}
fileName := path.Base(s)
archive, err := LuetCfg.GetSystem().TempDir("archive")
if err != nil {
return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
}
defer os.RemoveAll(archive) // clean up
dst := filepath.Join(archive, fileName)
if err := helpers.CopyFile(s, dst); err != nil {
return nil, errors.Wrapf(err, "error while copying %s to %s", s, dst)
}
artifact, err := LuetCfg.GetSystem().TempDir("artifact")
if err != nil {
return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
}
a := &PackageArtifact{Path: filepath.Join(artifact, fileName)}
for _, o := range opts {
o(a)
}
return a, a.Compress(archive, 1)
}
type ImageBuilder interface {
BuildImage(backend.Options) error
}
// GenerateFinalImage takes an artifact and builds a Docker image with its content
func (a *PackageArtifact) GenerateFinalImage(imageName string, b ImageBuilder, keepPerms bool) (backend.Options, error) {
builderOpts := backend.Options{}
archive, err := LuetCfg.GetSystem().TempDir("archive")
if err != nil {
return builderOpts, errors.Wrap(err, "error met while creating tempdir for "+a.Path)
}
defer os.RemoveAll(archive) // clean up
uncompressedFiles := filepath.Join(archive, "files")
dockerFile := filepath.Join(archive, "Dockerfile")
if err := os.MkdirAll(uncompressedFiles, os.ModePerm); err != nil {
return builderOpts, errors.Wrap(err, "error met while creating tempdir for "+a.Path)
}
if err := a.Unpack(uncompressedFiles, keepPerms); err != nil {
return builderOpts, errors.Wrap(err, "error met while uncompressing artifact "+a.Path)
}
empty, err := helpers.DirectoryIsEmpty(uncompressedFiles)
if err != nil {
return builderOpts, errors.Wrap(err, "error met while checking if directory is empty "+uncompressedFiles)
}
// See https://github.com/moby/moby/issues/38039.
// We can't generate FROM scratch empty images. Docker will refuse to export them.
// Workaround: inject an empty .virtual file.
if empty {
helpers.Touch(filepath.Join(uncompressedFiles, ".virtual"))
}
data := a.genDockerfile()
if err := ioutil.WriteFile(dockerFile, []byte(data), 0644); err != nil {
return builderOpts, errors.Wrap(err, "error met while rendering artifact dockerfile "+a.Path)
}
builderOpts = backend.Options{
ImageName: imageName,
SourcePath: archive,
DockerFileName: dockerFile,
Context: uncompressedFiles,
}
return builderOpts, b.BuildImage(builderOpts)
}
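// A usage sketch, assuming b is a backend satisfying the ImageBuilder interface
// (e.g. the Docker backend) and a is an already compressed artifact:
//
//	opts, err := a.GenerateFinalImage("example.org/cache/foo:1.0", b, true)
//	if err != nil {
//		return err
//	}
//	// opts.ImageName now holds the tag of the freshly built image.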
// Compress is responsible for archiving and compressing to the artifact Path.
// It accepts a source path, which is the content to be archived/compressed,
// and a concurrency parameter.
func (a *PackageArtifact) Compress(src string, concurrency int) error {
switch a.CompressionType {
case compression.Zstandard:
err := helpers.Tar(src, a.Path)
if err != nil {
return err
}
original, err := os.Open(a.Path)
if err != nil {
return err
}
defer original.Close()
zstdFile := a.getCompressedName()
bufferedReader := bufio.NewReader(original)
// Open a file for writing.
dst, err := os.Create(zstdFile)
if err != nil {
return err
}
enc, err := zstd.NewWriter(dst)
if err != nil {
return err
}
_, err = io.Copy(enc, bufferedReader)
if err != nil {
enc.Close()
return err
}
if err := enc.Close(); err != nil {
return err
}
os.RemoveAll(a.Path) // Remove original
Debug("Removed artifact", a.Path)
a.Path = zstdFile
return nil
case compression.GZip:
err := helpers.Tar(src, a.Path)
if err != nil {
return err
}
original, err := os.Open(a.Path)
if err != nil {
return err
}
defer original.Close()
gzipfile := a.getCompressedName()
bufferedReader := bufio.NewReader(original)
// Open a file for writing.
dst, err := os.Create(gzipfile)
if err != nil {
return err
}
// Create gzip writer.
w := gzip.NewWriter(dst)
w.SetConcurrency(1<<20, concurrency)
defer w.Close()
defer dst.Close()
_, err = io.Copy(w, bufferedReader)
if err != nil {
return err
}
w.Close()
os.RemoveAll(a.Path) // Remove original
Debug("Removed artifact", a.Path)
// a.CompressedPath = gzipfile
a.Path = gzipfile
return nil
//a.Path = gzipfile
// Defaults to tar only (covers when "none" is supplied)
default:
return helpers.Tar(src, a.getCompressedName())
}
}
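// A short sketch, assuming sourceDir holds the files produced by the build and
// concurrency is the number of gzip workers to use:
//
//	a := NewPackageArtifact("/tmp/foo-category-1.0.package.tar")
//	a.CompressionType = compression.GZip
//	if err := a.Compress(sourceDir, concurrency); err != nil {
//		return err
//	}
//	// a.Path now points to /tmp/foo-category-1.0.package.tar.gz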
func (a *PackageArtifact) getCompressedName() string {
switch a.CompressionType {
case compression.Zstandard:
return a.Path + ".zst"
case compression.GZip:
return a.Path + ".gz"
}
return a.Path
}
// GetUncompressedName returns the artifact path without the extension suffix
func (a *PackageArtifact) GetUncompressedName() string {
switch a.CompressionType {
case compression.Zstandard, compression.GZip:
return strings.TrimSuffix(a.Path, filepath.Ext(a.Path))
}
return a.Path
}
func hashContent(bv []byte) string {
hasher := sha1.New()
hasher.Write(bv)
sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
return sha
}
func hashFileContent(path string) (string, error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
defer f.Close()
h := sha1.New()
if _, err := io.Copy(h, f); err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}
func tarModifierWrapperFunc(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
// If the destination path already exists, the target file name is renamed with a postfix.
var destPath string
// Read data. TODO: We need to change the archive callback to permit returning a Reader
buffer := bytes.Buffer{}
if content != nil {
if _, err := buffer.ReadFrom(content); err != nil {
return nil, nil, err
}
}
tarHash := hashContent(buffer.Bytes())
// If the file is not present in the archive but is defined in the mods,
// we still receive the callback. Prevent a nil exception.
if header != nil {
switch header.Typeflag {
case tar.TypeReg:
destPath = filepath.Join(dst, path)
default:
// Nothing to do. I return original reader
return header, buffer.Bytes(), nil
}
existingHash := ""
f, err := os.Lstat(destPath)
if err == nil {
Debug("File exists already, computing hash for", destPath)
hash, herr := hashFileContent(destPath)
if herr == nil {
existingHash = hash
}
}
Debug("Existing file hash: ", existingHash, "Tar file hashsum: ", tarHash)
// We want to protect the file only if the file hashes differ OR the file sizes differ
differs := (existingHash != "" && existingHash != tarHash) || (err != nil && f != nil && header.Size != f.Size())
// Check if exists
if helpers.Exists(destPath) && differs {
for i := 1; i < 1000; i++ {
name := filepath.Join(filepath.Join(filepath.Dir(path),
fmt.Sprintf("._cfg%04d_%s", i, filepath.Base(path))))
if helpers.Exists(name) {
continue
}
Info(fmt.Sprintf("Found protected file %s. Creating %s.", destPath,
filepath.Join(dst, name)))
return &tar.Header{
Mode: header.Mode,
Typeflag: header.Typeflag,
PAXRecords: header.PAXRecords,
Name: name,
}, buffer.Bytes(), nil
}
}
}
return header, buffer.Bytes(), nil
}
func (a *PackageArtifact) GetProtectFiles() []string {
ans := []string{}
annotationDir := ""
if !LuetCfg.ConfigProtectSkip {
// a.CompileSpec could be nil when artifact.Unpack is used for tree tarball
if a.CompileSpec != nil &&
a.CompileSpec.GetPackage().HasAnnotation(string(pkg.ConfigProtectAnnnotation)) {
dir, ok := a.CompileSpec.GetPackage().GetAnnotations()[string(pkg.ConfigProtectAnnnotation)]
if ok {
annotationDir = dir
}
}
// TODO: check if skip this if we have a.CompileSpec nil
cp := NewConfigProtect(annotationDir)
cp.Map(a.Files)
// NOTE: for unpack we need files path without initial /
ans = cp.GetProtectFiles(false)
}
return ans
}
// Unpack untars and decompresses (TODO) the artifact to the given path
func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
// Create
protectedFiles := a.GetProtectFiles()
tarModifier := helpers.NewTarModifierWrapper(dst, tarModifierWrapperFunc)
switch a.CompressionType {
case compression.Zstandard:
// Create the uncompressed archive
archive, err := os.Create(a.Path + ".uncompressed")
if err != nil {
return err
}
defer os.RemoveAll(a.Path + ".uncompressed")
defer archive.Close()
original, err := os.Open(a.Path)
if err != nil {
return errors.Wrap(err, "Cannot open "+a.Path)
}
defer original.Close()
bufferedReader := bufio.NewReader(original)
d, err := zstd.NewReader(bufferedReader)
if err != nil {
return err
}
defer d.Close()
_, err = io.Copy(archive, d)
if err != nil {
return errors.Wrap(err, "Cannot copy to "+a.Path+".uncompressed")
}
err = helpers.UntarProtect(a.Path+".uncompressed", dst,
LuetCfg.GetGeneral().SameOwner, protectedFiles, tarModifier)
if err != nil {
return err
}
return nil
case compression.GZip:
// Create the uncompressed archive
archive, err := os.Create(a.Path + ".uncompressed")
if err != nil {
return err
}
defer os.RemoveAll(a.Path + ".uncompressed")
defer archive.Close()
original, err := os.Open(a.Path)
if err != nil {
return errors.Wrap(err, "Cannot open "+a.Path)
}
defer original.Close()
bufferedReader := bufio.NewReader(original)
r, err := gzip.NewReader(bufferedReader)
if err != nil {
return err
}
defer r.Close()
_, err = io.Copy(archive, r)
if err != nil {
return errors.Wrap(err, "Cannot copy to "+a.Path+".uncompressed")
}
err = helpers.UntarProtect(a.Path+".uncompressed", dst,
LuetCfg.GetGeneral().SameOwner, protectedFiles, tarModifier)
if err != nil {
return err
}
return nil
// Defaults to tar only (covers when "none" is supplied)
default:
return helpers.UntarProtect(a.Path, dst, LuetCfg.GetGeneral().SameOwner,
protectedFiles, tarModifier)
}
return errors.New("Compression type must be supplied")
}
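A hedged usage sketch of Unpack, assuming an artifact that already sits on disk as a zstd-compressed tarball (the path is hypothetical and error handling is trimmed):

// Sketch, calling code (not part of the diff):
a := NewPackageArtifact("/var/cache/luet/foo-1.0.package.tar.zst") // hypothetical path
a.CompressionType = compression.Zstandard
// Unpack decompresses to <path>.uncompressed first, then untars with the
// config-protect modifier, so differing protected files are renamed rather than overwritten.
if err := a.Unpack("/tmp/rootfs", true); err != nil {
	return err
}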
// FileList generates the list of files of a package from the local archive
func (a *PackageArtifact) FileList() ([]string, error) {
var tr *tar.Reader
switch a.CompressionType {
case compression.Zstandard:
archive, err := os.Create(a.Path + ".uncompressed")
if err != nil {
return []string{}, err
}
defer os.RemoveAll(a.Path + ".uncompressed")
defer archive.Close()
original, err := os.Open(a.Path)
if err != nil {
return []string{}, errors.Wrap(err, "Cannot open "+a.Path)
}
defer original.Close()
bufferedReader := bufio.NewReader(original)
r, err := zstd.NewReader(bufferedReader)
if err != nil {
return []string{}, err
}
defer r.Close()
tr = tar.NewReader(r)
case compression.GZip:
// Create the uncompressed archive
archive, err := os.Create(a.Path + ".uncompressed")
if err != nil {
return []string{}, err
}
defer os.RemoveAll(a.Path + ".uncompressed")
defer archive.Close()
original, err := os.Open(a.Path)
if err != nil {
return []string{}, errors.Wrap(err, "Cannot open "+a.Path)
}
defer original.Close()
bufferedReader := bufio.NewReader(original)
r, err := gzip.NewReader(bufferedReader)
if err != nil {
return []string{}, err
}
defer r.Close()
tr = tar.NewReader(r)
// Defaults to tar only (covers when "none" is supplied)
default:
tarFile, err := os.Open(a.Path)
if err != nil {
return []string{}, errors.Wrap(err, "Could not open package archive")
}
defer tarFile.Close()
tr = tar.NewReader(tarFile)
}
var files []string
// walk each tar entry
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return []string{}, err
}
// determine proper file path info
finfo := hdr.FileInfo()
fileName := hdr.Name
// skip directories; record every other entry
if finfo.Mode().IsDir() {
continue
}
files = append(files, fileName)
}
return files, nil
}
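A short companion sketch for FileList, reusing the artifact from the previous sketch; it walks the same archive without extracting it:

// Sketch (not part of the diff): directories are skipped, every other entry is listed.
files, err := a.FileList()
if err != nil {
	return err
}
for _, f := range files {
	fmt.Println(f)
}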
type CopyJob struct {
Src, Dst string
Artifact string
}
func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
defer wg.Done()
for job := range s {
_, err := os.Lstat(job.Dst)
if err != nil {
Debug("Copying ", job.Src)
if err := helpers.DeepCopyFile(job.Src, job.Dst); err != nil {
Warning("Error copying", job, err)
}
}
}
}
func compileRegexes(regexes []string) []*regexp.Regexp {
var result []*regexp.Regexp
for _, i := range regexes {
r, e := regexp.Compile(i)
if e != nil {
Warning("Failed compiling regex:", e)
continue
}
result = append(result, r)
}
return result
}
type ArtifactNode struct {
Name string `json:"Name"`
Size int `json:"Size"`
}
type ArtifactDiffs struct {
Additions []ArtifactNode `json:"Adds"`
Deletions []ArtifactNode `json:"Dels"`
Changes []ArtifactNode `json:"Mods"`
}
type ArtifactLayer struct {
FromImage string `json:"Image1"`
ToImage string `json:"Image2"`
Diffs ArtifactDiffs `json:"Diff"`
}
// ExtractArtifactFromDelta builds a package artifact from the layer diffs of an image; src may be a rootfs directory or a tar archive
func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, excludes []string, t compression.Implementation) (*PackageArtifact, error) {
archive, err := LuetCfg.GetSystem().TempDir("archive")
if err != nil {
return nil, errors.Wrap(err, "Error met while creating tempdir for archive")
}
defer os.RemoveAll(archive) // clean up
if strings.HasSuffix(src, ".tar") {
rootfs, err := LuetCfg.GetSystem().TempDir("rootfs")
if err != nil {
return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(rootfs) // clean up
err = helpers.Untar(src, rootfs, keepPerms)
if err != nil {
return nil, errors.Wrap(err, "Error met while unpacking rootfs")
}
src = rootfs
}
toCopy := make(chan CopyJob)
var wg = new(sync.WaitGroup)
for i := 0; i < concurrency; i++ {
wg.Add(1)
go worker(i, wg, toCopy)
}
// Handle includes in spec. If specified they filter what gets in the package
if len(includes) > 0 && len(excludes) == 0 {
includeRegexp := compileRegexes(includes)
for _, l := range layers {
// Only Additions are copied; Changes and Deletions are just logged below
ADDS:
for _, a := range l.Diffs.Additions {
for _, i := range includeRegexp {
if i.MatchString(a.Name) {
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
continue ADDS
}
}
}
for _, a := range l.Diffs.Changes {
Debug("File ", a.Name, " changed")
}
for _, a := range l.Diffs.Deletions {
Debug("File ", a.Name, " deleted")
}
}
} else if len(includes) == 0 && len(excludes) != 0 {
excludeRegexp := compileRegexes(excludes)
for _, l := range layers {
// Only Additions are copied; Changes and Deletions are just logged below
ADD:
for _, a := range l.Diffs.Additions {
for _, i := range excludeRegexp {
if i.MatchString(a.Name) {
continue ADD
}
}
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
}
for _, a := range l.Diffs.Changes {
Debug("File ", a.Name, " changed")
}
for _, a := range l.Diffs.Deletions {
Debug("File ", a.Name, " deleted")
}
}
} else if len(includes) != 0 && len(excludes) != 0 {
includeRegexp := compileRegexes(includes)
excludeRegexp := compileRegexes(excludes)
for _, l := range layers {
// Only Additions are copied; Changes and Deletions are just logged below
EXCLUDES:
for _, a := range l.Diffs.Additions {
for _, i := range includeRegexp {
if i.MatchString(a.Name) {
for _, e := range excludeRegexp {
if e.MatchString(a.Name) {
continue EXCLUDES
}
}
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
continue EXCLUDES
}
}
}
for _, a := range l.Diffs.Changes {
Debug("File ", a.Name, " changed")
}
for _, a := range l.Diffs.Deletions {
Debug("File ", a.Name, " deleted")
}
}
} else {
// Otherwise just grab all
for _, l := range layers {
// Only Additions are copied; Changes and Deletions are just logged below
for _, a := range l.Diffs.Additions {
Debug("File ", a.Name, " added")
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
}
for _, a := range l.Diffs.Changes {
Debug("File ", a.Name, " changed")
}
for _, a := range l.Diffs.Deletions {
Debug("File ", a.Name, " deleted")
}
}
}
close(toCopy)
wg.Wait()
a := NewPackageArtifact(dst)
a.CompressionType = t
err = a.Compress(archive, concurrency)
if err != nil {
return nil, errors.Wrap(err, "Error met while creating package archive")
}
return a, nil
}
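A hedged call sketch, mirroring how the test further below exercises this function; rootfs would come from the backend's ExtractRootfs and layers from compiler.GenerateChanges, and the include pattern is purely illustrative:

// Sketch (not part of the diff); rootfs and layers are assumed to exist already.
a, err := ExtractArtifactFromDelta(
	rootfs,                   // extracted image rootfs (or a .tar of it)
	"/tmp/out/package.tar",   // hypothetical destination artifact
	layers,                   // []ArtifactLayer computed from the image diff
	2,                        // concurrency
	false,                    // keepPerms
	[]string{"^/usr/bin/.*"}, // includes (hypothetical)
	[]string{},               // excludes
	compression.Zstandard,
)
if err != nil {
	return err
}
fmt.Println("packed:", a.Path)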


@@ -1,268 +0,0 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact_test
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/mudler/luet/pkg/compiler"
. "github.com/mudler/luet/pkg/compiler/backend"
backend "github.com/mudler/luet/pkg/compiler/backend"
. "github.com/mudler/luet/pkg/compiler/types/artifact"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
. "github.com/mudler/luet/pkg/compiler"
helpers "github.com/mudler/luet/pkg/helpers"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/tree"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Artifact", func() {
Context("Simple package build definition", func() {
It("Generates a verified delta", func() {
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../tests/fixtures/buildtree")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
cc := NewLuetCompiler(nil, generalRecipe.GetDatabase())
lspec, err := cc.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
Expect(err).ToNot(HaveOccurred())
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
Expect(lspec.Image).To(Equal("luet/base"))
Expect(lspec.Seed).To(Equal("alpine"))
tmpdir, err := ioutil.TempDir(os.TempDir(), "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpdir2, err := ioutil.TempDir(os.TempDir(), "tree2")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir2) // clean up
unpacked, err := ioutil.TempDir(os.TempDir(), "unpacked")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(unpacked) // clean up
rootfs, err := ioutil.TempDir(os.TempDir(), "rootfs")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(rootfs) // clean up
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM alpine
COPY . /luetbuild
WORKDIR /luetbuild
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin`))
b := NewSimpleDockerBackend()
opts := backend.Options{
ImageName: "luet/base",
SourcePath: tmpdir,
DockerFileName: "Dockerfile",
Destination: filepath.Join(tmpdir2, "output1.tar"),
}
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err = helpers.Read(filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM luet/base
COPY . /luetbuild
WORKDIR /luetbuild
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin
RUN echo foo > /test
RUN echo bar > /test2`))
opts2 := backend.Options{
ImageName: "test",
SourcePath: tmpdir,
DockerFileName: "LuetDockerfile",
Destination: filepath.Join(tmpdir, "output2.tar"),
}
Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
diffs, err := compiler.GenerateChanges(b, opts, opts2)
Expect(err).ToNot(HaveOccurred())
artifacts := []ArtifactNode{{
Name: "/luetbuild/LuetDockerfile",
Size: 175,
}}
if os.Getenv("DOCKER_BUILDKIT") == "1" {
artifacts = append(artifacts, ArtifactNode{Name: "/etc/resolv.conf", Size: 0})
}
artifacts = append(artifacts, ArtifactNode{Name: "/test", Size: 4})
artifacts = append(artifacts, ArtifactNode{Name: "/test2", Size: 4})
Expect(diffs).To(Equal(
[]ArtifactLayer{{
FromImage: "luet/base",
ToImage: "test",
Diffs: ArtifactDiffs{
Additions: artifacts,
},
}}))
err = b.ExtractRootfs(backend.Options{ImageName: "test", Destination: rootfs}, false)
Expect(err).ToNot(HaveOccurred())
a, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, []string{}, compression.None)
Expect(err).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir, "package.tar"))).To(BeTrue())
err = helpers.Untar(a.Path, unpacked, false)
Expect(err).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(unpacked, "test"))).To(BeTrue())
Expect(helpers.Exists(filepath.Join(unpacked, "test2"))).To(BeTrue())
content1, err := helpers.Read(filepath.Join(unpacked, "test"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("foo\n"))
content2, err := helpers.Read(filepath.Join(unpacked, "test2"))
Expect(err).ToNot(HaveOccurred())
Expect(content2).To(Equal("bar\n"))
err = a.Hash()
Expect(err).ToNot(HaveOccurred())
err = a.Verify()
Expect(err).ToNot(HaveOccurred())
Expect(helpers.CopyFile(filepath.Join(tmpdir, "output2.tar"), filepath.Join(tmpdir, "package.tar"))).ToNot(HaveOccurred())
err = a.Verify()
Expect(err).To(HaveOccurred())
})
It("Generates packages images", func() {
b := NewSimpleDockerBackend()
imageprefix := "foo/"
testString := []byte(`funky test data`)
tmpdir, err := ioutil.TempDir(os.TempDir(), "artifact")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpWork, err := ioutil.TempDir(os.TempDir(), "artifact2")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpWork) // clean up
Expect(os.MkdirAll(filepath.Join(tmpdir, "foo", "bar"), os.ModePerm)).ToNot(HaveOccurred())
err = ioutil.WriteFile(filepath.Join(tmpdir, "test"), testString, 0644)
Expect(err).ToNot(HaveOccurred())
err = ioutil.WriteFile(filepath.Join(tmpdir, "foo", "bar", "test"), testString, 0644)
Expect(err).ToNot(HaveOccurred())
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}}
err = a.Compress(tmpdir, 1)
Expect(err).ToNot(HaveOccurred())
resultingImage := imageprefix + "foo--1.0"
opts, err := a.GenerateFinalImage(resultingImage, b, false)
Expect(err).ToNot(HaveOccurred())
Expect(opts.ImageName).To(Equal(resultingImage))
Expect(b.ImageExists(resultingImage)).To(BeTrue())
result, err := ioutil.TempDir(os.TempDir(), "result")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(result) // clean up
err = b.ExtractRootfs(backend.Options{ImageName: resultingImage, Destination: result}, false)
Expect(err).ToNot(HaveOccurred())
content, err := ioutil.ReadFile(filepath.Join(result, "test"))
Expect(err).ToNot(HaveOccurred())
Expect(content).To(Equal(testString))
content, err = ioutil.ReadFile(filepath.Join(result, "foo", "bar", "test"))
Expect(err).ToNot(HaveOccurred())
Expect(content).To(Equal(testString))
})
It("Generates empty packages images", func() {
b := NewSimpleDockerBackend()
imageprefix := "foo/"
tmpdir, err := ioutil.TempDir(os.TempDir(), "artifact")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpWork, err := ioutil.TempDir(os.TempDir(), "artifact2")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpWork) // clean up
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}}
err = a.Compress(tmpdir, 1)
Expect(err).ToNot(HaveOccurred())
resultingImage := imageprefix + "foo--1.0"
opts, err := a.GenerateFinalImage(resultingImage, b, false)
Expect(err).ToNot(HaveOccurred())
Expect(opts.ImageName).To(Equal(resultingImage))
Expect(b.ImageExists(resultingImage)).To(BeTrue())
result, err := ioutil.TempDir(os.TempDir(), "result")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(result) // clean up
err = b.ExtractRootfs(backend.Options{ImageName: resultingImage, Destination: result}, false)
Expect(err).ToNot(HaveOccurred())
Expect(helpers.DirectoryIsEmpty(result)).To(BeFalse())
content, err := ioutil.ReadFile(filepath.Join(result, ".virtual"))
Expect(err).ToNot(HaveOccurred())
Expect(string(content)).To(Equal(""))
})
It("Retrieves uncompressed name", func() {
a := NewPackageArtifact("foo.tar.gz")
a.CompressionType = (compression.GZip)
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
a = NewPackageArtifact("foo.tar.zst")
a.CompressionType = compression.Zstandard
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
a = NewPackageArtifact("foo.tar")
a.CompressionType = compression.None
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
})
})
})


@@ -1,78 +0,0 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact
import (
//"strconv"
"crypto/sha256"
"fmt"
"hash"
"io"
"os"
// . "github.com/mudler/luet/pkg/logger"
"github.com/pkg/errors"
)
type HashImplementation string
const (
SHA256 HashImplementation = "sha256"
)
type Checksums map[string]string
type HashOptions struct {
Hasher hash.Hash
Type HashImplementation
}
// Generate generates all Checksums supported for the artifact
func (c *Checksums) Generate(a *PackageArtifact) error {
return c.generateSHA256(a)
}
func (c Checksums) Compare(d Checksums) error {
for t, sum := range d {
if v, ok := c[t]; ok && v != sum {
return errors.New("Checksum mismsatch")
}
}
return nil
}
func (c *Checksums) generateSHA256(a *PackageArtifact) error {
return c.generateSum(a, HashOptions{Hasher: sha256.New(), Type: SHA256})
}
func (c *Checksums) generateSum(a *PackageArtifact, opts HashOptions) error {
f, err := os.Open(a.Path)
if err != nil {
return err
}
defer f.Close()
if _, err := io.Copy(opts.Hasher, f); err != nil {
return err
}
sum := fmt.Sprintf("%x", opts.Hasher.Sum(nil))
(*c)[string(opts.Type)] = sum
return nil
}
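A brief usage sketch of the Checksums type above; the artifact variable and the expected value are hypothetical:

// Sketch (not part of the diff):
built := Checksums{}
if err := built.Generate(builtArtifact); err != nil { // fills the "sha256" entry
	return err
}
expected := Checksums{"sha256": "expected-sha256-hex"} // e.g. read from repository metadata
if err := expected.Compare(built); err != nil {
	return err // checksums differ for at least one common hash type
}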


@@ -18,8 +18,8 @@ package options
import (
"runtime"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/solver"
)
@@ -33,7 +33,7 @@ type Compiler struct {
Wait bool
OnlyDeps bool
NoDeps bool
SolverOptions config.LuetSolverOptions
SolverOptions types.LuetSolverOptions
BuildValuesFile []string
BuildValues []map[string]interface{}
@@ -43,6 +43,17 @@ type Compiler struct {
BackendArgs []string
BackendType string
// TemplatesFolder should default to tree/templates
TemplatesFolder []string
// Tells whether to push final container images after building
PushFinalImages bool
PushFinalImagesForce bool
// Image repository to push to
PushFinalImagesRepository string
Context *types.Context
}
func NewDefaultCompiler() *Compiler {
@@ -55,7 +66,7 @@ func NewDefaultCompiler() *Compiler {
Concurrency: runtime.NumCPU(),
OnlyDeps: false,
NoDeps: false,
SolverOptions: config.LuetSolverOptions{Options: solver.Options{Concurrency: 1, Type: solver.SingleCoreSimple}},
SolverOptions: types.LuetSolverOptions{Options: solver.Options{Concurrency: 1, Type: solver.SingleCoreSimple}},
}
}
@@ -80,6 +91,25 @@ func WithOptions(opt *Compiler) func(cfg *Compiler) error {
}
}
// WithFinalRepository sets the final repository to push
// images of built artifacts to
func WithFinalRepository(r string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.PushFinalImagesRepository = r
return nil
}
}
func EnablePushFinalImages(cfg *Compiler) error {
cfg.PushFinalImages = true
return nil
}
func ForcePushFinalImages(cfg *Compiler) error {
cfg.PushFinalImagesForce = true
return nil
}
func WithBackendType(r string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.BackendType = r
@@ -87,6 +117,13 @@ func WithBackendType(r string) func(cfg *Compiler) error {
}
}
func WithTemplateFolder(r []string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.TemplatesFolder = r
return nil
}
}
func WithBuildValues(r []string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.BuildValuesFile = r
@@ -101,6 +138,8 @@ func WithPullRepositories(r []string) func(cfg *Compiler) error {
}
}
// WithPushRepository sets the image reference to push
// cache images to
func WithPushRepository(r string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
if len(cfg.PullImageRepository) == 0 {
@@ -191,9 +230,16 @@ func WithCompressionType(t compression.Implementation) func(cfg *Compiler) error
}
}
func WithSolverOptions(c config.LuetSolverOptions) func(cfg *Compiler) error {
func WithSolverOptions(c types.LuetSolverOptions) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.SolverOptions = c
return nil
}
}
func WithContext(c *types.Context) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.Context = c
return nil
}
}
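For context, a hedged sketch of how the new push-final-images options compose with the existing functional-options pattern; the code that actually applies these option funcs to a *Compiler is not shown in this diff:

// Sketch (not part of the diff):
cfg := NewDefaultCompiler()
for _, o := range []func(cfg *Compiler) error{
	WithFinalRepository("quay.io/example/final"), // hypothetical repository
	EnablePushFinalImages,
	ForcePushFinalImages,
	WithContext(ctx), // ctx is an assumed *types.Context
} {
	if err := o(cfg); err != nil {
		return err
	}
}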


@@ -16,15 +16,18 @@
package compilerspec
import (
"fmt"
"io/ioutil"
"path/filepath"
"github.com/mitchellh/hashstructure/v2"
options "github.com/mudler/luet/pkg/compiler/types/options"
"github.com/ghodss/yaml"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
"github.com/otiai10/copy"
yaml "gopkg.in/yaml.v2"
dirhash "golang.org/x/mod/sumdb/dirhash"
)
type LuetCompilationspecs []LuetCompilationSpec
@@ -85,6 +88,13 @@ func (specs *LuetCompilationspecs) Unique() *LuetCompilationspecs {
return &newSpecs
}
type CopyField struct {
Package *pkg.DefaultPackage `json:"package"`
Image string `json:"image"`
Source string `json:"source"`
Destination string `json:"destination"`
}
type LuetCompilationSpec struct {
Steps []string `json:"steps"` // Are run inside a container and the result layer diff is saved
Env []string `json:"env"`
@@ -103,15 +113,74 @@ type LuetCompilationSpec struct {
Excludes []string `json:"excludes"`
BuildOptions *options.Compiler `json:"build_options"`
Copy []CopyField `json:"copy"`
RequiresFinalImages bool `json:"requires_final_images" yaml:"requires_final_images"`
}
// Signature is a portion of the spec that yields a signature for the hash
type Signature struct {
Image string
Steps []string
PackageDir string
Prelude []string
Seed string
Env []string
Retrieve []string
Unpack bool
Includes []string
Excludes []string
Copy []CopyField
Requires pkg.DefaultPackages
RequiresFinalImages bool
}
func (cs *LuetCompilationSpec) signature() Signature {
return Signature{
Image: cs.Image,
Steps: cs.Steps,
PackageDir: cs.PackageDir,
Prelude: cs.Prelude,
Seed: cs.Seed,
Env: cs.Env,
Retrieve: cs.Retrieve,
Unpack: cs.Unpack,
Includes: cs.Includes,
Excludes: cs.Excludes,
Copy: cs.Copy,
Requires: cs.Package.GetRequires(),
RequiresFinalImages: cs.RequiresFinalImages,
}
}
func NewLuetCompilationSpec(b []byte, p pkg.Package) (*LuetCompilationSpec, error) {
var spec LuetCompilationSpec
var packageDefinition pkg.DefaultPackage
err := yaml.Unmarshal(b, &spec)
if err != nil {
return &spec, err
}
spec.Package = p.(*pkg.DefaultPackage)
err = yaml.Unmarshal(b, &packageDefinition)
if err != nil {
return &spec, err
}
// When we have been passed a byte slice, parse it as a package and update
// requires/conflicts/provides. This is required to allow manipulating such
// fields with templating.
copy := *p.(*pkg.DefaultPackage)
spec.Package = &copy
if len(packageDefinition.GetRequires()) != 0 {
spec.Package.Requires(packageDefinition.GetRequires())
}
if len(packageDefinition.GetConflicts()) != 0 {
spec.Package.Conflicts(packageDefinition.GetConflicts())
}
if len(packageDefinition.GetProvides()) != 0 {
spec.Package.SetProvides(packageDefinition.GetProvides())
}
return &spec, nil
}
func (cs *LuetCompilationSpec) GetSourceAssertion() solver.PackagesAssertions {
@@ -196,7 +265,7 @@ func (cs *LuetCompilationSpec) SetSeedImage(s string) {
}
func (cs *LuetCompilationSpec) EmptyPackage() bool {
return len(cs.BuildSteps()) == 0 && len(cs.GetPreBuildSteps()) == 0 && !cs.UnpackedPackage()
return len(cs.BuildSteps()) == 0 && !cs.UnpackedPackage()
}
func (cs *LuetCompilationSpec) UnpackedPackage() bool {
@@ -213,7 +282,21 @@ func (cs *LuetCompilationSpec) UnpackedPackage() bool {
// a compilation spec has an image source when it depends on other packages or has a source image
// explicitly supplied
func (cs *LuetCompilationSpec) HasImageSource() bool {
return (cs.Package != nil && len(cs.GetPackage().GetRequires()) != 0) || cs.GetImage() != ""
return (cs.Package != nil && len(cs.GetPackage().GetRequires()) != 0) || cs.GetImage() != "" || (cs.RequiresFinalImages && len(cs.Package.GetRequires()) != 0)
}
func (cs *LuetCompilationSpec) Hash() (string, error) {
// build a signature: only the fields that are relevant for build purposes take part in the hash
signature := cs.signature()
h, err := hashstructure.Hash(signature, hashstructure.FormatV2, nil)
if err != nil {
return "", err
}
sum, err := dirhash.HashDir(cs.Package.Path, "", dirhash.DefaultHash)
if err != nil {
return fmt.Sprint(h), err
}
return fmt.Sprint(h, sum), err
}
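In short, the hash combines a struct hash of the Signature fields (hashstructure) with a dirhash of the package directory, so changing either the build recipe or any file under the package path produces a new hash. A minimal call sketch, assuming a spec with Package.Path set:

// Sketch (not part of the diff):
h, err := spec.Hash()
if err != nil {
	return err
}
fmt.Println("build hash:", h)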
func (cs *LuetCompilationSpec) CopyRetrieves(dest string) error {
@@ -256,6 +339,13 @@ ADD ` + s + ` /luetbuild/`
}
}
for _, c := range cs.Copy {
if c.Image != "" {
copyLine := fmt.Sprintf("\nCOPY --from=%s %s %s\n", c.Image, c.Source, c.Destination)
spec = spec + copyLine
}
}
for _, s := range cs.Env {
spec = spec + `
ENV ` + s

Some files were not shown because too many files have changed in this diff.