Compare commits

888 Commits

Author SHA1 Message Date
Ettore Di Giacinto
01638567a7 Tag 0.21.0 2021-12-16 00:22:17 +01:00
Ettore Di Giacinto
fbe9b038dd 🔧 Consider removals when appending packages to be uninstalled 2021-12-15 21:11:21 +01:00
Ettore Di Giacinto
0a90129e34 🔧 Restore tree imglist hash output
Fixes #271
2021-12-15 18:38:47 +01:00
Ettore Di Giacinto
b05b00c615 🔧 🎨 Enhance package upgrade strategy order
Enhance package upgrade ordering during swap, taking into account the
files shipped by packages.

This change also introduces a new method for clients to get the
underlying cache data, which the installer consumes to fix the progress bar display
2021-12-15 18:04:45 +01:00
Ettore Di Giacinto
938d41fe9e 🔧 Allow to perform automatically oscheck after upgrades 2021-12-12 12:23:30 +01:00
Ettore Di Giacinto
163bd77d27 🔧 Emit post/pre upgrade events 2021-12-12 10:45:28 +01:00
Ettore Di Giacinto
309f5c0559 📒 update vendor/ 2021-12-07 18:26:35 +01:00
Ettore Di Giacinto
1f6d0cc66c 🆕 Update go-pluggable 2021-12-07 18:23:49 +01:00
Ettore Di Giacinto
07e37ea059 🔧 Add luet reinstall --installed
Fixes #273
2021-12-07 18:22:05 +01:00
Ettore Di Giacinto
432b1db116 🆕 Tag 0.20.13 2021-12-06 21:47:12 +01:00
Ettore Di Giacinto
8e16d3abd3 🔧 Use ImageID for generating dockerfile names
It is safer, and plays better with buildx
2021-12-06 21:46:15 +01:00
Ettore Di Giacinto
1f29fdd680 🔧 Add oscheck
Fixes #50
2021-12-05 23:22:56 +01:00
Ettore Di Giacinto
da85a7306f 🔧 Consistently use Tempdir in compiler 2021-12-04 21:48:43 +01:00
Ettore Di Giacinto
78307eef57 🔧 Add contextual logging accessors 2021-12-04 21:40:32 +01:00
Ettore Di Giacinto
e11521ddce 📒 Update CONTRIBUTING 2021-12-04 21:35:59 +01:00
Ettore Di Giacinto
1e6aca0ba1 🔧 CLI: add quiet mode 2021-12-04 21:35:34 +01:00
Ettore Di Giacinto
79e98af604 Handle error if we can't generate a compilation spec from a package 2021-11-27 21:12:14 +01:00
Ettore Di Giacinto
71d5b03382 Tag 0.20.12 2021-11-25 15:04:16 +01:00
Ettore Di Giacinto
a02ab16510 Don't load requires while parsing compilespec that consume final images
Otherwise, when depending on those packages, we try to compile the full
tree instead of reconstructing the image resulting from a join, while
keeping the revdep tree invariant
2021-11-25 14:18:15 +01:00
Ettore Di Giacinto
ba0551caab Tag 0.20.11 2021-11-22 12:11:39 +01:00
Ettore Di Giacinto
44e66cc729 Use tarball.LayerFromOpener
tarball.LayerFromReader slurps the whole src into memory. The tradeoff
is that we might read the file multiple times, as internally it is
opened multiple times.
2021-11-22 11:27:46 +01:00
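
A minimal sketch (assuming go-containerregistry's tarball package, which the commit references) of the difference described above: LayerFromOpener re-opens the tarball on demand instead of buffering the whole source in memory.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/google/go-containerregistry/pkg/v1/tarball"
)

func main() {
	src := "/tmp/package.tar" // hypothetical artifact path

	// The opener is invoked each time the layer content is needed, so the
	// file may be read several times but is never slurped into RAM at once.
	layer, err := tarball.LayerFromOpener(func() (io.ReadCloser, error) {
		return os.Open(src)
	})
	if err != nil {
		panic(err)
	}

	digest, _ := layer.Digest()
	fmt.Println("layer digest:", digest)
}
```
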
Ettore Di Giacinto
80412e2e5d Add luet util pack 2021-11-18 15:33:18 +01:00
Ettore Di Giacinto
df2be8acfe Tag 0.20.10 2021-11-15 22:14:45 +01:00
Ettore Di Giacinto
a2d91a2aee fixup: sanitize metadata images name 2021-11-15 21:10:15 +01:00
Ettore Di Giacinto
bb88fe7e9c 🆕 Tag 0.20.9 2021-11-10 16:29:48 +01:00
Ettore Di Giacinto
702a9f17db Drop code which is called already by containerd
Drop also direct xattrs handling
2021-11-10 16:28:22 +01:00
Ettore Di Giacinto
c58a462e79 🆕 Tag 0.20.8 2021-11-05 23:44:27 +01:00
Ettore Di Giacinto
1e78570c50 Allow to push final images while compiling
Add --push-final-images, --push-final-images-repository and
--push-final-images-force to luet build.

--push-final-images enables pushing final artifacts during build. Each
package built will be packed and pushed to the final repository
specified with --push-final-images-repository. If pushing final images
is enabled and no final repository is specified, it defaults to the
cache repository.

--push-final-images-force allows force-pushing final images even if
they are already available on the specified registry
2021-11-05 23:03:29 +01:00
Ettore Di Giacinto
0589bead99 Tag 0.20.7 2021-11-04 13:02:23 +01:00
Ettore Di Giacinto
fba420865a 🔧 Preserve suid,sgid and sticky bits when extracting images 2021-11-04 11:34:36 +01:00
Ettore Di Giacinto
9857bea5ff 🎨 Lazy progressbar start 2021-10-31 21:21:08 +01:00
Ettore Di Giacinto
100c313804 Tag 0.20.6 2021-10-31 12:21:15 +01:00
Ettore Di Giacinto
d43b8c4af0 Attach platform data when creating images from tars 2021-10-31 10:48:35 +01:00
Ettore Di Giacinto
384ae8e833 Tag 0.20.5 2021-10-29 11:34:28 +02:00
Ettore Di Giacinto
c7f9708f90 Add CreateTar to image API
Add an API call which uses go-containerregistry to create OCI images from
standard tar archives.

Consume the new API when generating final images instead of docker-building
them, and adapt/add tests as necessary.

This change now allows carrying over xattrs to final images.

Fixes #266
2021-10-29 10:35:03 +02:00
Itxaka
1b35a674ea Print plugin success messages + print plugin location on load (#267)
* report plugin state on success

We have a state field in the plugin response that is not being used
for anything. This patch makes luet print the state reported from the
plugin, if it's not empty, as a way for plugins to report data on success
to users. If the field is empty it will be ignored.

Signed-off-by: Itxaka <igarcia@suse.com>

* Print plugin path

This patch adds the plugin location to the printed plugin list for a
richer view of the loaded plugins

Signed-off-by: Itxaka <igarcia@suse.com>
2021-10-28 11:42:56 +02:00
Ettore Di Giacinto
5e8a9c75dc Tag 0.20.4 2021-10-27 12:05:03 +02:00
Ettore Di Giacinto
b5def989ac Drop unused code 2021-10-27 11:22:06 +02:00
Ettore Di Giacinto
fdb49ce70d cli: render table/lists only on terminal output 2021-10-27 11:17:38 +02:00
Ettore Di Giacinto
37cc186c0b delta: trim path when computing src files set
The path contains a trailing "/" which wouldn't match when we walk dst,
as it's not there.

That had the unpleasant effect of creating empty folders in the
destination
2021-10-27 11:00:10 +02:00
Ettore Di Giacinto
f2f85a2384 ci: Add back -race 2021-10-26 18:05:34 +02:00
Ettore Di Giacinto
9c17432ee9 Tag 0.20.3 2021-10-26 17:34:03 +02:00
Ettore Di Giacinto
9799b7c94b Add Image reference by pipe, refactor 2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
5a7e97d0fb Update vendor 2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
262d09dfbc Lower message levels 2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
b974f44095 Add cache to avoid RAM consumption
When we have huge file lists we can burst too much into RAM, which would
cause OOMs on certain devices. Use instead a smart cache which
automatically drops to disk when necessary.
2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
35fcd868ee Switch to ondisk also when unpacking FS
From benchmarks it seems to be still faster. Add a note for a future
improvement
2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
aea3cdff8d Use ondisk reference for deltas 2021-10-26 16:56:49 +02:00
Ettore Di Giacinto
daa9eb98d2 Walk destination only once when computing delta
Avoid the double pass by constructing the list on the fly
2021-10-26 16:56:49 +02:00
Itxaka
1f0324c452 Log debug before failing (#263)
If a plugin failed, we were skipping the debug info which is kind of
useful :=)

Signed-off-by: Itxaka <igarcia@suse.com>
2021-10-26 11:18:56 +02:00
Ettore Di Giacinto
e705c471eb Tag 0.20.2 2021-10-26 00:30:12 +02:00
Itxaka
7cd455fff4 Set proper error message on plugin failure
Currently we are setting the error message in a full sentence with no
spaces, which is pretty ugly:

| FATA[0000] Pluginluet-cosignat/usr/local/bin/luet-cosignErrorerror while executing plugin: exit status 1

Signed-off-by: Itxaka <igarcia@suse.com>
2021-10-26 00:28:30 +02:00
Ettore Di Giacinto
144c409908 Disable buffer on docker remote
Otherwise this causes the full tarball to be loaded into memory
2021-10-25 23:57:09 +02:00
Ettore Di Giacinto
f6bb7a9405 Make sure to pull images before generating artifacts
Fixes #262
2021-10-25 23:56:38 +02:00
Ettore Di Giacinto
9d3af649f1 Tag 0.20.1 2021-10-24 22:31:20 +02:00
Ettore Di Giacinto
1b1ab6225c Use table lookup for checking addition files 2021-10-24 21:55:42 +02:00
Ettore Di Giacinto
bdcf26401c Prepare for tagging 0.20.0 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
21247331e0 Update README 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
b77b71f6cd cmd: Create output build dir if doesn't exist already 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
bb40b5d1b7 update vendor 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
c220eac061 Move bus to api/core 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
67a07e7c5a Drop link to moby fork 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
c897bffdfc Drop untar 2021-10-24 19:07:41 +02:00
Ettore Di Giacinto
52ad2b5cfa Fixup config protect 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
6ff22d923c Make default build dir over context temp 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
37a9a3ef55 use containerd to uncompress 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
4a45b5410d Introduce lock for installation
It is used to ensure integrity and that we install one package at a
time. This is to ensure that we extract correctly, and that we are not
too I/O intensive depending on the CPU
2021-10-24 18:26:30 +02:00
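
A trivial sketch of the serialization described above: a single lock ensures only one package is extracted at a time, keeping I/O bounded regardless of worker count. Type and method names are illustrative, not luet's.

```go
package main

import (
	"fmt"
	"sync"
)

type installer struct {
	mu sync.Mutex // serializes package extraction
}

func (i *installer) installOne(pkg string) {
	i.mu.Lock()
	defer i.mu.Unlock()
	fmt.Println("extracting", pkg) // heavy I/O happens one package at a time
}

func main() {
	inst := &installer{}
	var wg sync.WaitGroup
	for _, p := range []string{"a", "b", "c"} {
		wg.Add(1)
		go func(p string) {
			defer wg.Done()
			inst.installOne(p)
		}(p)
	}
	wg.Wait()
}
```
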
Ettore Di Giacinto
6b7e77df65 test: drop --race 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
819271b9bd Fixup tests 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
063f704057 update vendor 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
ebbb3aad27 Use API also when pulling from helpers used in client 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
ad489c2157 tests: pull image before running 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
454a560f4c Take count of os separator in extraction 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
a0e7e9ba08 ci: split integration tests 2021-10-24 18:26:30 +02:00
Ettore Di Giacinto
acd685b927 Extract with new image API 2021-10-24 18:26:25 +02:00
Ettore Di Giacinto
ab251fefce update vendor 2021-10-24 18:26:25 +02:00
Ettore Di Giacinto
6a9f19941a Add crane-based methods for extraction
- create a new api package to encapsulate image manipulation
- use new api method to calculate delta

Fixes #258
Fixes #204
Fixes #90
2021-10-24 18:26:08 +02:00
Ettore Di Giacinto
d44befe9ff tests: add context unit tests 2021-10-23 15:26:45 +02:00
Ettore Di Giacinto
73c6cff15b Tag 0.19.2 2021-10-23 12:40:55 +02:00
Ettore Di Giacinto
65892f9bfc ux: Display only success on green 2021-10-23 12:40:55 +02:00
Ettore Di Giacinto
315bfb5a54 Move http timeout to the general configuration
Fixes https://github.com/mudler/luet/issues/250 as now it is documented
in the cli --help too.
2021-10-23 11:41:15 +02:00
Ettore Di Giacinto
57c8236184 fixup: cache miss with docker client 2021-10-23 01:50:01 +02:00
Ettore Di Giacinto
4d60795fdc Tag 0.19.1 2021-10-22 18:44:42 +02:00
Ettore Di Giacinto
6b45b1d61c ux: rework displaying of success messages 2021-10-22 17:55:38 +02:00
Ettore Di Giacinto
5eb5a42bf7 Generate snapshot and push it along
Fixes #260
Fixes #223
2021-10-22 17:55:08 +02:00
Ettore Di Giacinto
0bacdc75f2 Allow clients to pick a specific referenceID
Related to #260
2021-10-22 12:39:36 +02:00
Ettore Di Giacinto
917d0935ad Show progressbar only if terminal is big enough
Fixes #259

The bug is caused by
4c725e56bf/progressbar_printer.go (L190)
which does not take into account when barCurrentLength is <= 0.

As a workaround display it only if the terminal width is big enough.

Now the context drives this kind of runtime state, so we keep everything
tight and we inject only that into the workers
2021-10-22 12:39:07 +02:00
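
A rough sketch (not luet's actual code) of gating the progress bar on terminal presence and width, as the workaround above describes. Assumes golang.org/x/term; the width threshold is illustrative.

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

const minWidthForBar = 100 // hypothetical threshold

func main() {
	fd := int(os.Stdout.Fd())
	if !term.IsTerminal(fd) {
		fmt.Println("not a terminal: plain output")
		return
	}
	width, _, err := term.GetSize(fd)
	if err != nil || width < minWidthForBar {
		fmt.Println("terminal too narrow: skipping progress bar")
		return
	}
	fmt.Println("terminal wide enough: rendering progress bar")
}
```
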
Ettore Di Giacinto
50dfc47bee Tag 0.19.0 2021-10-22 09:25:12 +02:00
Ettore Di Giacinto
8d34a6ebb1 Use the cache to store temporary download files 2021-10-21 23:58:45 +02:00
Ettore Di Giacinto
d58a563d52 uninstall: limit cleanup to s.Target
This was specific to when s.Target differs from /. The helpers creating
the path were considering the full traversal; instead they should stop
at the target.

E.g. our rootfs is in /foo/bar, and while we build the list of paths to
prune for /foo/bar/baz/bar, we should stop looking at /foo/bar.
2021-10-21 23:58:45 +02:00
Ettore Di Giacinto
7b56e915fa cleanup now should take folders into account 2021-10-21 23:58:45 +02:00
Ettore Di Giacinto
a1c669d3ae Define context for scoped operation across core types
It holds necessary state plus additional information about the context
we are being run in (e.g. whether we are in a terminal or not).
Besides, in the future we can also use it as a contextual logger to
provide smarter logging capabilities.

This also replaces the general global configuration instance that
was previously shared between the core components.
2021-10-21 23:58:24 +02:00
Ettore Di Giacinto
b9895c9e05 update vendor 2021-10-21 23:58:19 +02:00
Ettore Di Giacinto
fe14d56afe Massive UX rewrite
- Ditch multiple libraries for progress bar, spinner, and colors, and
  replace them with pterm
- Detect when running on a terminal and automatically disable the spinner
- Add support for multiple progress bars
- Huge rewrite of the configuration part. No more crazy stuff with viper:
  CLI commands now correctly override the default config file as expected
- Limit the banner to be displayed only on relevant parts

Fixes #211
Fixes #105
Fixes #247
Fixes #233
2021-10-21 23:58:00 +02:00
Ettore Di Giacinto
d4edaa9de8 Adapt unit tests 2021-10-19 21:25:01 +02:00
Ettore Di Giacinto
4700d27f0e Adapt integration tests 2021-10-19 21:25:01 +02:00
Ettore Di Giacinto
a6b6909dc4 Update vendor 2021-10-19 21:25:01 +02:00
Ettore Di Giacinto
5b4e930fc3 Do implement a real cache
Instead of merely storing files on disk, use a real cache.

This also finally makes it possible to reference artifacts in the cache by
the package checksum, which solves the cache-hit checksum failures we had
previously.
2021-10-19 21:25:01 +02:00
Ettore Di Giacinto
2eeb464946 Make repositories arch-aware
Introduce an arch field that can be used to filter repositories based on
the local architecture.

If arch is provided and matches the current GOARCH, then the
repository is enabled
2021-10-19 14:19:53 +02:00
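
A minimal sketch of the arch filter described above: a repository carrying an arch field is enabled only when it matches the local GOARCH. Field and type names here are illustrative, not necessarily luet's.

```go
package main

import (
	"fmt"
	"runtime"
)

type repository struct {
	Name string
	Arch string // empty means "any architecture"
}

func enabled(r repository) bool {
	return r.Arch == "" || r.Arch == runtime.GOARCH
}

func main() {
	repos := []repository{
		{Name: "main", Arch: ""},
		{Name: "arm-only", Arch: "arm64"},
	}
	for _, r := range repos {
		fmt.Printf("%s enabled on %s: %v\n", r.Name, runtime.GOARCH, enabled(r))
	}
}
```
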
Ettore Di Giacinto
b00c2ff3cc ci: unit tests now need the luet executable 2021-10-15 21:30:40 +02:00
Ettore Di Giacinto
e764b1cd29 Add CLI helpers and BuildTree heuristic
The BuildTree implementation was in parallel-build-tools, which actually makes
sense to share in the compiler as it can be used as another way to trigger
builds.

The CLI helpers were split across several CLI
implementations. This moves to a new layout where we expose several
packages, as we will refactor things slowly.
2021-10-15 21:14:48 +02:00
Ettore Di Giacinto
619c9aeda0 update vendor 2021-10-14 22:06:48 +02:00
Ettore Di Giacinto
6ea05e59ea Drop imgworker client
Clean up code which allows extracting images in a privileged way. The
containerd way supersedes it.
2021-10-14 22:06:01 +02:00
Ettore Di Giacinto
9c19a7ec35 ci: trigger image pipeline also when tagging 2021-10-14 15:36:39 +02:00
Ettore Di Giacinto
70866c3281 ci: build multi-arch images 2021-10-14 15:28:25 +02:00
Ettore Di Giacinto
c536aaa53c Tag 0.18.1 2021-10-10 21:27:35 +02:00
Ettore Di Giacinto
d5819bb4cb Fixup benchmark tests 2021-10-10 19:07:46 +02:00
Ettore Di Giacinto
6ba028f0ea Make sure we do compute the best fit
While install calls upgrade, which in turn calls a relaxed install on
its results, this doesn't make sure that the new results are at the best
available version. We now iterate over the results to compute the
best set.

It also expands computeUpgrade with the possibility to selectively
choose which packages to upgrade and which not.
2021-10-10 19:04:55 +02:00
Ettore Di Giacinto
780b7aa610 Tag 0.18.0 2021-10-10 00:29:13 +02:00
Ettore Di Giacinto
9de6b22764 Make singlecore default solver 2021-10-10 00:29:13 +02:00
Ettore Di Giacinto
097e310d3a Drop join from compilation specs
Adapt also hash and integration tests
2021-10-10 00:28:54 +02:00
Ettore Di Giacinto
5a63bbd0d2 Deprecate parallel solver 2021-10-10 00:28:54 +02:00
Ettore Di Giacinto
e64f68d36b Introduce install --relaxed
It introduces a relaxed way to install packages with loose deps.
Installation will now by default prefer up-to-date packages during
selection.

Also:
- Upgrade is now used in install, so it has to return the full system view even when there is nothing to upgrade
- Avoid checking upgrades upfront if relaxed is on
2021-10-10 00:27:58 +02:00
Daniele Rondina
77b4c9a972 Update vendor Sabayon/pkgs-checker@v0.8.4 (#255)
* Update vendor Sabayon/pkgs-checker@v0.8.4

* Drop golang.org/x/text

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2021-10-06 13:41:14 +02:00
Ettore Di Giacinto
585b72c3d0 Tag 0.17.13 2021-10-06 12:20:05 +02:00
Ettore Di Giacinto
f7aa6c3428 fix: always append templates from tree path
Otherwise shared templates from trees do not work when
--from-repositories is enabled.
2021-10-06 11:08:53 +02:00
Ettore Di Giacinto
2970d8e52e Tag 0.17.12 2021-09-22 09:46:17 +02:00
Ettore Di Giacinto
ff46bc7641 Don't use semver library for ordering by default
Use Debian version parsing for versions and selections.
This covers ordering of versions like -0, -1 in sequence, plus many others
not available with semver. It is backward compatible as we support
the same feature set as before and tests are passing (actually adding more
to cover interesting cases)
2021-09-21 14:16:39 +02:00
Ettore Di Giacinto
e3063985b2 Tag bugfix release 0.17.11 2021-09-17 15:54:35 +02:00
Ettore Di Giacinto
a348fd4835 Replace Yaml with YAML in function, add debug output when extracting runtime data 2021-09-17 14:46:32 +02:00
Ettore Di Giacinto
fc45eae80a create-repo: annotate runtime definition in artifacts 2021-09-17 14:30:06 +02:00
Ettore Di Giacinto
b73ac21004 create-repo: don't inherit build requirements in runtime 2021-09-17 12:06:39 +02:00
Ettore Di Giacinto
bdd51fa221 Tag: Bump to 0.17.10 2021-09-16 14:46:30 +02:00
Ettore Di Giacinto
4039050449 ci: run in same concurrent group due to registry tests 2021-09-16 13:05:28 +02:00
Ettore Di Giacinto
14914f3c8e Update tests for including packages in tree from metadata
Also switches GenerateRepository to functional interface to allow
more parametrization
2021-09-16 12:59:54 +02:00
Ettore Di Giacinto
e4fff77d43 Source packages metadata to update repository tree
This allows generating trees with `create-repo` by just having
the metadata files, making tree(s) an optional requirement.
2021-09-16 10:56:09 +02:00
Ettore Di Giacinto
972421ae81 Tag 0.17.9 2021-09-06 15:46:58 +02:00
Ettore Di Giacinto
0bd373be2b Merge pull request #252 from Itxaka/consider_provides_names_on_install
installer: Take into consideration provides names
2021-09-06 15:45:25 +02:00
Itxaka
aba89db204 installer: Take into consideration provides names
When installing a package that has a provides values, the installer only
checks the original package against the resolved package. This leads to
the requires keyword not working as expected as the solved final package
is never gonna match the name of the old/superseeded package.

This patch checks the asked-to-install package name against the list of
names that the matched name provides.

Signed-off-by: Itxaka <igarcia@suse.com>
2021-09-06 14:58:50 +02:00
Itxaka
178690842f Show the package name on uninstall error (#251)
* Show the package name on uninstall error

Currently there is a generic error message when uninstalling if one
of the provided packages is not found in the system. This is quite
obnoxious if you are providing luet a big list to uninstall, as you don't
know which package is not installed and what's preventing you from
running the uninstall command.

This patch is a very simple change that prints the package name along
with the error to provide more info to the end user

Signed-off-by: Itxaka <igarcia@suse.com>

* Update pkg/installer/installer.go

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>

Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2021-08-26 14:45:19 +02:00
Ettore Di Giacinto
58f4997a0f Makefile: multiarch-build should only build 2021-08-19 16:41:27 +02:00
Ettore Di Giacinto
5bb65e5b30 Get goreleaser from GH actions 2021-08-19 16:22:20 +02:00
Ettore Di Giacinto
4e918e6bd1 ci: fixup getting goreleaser before build 2021-08-19 11:53:59 +02:00
Daniele Rondina
0f545952cd cmd/config: simplify code (#246) 2021-08-11 16:26:34 +02:00
Ettore Di Giacinto
3402641241 Tag 0.17.8 2021-08-11 14:33:30 +02:00
Ettore Di Giacinto
b81d33f182 Fixup luet tree pkglist/images while having shared templates 2021-08-11 12:52:16 +02:00
Daniele Rondina
0cc8930708 Finalizer envs (#242)
* Allow to define envs for finalizer

Fixes: #241

* tests: Add integration test for finalizer with envs
2021-08-11 11:18:16 +02:00
Ettore Di Giacinto
db784597d7 Move unpack where it belongs
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-11 10:36:41 +02:00
Ettore Di Giacinto
35eb63a31c Create dest-dir if doesn't exist while unpacking without snapshotter
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-11 10:36:41 +02:00
Ettore Di Giacinto
16bb93e165 Don't check file conflicts while running installops
We already check at the beginning during install/upgrade. Also, it's better
not to fail once the upgrade has already started for a check that we already did.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-11 10:36:41 +02:00
Itxaka
220f8700ce Update vendor and go.mod (#245)
Due to https://github.com/mudler/luet/pull/244 being rebased on top of
master instead of develop there is a mismatch in the dependencies.

This should fix it.

Signed-off-by: Itxaka <igarcia@suse.com>
2021-08-11 10:35:48 +02:00
Daniele Rondina
540e8151ad compiler: Speedup & troubleshooting debug messages (#240)
* compiler: Speedup stripFromRootfs and add debug log

* compiler: on stripFromRootfs show files removed with debug
2021-08-11 10:06:52 +02:00
Itxaka
4adc0dc9b9 Use goreleaser to build and release (#244)
Instead of using gox on one side and an action to release, we can merge
them together with goreleaser, which builds for extra targets (arm,
mips if needed in the future) and also takes care of creating
checksums, a source archive, and a changelog, and of creating a release with
all the artifacts.

All binaries should respect the old naming convention, so any scripts
out there should still work.

Signed-off-by: Itxaka <igarcia@suse.com>
2021-08-11 08:30:55 +02:00
Ettore Di Giacinto
0a4fe57f33 Tag 0.17.7 2021-08-09 16:18:51 +02:00
Ettore Di Giacinto
ccf83b0d5f Avoid deadlocking by FindPackages calls 2021-08-09 15:13:53 +02:00
Ettore Di Giacinto
57cefc7d6b Fixup data race when updating revdeps 2021-08-09 14:09:10 +02:00
Ettore Di Giacinto
97ff647f07 Turn tmpdir in abs path if required
Fixes #239
2021-08-09 13:17:16 +02:00
Ettore Di Giacinto
b7ac1e03d5 Add test for file conflicts on upgrade of the same package
This allows us to be certain that we don't raise a conflict error while
upgrading the same package
2021-08-09 13:02:08 +02:00
Ettore Di Giacinto
ff092db97d Increase http timeout to 120s by default 2021-08-09 12:58:01 +02:00
Ettore Di Giacinto
2789f59f53 Add --values flag also to luet tree images 2021-08-09 12:57:36 +02:00
Ettore Di Giacinto
10ae872a3e Simplify revdep code
Isolate common code into a function and also fix a subtle bug hiding in
that code.

We need to stash a copy of the package inside our PackageMap to avoid
having entries pointing at the same value when iterating over
requires. (e.g. it happened in this case:
https://github.com/rancher-sandbox/cOS-toolkit/pull/467#issuecomment-895060115
)
2021-08-09 12:47:27 +02:00
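
An illustrative sketch (not luet's code) of the aliasing pitfall the fix refers to: storing a pointer to the range variable makes every map entry point at the same value, so a copy has to be stashed instead.

```go
package main

import "fmt"

type pkg struct{ Name string }

func main() {
	pkgs := []pkg{{"a"}, {"b"}, {"c"}}

	aliased := map[string]*pkg{}
	copied := map[string]*pkg{}

	for _, p := range pkgs {
		aliased[p.Name] = &p // bug on Go < 1.22: &p is the same address each iteration
		cp := p              // stash a copy first
		copied[p.Name] = &cp // each entry now points at its own value
	}

	fmt.Println(aliased["a"].Name, aliased["b"].Name) // "c c" on Go < 1.22
	fmt.Println(copied["a"].Name, copied["b"].Name)   // "a b"
}
```
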
Ettore Di Giacinto
65a55e242e Tag 0.17.6 2021-08-07 18:04:48 +02:00
Ettore Di Giacinto
77c4bf1fd1 update vendor 2021-08-07 14:46:05 +02:00
Ettore Di Giacinto
4d6cccb2fa Give explanation when formulas are unsat
Fixes #168

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-07 14:46:05 +02:00
Ettore Di Giacinto
ec7be63418 Tag 0.17.5 2021-08-05 09:02:35 +02:00
Ettore Di Giacinto
33b1c63815 Allow to have shared templates across packages
This changeset allows having shared templates in a static folder
"templates" present in each luet tree. If the directory is present, it
gets scanned and templated accordingly on top of each package. This
allows using such a folder to store custom blocks to share between
packages.

This is still experimental and subject to change; this is just a
first-pass version to provide the feature. It still needs to be refined, as it
would be more elegant to use the helm engine properly and map our
structure to the engine instead of adapting it roughly.

Fixes #224
2021-08-04 16:35:28 +02:00
Ettore Di Giacinto
86bd6c5fc0 Tag 0.17.4 2021-08-04 11:55:23 +02:00
Ettore Di Giacinto
658612fcf3 Check file conflicts also on packages that are going to be swapped out
Fixes #237
2021-08-04 10:43:12 +02:00
Ettore Di Giacinto
7128c88da6 Tag 0.17.3 2021-08-03 18:15:40 +02:00
Ettore Di Giacinto
74402fae81 Re-organize common CLI code
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-03 16:48:34 +02:00
Ettore Di Giacinto
9d1594c036 Check if the system-target path supplied is absolute
docker.Untar
(https://github.com/mudler/luet/blob/master/vendor/github.com/docker/docker/pkg/archive/archive.go#L942) requires absolute paths.
We didn't do any input validation before, assuming the paths passed in
were absolute since they were coming from YAML configuration files; now
that this is no longer the case we need to sanitize the input.

With this change we check whether the given path is absolute or relative; if
it's relative we calculate the absolute path and use it instead.
2021-08-03 16:22:59 +02:00
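
A small sketch of the sanitization described above: if the supplied system-target path is relative, convert it into an absolute one before handing it to docker.Untar.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// sanitizeTarget returns the path unchanged if it is already absolute,
// otherwise it resolves it against the current working directory.
func sanitizeTarget(p string) (string, error) {
	if filepath.IsAbs(p) {
		return p, nil
	}
	return filepath.Abs(p)
}

func main() {
	abs, err := sanitizeTarget("rootfs")
	if err != nil {
		panic(err)
	}
	fmt.Println(abs)
}
```
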
Ettore Di Giacinto
75906c4198 Add missing check for error 2021-08-03 13:36:38 +02:00
Ettore Di Giacinto
cb032dc714 Allow to manipulate requires/conflicts/provides
This allows manipulating requires, conflicts and provides with
templating at build time.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-03 13:27:44 +02:00
Ettore Di Giacinto
2c7e495fa1 Add http timeout to grab
grab.NewClient() doesn't set a specific timeout
(https://github.com/cavaliercoder/grab/blob/v2.0.0/client.go#L37) even if
the project advertises "default sane settings".

We default to 30 seconds, and allow setting it with HTTP_TIMEOUT

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-08-03 11:57:44 +02:00
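
A minimal sketch of setting an explicit timeout, assuming grab v2's Client exposes the underlying *http.Client (as the linked source suggests); the HTTP_TIMEOUT handling is illustrative, not luet's exact code.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
	"time"

	"github.com/cavaliercoder/grab"
)

func main() {
	timeout := 30 * time.Second // default mentioned in the commit
	if v := os.Getenv("HTTP_TIMEOUT"); v != "" {
		if secs, err := strconv.Atoi(v); err == nil {
			timeout = time.Duration(secs) * time.Second
		}
	}

	client := grab.NewClient()
	client.HTTPClient.Timeout = timeout // grab.NewClient() leaves this at zero (no timeout)

	fmt.Println("download timeout set to", client.HTTPClient.Timeout)
}
```
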
Ettore Di Giacinto
db8bf2b85e Tag 0.17.2 2021-07-30 13:33:29 +02:00
Ettore Di Giacinto
5eb586ddb0 Speedup conflict check by caching in memory files 2021-07-30 12:40:55 +02:00
Ettore Di Giacinto
f9747cdf87 Tag 0.17.1 2021-07-29 15:07:32 +02:00
Ettore Di Giacinto
becac7d853 Add tests 2021-07-29 11:03:02 +02:00
Ettore Di Giacinto
5aa5bffb48 Add reinstall command
Fixes #51
2021-07-29 11:03:02 +02:00
Ettore Di Giacinto
9aa3159787 Check for file conflicts before install
Fixes #88
2021-07-29 11:03:02 +02:00
Ettore Di Giacinto
9cb6e65bb6 Don't display banner on cleanup 2021-07-29 11:03:02 +02:00
Ettore Di Giacinto
92b243d7aa Add git-chglog template and config 2021-07-12 10:26:22 +02:00
Ettore Di Giacinto
29ec19a8a1 Tag 0.17.0 2021-07-11 11:55:35 +02:00
Ettore Di Giacinto
440e07c418 Display version banner only on specific commands
Avoid cumbersome text in areas that can be parsed by machines

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-11 11:00:59 +02:00
Ettore Di Giacinto
acf74f5896 config_protect: don't protect files during uninstall by default
Otherwise during uninstall we would retain the files which are
protected. We introduced a specific flag to pass during
uninstall, but for now we choose simplicity and the expected default first.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-11 10:40:36 +02:00
Ettore Di Giacinto
1ee1894ffa ci: split integration/unit tests 2021-07-09 17:15:17 +02:00
Ettore Di Giacinto
c8573f9535 Adapt hash tests
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 17:15:17 +02:00
Ettore Di Giacinto
76b70ebeb4 Add dirhash to package signatures
While this breaks current hashing, it also ties the spec content to the
hash; this way, if we change something in the spec folder, it breaks
the hashing for the package.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 12:37:38 +02:00
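
A sketch of tying a hash to the spec directory contents, shown here with the stock golang.org/x/mod dirhash helper for illustration; luet's actual dirhash implementation may differ.

```go
package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	specDir := "./packages/foo/1.0" // hypothetical spec folder

	// Any change to a file inside specDir changes this hash, which in turn
	// breaks the package hash it is mixed into.
	h, err := dirhash.HashDir(specDir, "", dirhash.Hash1)
	if err != nil {
		panic(err)
	}
	fmt.Println("spec dirhash:", h)
}
```
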
Ettore Di Giacinto
2efb17a06c Add specific field to resolve final images
This commit starts the deprecation of the `join` keyword in favor of a
`requires_final_images` boolean in the compilation spec.

The change is driven by two reasons: syntax and guaranteeing unique hashes.

- when computing a hash, the hashtree analyzes the requires field of
  each spec, ignoring the join field
- the join field doesn't add much value. Having it separate suggests
  that a spec can contain both `requires` and `join`, but that's not
  actually true. We just act differently on the same list.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 12:37:38 +02:00
Ettore Di Giacinto
64ab3711ca Display version and license on each run
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 12:25:55 +02:00
Ettore Di Giacinto
eb5d7ba35b Merge branch 'master' into develop 2021-07-09 12:25:04 +02:00
Daniele Rondina
b6b91cfd7a compiler: Avoid generation of delta if there are only prelude steps (#232) 2021-07-09 12:23:32 +02:00
Daniele Rondina
4d8a9a544b cmd/database/create: Avoid creation of the same package multiple times (#222) 2021-06-25 13:09:32 +02:00
Ettore Di Giacinto
654b5b48cd Tag 0.16.7 2021-06-17 07:55:01 +02:00
Ettore Di Giacinto
92e18d5782 Support priv/unpriv image extraction
Optionally add back privileged extraction which can be enabled with
LUET_PRIVILEGED_EXTRACT=true

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-06-16 23:30:28 +02:00
Ettore Di Giacinto
8780e4f16f Unpack using our moby fork
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-06-16 19:16:02 +02:00
Ettore Di Giacinto
a7b4ae67c9 Tag 0.16.6 2021-06-04 16:23:51 +02:00
Ettore Di Giacinto
68edfd58e7 Keep emitting plugin events 2021-06-04 12:08:57 +02:00
Ettore Di Giacinto
0658020c60 Update go.mod and vendor 2021-06-04 11:15:10 +02:00
Ettore Di Giacinto
c3b552103f Return a specific struct for the image
And drop imgworker
2021-06-04 11:09:41 +02:00
Darren Shepherd
796967cc9d Allow luet install to work inside a non-privileged container
This switches from using the containerd snapshotter to go-containerregistry
library which requires no additional privileges beyond root file system
access.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-06-04 10:29:57 +02:00
Ettore Di Giacinto
5cccc34f32 Tag 0.16.5 2021-06-01 22:54:10 +02:00
Ettore Di Giacinto
5ef1d04055 Create CONTRIBUTING.md 2021-06-01 17:13:23 +02:00
Itxaka
1bd4d520a4 Add 2 new events for image unpacking (#226)
* Reduce possibility of circular dependency

Just by adding an import for bus to anything in the helper dir, we would
run into a circular dependency due to how things are structured. That
means that we cannot set any events for unpacking or docker helper
pulling an image.

This commit tries to work around this by doing several things.
 - Remove full imports of the helper module by segmenting some modules
   into their own submodules, like docker or match, so just using a small
   match function doesn't bring in the whole module
 - Instead of importing the full helper module for a simple function that
   checks if a dir exists, write the function inline (5 lines)
 - Using logrus in the bus module instead of logger, which avoids a
   circular dependency

Signed-off-by: Itxaka <igarcia@suse.com>

* Add two new events for unpacking an image

Both pre and post unpacking an image

Signed-off-by: Itxaka <igarcia@suse.com>
2021-06-01 16:43:31 +02:00
Daniele Rondina
b12c7678d4 get_luet_root.sh: Fix installation on alpine/busybox env (#218)
Co-authored-by: Ettore Di Giacinto <mudler@users.noreply.github.com>
2021-05-27 17:17:49 +02:00
Ettore Di Giacinto
32a99a4a49 Update readme with badges and a small logo 2021-05-26 17:57:41 +02:00
Ettore Di Giacinto
56e9c6f82e Tag 0.16.4 2021-05-25 14:55:55 +02:00
Ettore Di Giacinto
92ea69a2b9 Don't always generate artifact
Distinguish whether we should generate artifacts for the deps of a
package or not, and ensure that we only force artifact generation for
targets when computing the join image.

Fixes #217
2021-05-25 13:28:14 +02:00
Ettore Di Giacinto
838899aa83 Respect create-repo --force-push with metadata files
Don't force-push package metadata with the docker repository.

We were pushing .metadata.yaml files regardless, even if packages were
not being pushed, also when calling create-repo without --force-push.
2021-05-25 12:13:23 +02:00
Ettore Di Giacinto
76695b2fc8 docker: check during extraction if image exists and pull if needed 2021-05-25 11:04:06 +02:00
Ettore Di Giacinto
5c84e5b0a7 Tag 0.16.3 2021-05-24 22:23:57 +02:00
Ettore Di Giacinto
06fa8b1c87 fixup: jointag index 2021-05-24 22:13:27 +02:00
Ettore Di Giacinto
ff153f367f fixup: enhance output of copy tag when cycling over packages 2021-05-24 21:35:22 +02:00
Ettore Di Giacinto
459676397c Enhance multi-stage output 2021-05-24 19:40:39 +02:00
Ettore Di Giacinto
93057fbf6d Respect PackageTargetOnly during multi-stage copy image generation 2021-05-24 19:40:17 +02:00
Ettore Di Giacinto
5e1a7c50df fixup: display hash of join image 2021-05-23 18:22:14 +02:00
Ettore Di Giacinto
0ceaf09615 fixup: propagate outputpath 2021-05-23 17:36:09 +02:00
Ettore Di Giacinto
0dc78ebe41 Tag 0.16.2 2021-05-22 14:49:58 +02:00
Ettore Di Giacinto
27c2e3c51f fixup: make sure the outputdir exists 2021-05-21 22:25:18 +02:00
Ettore Di Giacinto
e83f600ed3 Tag 0.16.1 2021-05-21 21:28:33 +02:00
Ettore Di Giacinto
6344e47eb3 fixup: resolve multistage and join also when building deps 2021-05-21 19:47:23 +02:00
Ettore Di Giacinto
8b1c5558b2 Tag 0.16.0 2021-05-21 16:10:28 +02:00
Ettore Di Giacinto
c277ac0f94 Add join keyword to generate parent image from final artifacts
A new keyword `join` is introduced to generate the parent image. It
takes precedence over a `requires` or an `image` already defined in a
spec.

It will generate all the artifacts from the packages listed and join
them in a single image, which will be used as the parent for the package
build process.

This is a change which invalidates previously generated hashes.

Fixes #173
2021-05-21 14:52:48 +02:00
Ettore Di Giacinto
d8c8c2194f Tag 0.15.0 2021-05-20 13:53:53 +02:00
Ettore Di Giacinto
4494385f5b Fixup test rewording match 2021-05-20 11:17:39 +02:00
Ettore Di Giacinto
85a7968ecc Check if we have the image locally and extract directly it 2021-05-20 10:29:41 +02:00
Ettore Di Giacinto
1ba987b0f1 cmd/tree: consume Hashtree to display image hashes 2021-05-19 17:43:14 +02:00
Ettore Di Giacinto
c72b5be364 Update vendor 2021-05-19 17:34:54 +02:00
Ettore Di Giacinto
1ef18ed2c5 Add salted method for assertion hashing
- Add the spec Hash as salt for image hashes
- Add tests and adapt existing ones
- Use a signature to build a spec hash

Fixes: #207
2021-05-19 17:34:01 +02:00
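
A generic sketch of the salting idea above: the spec hash is mixed into the assertion/image hash, so identical inputs built from different specs yield different hashes. Function names are illustrative.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// saltedHash mixes a salt (e.g. the spec hash) into the hash of the data.
func saltedHash(salt, data string) string {
	h := sha256.New()
	h.Write([]byte(salt))
	h.Write([]byte(data))
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	fmt.Println(saltedHash("spec-hash-A", "image-input"))
	fmt.Println(saltedHash("spec-hash-B", "image-input")) // differs thanks to the salt
}
```
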
Ettore Di Giacinto
4b1b711a5c Add single suite tests for spec and artifact
They went missing when the refactoring of the compiler occurred
2021-05-19 17:34:01 +02:00
Ettore Di Giacinto
7f047e4fc2 Multi-stage builds from hash images
Implement multi-stage copy from images that are part of the build cache
of a package.

Note, these are not the final images we are copying files from, but
the underlying build container.

Skipping the test on the img backend because it fails when pulling external
images during a multi-stage build...

Fixes #190
2021-05-18 12:16:35 +02:00
Ettore Di Giacinto
356350f724 Tag 0.14.7 2021-05-17 19:11:44 +02:00
Ettore Di Giacinto
9d2ee1b760 Add hashtree and extract hash logic from compiler
Add unit tests, consume imagehashtree in compiler and cleanup

Fixes: #203
2021-05-17 15:22:08 +02:00
Ettore Di Giacinto
fd12227d53 plugins: Make execution fail if loaded plugins are erroring, print debug output on emitted responses
Now plugins failing to answer make execution fail, adapt tests
2021-05-17 15:17:03 +02:00
Ettore Di Giacinto
1e617b0c67 Add copy field
Initial implementation that just works with standard docker image
references

Related: #190
2021-05-17 15:16:15 +02:00
Ettore Di Giacinto
77b49d9c4a Tag 0.14.6 patchfix 2021-05-16 23:32:54 +02:00
Ettore Di Giacinto
4c3532e3c6 fixup: unchecked err causes invalid read in certain cases 2021-05-16 23:12:54 +02:00
Ettore Di Giacinto
f2ec065a89 Tag 0.14.5 2021-05-16 21:12:32 +02:00
Ettore Di Giacinto
7193ea03f9 Use hasher to work out also big files 2021-05-16 17:21:19 +02:00
Ettore Di Giacinto
beeb0dcaaa Don't write protect file if it is same
Add checksum check to config protect files
Add test file

Fixes #214
2021-05-16 11:54:08 +02:00
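
A minimal sketch of the check described above: skip writing the protected copy when the incoming file has the same checksum as the existing one. Not luet's actual implementation.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

func fileSum(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}

// shouldProtect reports whether a protected copy is needed at all.
func shouldProtect(existing, incoming string) bool {
	a, errA := fileSum(existing)
	b, errB := fileSum(incoming)
	if errA != nil || errB != nil {
		return true // be conservative on errors
	}
	return a != b // identical content needs no protected copy
}

func main() {
	fmt.Println(shouldProtect("/etc/foo.conf", "/tmp/new-foo.conf"))
}
```
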
Ettore Di Giacinto
0de3177ddd Tag 0.14.4 2021-05-14 09:48:45 +02:00
Ettore Di Giacinto
45c8dfa19f Update go-pluggable 2021-05-13 18:13:44 +02:00
Ettore Di Giacinto
186ac33156 Tag 0.14.3 2021-05-11 15:24:26 +02:00
Ettore Di Giacinto
bdc24b84a4 Update go-pluggable
- Update vendor and run go mod tidy
2021-05-11 14:02:15 +02:00
Ettore Di Giacinto
e5d6d21178 Tag 0.14.2 2021-05-07 22:11:45 +02:00
Ettore Di Giacinto
0379855592 Drop docker squash support
It can be easily implemented as a plugin

Fixes: #206
2021-05-04 11:17:10 +02:00
Ettore Di Giacinto
958b8c32e1 Add DeepCopyFile for copies with additional permission bits
It's not needed for most of the copying we do, except when we
generate the deltas

See also #204
2021-05-04 11:14:16 +02:00
Ettore Di Giacinto
b0b95d1721 Skip perms test on img backend 2021-05-01 00:33:46 +02:00
Ettore Di Giacinto
f85891e362 Ensure we carry permissions from dirs while we create packages by delta 2021-05-01 00:14:01 +02:00
Ettore Di Giacinto
946524f90d Tag 0.14.1 2021-04-24 21:09:52 +02:00
Ettore Di Giacinto
2cbd97ff3a Respect replace options 2021-04-24 21:09:25 +02:00
Ettore Di Giacinto
a4d77f8f99 Fixup clean path uninstall
While uninstalling, we weren't checking if we left any empty dir behind.
Now we walk the full path to the file in the artifact, and check each
subdir if it's empty. If it is, we delete it as it is claimed by the
package
2021-04-24 19:21:15 +02:00
Ettore Di Giacinto
adcb459fd2 Inplace upgrades 2021-04-24 18:48:57 +02:00
Ettore Di Giacinto
55ae67be0f Pass by options to compute functions in install 2021-04-24 14:26:26 +02:00
Ettore Di Giacinto
848215eef0 Tag 0.14.0 2021-04-23 19:46:21 +02:00
Ettore Di Giacinto
7bfff97f57 Fixup race when updating compiler options
Instead of updating the compiler options that can be accessed by each
worker, update the compilespec BuildOptions directly
2021-04-23 12:02:41 +02:00
Ettore Di Giacinto
a73f5f9b65 Add more tests 2021-04-23 12:02:41 +02:00
Ettore Di Giacinto
0288eedbc3 Always resolve buildhash image, add --rebuild to build 2021-04-23 12:02:41 +02:00
Ettore Di Giacinto
b27237b7ff Allow to pull images from multiple-repo while generating all artifacts
Before, without --only-target-package, we were forcing images to be
built locally. Now we extend it also to that use case.

Also revisit how we pass the builder image hash around, so it's easier to
read.

This change also re-enables tagging and building builder images every
time.
Previously, we regressed into not tagging build images coming out of the
tree, with the unpleasant side effect of not being able to re-build the
same artifacts for those leaf packages.
2021-04-23 12:02:41 +02:00
Ettore Di Giacinto
c9aed37fa7 Fixup on values interpolation and metadata retrieve
- Fixup search path on metadata spec load. Previously we were reading
  the package being passed, and not the resolved one (it failed against
  selectors)
- Inherit push repositories first, so they take precedence over pull ones
- Add test cases to cover build values interpolation by remote
  repositories
- Enhance test cases to check image cache repository inheritance when
  --from-repositories is passed
- Fix race condition when inheriting buildspec options: instead of
  consuming the compiler's, annotate the updates in the package
  BuildOption spec which is passed along
- Update vendor
2021-04-23 12:02:19 +02:00
Ettore Di Giacinto
788b889d14 Enhance integration test to check invalid characters for docker images tags 2021-04-21 11:05:50 +02:00
Ettore Di Giacinto
ef92f23221 Inherit pullimages as pushimages while parsing compilespec metadata
Fixes #200
2021-04-19 17:18:37 +02:00
Ettore Di Giacinto
562fcc2421 Strip invalid chars from docker images also for metadata files
Fixes #199
2021-04-19 17:18:37 +02:00
Ettore Di Giacinto
54be45dcff Tag 0.13.1 2021-04-17 10:20:20 +02:00
Ettore Di Giacinto
413572a8e3 Adapt tests at new behavior change 2021-04-17 00:44:10 +02:00
Ettore Di Giacinto
ecc41ce370 Check if there are upgrades before attempting install 2021-04-16 23:38:10 +02:00
Ettore Di Giacinto
dd6501a642 Check if there are upgrades before attempting install 2021-04-16 22:28:30 +02:00
Ettore Di Giacinto
a7b355ed2f Tag 0.13.0 2021-04-16 21:33:32 +02:00
Ettore Di Giacinto
9f3e7fd0b2 PackageIndex is in repository metadata file only
This was a regression in these changesets: don't replicate the info, and
at the same time write the artifact path containing only the name of the
file (excluding the path)
2021-04-16 15:12:51 +02:00
Ettore Di Giacinto
ac3843e342 Add integration tests 2021-04-16 14:01:23 +02:00
Ettore Di Giacinto
612477718e Add values interpolation inheritance test 2021-04-16 14:01:23 +02:00
Ettore Di Giacinto
802b0b5201 Add integration test to build with no tree and interpolation values 2021-04-16 14:01:23 +02:00
Ettore Di Giacinto
c5587f9dfc Add simple integration test about remote-repo builds 2021-04-16 14:01:23 +02:00
Ettore Di Giacinto
c27d4d258e Allow create-repo to source trees from remote repositories
This makes it possible to create the repository from external ones.
It also required addressing #26.

Fixes #26
2021-04-16 14:01:13 +02:00
Ettore Di Giacinto
9202bcbbbe Convert raw value to templatedata before merging 2021-04-16 14:00:11 +02:00
Ettore Di Giacinto
fadd5a10a3 Load trees separately while reconstructing spectrees 2021-04-16 14:00:11 +02:00
Ettore Di Giacinto
d297b92483 Update vendor 2021-04-16 14:00:11 +02:00
Ettore Di Giacinto
7ba7add2a8 Allow to pull specfiles from published repositories
- Interpolates values from the repositories compilespec if present
- Automatically merge cache images coming from specified repository when
  necessary

Fixes #194
2021-04-16 13:58:51 +02:00
Ettore Di Giacinto
57c769b4a5 Refactor compiler and annotate buildoptions into compiler metadata
This allows to later pick up values used during build of each package
2021-04-16 13:57:54 +02:00
Ettore Di Giacinto
44cae094e8 Allow multiple build values files
Fixes #198
2021-04-16 13:56:51 +02:00
Ettore Di Giacinto
c022c75239 Write BuildTree as an additional repository file
Also split Docker repository generation a bit more for readability
2021-04-16 13:56:51 +02:00
Ettore Di Giacinto
3250d63072 Don't merge install and compilation DB. Use a temporary DB for generation 2021-04-16 13:56:51 +02:00
Ettore Di Giacinto
b8961be793 Add accessor to embed custom files to a repository while writing
Split up into methods so it's easier to add custom files
2021-04-16 13:56:45 +02:00
Ettore Di Giacinto
036b5c08c6 Split up repository generators 2021-04-08 15:24:32 +02:00
Ettore Di Giacinto
c7b79bf630 Compiler recipe now saves the entire tree 2021-04-08 15:22:00 +02:00
Ettore Di Giacinto
83cb6a2804 Check target with PathSeparator in finalizer.go 2021-04-08 10:22:24 +02:00
Ettore Di Giacinto
182afa315a Tag 0.12.0 2021-04-08 10:21:22 +02:00
Ettore Di Giacinto
ec19b34ca8 Allow to pull from multiple repositories
Adds a new cli flag to luet build, `--pull-repository`, which allows
passing a list of docker image references that are used to pull the
cache from

Fixes #185
Fixes #184
Closes #161
2021-04-08 09:17:54 +02:00
Daniele Rondina
88307b1912 Restore parsing of gentoo string with condition (#195)
* cmd/helpers: Permit to parse gentoo package str with condition

* Update vendor pkgs-checker @v0.8.1
2021-03-30 09:17:16 +02:00
Ettore Di Giacinto
a83be204e8 Tag 0.11.8 2021-03-18 13:53:04 +01:00
Ettore Di Giacinto
b8352a81a2 Use Lchown when copying bits, lower message to warn 2021-03-18 11:23:42 +01:00
Ettore Di Giacinto
ebf907fb45 Add owner permissions tests 2021-03-18 10:57:09 +01:00
Ettore Di Giacinto
f0a34f1cf0 Enable NoLchown
This caused file permissions to be dropped
2021-03-18 10:48:23 +01:00
Ettore Di Giacinto
4f1e4c0b41 Switch to containerd when unpacking layers 2021-03-18 10:47:58 +01:00
Ettore Di Giacinto
c736c002af build: Set privileged to true by default 2021-03-18 10:47:39 +01:00
Ettore Di Giacinto
2c32af2951 Tag 0.11.7 2021-03-16 15:47:37 +01:00
Ettore Di Giacinto
ce349ca1b7 Set the path relative to the artifact when generating docker repositories 2021-03-16 14:46:46 +01:00
Ettore Di Giacinto
662742851a Generate backend bus events in the backends 2021-03-16 14:46:28 +01:00
Ettore Di Giacinto
f8ef1c0889 Print error when failing downloading docker image 2021-03-16 11:34:36 +01:00
Ettore Di Giacinto
23513f2c75 Include files in search only if we have the artifact 2021-03-15 11:40:34 +01:00
Ettore Di Giacinto
268239a561 Propagate verify when serializing 2021-03-15 11:37:56 +01:00
Ettore Di Giacinto
f8989e464e Update vendor 2021-03-11 17:57:59 +01:00
Ettore Di Giacinto
0028dd3a92 Support content trust images and pull with authentication
Contact the notary server if ```--verify``` is specified (or `verify:
true` is enabled on the repo config) and verify if the image is signed,
use the returned value to pull the verified image.
2021-03-11 17:57:59 +01:00
Ettore Di Giacinto
caa1cfad5c Tag 0.11.6 2021-03-09 11:37:41 +01:00
Ettore Di Giacinto
39839edda9 Add util to rootcmd 2021-03-09 11:37:22 +01:00
Ettore Di Giacinto
ecd4be4ad3 Tag 0.11.5 2021-03-09 10:55:34 +01:00
Ettore Di Giacinto
675170939d Expose DownloadAndExtractDockerImage as a util
Create a util sub cmd to add all utils that are handy for development
and already present in the luet codebase. We expose in this case `luet
util unpack` to unpack a docker image without a docker daemon running.
2021-03-09 09:22:37 +01:00
Ettore Di Giacinto
0f1acac89b Tag 0.11.4 2021-03-07 12:38:37 +01:00
Ettore Di Giacinto
42f5210764 Add DownloadOnly option
It allows to download only the packages, without
installing/upgrading/replacing them

Fixes #179
2021-03-07 11:39:59 +01:00
Ettore Di Giacinto
9eda81667b Use common ImageID() when computing final images
It also adds tests and strips the invalid "+" character, which is not
supported in docker image tags
2021-03-07 11:01:08 +01:00
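
An illustrative sketch of sanitizing a computed ImageID into a valid Docker tag: characters outside [A-Za-z0-9_.-], such as "+", are replaced. The exact replacement rule luet applies may differ.

```go
package main

import (
	"fmt"
	"regexp"
)

var invalidTagChars = regexp.MustCompile(`[^A-Za-z0-9_.-]`)

// sanitizeTag replaces characters that are not allowed in a Docker image tag.
func sanitizeTag(tag string) string {
	return invalidTagChars.ReplaceAllString(tag, "-")
}

func main() {
	fmt.Println(sanitizeTag("1.0+r2")) // "+" is not valid in a tag -> "1.0-r2"
}
```
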
Ettore Di Giacinto
94f692266c Tag 0.11.3 2021-03-06 20:36:37 +01:00
Ettore Di Giacinto
c13a2174c4 Try to chown after copy 2021-03-06 18:49:14 +01:00
Ettore Di Giacinto
f07cf6c245 Add tests for file search 2021-03-06 18:49:14 +01:00
Ettore Di Giacinto
515017cabd ci: login with sudo -E 2021-03-06 16:52:20 +01:00
Ettore Di Giacinto
0bfb33db92 Allow to set db/rootfs from CLI in cleanup 2021-03-06 11:28:39 +01:00
Ettore Di Giacinto
c9c24dd174 Add integration test 2021-03-06 10:41:15 +01:00
Ettore Di Giacinto
194cfda8a4 Pass by the cli args to the underlying system struct
Previously we didn't set what we caught from the CLI. Note, this is a
temporary solution until we refactor the cli code

Fixes #186
2021-03-06 10:21:49 +01:00
Ettore Di Giacinto
233429bbeb Allow to search by file
Also make it possible to retrieve the artifact when searching for matches
in the repositories list. This made it possible to show the package list
when calling `luet search`.
2021-02-28 18:42:58 +01:00
Ettore Di Giacinto
d84f6b31fd Add database get command 2021-02-28 18:42:16 +01:00
Ettore Di Giacinto
98b01ce00b Tag 0.11.2 2021-02-22 14:46:36 +01:00
Ettore Di Giacinto
749a4cb615 Add --backend-args
Allow to add arguments to the backend build arguments

Fixes #146
2021-02-22 13:49:29 +01:00
Ettore Di Giacinto
57e19c61e7 Start spinner before pulling docker artifacts 2021-02-22 11:54:09 +01:00
Ettore Di Giacinto
5ee1e28b9c Display informative message when pulling docker artifacts 2021-02-22 11:50:52 +01:00
Ettore Di Giacinto
21bd76af9c Uncomplicate runCommand and return command output
Fixes #189
2021-02-22 11:44:46 +01:00
Ettore Di Giacinto
89bd7c2281 Tag 0.11.1 2021-02-17 13:58:21 +01:00
Ettore Di Giacinto
49d7efa9ea Update vendor 2021-02-17 13:01:36 +01:00
Ettore Di Giacinto
b3e3abec8f Fixup spinner data race
Add spinner lock
2021-02-17 13:01:32 +01:00
Ettore Di Giacinto
92e73051a0 Initialize cached in singleton 2021-02-17 09:34:54 +01:00
Ettore Di Giacinto
fd7405c2cc Add inmemory DB integration test 2021-02-15 18:53:56 +01:00
Ettore Di Giacinto
2448f3175e Initialize cache db if empty 2021-02-15 18:53:37 +01:00
Ettore Di Giacinto
101df40eec Merge pull request #183 from mudler/docker-debug-output
Add realtime output for building phase
2021-02-13 12:02:20 +01:00
Daniele Rondina
c22adb3a47 compiler: Move spinner at the low level 2021-02-13 09:28:54 +01:00
Ettore Di Giacinto
b93357e36c Tag 0.11.0 2021-02-10 09:07:02 +01:00
Ettore Di Giacinto
518fb16067 Add IsVirtual() to compile spec 2021-02-09 19:05:16 +01:00
Ettore Di Giacinto
4d9297e3da Be sure to copy exact folder structure when generating final images
'COPY *' has a different behavior than 'COPY .': when wildcards are
involved, COPY behaves differently, unpacking directory content to the
root.

Enhance the unit test to cover the scenario as well
2021-02-09 18:27:53 +01:00
Ettore Di Giacinto
544895e051 Handle empty packages when pushing final images
We used to create dockerfiles blindly assuming there is content, but
that's not the case for virtual packages.

Due to https://github.com/moby/moby/issues/38039 we are forced for a
"unpleasant" workaround, as we can't create empty FROM scratch images
and export them.
2021-02-09 18:27:53 +01:00
Ettore Di Giacinto
fd80bb526e Add DirectoryIsEmpty 2021-02-09 16:51:10 +01:00
Daniele Rondina
c1fe3278fa backend: Add realtime output on building phase
The realtime output can be configured through the
LUET_GENERAL__SHOW_BUILD_OUTPUT environment
variable, the related config option, or the
`--live-output` option.
2021-02-02 12:58:34 +01:00
Daniele Rondina
2854c68209 logger: Add ln option for writer log 2021-02-01 19:10:05 +01:00
Ettore Di Giacinto
505f07f056 Add asciinema to README 2021-02-01 12:25:31 +01:00
Ettore Di Giacinto
8bce3f1f00 Merge pull request #181 from mudler/trim_domain_name_from_cached_image_reference
Trim the Domain Name from cached image references
2021-01-29 16:25:50 +01:00
David Cassany
18e9ce4557 Trim the Domain Name from cached image references
This commit removes the Domain Name, if any, from the cached image
reference before computing the image fingerprint. This way the same
image, if stored in some other mirror, is still seen as the same one.

Fixes #158
2021-01-29 15:11:52 +01:00
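
A rough sketch (helper names illustrative) of the normalization described above: drop the registry domain before fingerprinting, so the same image pulled from a mirror maps to the same cache entry.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// stripDomain removes a leading registry host (a component containing "." or
// ":", or "localhost") from an image reference.
func stripDomain(ref string) string {
	parts := strings.SplitN(ref, "/", 2)
	if len(parts) == 2 && (strings.ContainsAny(parts[0], ".:") || parts[0] == "localhost") {
		return parts[1]
	}
	return ref
}

func fingerprint(ref string) string {
	return fmt.Sprintf("%x", sha256.Sum256([]byte(stripDomain(ref))))
}

func main() {
	same := fingerprint("quay.io/org/image:1.0") == fingerprint("mirror.example.com/org/image:1.0")
	fmt.Println(same) // true: both mirrors map to the same cached fingerprint
}
```
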
Daniele Rondina
9f73a334b3 cmd/tree/validate: Avoid nil pointer if solution doesn't contain the dependency 2021-01-25 19:13:33 +01:00
Ettore Di Giacinto
4eab1eb738 Tag bugfix release 0.10.2 2021-01-25 14:36:01 +01:00
Ettore Di Giacinto
685bbf46a6 Refactor common code while compiling regexes 2021-01-25 12:20:04 +01:00
Ettore Di Giacinto
d89225f37d Handle namedpipe copy
CopyFile relies on copy.Copy from https://github.com/otiai10/copy, which
doesn't handle copying named pipes. Handle it here until
https://github.com/otiai10/copy/issues/47 is fixed.

This causes luet to hang while copying packages that have named pipes in
them.

Also invert the compression argument for gzip; it causes slowness.
2021-01-25 12:20:04 +01:00
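
A minimal, Linux-only sketch of the special case described above: recreate a named pipe at the destination instead of copying its content (which would block). Not the exact code used in luet.

```go
package main

import (
	"fmt"
	"os"
	"syscall"
)

func copyPath(src, dst string) error {
	fi, err := os.Lstat(src)
	if err != nil {
		return err
	}
	if fi.Mode()&os.ModeNamedPipe != 0 {
		// Reading from a FIFO would hang; just recreate it with the same mode.
		return syscall.Mkfifo(dst, uint32(fi.Mode().Perm()))
	}
	// ... fall through to the regular file/dir copy for everything else.
	return fmt.Errorf("regular copy not implemented in this sketch")
}

func main() {
	if err := copyPath("/tmp/some-fifo", "/tmp/some-fifo-copy"); err != nil {
		fmt.Println(err)
	}
}
```
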
Ettore Di Giacinto
55d34a3b40 Adapt artifact tests after delta changes 2021-01-24 20:07:33 +01:00
Ettore Di Giacinto
85b5c96bdd Promote to info building image messages
The user wants to know what's going on in this case. Image builds can
also take a long time
2021-01-24 19:09:09 +01:00
Ettore Di Giacinto
6f5f400765 Don't bail out if image doesn't exist locally
The backend will figure out if we have the image or not, and will
attempt to pull it if not there.

Skip the retrieve integration test with img as it's not supported.
2021-01-24 19:05:21 +01:00
Ettore Di Giacinto
be87861657 img: pull image if not locally present while extracting 2021-01-24 13:17:11 +01:00
Ettore Di Giacinto
76e5d37895 ci: temporary disable final image tests with img 2021-01-24 13:06:23 +01:00
Ettore Di Giacinto
8aca246f51 ci: login to quay for integration tests 2021-01-24 12:57:15 +01:00
Ettore Di Giacinto
be7b56bae3 Split ImageDefinitionToTar test
ImageDefinitionToTar is not actually used by compiler code, but can
be handy from an API perspective, so we keep it.
2021-01-24 12:56:25 +01:00
Ettore Di Giacinto
eae2382764 unpackDelta needs a rootfs where to extract files from 2021-01-24 12:41:56 +01:00
Ettore Di Giacinto
76076c8f51 Run integration tests on img as well 2021-01-24 12:34:44 +01:00
Ettore Di Giacinto
7d11df3225 Simplify delta generation, and avoid two-pass with img backend
This changeset also drops --keep-exported-images, which is quite unused
and can be replaced with a plugin, or by manually exporting the
resulting images.
2021-01-24 12:27:07 +01:00
Ettore Di Giacinto
0ae8cbb877 Tag 0.10.1 2021-01-23 22:02:18 +01:00
Ettore Di Giacinto
b9f0ef1c55 Implement ImageExists in the img backend 2021-01-23 22:01:29 +01:00
Ettore Di Giacinto
8b4b249211 Tag 0.10.0 2021-01-22 21:21:59 +01:00
Niklas Engvall
23bc42bb15 Latest version, sudo check, autoinstall repos
Grab the latest version; if not found, get a default since luet auto-updates.
Check whether it is run as a normal user or as sudo/root.
Autoinstall repos with -y.
2021-01-22 21:19:02 +01:00
Ettore Di Giacinto
715ee1db08 Add unit test for creating docker repositories 2021-01-22 19:59:26 +01:00
Ettore Di Giacinto
d5f70aea26 Add test for Artifact GetUncompressedName() 2021-01-22 19:15:54 +01:00
Ettore Di Giacinto
3bbc2c4691 Add multiarch-build-small target to shrink release builds 2021-01-22 19:07:17 +01:00
Ettore Di Giacinto
c7e5c9b1fd Enhance create-repo CLI args help 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
7c507fe272 Set GHA workflow to access to testing docker image 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
a037bc545b Add comment on required permission to unpack 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
d2e6409451 Add docker client unit test 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
a2df02e1bf Add comments on repository files #159 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
f0b8e4556e Make warning messages less prominent 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
161b5f40f7 Add file/line/function to debug messages 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
485b8d8c89 Add integration test for docker repository types 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
cad1deb2c6 Allow to run single integration tests 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
928c305ff1 Respect metadata fields about the tree filename
This caused what was explicitly asked during repo creation to always be
ignored
2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
a192d48610 Reword CLI help for --force-push 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
a6e7a3059c Respect artifact extension when populating cache
This caused the cache to not hit correctly
2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
9a2ff0a3e2 Don't cleanup images from system during integration tests
Should fix #167
2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
9b2b877a53 Avoid to build images if already present 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
75fad993f3 Fixup repository revision bump 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
4b4c3a2e14 Adapt tests to new constructor changes 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
c24a3a35f1 Update gomod and vendor 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
163f93067c Temporarly rework genuinetools/img code for pull/unpack without Docker 2021-01-22 16:55:51 +01:00
Ettore Di Giacinto
91ea2ed99f Make docker image repositories actually working
Several changes are included:
- Expose ensureDir in helpers, and call it in the Docker client. In
  other implementations that was handled by CopyFile behind the scenes,
  but that's not the case here
- Create accessor in Artifact to create Artifact objects from files.
  This is handy when we have to carry over downloaded package content
  into caches when artifacts are already verified
- Fix various issues around the imagePush flag, so now trees are pushed
  forcefully each time
- Take into consideration the real artifact name when pushing single
  files in the docker image. This behavior should be changed eventually,
  because single files which aren't repository packages are now in their
  own docker images, but we should have just one that brings the required
  metadata all together.
2021-01-22 16:54:19 +01:00
Ettore Di Giacinto
b27b146b45 Refactor artifact Verify() 2021-01-22 16:54:19 +01:00
Ettore Di Giacinto
7b25a54653 Update gomod and vendor 2021-01-22 16:54:19 +01:00
Ettore Di Giacinto
dbd37afced Add docker client #169 2021-01-22 16:54:19 +01:00
Ettore Di Giacinto
2f459c0469 Use only one docker image reference to push repository.
Instead of generating different images, which are harder to track and
clean, we generate a single image with various tags, corresponding to
the packages available in the repositories.

Tagging, and pushing separate images will be possible with the plugin
mechanism
2021-01-22 16:54:19 +01:00
Ettore Di Giacinto
ad455edafc Allow pushing images in create-repo
Also add the --force flag to allow image overwrite

Related to #169
2021-01-22 16:54:19 +01:00
Ettore Di Giacinto
d9286a1a1e Download repository metadata with client DownloadFile, uniform downloads for Docker repositories 2021-01-22 16:54:19 +01:00
Ettore Di Giacinto
322fe72ef2 Generate repository metadata and packages for docker repository type
Drop image-repository on create-repo. In case of a docker repository, --output is the image reference to use.
Also restore default output build dir.

See also: #169
2021-01-22 16:53:52 +01:00
Ettore Di Giacinto
88b5576611 Expect full image name to GenerateFinalImage
We will re-use this method also when generating repository metadata
2021-01-18 12:26:22 +01:00
Ettore Di Giacinto
a1f4c28973 Add GenerateFinalImage to package artifacts
GenerateFinalImage generates a docker image from scratch with the
artifact content.

Related to #169
2021-01-18 12:08:47 +01:00
Ettore Di Giacinto
43b0b11028 Define a build context for backends 2021-01-18 11:06:54 +01:00
Ettore Di Giacinto
429e9757db Select cwd as default tree path for commands 2021-01-18 10:40:41 +01:00
Ettore Di Giacinto
3d086c9b17 Merge pull request #172 from trappiz/patch-2
Update version, utilize curl fully instead of mixing.
2021-01-16 20:26:08 +01:00
Niklas Engvall
bb610dff49 Update version, utilize curl fully instead of mixing. 2021-01-16 19:55:46 +01:00
Ettore Di Giacinto
21d8ce6050 Add link to gophersat 2021-01-12 15:54:51 +01:00
Ettore Di Giacinto
f8c64c38d3 Link to building strategies in README 2021-01-12 15:54:12 +01:00
Ettore Di Giacinto
e219caf720 Fixup broken link in README 2021-01-12 15:53:08 +01:00
Ettore Di Giacinto
ecae2873d6 zstd extension suffix is zst, not zstd
Fixes #163
2021-01-12 15:52:34 +01:00
Ettore Di Giacinto
48f17dbc7a Tag 0.9.26 2021-01-12 11:25:05 +01:00
Ettore Di Giacinto
bd3e483f0f Be sure to cache packages with the same fingerprint only once
This makes sure that we are also fast at processing invalid trees
2021-01-12 10:30:31 +01:00
Ettore Di Giacinto
40fc948c6e Stabilize tests after changes
With BuildWorld() we get more results back (now we return the whole
model, including the false assertions).

Besides, solving with BuildWorld() now detects an invalid case:
when we supply a provided, the definitionDB shouldn't explicitly also supply
the package that has to be provided. This would 'shadow' packages
between repositories.

The test was invalid before, and shouldn't have contained A1. Moved the
test to Pending to inspect it further in subsequent dev iterations
2021-01-11 23:35:18 +01:00
Ettore Di Giacinto
186fa58ab0 Use BuildWorld() instead of BuildPartialWorld() in solver.Solve
We now have a stronger cache system and we also pre-compute RevDeps in
a hashmap, which now makes BuildWorld() much more performant.
2021-01-11 20:11:51 +01:00
Ettore Di Giacinto
7e04ad67f6 Merge branch 'master' into develop 2021-01-08 17:43:25 +01:00
Ettore Di Giacinto
6e7ec890ca Update issue templates 2021-01-08 17:42:35 +01:00
Ettore Di Giacinto
7390623e40 Don't warn the user about accepting a license in case of uninstall
Closes #160
2021-01-07 10:36:57 +01:00
Ettore Di Giacinto
bd0d2765aa Mark executed finalizers at the beginning
Don't retry failing finalizers, but mark them as executed right away
2021-01-04 17:04:20 +01:00
Ettore Di Giacinto
ddd61f769c Tag 0.9.25 2021-01-04 00:19:14 +01:00
Ettore Di Giacinto
1dd91b06bd Cleanup docker images on test teardown 2021-01-03 23:56:59 +01:00
Ettore Di Giacinto
11df314a26 Avoid clashing fixture version 2021-01-03 23:56:49 +01:00
Ettore Di Giacinto
2cbf547873 Add new fixtures 2021-01-03 23:37:34 +01:00
Ettore Di Giacinto
43f5b69c18 Let the build fail when depending on virtuals
This is currently not a valid use case. Virtuals are empty packages and
if the `build.yaml` is completely empty, nothing could depend on them.

Let's try not to be too smart: build the package image if a source
image is supplied, and fail hard when we depend on a virtual at build
time.
2021-01-03 23:03:01 +01:00
Ettore Di Giacinto
1fdef757b6 Adapt other bunch of fixtures to changes 2021-01-03 22:22:32 +01:00
Ettore Di Giacinto
6c27af18c8 Adapt fixture to changes 2021-01-03 21:40:35 +01:00
Ettore Di Giacinto
f57f0f9588 Adapt complex selection fixtures to new changes
We don't generate images anymore if packages are empty - those are now
virtuals which just generate empty artifacts.

Virtuals are not meant to be required by other packages at build time,
because it would violate the virtual packages' purpose (they are only
useful at runtime).

This test was used to verify version selection of the best match during build
time, not to actually test any build process. Inject steps so images are
actually generated, and they can depend on each other.
2021-01-03 20:50:32 +01:00
Ettore Di Giacinto
457acd0d8a Add virtual packages support 2021-01-03 20:08:04 +01:00
Ettore Di Giacinto
f2ba9e02d7 Tag 0.9.24 2021-01-02 22:53:08 +01:00
Ettore Di Giacinto
a81d0bc3a3 Build assertions when swapping
When we are swapping packages, we do not run the solver to gather things
to install, but we trust the given list when calling computeInstall. In this case, the assertion
returned by computeInstall is empty, as we force l.Options.NoDeps.

This change generates the assertion list while calling computeSwap so
it's available later when we call ExecuteFinalizer.
2021-01-02 21:28:54 +01:00
Ettore Di Giacinto
45e8553d26 Tag 0.9.23 2020-12-30 02:51:19 +01:00
Ettore Di Giacinto
bb48326039 Adapt solver tests after changes 2020-12-30 02:05:55 +01:00
Ettore Di Giacinto
dce8b52293 Use Conflicts() which already lists revdeps on failure 2020-12-30 01:17:31 +01:00
Ettore Di Giacinto
0652fce55e Update revdeps table while populating Cache
When we cycle, we don't necessarily have all the packages in the DB
yet.

With this change, luet annotates the reverse dependency without any version, and we try to
update the revdeps table when new items get added, by checking the version
required in the selector.

Thanks to @joostruis for noticing the issue
2020-12-30 01:12:35 +01:00
Ettore Di Giacinto
38c9540a1d Use DB copy in GetRevdeps in BoltDB 2020-12-30 01:12:09 +01:00
Ettore Di Giacinto
90278a034b Use ConflictsWith to check conflicts when uninstalling packages 2020-12-29 23:43:39 +01:00
Ettore Di Giacinto
55ab1894e9 Add unit test for Uninstall in Installer 2020-12-29 22:58:03 +01:00
Ettore Di Giacinto
ddebe66859 Merge pull request #157 from trappiz/patch-1
Set import name for zstd
2020-12-29 22:25:14 +01:00
Niklas Engvall
bfbcb81210 Set import name for zstd 2020-12-29 22:22:53 +01:00
Ettore Di Giacinto
062e75bc25 Add unit test for Uninstall without full 2020-12-29 22:13:26 +01:00
Ettore Di Giacinto
498edc95c8 Tag 0.9.22 2020-12-27 21:17:22 +01:00
Ettore Di Giacinto
b81ce66914 Reduce download verbosity 2020-12-27 20:21:05 +01:00
Ettore Di Giacinto
68030baf98 Revert "Dockerfile: Initialize /tmp dir"
There is no mkdir nor sh in an image built from scratch

This reverts commit 981fe5b04a.
2020-12-26 18:29:48 +01:00
Ettore Di Giacinto
9e868b69fc Update vendor 2020-12-25 10:35:18 +01:00
Ettore Di Giacinto
f871111e50 Collect errors from finalizer runs
Instead of failing and depending on the --force flag, always execute
finalizers and collect errors to determine whether the install was
successful or not
2020-12-25 10:35:09 +01:00
Daniele Rondina
981fe5b04a Dockerfile: Initialize /tmp dir 2020-12-21 18:09:22 +01:00
Ettore Di Giacinto
8371d7aa7b Tag 0.9.21 2020-12-19 18:23:08 +01:00
Ettore Di Giacinto
736c9470cf Add db copy and clone 2020-12-19 17:45:50 +01:00
Ettore Di Giacinto
e52bc4f2b2 Refactor: get systemdb from config, which knows which one to load 2020-12-19 17:23:59 +01:00
Ettore Di Giacinto
96e877fc0b Allow uninstall to take multiple packages
And treat those as a list, instead of handling each one individually
2020-12-19 17:16:58 +01:00
Ettore Di Giacinto
1331c3551a Renaming clashing test func 2020-12-19 16:23:30 +01:00
Ettore Di Giacinto
bfb2bdc230 Add test for replace --nodeps 2020-12-19 15:44:42 +01:00
Ettore Di Giacinto
525bfb5ebf Respect --nodeps when calling Swap from the public interface 2020-12-19 15:26:18 +01:00
Ettore Di Giacinto
f4e2f32aff Return candidate not found when appropriate 2020-12-19 14:57:42 +01:00
Ettore Di Giacinto
7cf650a8f6 Break Swap in computeSwap() and display uninstall dialog only when asked 2020-12-19 14:55:59 +01:00
Ettore Di Giacinto
2b6fe2baa1 Add luet build --wait
It allows waiting for intermediate images to be available instead of
building all of them
2020-12-18 23:19:18 +01:00
Ettore Di Giacinto
34bba0319b Tag 0.9.20 2020-12-18 21:50:10 +01:00
Ettore Di Giacinto
71c06913aa Update vendor 2020-12-18 21:24:12 +01:00
Ettore Di Giacinto
f0fae82ad9 Add experimental zstd support
Closes #97
2020-12-18 21:24:01 +01:00
Ettore Di Giacinto
93f5f5d0b2 Add integration tests for luet replace and luet build --only-target-package 2020-12-18 20:52:07 +01:00
Ettore Di Giacinto
1c9b821058 Drop unneeded if 2020-12-18 00:50:20 +01:00
Ettore Di Giacinto
0e21548bc0 Lookup uninstall and Install in installer.Swap
In this way we resolve selectors from user inputs
2020-12-18 00:49:51 +01:00
Ettore Di Giacinto
39c8895f80 Return provided if selector isn't a range in boltdb 2020-12-17 23:17:40 +01:00
Ettore Di Giacinto
720441be4c Update repository-index url 2020-12-17 21:55:01 +01:00
Ettore Di Giacinto
5082749e90 Tag 0.9.19 2020-12-17 21:09:53 +01:00
Ettore Di Giacinto
ce169f49af If provided isn't a selector, it means we don't have to return a range
Also add more tests about provides
2020-12-16 22:17:34 +01:00
Ettore Di Giacinto
921869d04c Tag bugfix release 0.9.18 2020-12-15 17:45:33 +01:00
Ettore Di Giacinto
8e1a457bf1 Check if we have to pull images before generating delta
As we might skip building entirely, it's possible that the image is not
there yet, so we check whether we have to pull it or not
2020-12-15 17:01:56 +01:00
Ettore Di Giacinto
d5cfadfded Tag 0.9.17 2020-12-14 20:19:17 +01:00
Ettore Di Giacinto
193f6872a0 Update vendor 2020-12-14 19:20:35 +01:00
Ettore Di Giacinto
0b9b3c0488 Drop --clean from integration tests 2020-12-14 18:58:45 +01:00
Ettore Di Giacinto
70f05f41e8 Check only if package image exists
We don't need to look for the builder image as it's optional. In this
way we can also reduce the compiler options, as we don't require a
--clean flag anymore. --only-target-package is sufficient to determine
what we can skip and how.
2020-12-14 18:41:39 +01:00
Ettore Di Giacinto
ef034d87b0 Detect if images are available if we don't have to generate a Package
While building, if we aren't doing a clean build, we now scan to see if
images are available and we skip, in case we don't already find the
metadata.
2020-12-14 18:32:32 +01:00
Ettore Di Giacinto
6a86bf3f04 Tag 0.9.16 2020-12-12 16:05:18 +01:00
Ettore Di Giacinto
265e2371b4 Add ArtifactNode to test, now we get the gen Dockerfile in the diff 2020-12-12 16:04:54 +01:00
Daniele Rondina
78442c91fc events: Review description of build_artifact events 2020-12-12 16:03:40 +01:00
Ettore Di Giacinto
d97e606a31 Adapt fixtures and tests 2020-12-12 12:10:24 +01:00
Ettore Di Giacinto
95da20e366 Context files are immutable 2020-12-12 11:55:25 +01:00
Ettore Di Giacinto
797a34ba49 Reuse same dockerfile gen logic between prelude and steps
As we now build only when necessary, we need to make sure the images are
built similarly. The discrepancies between the two are fewer now, and
they can share the same logic.

This fixes a regression where, when no prelude is defined, the build
context isn't copied over
2020-12-12 11:16:34 +01:00
Ettore Di Giacinto
fdba8dea71 Tag 0.9.15 2020-12-11 23:04:14 +01:00
Ettore Di Giacinto
a1453b7242 Fixup error messages 2020-12-11 23:03:56 +01:00
Ettore Di Giacinto
7d4b612173 Update README 2020-12-11 23:03:45 +01:00
Ettore Di Giacinto
9eef7e5c6d Clean up if condition 2020-12-09 22:58:33 +01:00
Ettore Di Giacinto
332824fd42 Fail in the downloader goroutine and don't skip errors with force 2020-12-09 22:56:55 +01:00
Ettore Di Giacinto
737fbdbdc1 Don't make artifact checksum skippable 2020-12-09 21:31:07 +01:00
Ettore Di Giacinto
f109bab2b4 Enable PreserveSystemEssentialData on Upgrade/Uninstall 2020-12-09 21:07:50 +01:00
Ettore Di Giacinto
d472dee19b Tag 0.9.14 2020-12-09 19:33:11 +01:00
Ettore Di Giacinto
b5990b5333 Generate changes from CompilerBackendOptions and pass by image name so img can unpack images 2020-12-09 00:27:37 +01:00
Ettore Di Giacinto
767488327b Update README 2020-12-08 17:48:21 +01:00
Ettore Di Giacinto
be3933998b Update links in README 2020-12-08 17:47:33 +01:00
Ettore Di Giacinto
fa9dc9da53 Tag 0.9.13 2020-12-08 15:49:40 +01:00
Ettore Di Giacinto
9911888d18 Stabilize test 2020-12-08 14:56:51 +01:00
Ettore Di Giacinto
2906180c43 Upgrade universe test expectations are changed 2020-12-08 14:15:52 +01:00
Ettore Di Giacinto
cf5e4e1305 Detect removed also when availables aren't found 2020-12-08 12:28:20 +01:00
Ettore Di Giacinto
519586f6bc Search for removed in Def DB 2020-12-08 12:07:28 +01:00
Ettore Di Giacinto
6dbc422b8f Apply solver change to UpgradeUniverse also to the parallel variant and adapt tests
Similarly, we just want to consider what is being uninstalled and the
new rules of the package that is going to be upgraded
2020-12-08 11:43:38 +01:00
Ettore Di Giacinto
a3cfebf438 Create BuildFormula from installed with InstallDatabase
Instead of using the DefinitionDB, which supposedly contains only the
relations present in the online repositories. In this way the solver is
more consistent and tries to solve with only the internal definitions.

This also fixes quirks with luet upgrade --universe
2020-12-08 10:58:08 +01:00
Ettore Di Giacinto
24201b25ef Apply solver change also to the parallel variant 2020-12-08 10:41:03 +01:00
Ettore Di Giacinto
7c53296530 Adapt tests 2020-12-08 10:39:15 +01:00
Ettore Di Giacinto
a3cb0ed17f When attempting to uninstall, do it from the internal db so it can resolve the current versions 2020-12-08 02:04:54 +01:00
Ettore Di Giacinto
9ca5d24856 Tag 0.9.12 2020-12-07 20:18:49 +01:00
Ettore Di Giacinto
9a34296be0 Build step is always required for tagging images 2020-12-07 19:39:56 +01:00
Ettore Di Giacinto
ebd18ae22c Set builderTagged image afterwards 2020-12-07 18:58:14 +01:00
Ettore Di Giacinto
7f10a19be5 Don't hide build output 2020-12-07 18:56:39 +01:00
Ettore Di Giacinto
6bf7368993 Don't replace buildertaggedImage if there aren't build steps 2020-12-07 18:39:15 +01:00
Ettore Di Giacinto
338f310d67 Tag and push an image when virtual is supplied, to have a track of it in the image graph tree 2020-12-07 17:59:30 +01:00
Ettore Di Giacinto
3fd1bdbfc8 ADD automatically extracts as well 2020-12-07 17:21:06 +01:00
Ettore Di Giacinto
59d78c3f5c While upgrading always use nodeps while computing uninstall 2020-12-07 17:20:55 +01:00
Ettore Di Giacinto
86c256a062 Generate empty tar 2020-12-07 17:20:32 +01:00
Ettore Di Giacinto
876e3659fb Turn full off by default on upgrade 2020-12-07 00:48:28 +01:00
Ettore Di Giacinto
3c0dd2b71d Adapt test 2020-12-07 00:07:57 +01:00
Ettore Di Giacinto
e9b4d66a3e Retrieve should be rendered also for step images 2020-12-07 00:00:32 +01:00
Ettore Di Giacinto
5047316b70 Try to build only when strictly necessary 2020-12-06 23:50:51 +01:00
Ettore Di Giacinto
02edc10c58 Tag 0.9.11 2020-12-06 22:52:15 +01:00
Ettore Di Giacinto
d479ada402 Don't consider deps while uninstalling during package Swap
Besides being forced, it also doesn't need to look deep into the deps, as
we have already precalculated those
2020-12-06 22:48:48 +01:00
Ettore Di Giacinto
7b800c9a20 Pre-compute swap step
Otherwise, while upgrading, it could happen that package dependencies
aren't downloaded beforehand, and they would just be installed in the middle
of installation, after removal has already happened.
2020-12-06 22:11:17 +01:00
Ettore Di Giacinto
18e6e085d5 Sort correctly also subfolders 2020-12-05 23:17:05 +01:00
Ettore Di Giacinto
6d19f8d2cc Tag 0.9.10 2020-12-03 21:02:57 +01:00
Ettore Di Giacinto
67c43eb936 Don't bail out if package is installed and we have a list 2020-12-03 20:03:37 +01:00
Ettore Di Giacinto
cf80e5fc09 Resolvers might omit packages 2020-12-03 18:53:57 +01:00
Ettore Di Giacinto
d668d8344b Accept selectors on uninstall and fixup failure logic 2020-12-03 18:32:24 +01:00
Ettore Di Giacinto
b17ac447f1 Display matched packages only, and check if they are available 2020-12-03 17:25:29 +01:00
Ettore Di Giacinto
c8bcd88f1f Add command usage in CLI
Add Long description for missing commands along with practical examples
2020-12-02 23:15:23 +01:00
Ettore Di Giacinto
034fb54c25 Update README 2020-12-02 21:18:21 +01:00
Ettore Di Giacinto
6dbf19f085 Use single image to build packages 2020-12-02 21:18:12 +01:00
Ettore Di Giacinto
43db64c089 Tag 0.9.9 2020-12-02 19:12:43 +01:00
Ettore Di Giacinto
9423b7c1e3 Add image build events, and add luet replace
Enhance also some commands descriptions
2020-12-02 18:24:35 +01:00
Ettore Di Giacinto
75dbc2dcb4 Adapt integration tests 2020-11-29 13:56:58 +01:00
Ettore Di Giacinto
f3e2e0a184 Add CLI tests 2020-11-29 11:51:27 +01:00
Ettore Di Giacinto
8237506bd3 Accept specific versions in cli input and avoid gentoo parser by default
This is a breaking change as it changes the way packages can be given as
arguments to luet.

From this change, the following applies (see the sketch below):

- If a package string contains @, the right part is parsed as the version
  (e.g. foo/bar@1.1)
- If a package contains "/" and no "@", cat/name is applied (e.g.
  foo/bar)
- If a package contains neither, it is implied it's just a name
  without a category
- If a package contains "=" at the beginning, the gentoo parsing default
  is used (e.g. =foo/bar-1.1)

Fixes #154
2020-11-29 11:48:49 +01:00
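A minimal sketch of the four argument forms described in the commit above; the package names are hypothetical and `install` is used only as an illustrative subcommand:

$> luet install foo/bar@1.1    # "@" splits name and version
$> luet install foo/bar        # "/" without "@": category/name
$> luet install bar            # neither: plain name without a category
$> luet install =foo/bar-1.1   # leading "=": gentoo-style parsing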
Ettore Di Giacinto
9784d6192a Don't hide error on pulling image 2020-11-28 18:03:43 +01:00
Ettore Di Giacinto
87004c8e78 Tag 0.9.8 2020-11-28 16:29:38 +01:00
Ettore Di Giacinto
0fe30ddcfd Add ability to interpolate during build
Now build takes a --values argument, which is a YAML file that can be
used to interpolate the specs that are going to be compiled (see the
sketch below).
2020-11-28 15:47:29 +01:00
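A hedged sketch of the --values flag named above; the values file and the target package are placeholders:

$> luet build --values values.yaml foo/bar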
Ettore Di Giacinto
44d33eceba Set workdir also on step image
Otherwise with DOCKER_SQUASH=true it wouldn't be consistent about where to
find the package files
2020-11-28 12:07:07 +01:00
Ettore Di Giacinto
ca994b07ab Tag 0.9.7 2020-11-28 00:34:46 +01:00
Ettore Di Giacinto
8ce135fe12 Add DOCKER_SQUASH 2020-11-27 23:38:31 +01:00
Ettore Di Giacinto
18d9366bca Minor fixes 2020-11-24 18:27:49 +01:00
Ettore Di Giacinto
c0206e5849 Tag 0.9.6 2020-11-23 20:18:42 +01:00
Ettore Di Giacinto
9fab46aa9e Add also description 2020-11-23 19:15:54 +01:00
Ettore Di Giacinto
5b54aeb822 Update vendor 2020-11-23 19:14:07 +01:00
Ettore Di Giacinto
7a10ff2742 Enhance search output with tables and alias to '.' when no args are specified 2020-11-23 19:13:54 +01:00
Ettore Di Giacinto
db1b190fb5 Minor fixup and cleanups around the new prompt feature 2020-11-23 18:20:30 +01:00
Ettore Di Giacinto
b349665ff2 Add user prompts
Fixes #106
2020-11-22 23:43:29 +01:00
Ettore Di Giacinto
3959cfd623 Tag 0.9.5 2020-11-20 19:02:54 +01:00
Ettore Di Giacinto
53ab0e0dd2 Merge pull request #151 from mudler/download-progress-bar
Download progress bar
2020-11-20 19:00:25 +01:00
Daniele Rondina
651ea17548 Update vendor/ (progress bar deps) 2020-11-20 18:16:49 +01:00
Daniele Rondina
60d5c9dfd5 Add download progress bar 2020-11-20 18:12:23 +01:00
Ettore Di Giacinto
1f807f369a Move revdeps computation to db 2020-11-20 17:23:21 +01:00
Ettore Di Giacinto
4e1b006a08 Cleanup vendor 2020-11-19 18:53:08 +01:00
Ettore Di Giacinto
47f0049efa Tag 0.9.4 2020-11-19 18:52:22 +01:00
Ettore Di Giacinto
0cc2b72831 Drop converter code, will be in a separate extension 2020-11-19 18:10:16 +01:00
Ettore Di Giacinto
f2df3faee5 Now Uninstall takes multiple packages 2020-11-19 18:05:27 +01:00
Daniele Rondina
287098f101 Update vendor github.com/cavaliercoder/grab 2020-11-19 00:56:59 +01:00
Daniele Rondina
f9a7113ab9 client/http: Add experimental download info 2020-11-19 00:56:28 +01:00
Ettore Di Giacinto
c3559d952c Tag 0.9.3 2020-11-15 13:38:30 +01:00
Ettore Di Giacinto
fc863fc8e5 Add collections integration test 2020-11-15 13:22:21 +01:00
Ettore Di Giacinto
ac149e9336 Use candidate for search, as doesn't have a selector 2020-11-15 11:47:32 +01:00
Ettore Di Giacinto
b9c8e50e42 Allow defining multiple templated packages with collections
Collections, similarly to packages, have a `build.yaml` and
a `finalize.yaml` that are templated for each package.
They have a `collection.yaml` containing a list of
packages that are part of the tree.
2020-11-15 00:13:46 +01:00
Ettore Di Giacinto
cf7df00a65 Add luet tree images command to show images tree 2020-11-14 14:51:11 +01:00
Daniele Rondina
83f924da35 spectools: Add DefaultPackageSanitized.Clone() 2020-11-14 12:42:49 +01:00
Ettore Di Giacinto
c82d23f9f2 Update go-pluggable 2020-11-13 19:50:10 +01:00
Ettore Di Giacinto
0e46e763d5 Move bus implementation to a separate repo, hook to events in luet 2020-11-13 18:25:44 +01:00
Ettore Di Giacinto
a793b44e83 Wip 2020-11-12 23:21:10 +01:00
Ettore Di Giacinto
19e6054574 Tag 0.9.2 2020-11-10 20:20:27 +01:00
Ettore Di Giacinto
a8624fe451 Move image removal in compileWithImage and further cleanup 2020-11-10 18:48:39 +01:00
Ettore Di Giacinto
14c1d6ef24 Refactor and optimize build process 2020-11-10 18:14:18 +01:00
Ettore Di Giacinto
36c58307e2 Don't export unless needed 2020-11-10 16:57:24 +01:00
Ettore Di Giacinto
665261e526 Tag 0.9.1 2020-11-09 19:42:34 +01:00
Ettore Di Giacinto
794c5984a2 Add pack command 2020-11-09 18:16:22 +01:00
Ettore Di Giacinto
a765147c1d Add templated finalizers 2020-11-08 21:14:19 +01:00
Ettore Di Giacinto
088adf6f3a Tag 0.9 2020-11-08 18:25:59 +01:00
Ettore Di Giacinto
cead09fb9f Merge pull request #148 from mudler/respect_rootfs4conf
Respect rootfs path for configs and url
2020-11-08 18:25:29 +01:00
Daniele Rondina
9a1787ddaf client/local: Handle config_from_host on DownloadFile 2020-11-08 17:06:05 +01:00
Ettore Di Giacinto
b1316b50b4 Add excludes tests 2020-11-08 16:02:11 +01:00
Ettore Di Giacinto
d92ee9e1d9 Add preliminar support for excludes 2020-11-08 15:35:24 +01:00
Ettore Di Giacinto
e7b58eec41 Use sane default for installer script 2020-11-08 14:33:34 +01:00
Ettore Di Giacinto
6a1b64acea Order files before uninstall
Fixes #149
2020-11-08 12:36:41 +01:00
Ettore Di Giacinto
df14fe60fc Tag 0.8.15 2020-11-08 11:07:33 +01:00
Ettore Di Giacinto
459eb01a59 Don't write err to stdout if not present 2020-11-08 10:02:00 +01:00
Daniele Rondina
e6c597c7d3 test-integration/12_config_protect.sh: Use repo url related with rootfs path 2020-11-08 00:05:06 +01:00
Daniele Rondina
e70cdbaaf7 Respect rootfs on repositories urls 2020-11-08 00:00:15 +01:00
Daniele Rondina
eea9dad2c6 tests/integration: Add option config_from_host 2020-11-07 19:14:44 +01:00
Daniele Rondina
513f441bb3 Add option config_from_host 2020-11-07 18:56:25 +01:00
Daniele Rondina
ebe7466fdc Respect rootfs path for load config 2020-11-07 18:28:23 +01:00
Ettore Di Giacinto
76328176c1 Tag 0.8.14 2020-11-07 12:29:07 +01:00
Ettore Di Giacinto
46ed6423ad Merge pull request #147 from mudler/fix-protect-uninstall
Fix protect uninstall
2020-11-07 12:28:24 +01:00
Daniele Rondina
d5df40512b installer: Improve message for protected files 2020-11-07 12:27:18 +01:00
Daniele Rondina
d219a2e0fb Run travis task with/without buildkit 2020-11-07 11:41:44 +01:00
Daniele Rondina
4048138dcb Add test suite for ConfigProtect 2020-11-07 11:39:31 +01:00
Daniele Rondina
e5f44eee09 ConfigProtect: support annotation without initial / 2020-11-07 11:39:13 +01:00
Daniele Rondina
6819a28f07 Add support to DOCKER_BUILDKIT on test 2020-11-07 11:37:58 +01:00
Daniele Rondina
24eb6eaef5 Fix test with docker buildkit 2020-11-07 11:37:58 +01:00
Daniele Rondina
58c4866289 .travis.yml: Enable Docker buildkit 2020-11-07 11:37:58 +01:00
Daniele Rondina
c72565e019 Integrate tests for config protects with uninstall 2020-11-06 23:30:37 +01:00
Daniele Rondina
0f59c207b0 Load config protect files on upgrade/uninstall 2020-11-06 23:30:08 +01:00
Daniele Rondina
68bc8d4d27 ConfigProtect: Permit to obtain the list of files without initial / 2020-11-06 23:29:08 +01:00
Daniele Rondina
b24d335538 GetProtectFiles() is used also for tree tarball without specs 2020-11-06 23:00:37 +01:00
Ettore Di Giacinto
dcc5aae3cd Tag 0.8.13 2020-11-06 22:25:26 +01:00
Ettore Di Giacinto
99bf9e291d Use LStat and attempt removing before bailing out on first failure 2020-11-06 21:34:56 +01:00
Daniele Rondina
51417ecb5d pkg/compiler/artifact.go: permit to support config protect with only annotation 2020-11-06 20:23:46 +01:00
Daniele Rondina
130eb8de1a Integrate config protection on uninstall too 2020-11-06 20:14:25 +01:00
Daniele Rondina
f1604c3b6f contrib: Add get_luet_root.sh script 2020-11-06 07:46:00 +01:00
Ettore Di Giacinto
5b5735266a Calculate provides for parallel solver too 2020-11-05 21:00:24 +01:00
Ettore Di Giacinto
984366d3a5 Consider provides during upgrades 2020-11-05 20:52:02 +01:00
Ettore Di Giacinto
55ec38ffc7 Tag 0.8.12 2020-11-03 20:02:44 +01:00
Ettore Di Giacinto
9aa352dec8 Add json output to build 2020-11-03 18:06:56 +01:00
Ettore Di Giacinto
d7a04465fd update vendor/ 2020-11-03 17:21:32 +01:00
Ettore Di Giacinto
25f69d4f1c Bump topsort 2020-11-03 17:20:52 +01:00
Ettore Di Giacinto
102a788c91 Revert "Revert "Stabilize ordering graph""
This reverts commit 2b23016a51.
2020-11-02 15:43:35 +01:00
Ettore Di Giacinto
2b23016a51 Revert "Stabilize ordering graph"
This reverts commit 940f553e1c.
2020-11-02 15:43:15 +01:00
Ettore Di Giacinto
940f553e1c Stabilize ordering graph
In this way when we order, we always return the same solution order in
case there are weak deps.

The following is optional - it doesn't change the "correctness" of the
solver results: We add an extra edge between deps that
share common dependencies. This makes the link stronger and
balances the graph so it doesn't show different results for the same query, as they
could otherwise be shuffled since they don't have a direct connection.
2020-11-02 14:30:41 +01:00
Ettore Di Giacinto
c3ef549673 Warn user only when required when uninstalling directories 2020-10-31 11:56:03 +01:00
Ettore Di Giacinto
0e764e525e Filter packages to install instead of looping solver result 2020-10-31 01:25:18 +01:00
Ettore Di Giacinto
f401e2b37f Add install benchmark test for solver 2020-10-30 22:20:08 +01:00
Ettore Di Giacinto
2b67b8dd24 Bump version 2020-10-30 19:15:10 +01:00
Ettore Di Giacinto
91dfb8ce3a Enhance CLI output 2020-10-30 19:15:04 +01:00
Ettore Di Giacinto
f6a4b634c1 Don't always walk all World() packages
With this change the solver during install now considers only the part
of the tree which is required to calculate the solution; it no longer
considers World() as the search space.

The search space is now narrowed down to the packages that are related to
the one which we are considering.

In this subset of changes we are also optimizing the Parallel solver by
avoiding a useless loop.

This change boosts overall performance on large datasets which don't
necessarily have relations touching the whole tree.
2020-10-30 19:12:12 +01:00
Ettore Di Giacinto
2fa58fc7db Bump gophersat 2020-10-30 18:37:26 +01:00
Ettore Di Giacinto
529a827c5f Tag 0.8.10 2020-10-29 16:50:17 +01:00
Ettore Di Giacinto
39bc74fc73 Add boltDB test and fixup range over interface cast 2020-10-29 16:32:13 +01:00
Ettore Di Giacinto
99c59643a1 Add benchmarks tests 2020-10-29 16:32:13 +01:00
Ettore Di Giacinto
ffea4d8cf9 Fix priority constraint formula
The parallel solver made the issue more visible: the constraints needed
to be less relaxed and needed to be exclusive so our candidate is looked
up first
2020-10-29 16:32:13 +01:00
Ettore Di Giacinto
e459ddf470 Optimize BoltDB World() call 2020-10-29 16:32:13 +01:00
Ettore Di Giacinto
eb2c240e84 Adapt installer tests 2020-10-29 16:32:13 +01:00
Ettore Di Giacinto
1956f476cc Set concurrency when building 2020-10-29 16:32:13 +01:00
Ettore Di Giacinto
a216f71d53 Inverted options 2020-10-29 16:32:13 +01:00
Ettore Di Giacinto
95e640c9d0 Make solver type switchable 2020-10-29 16:32:13 +01:00
Ettore Di Giacinto
9f1a182eee Add tests and various fixes to parallel implementation 2020-10-29 16:32:13 +01:00
Ettore Di Giacinto
9a7d92b02e Make the parallel solver completely parallel in building formulas from dataset 2020-10-29 16:32:13 +01:00
Ettore Di Giacinto
c5ed36b2bd Sketch concurrent solver when building formulas 2020-10-29 16:32:13 +01:00
Daniele Rondina
5f8b836335 Update vendor github.com/Sabayon/pkgs-checker@v0.7.2 2020-10-28 17:48:30 +01:00
Daniele Rondina
6806103b3e installer: Start spinner of upgrade calculation 2020-10-25 14:01:45 +01:00
Ettore Di Giacinto
4e9313ed55 Tag 0.8.9 2020-10-22 18:36:37 +02:00
Ettore Di Giacinto
c9952b12a8 Get file list from parsed yaml 2020-10-22 17:33:37 +02:00
Ettore Di Giacinto
abae9c320a Tag 0.8.8 2020-10-19 18:47:36 +02:00
Ettore Di Giacinto
94937cc88a update vendor/ 2020-10-19 17:58:50 +02:00
Ettore Di Giacinto
0aa0411c6e Bump copy dep and handle shallow symlinks 2020-10-19 17:58:43 +02:00
Daniele Rondina
c0cc9ec703 convert: Now use slot for category name 2020-10-18 19:58:15 +02:00
Daniele Rondina
07dff7f197 installer: log packages ignored on create-repo 2020-10-12 08:42:19 +02:00
Ettore Di Giacinto
4028c62367 Tag 0.8.7 2020-10-11 15:17:16 +02:00
Daniele Rondina
51f32c0614 Update vendor/ 2020-10-10 19:59:40 +02:00
Ettore Di Giacinto
3261b2af98 Drop also package file list from db entry 2020-10-10 18:53:43 +02:00
Ettore Di Giacinto
b88a81c7ed Add database command
- Allows overriding the system db and creating/removing entries as desired.
  The input format is the same metadata as the one generated by the
  artifacts. It contains the Package and the file list that we need.
- Add integration test

Closes #47
2020-10-10 18:26:40 +02:00
Daniele Rondina
d67cf2fa33 Revert "Do image export only if we have to generate the package"
This reverts commit 0857e53b03.
2020-10-07 10:14:06 +02:00
Ettore Di Giacinto
0857e53b03 Do image export only if we have to generate the package 2020-10-06 19:01:25 +02:00
Ettore Di Giacinto
1c1bdca343 Add only-target-package option to luet build 2020-10-06 17:57:57 +02:00
Ettore Di Giacinto
2cb0f3ab5d Tag 0.8.6 2020-10-04 19:58:13 +02:00
Ettore Di Giacinto
74246780d4 Support templated packages 2020-10-04 19:33:15 +02:00
Daniele Rondina
097ea37c97 compiler/artefact: remove debug println 2020-10-02 22:37:38 +02:00
Daniele Rondina
8e23bf139a contrib: Add example of config protect confile 2020-10-02 22:28:08 +02:00
Daniele Rondina
3ba70ae9bd contrib/config/luet.yaml: Add config_protect_skip option 2020-10-02 22:27:38 +02:00
Daniele Rondina
c64660b8d1 Permit ignoring config protect rules
* Added command line option --skip-config-protect (see the sketch below)

* Added config option config_protect_skip
2020-10-02 22:25:21 +02:00
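A sketch of the command line form named above; `upgrade` is only an illustrative subcommand, and the equivalent config option is config_protect_skip (see contrib/config/luet.yaml for its format):

$> luet upgrade --skip-config-protect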
Daniele Rondina
c8c53644f3 Avoid segfault if tree path doesn't exist 2020-09-17 08:05:05 +02:00
Daniele Rondina
9b381e5d19 Update github.com/Sabayon/pkgs-checker vendor 2020-09-12 16:07:04 +02:00
Ettore Di Giacinto
6f623ae016 Tag 0.8.5 2020-09-10 17:40:33 +02:00
Daniele Rondina
bd80f9acd2 cmd/tree/bump: Add --pkg-version/-p to set specific version 2020-08-30 08:54:38 +02:00
Daniele Rondina
045d25bb28 pkg/package: Add method SetVersion to DefaultPackage 2020-08-30 08:53:37 +02:00
Daniele Rondina
908b6d2bd4 cmd/tree/validate: Fix race and drop errs chan 2020-08-23 12:27:51 +02:00
Ettore Di Giacinto
a3ada624a7 Merge pull request #132 from mudler/validate-buildtime-deps
cmd/tree/validate: Integrate validation of buildtime deps
2020-08-22 12:10:34 +02:00
Daniele Rondina
09c7609a7f cmd/tree/validate: Add error summary 2020-08-20 11:36:56 +02:00
Daniele Rondina
a1acab0e52 cmd/tree/validate: Integrate validation of buildtime deps
By default both runtime and buildtime deps are checked.
With the option --only-buildtime is possible analyze only
buildtime deps or instead with the option --only-runtime
only the runtime deps.

Signed-off-by: Daniele Rondina <geaaru@sabayonlinux.org>
2020-08-20 11:09:39 +02:00
Daniele Rondina
93187182e5 pkg/compiler: Fix typo on error message 2020-08-19 19:24:46 +02:00
Daniele Rondina
5e7cd183be contrib/config/luet.yaml: Update config example 2020-08-08 17:55:26 +02:00
Ettore Di Giacinto
9c0f0e3457 ci: release with GH Actions 2020-08-08 11:54:11 +02:00
Ettore Di Giacinto
1120b1ee59 ci: fix typo 2020-08-07 23:36:33 +02:00
Ettore Di Giacinto
4010033e0c ci: fixup workflows 2020-08-07 19:30:30 +02:00
Ettore Di Giacinto
a076613f66 Fixup import path 2020-08-07 19:30:08 +02:00
Ettore Di Giacinto
c184b4b3bc ci: Pass env by in GH actions 2020-08-06 18:52:42 +02:00
Ettore Di Giacinto
40d1f1785b ci: Get deps before running unit tests 2020-08-06 18:22:13 +02:00
Ettore Di Giacinto
11944f4b8c Disable tty on docker integration test 2020-08-06 18:11:50 +02:00
Ettore Di Giacinto
6f41f8bd8d Add GH action workflows 2020-08-06 18:03:35 +02:00
Ettore Di Giacinto
95b125cb91 Pull images before executing diff tests 2020-08-06 18:03:00 +02:00
Ettore Di Giacinto
f676b50735 Tag 0.8.4 2020-08-05 19:37:32 +02:00
Ettore Di Giacinto
0c0401847e ci: pass PATH also on deploy steps 2020-08-05 19:35:41 +02:00
Ettore Di Giacinto
02a506a5c5 ci: pass PATH by 2020-08-05 19:24:17 +02:00
Ettore Di Giacinto
6f0b657e69 ci: keep envs 2020-08-05 19:21:20 +02:00
Ettore Di Giacinto
51378bdfb6 Run tests as root to verify caps 2020-08-05 19:19:33 +02:00
Ettore Di Giacinto
66513955c7 Compute image diffs internally
It is faster this way, as we already have all the folders needed for
the comparison extracted. In this way we don't repeat I/O operations
twice by calling container-diff.

Do not depend on container-diff anymore
2020-08-05 19:09:45 +02:00
Ettore Di Giacinto
694d8656d9 Add xattrs tests 2020-08-05 18:58:50 +02:00
Ettore Di Giacinto
c339e0fed2 Add symlink test 2020-08-05 18:57:27 +02:00
Ettore Di Giacinto
e30bb056d5 Drop IsFlagged from tests 2020-08-02 12:22:43 +02:00
Ettore Di Giacinto
052a551c0c Add "hidden" field to packages
Also drop the residual IsSet, which isn't actually used

Related to #26
2020-08-02 11:31:23 +02:00
Ettore Di Giacinto
ffa6fc3829 Tag 0.8.3 2020-07-17 22:42:49 +02:00
Ettore Di Giacinto
07a1058ac1 Add cli option to skip packages if only metadata is present (without checking the image) 2020-07-17 22:42:03 +02:00
Ettore Di Giacinto
3af9109b99 Tag 0.8.2 2020-07-12 15:57:46 +02:00
Ettore Di Giacinto
6e3650d3af Add upgrade_old_repo_revision fixture 2020-07-12 15:38:02 +02:00
Ettore Di Giacinto
5dcf77c987 Add package buildtimestamp and luet upgrade --sync
Annotate the package build time when compiling, and use that from the
client to force upgrade of packages that changed the artifact, but
didn't change any version.

The client can trigger this behavior with `luet upgrade --sync`
2020-07-12 15:29:38 +02:00
Daniele Rondina
ee0e70ed3d tree/pkglist: Now --deps orders for build/installation order 2020-07-05 10:44:47 +02:00
Daniele Rondina
364b5648b4 repository loader now support .yaml extension 2020-07-04 20:07:32 +02:00
Daniele Rondina
e28a4753f8 tree/validate: Use --deps instead of --rdeps (we support also build deps) 2020-06-27 19:42:03 +02:00
Daniele Rondina
d1d7f5aa74 tree/pkglist: Add --rdeps option for runtime deps 2020-06-27 19:27:45 +02:00
Daniele Rondina
e2260b6956 Add --no-spinner option 2020-06-27 16:45:49 +02:00
Ettore Di Giacinto
764a09ce0c Tag 0.8.1 2020-06-27 13:02:00 +02:00
Ettore Di Giacinto
910f1ad3fe Merge branch 'master' into develop 2020-06-27 13:01:14 +02:00
Ettore Di Giacinto
16e9d7b20c Use packageImage as builder image fingerprint
This allows having a unique identifier for the builder image id across
different depgraph combinations. The package fingerprint is not enough,
as an atom could have a different deptree depending on the requires
constraints.

TODO: Don't use the full image name, but only the hash as a salt
(currently the salt ALSO contains a reference to the image-repository,
as such it doesn't allow porting a tree to a different docker registry)
2020-06-23 18:59:18 +02:00
Ettore Di Giacinto
6088664887 Drop gox-build make target 2020-06-13 18:40:55 +02:00
Ettore Di Giacinto
ee3b59348e Tag 0.8.0 2020-06-12 19:41:37 +02:00
Ettore Di Giacinto
bb41a0c074 Add upx -1 option 2020-06-12 18:47:18 +02:00
Ettore Di Giacinto
6b8f412138 Update vendor 2020-06-12 17:58:13 +02:00
Ettore Di Giacinto
6d68ed073d Show and detect extensions with cobra-extensions 2020-06-12 17:54:57 +02:00
Ettore Di Giacinto
7b51e83902 Merge pull request #119 from mudler/config-protect
Integrate feature config-protect like Gentoo/Funtoo do
2020-06-07 11:33:55 +02:00
Daniele Rondina
3a365c709b Integrate config-protect from package spec 2020-06-06 13:12:54 +02:00
Daniele Rondina
aaa73dc2ac Add integration test for config-protect 2020-06-06 12:35:16 +02:00
Daniele Rondina
0917c2703e helpers/archive: Refactor and add support for config protect when sameOwner is false 2020-06-06 12:34:44 +02:00
Ettore Di Giacinto
264e1e9652 Print inspect output as string 2020-06-06 11:32:47 +02:00
Ettore Di Giacinto
03cc5fcb76 Use cobra for build --full 2020-06-06 11:20:48 +02:00
Ettore Di Giacinto
8aafc7600c Print selected packages 2020-06-06 11:18:54 +02:00
Daniele Rondina
c87db16d31 tarModifierWrapperFunc: Now use strings.HasPrefix for match path 2020-06-06 10:38:11 +02:00
Daniele Rondina
cd903351b3 cmd/config: Add print of config protect data 2020-06-06 10:36:42 +02:00
Daniele Rondina
837eeb04ec config/config_protect: Fix load of configuration files 2020-06-06 10:35:38 +02:00
Ettore Di Giacinto
90a25406a0 Add --full option to build
Don't compile dependencies when computing all the compilation specs from
a tree if they are among the target deps

Fixes #41
2020-06-06 08:58:18 +02:00
Daniele Rondina
a19a1488bb Update Luet-lab/moby vendor/ 2020-06-06 08:30:28 +02:00
Daniele Rondina
d946e39a15 pkg/helpers/archive_test: Fix test 2020-06-05 23:53:33 +02:00
Daniele Rondina
a414b4ad4c contrib/config/luet.yaml: Add config_protect_confdir option 2020-06-05 23:40:36 +02:00
Daniele Rondina
415b1dab9a Add test of modifier 2020-06-05 23:12:35 +02:00
Daniele Rondina
16f717f04b tarModifierWrapperFunc: Fix config protect filename 2020-06-05 23:12:19 +02:00
Ettore Di Giacinto
9e0e1199df Check if image exists before skipping compilation 2020-06-03 21:00:30 +02:00
Daniele Rondina
8f0c528c08 Create helpers.UntarProtect for handling protected files
Currently archive.ReplaceFileTarWrapper is used, which
requires a []byte of the replaced files. This is not
a good idea if files are big; it could be better
in the near future to reimplement ReplaceFileTarWrapper with
a callback that returns an io.Reader instead of []byte.

If a protected file is already present on the target rootfs,
a file is created with the same prefix used in Gentoo:

._cfgXXXX_<filename>
2020-06-02 11:08:37 +02:00
Daniele Rondina
a2231749ab Begin implementation for catch config protect files 2020-06-02 09:04:40 +02:00
Ettore Di Giacinto
990a5405cf Merge pull request #116 from mudler/review-config-path
Review configuration file parsing logic
2020-05-31 11:05:55 +02:00
Daniele Rondina
1d8a6174bb Drop duplicate code for general.same_owner 2020-05-30 16:51:10 +02:00
Daniele Rondina
341293c403 tests/integration/09_docker.sh: align new logic 2020-05-30 16:47:11 +02:00
Daniele Rondina
9e7c7e69f8 Review configuration file parsing logic
Luet now supports these priorities when reading the configuration file:
- command line option (if available)
- $PWD/.luet.yaml
- $HOME/.luet.yaml
- /etc/luet/luet.yaml
2020-05-30 16:46:29 +02:00
Ettore Di Giacinto
86808ad49b Fixup dockerfile 2020-05-26 21:19:27 +02:00
Ettore Di Giacinto
d59cc42e22 Add target to create smaller binary 2020-05-26 21:07:18 +02:00
Ettore Di Giacinto
cc21e6fa5e Respect user-defined repository naming 2020-05-24 12:16:02 +02:00
geaaru
8c4f5b2911 Merge pull request #113 from mudler/annotations
Add Annotations to package spec
2020-05-23 10:52:32 +02:00
Daniele Rondina
44d68a9583 Add annotations option to package spec 2020-05-23 09:27:38 +02:00
Daniele Rondina
dba6c361c2 Move helpers/cli to cmd/helpers 2020-05-23 08:51:33 +02:00
Ettore Di Giacinto
4197d7af61 Add upgrade by using only the SAT core
- Adds upgrade --universe and upgrade --universe --clean. It will
  attempt to bring the system as close as possible to the content available in
  the repositories. It differs from a standard upgrade, which checks
  directly that what is pulled in doesn't conflict with the system. In
  this new way, we just query the SAT solver to decide that on our
  behalf.
- Add uninstall --full-clean. It uses only the SAT solver to uninstall
  the package and it will drop as many packages as required (including
  revdeps of packages too). See the sketch below.
2020-05-22 21:20:58 +02:00
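A sketch of the invocations named in the commit above; <pkg> is a placeholder:

$> luet upgrade --universe            # SAT-core upgrade toward the repository content
$> luet upgrade --universe --clean    # variant also introduced by this change
$> luet uninstall --full-clean <pkg>  # SAT-only uninstall, dropping revdeps as required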
Ettore Di Giacinto
bfde9afc7f Add Nodeps and Full options to upgrade 2020-05-22 21:20:57 +02:00
Ettore Di Giacinto
3237423dde Enhance upgrade output 2020-05-22 21:20:57 +02:00
Ettore Di Giacinto
ab179db96a Don't drop packages that would be re-installed during upgrade
Check for packages that are marked for deletion. If the ones that are
marked for install depend on them, don't remove them at all
2020-05-22 21:20:52 +02:00
geaaru
916b2a8927 Merge pull request #112 from mudler/package-sanitized
Package sanitized
2020-05-20 11:42:47 +02:00
Daniele Rondina
a16bdddeb2 Add spectooling test suite 2020-05-20 10:26:30 +02:00
Daniele Rondina
e38a4b3d9b Use DefaultPackageSanitized struct for write specs 2020-05-20 09:59:48 +02:00
Ettore Di Giacinto
c52fe9a6b3 Add development version 2020-05-19 23:05:03 +02:00
Ettore Di Giacinto
956e55a1d4 Tag 0.7.9 2020-05-19 23:04:28 +02:00
Ettore Di Giacinto
9971fe9f45 Unique hashes for packages without deps 2020-05-18 19:57:01 +02:00
Daniele Rondina
11759f98e0 package/NewPackage: init Labels on AddLabel 2020-05-18 19:51:47 +02:00
Daniele Rondina
cb2ac15de8 package: Fix typo on labels key 2020-05-17 19:17:14 +02:00
Ettore Di Giacinto
c0b432befa Add development version 2020-05-16 22:46:29 +02:00
Ettore Di Giacinto
8e029a8ee4 Tag 0.7.8 2020-05-16 22:46:04 +02:00
Ettore Di Giacinto
51711dafba Add package_dir to pack a spec dir as the main artifact 2020-05-16 21:34:27 +02:00
Ettore Di Giacinto
2803430515 Add solver test scenario 2020-05-16 11:11:17 +02:00
Daniele Rondina
44213894bc Add commands aliases 2020-05-10 20:24:08 +02:00
Daniele Rondina
a7d1381cb5 cmd/create-repo: Add support for multiple trees 2020-05-10 20:18:10 +02:00
Daniele Rondina
2cb79c0071 cmd/build: Add support for multiple trees 2020-05-10 20:02:12 +02:00
Ettore Di Giacinto
7d17d3babf Merge pull request #109 from mudler/log-cmdline-opts
Log cmdline opts
2020-05-09 11:16:37 +02:00
Daniele Rondina
13df161fc6 logging: permit to disable color and emoji
Now it's possible to disable color and emoji on standard
output with:

$> luet <cmd> --color=false --emoji=false

Hereinafter, the list of changes:

* Added logging option logging.color (default true)
* Added logging option logging.enable_emoji (default true)
* Added persistent flag --color
* Added persistent flag --emoji
2020-05-09 10:08:21 +02:00
Daniele Rondina
fe5ab9246f Added option to enable logging to file.
Logging to file is now handled by the enable_logfile
option and by the path.

From the CLI it is now possible to (see the sketch below):
* enable logging to file with the option --enable-logfile
* modify the logfile path with the option --logfile/-l <path>
2020-05-09 10:05:34 +02:00
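A sketch of the logging flags named above; the path and the `install` subcommand are illustrative:

$> luet install foo/bar --enable-logfile --logfile /var/log/luet.log
$> luet install foo/bar --enable-logfile -l /var/log/luet.log   # short form of the path flag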
Daniele Rondina
993bcf9adf tree/validate: Add support for in memory cache with solver check 2020-05-08 20:05:21 +02:00
Ettore Di Giacinto
20cb96e0cc Simplify ordering check
we don't need a map[string]map[string]interface{}, as we don't need to
keep the data around
2020-05-04 17:34:29 +02:00
Daniele Rondina
b68634b58a solver: skip same packages in the order and avoid loop 2020-05-04 12:21:49 +02:00
Ettore Di Giacinto
1b529ea8c5 Add development version 2020-05-03 13:46:53 +02:00
Ettore Di Giacinto
2c2e6065d9 Tag 0.7.7 2020-05-03 13:46:35 +02:00
Ettore Di Giacinto
46014fb9c1 Enhance install output 2020-05-03 13:22:06 +02:00
Ettore Di Giacinto
ada9d886fa Enhance uninstall output 2020-05-03 13:22:00 +02:00
Ettore Di Giacinto
584b980644 Adapt integration test which requires full uninstall 2020-05-03 13:12:14 +02:00
Ettore Di Giacinto
a1d8ef1422 Allow partially uninstalling a package graph, make uninstall --full optional 2020-05-03 13:04:34 +02:00
Ettore Di Giacinto
7b6e4a2176 Add to the Solver the capability to check conflicts with revdeps 2020-05-03 10:34:18 +02:00
Ettore Di Giacinto
3befbfa915 Optimize uninstall computation 2020-05-02 15:43:57 +02:00
Ettore Di Giacinto
8dd756ec96 Add development version 2020-05-02 14:51:08 +02:00
Ettore Di Giacinto
1b6ffac7bd Tag 0.7.6 2020-05-02 14:50:29 +02:00
Ettore Di Giacinto
faef3d093a Add test fixtures 2020-05-02 14:03:34 +02:00
Ettore Di Giacinto
d4c25d74f5 Remove focus from test 2020-05-02 13:27:15 +02:00
Ettore Di Giacinto
2ed9781c88 Don't try to match against repo already installed packages 2020-05-02 13:24:46 +02:00
Ettore Di Giacinto
9d6d6bc0c8 Add more upgrade and reclaim tests scenarios 2020-05-02 12:18:19 +02:00
Ettore Di Giacinto
f8b2837741 Enhance reclaim output 2020-05-02 12:17:54 +02:00
geaaru
878e6d7b9c Merge pull request #103 from mudler/tmpdir-cleanup
Tmpdir cleanup
2020-05-02 09:20:01 +02:00
Daniele Rondina
c5b41946dc Add integration test for tmpdir cleanup 2020-05-02 08:43:25 +02:00
Daniele Rondina
a89c0af2f8 config/config_test: Fix typo 2020-05-01 15:34:17 +02:00
Daniele Rondina
60d9017952 CleanupTmpDir() is not Fatal 2020-05-01 15:27:19 +02:00
Daniele Rondina
1f99fde1c5 Add config test suite 2020-05-01 15:26:53 +02:00
Ettore Di Giacinto
4b63c9eaf9 Add test to be sure we don't index packages not in the tree 2020-05-01 11:48:05 +02:00
Ettore Di Giacinto
286d0fba2c Fix typo 2020-05-01 11:07:14 +02:00
Ettore Di Giacinto
624518bf77 Don't add package to the repository which aren't referenced by the tree 2020-05-01 10:52:40 +02:00
Daniele Rondina
51a4037b1b config: Initialize luet tmp basedir if doesn't exist 2020-05-01 08:18:18 +02:00
Ettore Di Giacinto
b13828c883 Add development version 2020-04-30 22:50:37 +02:00
Ettore Di Giacinto
886cbd0036 Tag 0.7.5 2020-04-30 22:50:12 +02:00
Ettore Di Giacinto
b91288153a Make tree validation cmd concurrent 2020-04-30 21:48:46 +02:00
Ettore Di Giacinto
9cb290b484 Improve database errors 2020-04-30 20:44:34 +02:00
Daniele Rondina
11944873ea Integrate tmpdir_base params and tmpdirs cleanup 2020-04-30 20:29:28 +02:00
Ettore Di Giacinto
322ac99f17 Annotate package runtime definition when reclaiming
This was an issue as we were copying the buildspec instead
2020-04-30 18:56:50 +02:00
Ettore Di Giacinto
6acc5fc97e Make BuildFormula support dependency cycles 2020-04-30 18:56:35 +02:00
Ettore Di Giacinto
6a4557a3b3 Make RequireContains support dependency cycles 2020-04-30 18:56:06 +02:00
Ettore Di Giacinto
fb3c568051 Add development version 2020-04-24 19:46:35 +02:00
Ettore Di Giacinto
14bc26ee22 Tag 0.7.4 2020-04-24 19:46:14 +02:00
Ettore Di Giacinto
18ccdbd6a9 Add --revdep to luet pkglist
Include also the path in the results
2020-04-24 19:05:47 +02:00
Ettore Di Giacinto
7d960b733d Add --revdep to luet search
Fixes #52
2020-04-24 19:05:24 +02:00
Ettore Di Giacinto
919b2c3cfc Don't print misleading messages to the user 2020-04-24 19:04:21 +02:00
Ettore Di Giacinto
a457c53824 Fixups to ExpandedRevdeps 2020-04-24 00:15:18 +02:00
Ettore Di Giacinto
8305d01e76 Walk requires in ExpandedRevdeps
In this way we annotate the visited packages and we avoid cycles that can be
generated by revdeps
2020-04-23 23:44:42 +02:00
Ettore Di Giacinto
ec32677dc1 Merge branch 'master' into develop 2020-04-22 22:17:49 +02:00
Ettore Di Giacinto
60619c36a3 Merge pull request #95 from mudler/develop (#96) 2020-04-22 22:16:58 +02:00
Ettore Di Giacinto
141f1bf773 Merge pull request #95 from mudler/develop
Merge develop
2020-04-22 22:15:34 +02:00
Ettore Di Giacinto
9321a310a5 Support mount with target folder in box exec
Also bind mount before pivotroot; this fixes bind mounting in general
2020-04-21 17:30:27 +02:00
Ettore Di Giacinto
4cee27da7f Fixup box CLI params and consume new box options 2020-04-20 23:03:03 +02:00
Ettore Di Giacinto
98876c8f20 Add support for env and hostmounts in box 2020-04-20 23:01:49 +02:00
Ettore Di Giacinto
4c2d38be59 Add package ExpandedRevdeps 2020-04-19 11:55:13 +02:00
Ettore Di Giacinto
20aa7c89f8 Use different result structs in pkglist 2020-04-19 11:37:23 +02:00
Ettore Di Giacinto
937609a9f4 Add machine readable output to pkglist 2020-04-19 11:24:41 +02:00
Ettore Di Giacinto
a3f0d848c9 Add builtime search to tree pkglist 2020-04-19 10:53:05 +02:00
Ettore Di Giacinto
8e91c255a3 Make FindPackageMatch match packages HumanReadableString 2020-04-19 10:47:55 +02:00
Ettore Di Giacinto
62381ed46d Structure search machine readable output 2020-04-19 10:31:07 +02:00
Ettore Di Giacinto
1171987ed3 Adapt integration test to search output change 2020-04-19 10:24:39 +02:00
Ettore Di Giacinto
ac871cb0a3 Add --output option to search
In this way it can be parsed by scripts more easily.
It also disables the spinner based on the loglevel

Fixes #92
2020-04-18 23:22:00 +02:00
Ettore Di Giacinto
82cd0e17b6 Enhance search output 2020-04-18 22:51:27 +02:00
Ettore Di Giacinto
c972795a30 Tweak upgrade integration test
In this way we also test that the user configuration takes precedence
over the one advertised remotely
2020-04-18 17:35:58 +02:00
Ettore Di Giacinto
4d466d0330 Sync the repository data with user configured data
Fixup a regression: in this way we respect the user configuration with
regard to a repository to be consumed (priority and repository type)
regardless of the one advertised remotely.

Also add an informative message about prio and type.
2020-04-18 17:33:46 +02:00
Ettore Di Giacinto
84407e5ae7 Add SetPriority to repository 2020-04-18 17:33:34 +02:00
Ettore Di Giacinto
845797a155 Merge pull request #93 from mudler/develop
Merge develop
2020-04-18 14:45:20 +02:00
Ettore Di Giacinto
83e19359a9 Update bbolt to 1.3.4
Includes fixes for Go 1.14

See: https://github.com/etcd-io/bbolt/issues/187
2020-04-18 13:58:14 +02:00
Ettore Di Giacinto
d77f875a8a Bump go version in travis 2020-04-18 12:41:23 +02:00
Ettore Di Giacinto
3963fb64e5 Update vendor 2020-04-18 11:49:28 +02:00
Ettore Di Giacinto
1d5ce53443 Add luet box command
It adds only the 'exec' subcommand to spawn processes in custom boxes

Relates to #45
2020-04-18 11:43:14 +02:00
Ettore Di Giacinto
a14f0abb5c Update vendor 2020-04-18 11:42:34 +02:00
Ettore Di Giacinto
64bac0823c Consume our moby fork
It handles #91
2020-04-18 11:41:34 +02:00
Ettore Di Giacinto
ee0fe1a86a Allow the finalizer to specify an entrypoint command
In this way, finalizers in strict environments can override the default
shell used to run commands.

The shell keyword is a list, as it needs to contain the full command +
args.
2020-04-14 17:46:39 +02:00
Ettore Di Giacinto
333b9cc023 Add reclaim to CLI
Relates to #86
2020-04-13 19:33:30 +02:00
Ettore Di Giacinto
538ba8f5df Add luet reclaim
Reclaim allows migrating between different system layouts. This
is an experimental feature (for now) and might be revisited in the future.

This change:

- Adds Reclaim(system) to Installer
- Adds unit tests

Relates to #86
2020-04-13 19:32:51 +02:00
Ettore Di Giacinto
eb56956c65 Add Touch file helper 2020-04-13 19:32:50 +02:00
Ettore Di Giacinto
32cf132a0c Drop downloadOnly bool from Installer cli options 2020-04-13 19:32:50 +02:00
Ettore Di Giacinto
4e2d42e397 Drop downloadOnly bool from installer.Install 2020-04-13 19:32:50 +02:00
Ettore Di Giacinto
c2eed1d999 Add quay.io badge 2020-04-13 18:11:02 +02:00
Daniele Rondina
1b8176777a cmd/search: Add filter to result 2020-04-13 15:50:45 +02:00
Ettore Di Giacinto
528f481a7b Annotate package files in metadata
Fixes #87
2020-04-13 10:52:41 +02:00
Ettore Di Giacinto
2f3eabc3ca Annotate git hash and time in displayed version 2020-04-12 14:37:49 +02:00
Ettore Di Giacinto
579a4f20fc Add docker fixture for integration tests 2020-04-11 20:12:43 +02:00
Ettore Di Giacinto
dd45a46ed0 Add docker from scratch integration test 2020-04-11 20:07:10 +02:00
Ettore Di Giacinto
f987ee9fa0 Handle errors from os/user
We use it to generate defaults; if an error is found, we default to
preserving permissions
2020-04-11 20:05:56 +02:00
Ettore Di Giacinto
0fe2a8c950 Add development version 2020-04-11 18:30:11 +02:00
Ettore Di Giacinto
9704992c43 Tag 0.7.3 2020-04-11 18:29:46 +02:00
Ettore Di Giacinto
c2beab64cb Merge pull request #84 from mudler/develop
Merge develop
2020-04-11 18:24:02 +02:00
Ettore Di Giacinto
d8919f7250 Check if dependencies are selectors in validate 2020-04-10 20:00:37 +02:00
Daniele Rondina
42c380029c cmd/tree/validate: Fix order analysis 2020-04-10 17:41:27 +02:00
Daniele Rondina
69a82a1ca5 simpledocker: Use debug option for print container-diff results 2020-04-10 09:32:12 +02:00
Daniele Rondina
02c33896d5 simpledocker: Move warning before return 2020-04-10 09:31:08 +02:00
Daniele Rondina
c2e9176ab2 solver.Order now return error 2020-04-09 17:07:57 +02:00
Daniele Rondina
726a749b4b cmd/tree/validate: Integrate order of the solution 2020-04-09 15:58:05 +02:00
Ettore Di Giacinto
0d2668e452 Print warning if container-diffs return errors to stderr 2020-04-08 18:31:40 +02:00
Ettore Di Giacinto
77ba4193aa Add ValidateSelector to versioner interface and consume it
We can refactor further by dropping the package methods, as now we
can consume a versioner in all places that require it
2020-04-05 15:52:16 +02:00
Ettore Di Giacinto
5a5e7f1dfa Move version logic inside versioner
WIP, it still needs to be under the interface implementation
2020-04-05 11:16:14 +02:00
Ettore Di Giacinto
cc0999b4c4 Merge pull request #83 from auto-maintainers/branch-be6698fa706d6ccfc3734b805b212f75175fd557
Update dependencies
2020-04-05 09:00:51 +02:00
MarvinHatesOceans
be6698fa70 Update dependencies 2020-04-04 23:56:14 +00:00
Ettore Di Giacinto
6540dcc99d Merge pull request #82 from mudler/packages_type
Add Packages type and Versioner interface
2020-04-04 17:28:49 +02:00
Ettore Di Giacinto
cf8091a7fd Re-enable debian versioning test, add more cases 2020-04-04 16:49:41 +02:00
Ettore Di Giacinto
3caaa01eb8 Add semver detection to versioner
In this way we compare only versions that are compliant with semver;
otherwise we fall back to debian sorting.
2020-04-04 16:48:48 +02:00
Ettore Di Giacinto
4eae0a851c Add versioner unit tests 2020-04-04 16:33:08 +02:00
Ettore Di Giacinto
626419f955 Fix original version mapping in versioner 2020-04-04 16:23:43 +02:00
Ettore Di Giacinto
5bf60deffc Update vendor 2020-04-04 15:35:45 +02:00
Ettore Di Giacinto
84625be9ac Adapt package.Best to take a Versioner interface 2020-04-04 15:33:14 +02:00
Ettore Di Giacinto
aaa8d8b7d6 Update gomod 2020-04-04 15:32:13 +02:00
Ettore Di Giacinto
04f9a88a5f Add versioner interface to wrap versioning operations
This allows scoping responsibilities and enables switching to different
ordering algorithms
2020-04-04 15:29:20 +02:00
Ettore Di Giacinto
9d58b0a0cc Adapt tests to refactor 2020-04-04 14:41:58 +02:00
Ettore Di Giacinto
5e31d940f0 Introduce Packages for []Package 2020-04-04 14:29:08 +02:00
Daniele Rondina
42d1ec5585 cmd/tree/bump: Add --to-stdout option 2020-04-04 13:42:25 +02:00
Daniele Rondina
07e78dd89b Update vendor github.com/Sabayon/pkgs-checker@076438c31739 2020-04-04 11:40:36 +02:00
geaaru
625a6ce773 Merge pull request #80 from mudler/versioning_integration
Versioning integration
2020-04-03 19:24:43 +02:00
Daniele Rondina
d6faa3335f tests/integration/08_versioning.sh: Add version with _ 2020-04-03 18:33:01 +02:00
Daniele Rondina
129ca8da55 Add version sanitized to Best() 2020-04-03 18:32:26 +02:00
Daniele Rondina
9e95d7d2a9 tests/integration/08_versioning.sh: Drop wrong assert 2020-04-03 15:24:56 +02:00
Daniele Rondina
e94674fca3 tests/integration/08_versioning.sh: Simplify test 2020-04-03 15:06:27 +02:00
Daniele Rondina
730b9a087a tests/integration/08_versioning.sh: Fix test 2020-04-02 19:39:31 +02:00
Ettore Di Giacinto
9cf6330146 Build empty packages in versioning fixture 2020-03-29 15:11:20 +02:00
Ettore Di Giacinto
daa0b815c1 Fixup assertEquals on versioning integration test 2020-03-29 14:52:14 +02:00
Ettore Di Giacinto
1779382811 Extend tree validate checks 2020-03-29 14:52:13 +02:00
Ettore Di Giacinto
c705728a2a Build specific package, it triggers version search 2020-03-29 14:52:12 +02:00
Ettore Di Giacinto
15e772d96d Fixup versioning of fixture 2020-03-29 14:52:10 +02:00
Ettore Di Giacinto
0d6dc49a95 Add versioning integration test 2020-03-29 14:52:00 +02:00
Daniele Rondina
8df76c8941 Update vendor github.com/Sabayon/pkgs-checker@fd529912510d 2020-03-29 14:04:55 +02:00
Ettore Di Giacinto
b1ce8d2eba Merge pull request #77 from auto-maintainers/branch-830fdb5bf299f959bcd02fe577c19471daa229cd
Update dependencies
2020-03-29 12:15:14 +02:00
Daniele Rondina
ee66839bd3 pkg/package/version: Better search for last + 2020-03-29 09:10:52 +02:00
Daniele Rondina
9b61ed9e1d Fix panic if luet is executed without args 2020-03-29 08:48:51 +02:00
MarvinHatesOceans
d59fff0740 Update dependencies 2020-03-28 23:19:43 +00:00
Ettore Di Giacinto
aeb10338f6 Add development version 2020-03-28 17:32:06 +01:00
Ettore Di Giacinto
02653e03d8 Tag 0.7.2 2020-03-28 17:31:35 +01:00
Ettore Di Giacinto
2c48fe0524 Merge pull request #76 from mudler/box_finalizers
Add Box finalizers
2020-03-28 17:30:09 +01:00
Ettore Di Giacinto
a0113dcd13 Drop verbose output 2020-03-28 17:05:23 +01:00
Ettore Di Giacinto
d45536505b Add integration test for box finalizers 2020-03-28 16:59:37 +01:00
Ettore Di Giacinto
92d335d5d1 Add exec command to cli
It is the entrypoint for box, to run commands with unshare
2020-03-28 16:58:59 +01:00
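On Linux, this kind of unshare-based execution boils down to cloning the child process into fresh namespaces; a simplified sketch with the standard library (not the actual box implementation, which also handles mounts, env and the rootfs):
import (
	"os"
	"os/exec"
	"syscall"
)

// runInBox runs entrypoint with args inside new mount/UTS/PID namespaces (Linux-only).
func runInBox(entrypoint string, args []string) error {
	cmd := exec.Command(entrypoint, args...)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWPID,
	}
	return cmd.Run()
}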
Ettore Di Giacinto
49d7b4e2bf Add box structure to handle containerized executions 2020-03-28 16:58:46 +01:00
Ettore Di Giacinto
f15ed3fda1 Move finalizer in its own struct
Also distinguish between running in a root target dir ("/") and running
the commands in a separate folder
2020-03-28 16:57:40 +01:00
Ettore Di Giacinto
2ad16fa875 Add ListDir helper 2020-03-28 16:55:16 +01:00
Ettore Di Giacinto
416be23a46 Don't always remove unpacked repository content 2020-03-28 12:07:40 +01:00
Daniele Rondina
98c7d5c450 cmd/create-repo: Fix default value of tree-compression 2020-03-27 08:59:52 +01:00
Daniele Rondina
50091b2a4b pkg/installer: Add test for uncompress tree and minor cleanup 2020-03-25 00:19:52 +01:00
Ettore Di Giacinto
0067fa82a5 Merge pull request #75 from mudler/fixup_upgrade
Don't attempt to upgrade packages already in system
2020-03-24 21:14:53 +01:00
Ettore Di Giacinto
117554792d Don't attempt to upgrade packages already in system 2020-03-24 20:30:59 +01:00
Daniele Rondina
d0b7552aca tests: Align test to new args 2020-03-24 20:01:22 +01:00
Daniele Rondina
454e9d934e Use filename instead of name on repo specs 2020-03-24 19:40:11 +01:00
Ettore Di Giacinto
dd91a61caf Merge pull request #74 from mudler/split_download
Split download
2020-03-24 19:02:16 +01:00
Ettore Di Giacinto
bc5d01c3df Add integration test for search of installed packages 2020-03-24 18:19:09 +01:00
Ettore Di Giacinto
af7f1de9f1 Split download and installer worker
Also, drop downloadOnly from install
2020-03-24 18:19:09 +01:00
Ettore Di Giacinto
3f5aa7db22 Merge pull request #73 from mudler/refactor-repository
Refactor repository
2020-03-24 17:44:37 +01:00
Daniele Rondina
a0f9222068 Add check of repository metafile archive in tests 2020-03-24 14:01:22 +01:00
Daniele Rondina
520768d0ca pkg/installer: Align tests with new default 2020-03-24 12:52:45 +01:00
Daniele Rondina
4f002ab40f pkg/installer/repository: The check for repo_files is not needed for parsing normal repo 2020-03-24 12:52:24 +01:00
Daniele Rondina
60635a03eb Fix propagation of repo file name and add test for meta compressed 2020-03-24 00:31:41 +01:00
Daniele Rondina
202ed2651a 💥 Refactor and split repository.yaml file 2020-03-24 00:05:16 +01:00
Daniele Rondina
4e461fd6be cmd/repo/update: Update only enabled repo 2020-03-23 23:40:27 +01:00
Daniele Rondina
6bd8fe6789 Add omitempty to DefaultPackage fields 2020-03-22 22:23:11 +01:00
Ettore Di Giacinto
7cf6d51355 Tweak ginkgo parameters
- Add flakeAttempts
- Add race detection to CI runs
2020-03-22 22:07:46 +01:00
Ettore Di Giacinto
d5166c55ab Lock only on commands that aren't meant to run in parallel 2020-03-22 15:19:08 +01:00
Ettore Di Giacinto
7de5f6656d Merge branch 'develop' 2020-03-22 14:42:51 +01:00
Ettore Di Giacinto
9e62111e1a Add development version 2020-03-22 14:42:35 +01:00
Ettore Di Giacinto
655da7e883 Merge pull request #72 from mudler/speedup_travis
Speedup travis
2020-03-22 14:40:34 +01:00
Ettore Di Giacinto
8dbc266b39 Drop verbosity in ginkgo params 2020-03-22 13:52:07 +01:00
Ettore Di Giacinto
5942e2f20c Drop unneeded dependencies 2020-03-22 11:00:30 +01:00
Ettore Di Giacinto
1a8fb77771 Make test-coverage in one step, deploy release only on tags 2020-03-22 10:55:04 +01:00
4676 changed files with 883455 additions and 285161 deletions

49
.chglog/CHANGELOG.tpl.md Normal file
View File

@@ -0,0 +1,49 @@
{{ if .Versions -}}
{{ if .Unreleased.CommitGroups -}}
{{ range .Unreleased.CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ else }}
{{ range .Unreleased.Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ end -}}
{{ range .Versions }}
<a name="{{ .Tag.Name }}"></a>
{{ if .CommitGroups -}}
{{ range .CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ else }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }} (https://github.com/mudler/luet/commit/{{.Hash.Short}})
{{ end }}
{{ end -}}
{{- if .NoteGroups -}}
{{ range .NoteGroups -}}
### {{ .Title }}
{{ range .Notes }}
{{ .Body }}
{{ end }}
{{ end -}}
{{ end -}}
{{ end -}}
{{- if .Versions }}
[Unreleased]: {{ .Info.RepositoryURL }}/compare/{{ $latest := index .Versions 0 }}{{ $latest.Tag.Name }}...HEAD
{{ range .Versions -}}
{{ if .Tag.Previous -}}
[{{ .Tag.Name }}]: {{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }}
{{ end -}}
{{ end -}}
{{ end -}}

27
.chglog/config.yml Executable file
View File

@@ -0,0 +1,27 @@
style: github
template: CHANGELOG.tpl.md
info:
title: CHANGELOG
repository_url: https://github.com/mudler/luet
options:
commits:
# filters:
# Type:
# - feat
# - fix
# - perf
# - refactor
commit_groups:
title_maps:
feat: Features
fix: Bug Fixes
perf: Performance Improvements
refactor: Code Refactoring
ci: Continuous Integration
header:
pattern: "(.*)"
pattern_maps:
- Subject
notes:
keywords:
- BREAKING CHANGE
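This configuration (style, template, commit_groups) follows the format consumed by the git-chglog changelog generator; assuming that tool is installed, regenerating the changelog is typically a single command run from the repository root:
git-chglog -o CHANGELOG.md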

31
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@@ -0,0 +1,31 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: mudler
---
<!-- Thanks for helping us to improve Luet! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->
**Luet version:**
<!-- Provide the output from "luet --version" -->
**CPU architecture, OS, and Version:**
<!-- Provide the output from "uname -a" -->
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
**To Reproduce**
<!-- Steps to reproduce the behavior, including the luet command used -->
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
**Logs**
<!-- If applicable, add logs with the "--debug" flag enabled to help explain your problem. -->
**Additional context**
<!-- Add any other context about the problem here. -->

View File

@@ -0,0 +1,22 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: mudler
---
<!-- Thanks for helping us to improve Luet! We welcome all feature requests. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->
**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->
**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->
**Describe alternatives you've considered**
<!-- A clear and concise description of any alternative solutions or features you've considered. -->
**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->

67
.github/workflows/image.yml vendored Normal file
View File

@@ -0,0 +1,67 @@
---
name: 'build container images'
on:
push:
branches:
- master
tags:
- '*'
jobs:
docker:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=quay.io/luet/base
VERSION=latest
SHORTREF=${GITHUB_SHA::8}
# If this is git tag, use the tag name as a docker tag
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
fi
TAGS="${DOCKER_IMAGE}:${VERSION},${DOCKER_IMAGE}:${SHORTREF}"
# If the VERSION looks like a version number, assume that
# this is the most recent version of the image and also
# tag it 'latest'.
if [[ $VERSION =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
TAGS="$TAGS,${DOCKER_IMAGE}:latest"
fi
# Set output parameters.
echo ::set-output name=tags::${TAGS}
echo ::set-output name=docker_image::${DOCKER_IMAGE}
- name: Set up QEMU
uses: docker/setup-qemu-action@master
with:
platforms: all
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@master
- name: Login to DockerHub
if: github.event_name != 'pull_request'
uses: docker/login-action@v1
with:
registry: quay.io
username: ${{ secrets.DOCKER_RELEASE_USERNAME }}
password: ${{ secrets.DOCKER_RELEASE_PASSWORD }}
- name: Build
uses: docker/build-push-action@v2
with:
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64,linux/arm
push: true
tags: ${{ steps.prep.outputs.tags }}

77
.github/workflows/pr.yml vendored Normal file
View File

@@ -0,0 +1,77 @@
on: pull_request
name: Build and Test
jobs:
tests-integration-img:
strategy:
matrix:
go-version: [1.16.x]
platform: [ubuntu-latest]
runs-on: ${{ matrix.platform }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Tests with Img backend
run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
tests-integration:
strategy:
matrix:
go-version: [1.16.x]
platform: [ubuntu-latest]
runs-on: ${{ matrix.platform }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
- name: Tests
run: sudo -E env "PATH=$PATH" make test-integration
tests-unit:
strategy:
matrix:
go-version: [1.16.x]
platform: [ubuntu-latest]
runs-on: ${{ matrix.platform }}
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
make build
sudo cp -rf luet /usr/bin/luet
- name: Install GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
install-only: true
- name: Build
run: sudo -E env "PATH=$PATH" make multiarch-build-small
- name: Tests
run: sudo -E env "PATH=$PATH" make test-coverage

87
.github/workflows/push.yml vendored Normal file
View File

@@ -0,0 +1,87 @@
on: push
concurrency:
group: registries-tests
name: Build on push
jobs:
tests-integration-img:
name: Integration tests with img
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Login to quay with img
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Tests with Img backend
run: |
sudo -E env "PATH=$PATH" \
env "LUET_BACKEND=img" \
make test-integration
tests-integration:
name: Integration tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
- name: Tests
run: |
sudo -E \
env "PATH=$PATH" \
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-integration
tests-unit:
name: Unit tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
make build
sudo cp -rf luet /usr/bin/luet
- name: Install GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
install-only: true
- name: Build test
run: sudo -E env "PATH=$PATH" make multiarch-build-small
- name: Tests
run: |
sudo -E \
env "PATH=$PATH" \
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-coverage

111
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,111 @@
on:
push:
tags:
- '*' # only test and release when a tag is pushed
concurrency:
group: registries-tests
name: Test and Release on tag
jobs:
tests-integration-img:
name: Integration tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
- name: Login to quay with img
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Tests with Img backend
run: |
sudo -E env "PATH=$PATH" \
env "LUET_BACKEND=img" \
make test-integration
tests-integration:
name: Integration tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
- name: Tests
run: |
sudo -E \
env "PATH=$PATH" \
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-integration
tests-unit:
name: Unit tests
runs-on: ubuntu-latest
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
- name: setup-docker
uses: docker-practice/actions-setup-docker@0.0.1
- name: Login to quay
run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
- name: Install deps
run: |
sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
sudo chmod a+x "/usr/bin/img"
make build
sudo cp -rf luet /usr/bin/luet
- name: Tests
run: |
sudo -E \
env "PATH=$PATH" \
env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
make test-coverage
release:
name: Release
runs-on: ubuntu-latest
needs: [ "tests-integration-img", "tests-integration","tests-unit" ]
steps:
- name: Install Go
uses: actions/setup-go@v2
with:
go-version: 1.16.x
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v2
with:
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

3
.gitignore vendored
View File

@@ -1,4 +1,5 @@
*.swp
luet
tests/integration/shunit2
tests/integration/bin
tests/integration/bin
release/

43
.goreleaser.yml Normal file
View File

@@ -0,0 +1,43 @@
before:
hooks:
- go mod tidy
dist: release
source:
enabled: true
name_template: '{{ .ProjectName }}-{{ .Tag }}-source'
checksum:
name_template: '{{ .ProjectName }}-{{ .Tag }}-checksums.txt'
builds:
-
env:
- CGO_ENABLED=0
ldflags:
- -s -w
- -X "github.com/mudler/luet/cmd.BuildTime={{ time "2006-01-02 15:04:05 MST" }}"
- -X "github.com/mudler/luet/cmd.BuildCommit={{ .FullCommit }}"
goos:
- linux
goarch:
- amd64
- arm
- arm64
- 386
goarm:
- 6
- 7
archives:
- format: binary # this removes the tar of the archives, leaving the binaries alone
name_template: luet-{{ .Tag }}-{{ .Os }}-{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
- '^Merge pull request'
release:
header: |
Luet is a multi-platform Package Manager based off from containers - it uses Docker (and others) to build packages.
It has zero dependencies and it is well suited for "from scratch" environments.
It can also version entire rootfs and enables delivery of OTA-like updates, making it a perfect fit for the Edge computing era and IoT embedded devices

View File

@@ -1,19 +1,35 @@
dist: bionic
language: go
services:
- docker
go:
- "1.12"
- "1.14"
env:
- "GO15VENDOREXPERIMENT=1"
global:
- "GO15VENDOREXPERIMENT=1"
jobs:
- "DOCKER_BUILDKIT=0"
- "DOCKER_BUILDKIT=1"
before_install:
- make deps
- curl -LO https://storage.googleapis.com/container-diff/latest/container-diff-linux-amd64 && chmod +x container-diff-linux-amd64 && mkdir -p $HOME/bin && export PATH=$PATH:$HOME/bin && mv container-diff-linux-amd64 $HOME/bin/container-diff
- sudo rm -rf /var/lib/apt/lists/*
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) edge"
- sudo apt-get update
- echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json
- export DOCKER_CLI_EXPERIMENTAL=enabled
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
- mkdir -vp ~/.docker/cli-plugins/
- curl --silent -L "https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-amd64" > ~/.docker/cli-plugins/docker-buildx
- chmod a+x ~/.docker/cli-plugins/docker-buildx
- docker buildx version
- sudo -E env "PATH=$PATH" apt-get install -y libcap2-bin
- sudo -E env "PATH=$PATH" make deps
script:
- make multiarch-build test-integration test-coverage
after_success:
- |
if [ -n "$TRAVIS_TAG" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]; then
git config --global user.name "Deployer" && git config --global user.email foo@bar.com
go get github.com/tcnksm/ghr
ghr -u mudler -r luet --replace $TRAVIS_TAG release/
fi
- sudo -E env "PATH=$PATH" make multiarch-build test-integration test-coverage
#after_success:
# - |
# if [ -n "$TRAVIS_TAG" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]; then
# sudo -E env "PATH=$PATH" git config --global user.name "Deployer" && git config --global user.email foo@bar.com
# sudo -E env "PATH=$PATH" go get github.com/tcnksm/ghr
# sudo -E env "PATH=$PATH" ghr -u mudler -r luet --replace $TRAVIS_TAG release/
# fi

53
CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,53 @@
We love your input! We want to make contributing to this project as easy and transparent as possible, whether it's:
- Reporting a bug
- Discussing the current state of the code
- Submitting a fix
- Proposing new features
- Becoming a maintainer
## We Develop with Github
We use github to host code, to track issues and feature requests, as well as accept pull requests.
## Stay in touch
Join us in [slack](https://luet.slack.com/join/shared_invite/enQtOTQxMjcyNDQ0MDUxLWQ5ODVlNTI1MTYzNDRkYzkyYmM1YWE5YjM0NTliNDEzNmQwMTkxNDRhNDIzM2Y5NDBlOTZjZTYxYWQyNDE4YzY#/) and hang out with the community! It will be much easier to get started and do your first steps in contributing to the project.
## All Code Changes Happen Through Pull Requests
Pull requests are the best way to propose changes to the codebase. We actively welcome your pull requests:
1. Fork the repo you want to contribute to and create your branch from `master`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the [documentation](https://github.com/Luet-lab/docs).
4. Ensure the test suite passes.
5. Make sure your code lints.
6. Issue that pull request!
## Any contributions you make will be under the Software License of the repository
In short, when you submit code changes, your submissions are understood to be under the same License that covers the project. Feel free to contact the maintainers if that's a concern.
## Report bugs using Github's [issues](https://github.com/mudler/luet/issues)
We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/mudler/luet/issues/new); it's that easy!
## Write bug reports with detail, background, and sample code
Try to be as descriptive as possible. When opening a new issue you will be prompted to choose between a bug report or a feature request, with a small template to fill in the details. Be specific!
**Great Bug Reports** tend to have:
- A quick summary and/or background
- Steps to reproduce
- Be specific!
- Give sample code if you can.
- What you expected would happen
- What actually happens
- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work)
People *love* thorough bug reports.
## License
By contributing, you agree that your contributions will be licensed under the project Licenses.
## References
This document was adapted from the open-source contribution guidelines from https://gist.github.com/briandk/3d2e8b3ec8daf5a27a62

View File

@@ -1,6 +1,7 @@
FROM golang as builder
RUN apt-get update && apt-get install -y upx
ADD . /luet
RUN cd /luet && make build
RUN cd /luet && make build-small
FROM scratch
ENV LUET_NOLOCK=true

View File

@@ -1,11 +1,11 @@
# go tool nm ./luet | grep Commit
override LDFLAGS += -X "github.com/mudler/luet/cmd.BuildTime=$(shell date -u '+%Y-%m-%d %I:%M:%S %Z')"
override LDFLAGS += -X "github.com/mudler/luet/cmd.BuildCommit=$(shell git rev-parse HEAD)"
NAME ?= luet
PACKAGE_NAME ?= $(NAME)
PACKAGE_CONFLICT ?= $(PACKAGE_NAME)-beta
REVISION := $(shell git rev-parse --short HEAD || echo dev)
VERSION := $(shell git describe --tags || echo $(REVISION))
VERSION := $(shell echo $(VERSION) | sed -e 's/^v//g')
ITTERATION := $(shell date +%s)
BUILD_PLATFORMS ?= -osarch="linux/amd64" -osarch="linux/386" -osarch="linux/arm"
ROOT_DIR:=$(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
.PHONY: all
@@ -19,7 +19,7 @@ fmt:
test:
GO111MODULE=off go get github.com/onsi/ginkgo/ginkgo
GO111MODULE=off go get github.com/onsi/gomega/...
ginkgo -race -r ./...
ginkgo -race -r -flakeAttempts 3 ./...
.PHONY: test-integration
test-integration:
@@ -59,16 +59,17 @@ deps:
.PHONY: build
build:
CGO_ENABLED=0 go build
CGO_ENABLED=0 go build -ldflags '$(LDFLAGS)'
.PHONY: build-small
build-small:
@$(MAKE) LDFLAGS+="-s -w" build
upx --brute -1 $(NAME)
.PHONY: image
image:
docker build --rm -t luet/base .
.PHONY: gox-build
gox-build:
CGO_ENABLED=0 gox $(BUILD_PLATFORMS) -output="release/$(NAME)-$(VERSION)-{{.OS}}-{{.Arch}}"
.PHONY: lint
lint:
golint ./... | grep -v "be unexported"
@@ -83,6 +84,11 @@ test-docker:
--workdir /go/src/github.com/mudler/luet -ti golang:latest \
bash -c "make test"
.PHONY: multiarch-build
multiarch-build:
gox $(BUILD_PLATFORMS) -output="release/$(NAME)-$(VERSION)-{{.OS}}-{{.Arch}}"
goreleaser build --snapshot --rm-dist
multiarch-build-small:
@$(MAKE) multiarch-build
for file in $(ROOT_DIR)/release/**/* ; do \
upx --brute -1 $${file} ; \
done

View File

@@ -1,39 +1,65 @@
<p align="center">
<img width=150 height=150 src="https://user-images.githubusercontent.com/2420543/119691600-0293d700-be4b-11eb-827f-49ff1174a07a.png">
</p>
# luet - Container-based Package manager
[![Go Report Card](https://goreportcard.com/badge/github.com/mudler/luet)](https://goreportcard.com/report/github.com/mudler/luet)
[![Build Status](https://travis-ci.org/mudler/luet.svg?branch=master)](https://travis-ci.org/mudler/luet)
[![Docker Repository on Quay](https://quay.io/repository/luet/base/status "Docker Repository on Quay")](https://quay.io/repository/luet/base)
[![Build and release on push](https://github.com/mudler/luet/actions/workflows/release.yml/badge.svg)](https://github.com/mudler/luet/actions/workflows/release.yml)
[![GoDoc](https://godoc.org/github.com/mudler/luet?status.svg)](https://godoc.org/github.com/mudler/luet)
[![codecov](https://codecov.io/gh/mudler/luet/branch/master/graph/badge.svg)](https://codecov.io/gh/mudler/luet)
Luet is a multi-platform Package Manager based off from containers - it uses Docker (and other tech) to sandbox your builds and generate packages from them. It has zero dependencies and it is well suited for "from scratch" environments. It can also version entire rootfs and enables delivery of OTA-like updates, making it a perfect fit for the Edge computing era and IoT embedded devices.
Luet is a multi-platform Package Manager based off from containers - it uses Docker (and others) to build packages. It has zero dependencies and it is well suited for "from scratch" environments. It can also version entire rootfs and enables delivery of OTA-like updates, making it a perfect fit for the Edge computing era and IoT embedded devices.
It offers a simple [specfile format](https://luet-lab.github.io/docs/docs/concepts/specfile/) in YAML notation to define both packages and rootfs. As it is based on containers, it can be used to build seed stages for Linux From Scratch installations and it can build and track updates for those systems.
It offers a simple [specfile format](https://luet-lab.github.io/docs/docs/concepts/packages/specfile/) in YAML notation to define both [packages](https://luet-lab.github.io/docs/docs/concepts/packages/) and [rootfs](https://luet-lab.github.io/docs/docs/concepts/packages/#package-layers). As it is based on containers, it can be also used to build stages for Linux From Scratch installations and it can build and track updates for those systems.
It is written entirely in Golang and, when used as a package manager, it can run in a from-scratch environment with zero dependencies.
[![asciicast](https://asciinema.org/a/388348.svg)](https://asciinema.org/a/388348)
## In a glance
- Luet can reuse Gentoo's portage tree hierarchy, and it is heavily inspired by it.
- It builds, installs, uninstalls and performs upgrades on machines
- It builds from containers, but installs, uninstalls and performs upgrades on machines
- Installer doesn't depend on anything ( 0 dep installer !), statically built
- Support for packages as "layers"
- It uses SAT solving techniques to solve the deptree ( Inspired by [OPIUM](https://ranjitjhala.github.io/static/opium.pdf) )
- You can also install it alongside your current distro's package manager, and start building and distributing your packages
- [Support for packages as "layers"](https://luet-lab.github.io/docs/docs/concepts/packages/specfile/#building-strategies)
- [It uses SAT solving techniques to solve the deptree](https://luet-lab.github.io/docs/docs/concepts/overview/constraints/) ( Inspired by [OPIUM](https://ranjitjhala.github.io/static/opium.pdf) )
- Support for [collections](https://luet-lab.github.io/docs/docs/concepts/packages/collections/) and [templated package definitions](https://luet-lab.github.io/docs/docs/concepts/packages/templates/)
- [Can be extended with Plugins and Extensions](https://luet-lab.github.io/docs/docs/concepts/plugins-and-extensions/)
- [Can build packages in Kubernetes (experimental)](https://github.com/mudler/luet-k8s)
- Uses containerd/go-containerregistry to manipulate images - works also daemonless with the img backend
## Install
To install luet, you can grab a release on the [Release page](https://github.com/mudler/luet/releases) or compile it in your machine (requires Golang installed):
To install luet, you can grab a release on the [Release page](https://github.com/mudler/luet/releases) or install it on your system with:
$ git clone https://github.com/mudler/luet.git
$ cd luet
$ make build
```bash
$ curl https://get.mocaccino.org/luet/get_luet_root.sh | sudo sh
$ luet search ...
$ luet install ..
$ luet --help
```
## Status
## Build from source
Luet is not feature-complete yet, it can build, install/uninstall/upgrade packages - but it doesn't support yet all the features you would normally expect from a Package Manager nowadays.
```bash
$ git clone https://github.com/mudler/luet.git
$ cd luet
$ make build
```
## Documentation
[Documentation](https://luet-lab.github.io/docs) is available, or
run `luet --help`, any subcommand is documented as well, try e.g.: `luet build --help`.
# Dependency solving
Luet uses a SAT solver and a Reinforcement Learning engine for dependency solving.
It encodes the package requirements into a SAT problem, using gophersat to solve the dependency tree and give a concrete model as result.
It encodes the package requirements into a SAT problem, using [gophersat](https://github.com/crillab/gophersat) to solve the dependency tree and give a concrete model as result.
## SAT encoding
@@ -47,10 +73,6 @@ when they arises while trying to validate your queries against the system model.
To leverage it, simply pass ```--solver-type qlearning``` to the subcommands that supports it ( you can check out by invoking ```--help``` ).
## Documentation
[Documentation](https://luet-lab.github.io/docs) is available, or
run `luet --help`, any subcommand is documented as well, try e.g.: `luet build --help`.
## Authors
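As a usage note for the README above: the QLearning solver is opt-in per command, so an invocation would look like the following (the package name is a placeholder, and only subcommands that expose the flag accept it):
luet install --solver-type qlearning category/package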

36
cmd/box.go Normal file
View File

@@ -0,0 +1,36 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
. "github.com/mudler/luet/cmd/box"
"github.com/spf13/cobra"
)
var boxGroupCmd = &cobra.Command{
Use: "box [command] [OPTIONS]",
Short: "Manage luet boxes",
}
func init() {
RootCmd.AddCommand(boxGroupCmd)
boxGroupCmd.AddCommand(
NewBoxExecCommand(),
)
}

81
cmd/box/exec.go Normal file
View File

@@ -0,0 +1,81 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd_box
import (
"os"
b64 "encoding/base64"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/box"
"github.com/spf13/cobra"
)
func NewBoxExecCommand() *cobra.Command {
var ans = &cobra.Command{
Use: "exec [OPTIONS]",
Short: "Execute a binary in a box",
Args: cobra.OnlyValidArgs,
PreRun: func(cmd *cobra.Command, args []string) {
},
Run: func(cmd *cobra.Command, args []string) {
stdin, _ := cmd.Flags().GetBool("stdin")
stdout, _ := cmd.Flags().GetBool("stdout")
stderr, _ := cmd.Flags().GetBool("stderr")
rootfs, _ := cmd.Flags().GetString("rootfs")
base, _ := cmd.Flags().GetBool("decode")
entrypoint, _ := cmd.Flags().GetString("entrypoint")
envs, _ := cmd.Flags().GetStringArray("env")
mounts, _ := cmd.Flags().GetStringArray("mount")
if base {
var ss []string
for _, a := range args {
sDec, _ := b64.StdEncoding.DecodeString(a)
ss = append(ss, string(sDec))
}
// If the command to run is complex, use base64 to avoid mangled input
args = ss
}
util.DefaultContext.Info("Executing", args, "in", rootfs)
b := box.NewBox(entrypoint, args, mounts, envs, rootfs, stdin, stdout, stderr)
err := b.Run()
if err != nil {
util.DefaultContext.Fatal(err)
}
},
}
path, err := os.Getwd()
if err != nil {
util.DefaultContext.Fatal(err)
}
ans.Flags().String("rootfs", path, "Rootfs path")
ans.Flags().Bool("stdin", false, "Attach to stdin")
ans.Flags().Bool("stdout", true, "Attach to stdout")
ans.Flags().Bool("stderr", true, "Attach to stderr")
ans.Flags().Bool("decode", false, "Base64 decode")
ans.Flags().StringArrayP("env", "e", []string{}, "Environment settings")
ans.Flags().StringArrayP("mount", "m", []string{}, "List of paths to bind-mount from the host")
ans.Flags().String("entrypoint", "/bin/sh", "Entrypoint command (/bin/sh)")
return ans
}
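Given the flags registered above, a plausible invocation of the new subcommand looks like the following (the rootfs path and command are placeholders, and exact argument handling may differ; with --decode the trailing arguments would instead be passed base64-encoded):
luet box exec --rootfs /tmp/myrootfs --entrypoint /bin/sh -e FOO=bar -m /dev -- -c "ls -la /"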

View File

@@ -15,15 +15,23 @@
package cmd
import (
"io/ioutil"
"fmt"
"os"
"path/filepath"
"github.com/ghodss/yaml"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types/artifact"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/backend"
. "github.com/mudler/luet/pkg/config"
helpers "github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options"
fileHelpers "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
tree "github.com/mudler/luet/pkg/tree"
"github.com/spf13/cobra"
@@ -33,123 +41,180 @@ import (
var buildCmd = &cobra.Command{
Use: "build <package name> <package name> <package name> ...",
Short: "build a package or a tree",
Long: `build packages or trees from luet tree definitions. Packages are in [category]/[name]-[version] form`,
PreRun: func(cmd *cobra.Command, args []string) {
viper.BindPFlag("clean", cmd.Flags().Lookup("clean"))
Long: `Builds one or more packages from a tree (current directory is implied):
$ luet build utils/busybox utils/yq ...
Builds all packages
$ luet build --all
Builds only the leaf packages:
$ luet build --full
Build package revdeps:
$ luet build --revdeps utils/yq
Build a package without dependencies (the images must already be on the host, or be available online):
$ luet build --nodeps utils/yq ...
Build packages specifying multiple definition trees:
$ luet build --tree overlay/path --tree overlay/path2 utils/yq ...
`, PreRun: func(cmd *cobra.Command, args []string) {
viper.BindPFlag("tree", cmd.Flags().Lookup("tree"))
viper.BindPFlag("destination", cmd.Flags().Lookup("destination"))
viper.BindPFlag("backend", cmd.Flags().Lookup("backend"))
viper.BindPFlag("privileged", cmd.Flags().Lookup("privileged"))
viper.BindPFlag("database", cmd.Flags().Lookup("database"))
viper.BindPFlag("revdeps", cmd.Flags().Lookup("revdeps"))
viper.BindPFlag("all", cmd.Flags().Lookup("all"))
viper.BindPFlag("compression", cmd.Flags().Lookup("compression"))
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
util.BindValuesFlags(cmd)
viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))
viper.BindPFlag("image-repository", cmd.Flags().Lookup("image-repository"))
viper.BindPFlag("push", cmd.Flags().Lookup("push"))
viper.BindPFlag("pull", cmd.Flags().Lookup("pull"))
viper.BindPFlag("wait", cmd.Flags().Lookup("wait"))
viper.BindPFlag("keep-images", cmd.Flags().Lookup("keep-images"))
LuetCfg.Viper.BindPFlag("keep-exported-images", cmd.Flags().Lookup("keep-exported-images"))
util.BindSolverFlags(cmd)
viper.BindPFlag("general.show_build_output", cmd.Flags().Lookup("live-output"))
viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
},
Run: func(cmd *cobra.Command, args []string) {
clean := viper.GetBool("clean")
src := viper.GetString("tree")
treePaths := viper.GetStringSlice("tree")
dst := viper.GetString("destination")
concurrency := LuetCfg.GetGeneral().Concurrency
concurrency := util.DefaultContext.Config.GetGeneral().Concurrency
backendType := viper.GetString("backend")
privileged := viper.GetBool("privileged")
revdeps := viper.GetBool("revdeps")
all := viper.GetBool("all")
databaseType := viper.GetString("database")
compressionType := viper.GetString("compression")
imageRepository := viper.GetString("image-repository")
values := util.ValuesFlags()
wait := viper.GetBool("wait")
push := viper.GetBool("push")
pull := viper.GetBool("pull")
keepImages := viper.GetBool("keep-images")
nodeps := viper.GetBool("nodeps")
onlydeps := viper.GetBool("onlydeps")
keepExportedImages := viper.GetBool("keep-exported-images")
onlyTarget, _ := cmd.Flags().GetBool("only-target-package")
full, _ := cmd.Flags().GetBool("full")
rebuild, _ := cmd.Flags().GetBool("rebuild")
pushFinalImages, _ := cmd.Flags().GetBool("push-final-images")
pushFinalImagesRepository, _ := cmd.Flags().GetString("push-final-images-repository")
pushFinalImagesForce, _ := cmd.Flags().GetBool("push-final-images-force")
compilerSpecs := compiler.NewLuetCompilationspecs()
var compilerBackend compiler.CompilerBackend
var results Results
backendArgs := viper.GetStringSlice("backend-args")
out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
util.DefaultContext.Config.GetLogging().SetLogLevel("error")
}
pretend, _ := cmd.Flags().GetBool("pretend")
fromRepo, _ := cmd.Flags().GetBool("from-repositories")
compilerSpecs := compilerspec.NewLuetCompilationspecs()
var db pkg.PackageDatabase
switch backendType {
case "img":
compilerBackend = backend.NewSimpleImgBackend()
case "docker":
compilerBackend = backend.NewSimpleDockerBackend()
}
switch databaseType {
case "memory":
db = pkg.NewInMemoryDatabase(false)
compilerBackend, err := compiler.NewBackend(util.DefaultContext, backendType)
helpers.CheckErr(err)
case "boltdb":
tmpdir, err := ioutil.TempDir("", "package")
if err != nil {
Fatal(err)
}
db = pkg.NewBoltDatabase(tmpdir)
}
db = pkg.NewInMemoryDatabase(false)
defer db.Clean()
generalRecipe := tree.NewCompilerRecipe(db)
Info("Loading", src)
Info("Building in", dst)
err := generalRecipe.Load(src)
if err != nil {
Fatal("Error: " + err.Error())
if fromRepo {
if err := installer.LoadBuildTree(generalRecipe, db, util.DefaultContext); err != nil {
util.DefaultContext.Warning("errors while loading trees from repositories", err.Error())
}
}
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
for _, src := range treePaths {
util.DefaultContext.Info("Loading tree", src)
helpers.CheckErr(generalRecipe.Load(src))
}
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
util.DefaultContext.Info("Building in", dst)
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
if !fileHelpers.Exists(dst) {
os.MkdirAll(dst, 0600)
util.DefaultContext.Debug("Creating destination folder", dst)
}
opts := compiler.NewDefaultCompilerOptions()
opts.SolverOptions = *LuetCfg.GetSolverOptions()
opts.ImageRepository = imageRepository
opts.Clean = clean
opts.PullFirst = pull
opts.KeepImg = keepImages
opts.Push = push
opts.OnlyDeps = onlydeps
opts.NoDeps = nodeps
opts.KeepImageExport = keepExportedImages
opts := util.SetSolverConfig(util.DefaultContext)
pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")
luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(), opts)
luetCompiler.SetConcurrency(concurrency)
luetCompiler.SetCompressionType(compiler.CompressionImplementation(compressionType))
if !all {
util.DefaultContext.Config.GetGeneral().ShowBuildOutput = viper.GetBool("general.show_build_output")
util.DefaultContext.Debug("Solver", opts.CompactString())
opts.Options = solver.Options{Type: solver.SingleCoreSimple, Concurrency: concurrency}
compileropts := []options.Option{options.NoDeps(nodeps),
options.WithBackendType(backendType),
options.PushImages(push),
options.WithBuildValues(values),
options.WithPullRepositories(pullRepo),
options.WithPushRepository(imageRepository),
options.Rebuild(rebuild),
options.WithTemplateFolder(util.TemplateFolders(util.DefaultContext, fromRepo, treePaths)),
options.WithSolverOptions(*opts),
options.Wait(wait),
options.OnlyTarget(onlyTarget),
options.PullFirst(pull),
options.KeepImg(keepImages),
options.OnlyDeps(onlydeps),
options.WithContext(util.DefaultContext),
options.BackendArgs(backendArgs),
options.Concurrency(concurrency),
options.WithCompressionType(compression.Implementation(compressionType))}
if pushFinalImages {
compileropts = append(compileropts, options.EnablePushFinalImages)
if pushFinalImagesForce {
compileropts = append(compileropts, options.ForcePushFinalImages)
}
if pushFinalImagesRepository != "" {
compileropts = append(compileropts, options.WithFinalRepository(pushFinalImagesRepository))
} else if imageRepository != "" {
compileropts = append(compileropts, options.WithFinalRepository(imageRepository))
}
}
luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(), compileropts...)
if full {
specs, err := luetCompiler.FromDatabase(generalRecipe.GetDatabase(), true, dst)
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
for _, spec := range specs {
util.DefaultContext.Info(":package: Selecting ", spec.GetPackage().GetName(), spec.GetPackage().GetVersion())
compilerSpecs.Add(spec)
}
} else if !all {
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
spec, err := luetCompiler.FromPackage(pack)
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
spec.SetOutputPath(dst)
@@ -161,31 +226,82 @@ var buildCmd = &cobra.Command{
for _, p := range w {
spec, err := luetCompiler.FromPackage(p)
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
Info(":package: Selecting ", p.GetName(), p.GetVersion())
util.DefaultContext.Info(":package: Selecting ", p.GetName(), p.GetVersion())
spec.SetOutputPath(dst)
compilerSpecs.Add(spec)
}
}
var artifact []compiler.Artifact
var artifact []*artifact.PackageArtifact
var errs []error
if revdeps {
artifact, errs = luetCompiler.CompileWithReverseDeps(privileged, compilerSpecs)
} else {
artifact, errs = luetCompiler.CompileParallel(privileged, compilerSpecs)
} else if pretend {
toCalculate := []*compilerspec.LuetCompilationSpec{}
if full {
var err error
toCalculate, err = luetCompiler.ComputeMinimumCompilableSet(compilerSpecs.All()...)
if err != nil {
errs = append(errs, err)
}
} else {
toCalculate = compilerSpecs.All()
}
for _, sp := range toCalculate {
ht := compiler.NewHashTree(generalRecipe.GetDatabase())
hashTree, err := ht.Query(luetCompiler, sp)
if err != nil {
errs = append(errs, err)
}
for _, p := range hashTree.Dependencies {
results.Packages = append(results.Packages,
PackageResult{
Name: p.Package.GetName(),
Version: p.Package.GetVersion(),
Category: p.Package.GetCategory(),
Repository: "",
Hidden: p.Package.IsHidden(),
Target: sp.GetPackage().HumanReadableString(),
})
}
}
y, err := yaml.Marshal(results)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
switch out {
case "yaml":
fmt.Println(string(y))
case "json":
j2, err := yaml.YAMLToJSON(y)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(j2))
case "terminal":
for _, p := range results.Packages {
util.DefaultContext.Info(p.String())
}
}
} else {
artifact, errs = luetCompiler.CompileParallel(privileged, compilerSpecs)
}
if len(errs) != 0 {
for _, e := range errs {
Error("Error: " + e.Error())
util.DefaultContext.Error("Error: " + e.Error())
}
Fatal("Bailing out")
util.DefaultContext.Fatal("Bailing out")
}
for _, a := range artifact {
Info("Artifact generated:", a.GetPath())
util.DefaultContext.Info("Artifact generated:", a.Path)
}
},
}
@@ -193,29 +309,45 @@ var buildCmd = &cobra.Command{
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
buildCmd.Flags().Bool("clean", true, "Build all packages without considering the packages present in the build directory")
buildCmd.Flags().String("tree", path, "Source luet tree")
buildCmd.Flags().StringSliceP("tree", "t", []string{path}, "Path of the tree to use.")
buildCmd.Flags().String("backend", "docker", "backend used (docker,img)")
buildCmd.Flags().Bool("privileged", false, "Privileged (Keep permissions)")
buildCmd.Flags().String("database", "memory", "database used for solving (memory,boltdb)")
buildCmd.Flags().Bool("privileged", true, "Privileged (Keep permissions)")
buildCmd.Flags().Bool("revdeps", false, "Build with revdeps")
buildCmd.Flags().Bool("all", false, "Build all packages in the tree")
buildCmd.Flags().String("destination", path, "Destination folder")
buildCmd.Flags().String("compression", "none", "Compression alg: none, gzip")
buildCmd.Flags().Bool("all", false, "Build all specfiles in the tree")
buildCmd.Flags().Bool("push-final-images", false, "Push final images while building")
buildCmd.Flags().Bool("push-final-images-force", false, "Override existing images")
buildCmd.Flags().String("push-final-images-repository", "", "Repository where to push final images to")
buildCmd.Flags().Bool("full", false, "Build all packages (optimized)")
buildCmd.Flags().StringSlice("values", []string{}, "Build values file to interpolate with each package")
buildCmd.Flags().StringSliceP("backend-args", "a", []string{}, "Backend args")
buildCmd.Flags().String("destination", filepath.Join(path, "build"), "Destination folder")
buildCmd.Flags().String("compression", "none", "Compression alg: none, gzip, zstd")
buildCmd.Flags().String("image-repository", "luet/cache", "Default base image string for generated image")
buildCmd.Flags().Bool("push", false, "Push images to a hub")
buildCmd.Flags().Bool("pull", false, "Pull images from a hub")
buildCmd.Flags().Bool("wait", false, "Don't build all intermediate images, but wait for them until they are available")
buildCmd.Flags().Bool("keep-images", true, "Keep built docker images in the host")
buildCmd.Flags().Bool("nodeps", false, "Build only the target packages, skipping deps (it works only if you already built the deps locally, or by using --pull) ")
buildCmd.Flags().Bool("onlydeps", false, "Build only package dependencies")
buildCmd.Flags().Bool("keep-exported-images", false, "Keep exported images used during building")
buildCmd.Flags().Bool("only-target-package", false, "Build packages of only the required target. Otherwise builds all the necessary ones not present in the destination")
buildCmd.Flags().String("solver-type", "", "Solver strategy")
buildCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
buildCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
buildCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
buildCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
buildCmd.Flags().Bool("live-output", util.DefaultContext.Config.GetGeneral().ShowBuildOutput, "Enable live output of the build phase.")
buildCmd.Flags().Bool("from-repositories", false, "Consume the user-defined repositories to pull specfiles from")
buildCmd.Flags().Bool("rebuild", false, "To combine with --pull. Allows to rebuild the target package even if an image is available, against a local values file")
buildCmd.Flags().Bool("pretend", false, "Just print what packages will be compiled")
buildCmd.Flags().StringArrayP("pull-repository", "p", []string{}, "A list of repositories to pull the cache from")
buildCmd.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
RootCmd.AddCommand(buildCmd)
}
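Putting the new flags together, a dry run that only prints what would be compiled, as machine-readable output, might look like this (tree path and package name are placeholders taken from the examples above):
luet build --tree ./packages --pretend -o json utils/yq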

View File

@@ -16,13 +16,13 @@
package cmd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
config "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/cmd/util"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
"github.com/spf13/cobra"
)
@@ -31,40 +31,41 @@ var cleanupCmd = &cobra.Command{
Use: "cleanup",
Short: "Clean packages cache.",
Long: `remove downloaded packages tarballs and clean cache directory`,
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
},
Run: func(cmd *cobra.Command, args []string) {
var cleaned int = 0
util.SetSystemConfig(util.DefaultContext)
// Check if cache dir exists
if helpers.Exists(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath()) {
if fileHelper.Exists(util.DefaultContext.Config.GetSystem().GetSystemPkgsCacheDirPath()) {
files, err := ioutil.ReadDir(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath())
files, err := ioutil.ReadDir(util.DefaultContext.Config.GetSystem().GetSystemPkgsCacheDirPath())
if err != nil {
Fatal("Error on read cachedir ", err.Error())
util.DefaultContext.Fatal("Error on read cachedir ", err.Error())
}
for _, file := range files {
if file.IsDir() {
continue
}
if config.LuetCfg.GetGeneral().Debug {
Info("Removing ", file.Name())
}
util.DefaultContext.Debug("Removing ", file.Name())
err := os.RemoveAll(
filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), file.Name()))
filepath.Join(util.DefaultContext.Config.GetSystem().GetSystemPkgsCacheDirPath(), file.Name()))
if err != nil {
Fatal("Error on removing", file.Name())
util.DefaultContext.Fatal("Error on removing", file.Name())
}
cleaned++
}
}
Info("Cleaned: ", cleaned, "packages.")
util.DefaultContext.Info(fmt.Sprintf("Cleaned: %d files from %s", cleaned, util.DefaultContext.Config.GetSystem().GetSystemPkgsCacheDirPath()))
},
}
func init() {
cleanupCmd.Flags().String("system-dbpath", "", "System db path")
cleanupCmd.Flags().String("system-target", "", "System rootpath")
cleanupCmd.Flags().String("system-engine", "", "System DB engine")
RootCmd.AddCommand(cleanupCmd)
}

View File

@@ -18,39 +18,23 @@ package cmd
import (
"fmt"
config "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/cmd/util"
"github.com/spf13/cobra"
)
var configCmd = &cobra.Command{
Use: "config",
Short: "Print config",
Long: `Show luet configuration`,
Use: "config",
Short: "Print config",
Long: `Show luet configuration`,
Aliases: []string{"c"},
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(config.LuetCfg.GetLogging())
fmt.Println(config.LuetCfg.GetGeneral())
fmt.Println(config.LuetCfg.GetSystem())
if len(config.LuetCfg.CacheRepositories) > 0 {
fmt.Println("repetitors:")
for _, r := range config.LuetCfg.CacheRepositories {
fmt.Println(" - ", r.String())
}
}
if len(config.LuetCfg.SystemRepositories) > 0 {
fmt.Println("repositories:")
for _, r := range config.LuetCfg.SystemRepositories {
fmt.Println(" - ", r.String())
}
}
if len(config.LuetCfg.RepositoriesConfDir) > 0 {
fmt.Println("repos_confdir:")
for _, dir := range config.LuetCfg.RepositoriesConfDir {
fmt.Println(" - ", dir)
}
data, err := util.DefaultContext.Config.YAML()
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
fmt.Println(string(data))
},
}

View File

@@ -1,101 +0,0 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"io/ioutil"
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
tree "github.com/mudler/luet/pkg/tree"
"github.com/mudler/luet/pkg/tree/builder/gentoo"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var convertCmd = &cobra.Command{
Use: "convert",
Short: "convert other package manager tree into luet",
Long: `Parses external PM and produces a luet parsable tree`,
PreRun: func(cmd *cobra.Command, args []string) {
viper.BindPFlag("type", cmd.Flags().Lookup("type"))
viper.BindPFlag("database", cmd.Flags().Lookup("database"))
},
Run: func(cmd *cobra.Command, args []string) {
t := viper.GetString("type")
databaseType := viper.GetString("database")
var db pkg.PackageDatabase
if len(args) != 2 {
Fatal("Incorrect number of arguments")
}
input := args[0]
output := args[1]
Info("Converting trees from " + input + " [" + t + "]")
var builder tree.Parser
switch t {
case "gentoo":
builder = gentoo.NewGentooBuilder(
&gentoo.SimpleEbuildParser{},
LuetCfg.GetGeneral().Concurrency,
gentoo.InMemory)
default: // dup
builder = gentoo.NewGentooBuilder(
&gentoo.SimpleEbuildParser{},
LuetCfg.GetGeneral().Concurrency,
gentoo.InMemory)
}
switch databaseType {
case "memory":
db = pkg.NewInMemoryDatabase(false)
case "boltdb":
tmpdir, err := ioutil.TempDir("", "package")
if err != nil {
Fatal(err)
}
db = pkg.NewBoltDatabase(tmpdir)
}
defer db.Clean()
packageTree, err := builder.Generate(input)
if err != nil {
Fatal("Error: " + err.Error())
}
defer packageTree.Clean()
Info("Tree generated")
generalRecipe := tree.NewGeneralRecipe(packageTree)
Info("Saving generated tree to " + output)
err = generalRecipe.Save(output)
if err != nil {
Fatal("Error: " + err.Error())
}
},
}
func init() {
convertCmd.Flags().String("type", "gentoo", "source type")
convertCmd.Flags().String("database", "memory", "database used for solving (memory,boltdb)")
RootCmd.AddCommand(convertCmd)
}

View File

@@ -16,11 +16,15 @@ package cmd
import (
"os"
"path/filepath"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/compiler"
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/compiler/types/compression"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
// . "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
"github.com/spf13/cobra"
@@ -30,100 +34,169 @@ import (
var createrepoCmd = &cobra.Command{
Use: "create-repo",
Short: "Create a luet repository from a build",
Long: `Generate and renew repository metadata`,
Long: `Builds tree metadata from a set of packages and a tree definition:
$ luet create-repo
Provide specific paths for packages, tree, and metadata output which is generated:
$ luet create-repo --packages my/packages/path --tree my/tree/path --output my/packages/path ...
Provide name and description of the repository:
$ luet create-repo --name "foo" --description "bar" ...
Change compression method:
$ luet create-repo --tree-compression gzip --meta-compression gzip
Create a repository from the metadata description defined in the luet.yaml config file:
$ luet create-repo --repo repository1
`,
PreRun: func(cmd *cobra.Command, args []string) {
viper.BindPFlag("packages", cmd.Flags().Lookup("packages"))
viper.BindPFlag("tree", cmd.Flags().Lookup("tree"))
viper.BindPFlag("output", cmd.Flags().Lookup("output"))
viper.BindPFlag("backend", cmd.Flags().Lookup("backend"))
viper.BindPFlag("name", cmd.Flags().Lookup("name"))
viper.BindPFlag("descr", cmd.Flags().Lookup("descr"))
viper.BindPFlag("urls", cmd.Flags().Lookup("urls"))
viper.BindPFlag("type", cmd.Flags().Lookup("type"))
viper.BindPFlag("tree-compression", cmd.Flags().Lookup("tree-compression"))
viper.BindPFlag("tree-path", cmd.Flags().Lookup("tree-path"))
viper.BindPFlag("tree-filename", cmd.Flags().Lookup("tree-filename"))
viper.BindPFlag("meta-compression", cmd.Flags().Lookup("meta-compression"))
viper.BindPFlag("meta-filename", cmd.Flags().Lookup("meta-filename"))
viper.BindPFlag("reset-revision", cmd.Flags().Lookup("reset-revision"))
viper.BindPFlag("repo", cmd.Flags().Lookup("repo"))
viper.BindPFlag("from-metadata", cmd.Flags().Lookup("from-metadata"))
viper.BindPFlag("force-push", cmd.Flags().Lookup("force-push"))
viper.BindPFlag("push-images", cmd.Flags().Lookup("push-images"))
},
Run: func(cmd *cobra.Command, args []string) {
var err error
var repo installer.Repository
var repo *installer.LuetSystemRepository
tree := viper.GetString("tree")
treePaths := viper.GetStringSlice("tree")
dst := viper.GetString("output")
packages := viper.GetString("packages")
name := viper.GetString("name")
descr := viper.GetString("descr")
urls := viper.GetStringSlice("urls")
t := viper.GetString("type")
reset := viper.GetBool("reset-revision")
treetype := viper.GetString("tree-compression")
treepath := viper.GetString("tree-path")
treeName := viper.GetString("tree-filename")
metatype := viper.GetString("meta-compression")
metaName := viper.GetString("meta-filename")
source_repo := viper.GetString("repo")
backendType := viper.GetString("backend")
fromRepo, _ := cmd.Flags().GetBool("from-repositories")
treeFile := installer.NewDefaultTreeRepositoryFile()
metaFile := installer.NewDefaultMetaRepositoryFile()
compilerBackend, err := compiler.NewBackend(util.DefaultContext, backendType)
helpers.CheckErr(err)
force := viper.GetBool("force-push")
imagePush := viper.GetBool("push-images")
opts := []installer.RepositoryOption{
installer.WithSource(viper.GetString("packages")),
installer.WithPushImages(imagePush),
installer.WithForce(force),
installer.FromRepository(fromRepo),
installer.WithImagePrefix(dst),
installer.WithDatabase(pkg.NewInMemoryDatabase(false)),
installer.WithCompilerBackend(compilerBackend),
installer.FromMetadata(viper.GetBool("from-metadata")),
installer.WithContext(util.DefaultContext),
}
if source_repo != "" {
// Search for system repository
lrepo, err := LuetCfg.GetSystemRepository(source_repo)
if err != nil {
Fatal("Error: " + err.Error())
}
lrepo, err := util.DefaultContext.Config.GetSystemRepository(source_repo)
helpers.CheckErr(err)
if tree == "" {
tree = lrepo.TreePath
if len(treePaths) <= 0 {
treePaths = []string{lrepo.TreePath}
}
if t == "" {
t = lrepo.Type
}
repo, err = installer.GenerateRepository(lrepo.Name,
lrepo.Description, t,
lrepo.Urls,
lrepo.Priority,
packages,
tree,
pkg.NewInMemoryDatabase(false))
opts = append(opts,
installer.WithName(lrepo.Name),
installer.WithDescription(lrepo.Description),
installer.WithType(t),
installer.WithUrls(lrepo.Urls...),
installer.WithPriority(lrepo.Priority),
installer.WithTree(treePaths...),
)
} else {
repo, err = installer.GenerateRepository(name, descr, t, urls, 1, packages,
tree, pkg.NewInMemoryDatabase(false))
opts = append(opts,
installer.WithName(name),
installer.WithDescription(descr),
installer.WithType(t),
installer.WithUrls(urls...),
installer.WithTree(treePaths...),
)
}
if err != nil {
Fatal("Error: " + err.Error())
}
repo, err = installer.GenerateRepository(opts...)
helpers.CheckErr(err)
if treetype != "" {
repo.SetTreeCompressionType(compiler.CompressionImplementation(treetype))
treeFile.SetCompressionType(compression.Implementation(treetype))
}
if treepath != "" {
repo.SetTreePath(treepath)
if treeName != "" {
treeFile.SetFileName(treeName)
}
err = repo.Write(dst, reset)
if err != nil {
Fatal("Error: " + err.Error())
if metatype != "" {
metaFile.SetCompressionType(compression.Implementation(metatype))
}
if metaName != "" {
metaFile.SetFileName(metaName)
}
repo.SetRepositoryFile(installer.REPOFILE_TREE_KEY, treeFile)
repo.SetRepositoryFile(installer.REPOFILE_META_KEY, metaFile)
err = repo.Write(util.DefaultContext, dst, reset, true)
helpers.CheckErr(err)
},
}
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
createrepoCmd.Flags().String("packages", path, "Packages folder (output from build)")
createrepoCmd.Flags().String("tree", path, "Source luet tree")
createrepoCmd.Flags().String("output", path, "Destination folder")
helpers.CheckErr(err)
createrepoCmd.Flags().String("packages", filepath.Join(path, "build"), "Packages folder (output from build)")
createrepoCmd.Flags().StringSliceP("tree", "t", []string{path}, "Path of the source trees to use.")
createrepoCmd.Flags().String("output", filepath.Join(path, "build"), "Destination for generated archives. With 'docker' repository type, it should be an image reference (e.g 'foo/bar')")
createrepoCmd.Flags().String("name", "luet", "Repository name")
createrepoCmd.Flags().String("descr", "luet", "Repository description")
createrepoCmd.Flags().StringSlice("urls", []string{}, "Repository URLs")
createrepoCmd.Flags().String("type", "disk", "Repository type (disk)")
createrepoCmd.Flags().String("type", "disk", "Repository type (disk, http, docker)")
createrepoCmd.Flags().Bool("reset-revision", false, "Reset repository revision.")
createrepoCmd.Flags().String("repo", "", "Use repository defined in configuration.")
createrepoCmd.Flags().String("backend", "docker", "backend used (docker,img)")
createrepoCmd.Flags().String("tree-compression", "none", "Compression alg: none, gzip")
createrepoCmd.Flags().String("tree-path", installer.TREE_TARBALL, "Repository tree filename")
createrepoCmd.Flags().Bool("force-push", false, "Force overwrite of docker images if already present online")
createrepoCmd.Flags().Bool("push-images", false, "Enable/Disable docker image push for docker repositories")
createrepoCmd.Flags().Bool("from-metadata", false, "Consider metadata files from the packages folder while indexing the new tree")
createrepoCmd.Flags().String("tree-compression", "gzip", "Compression alg: none, gzip, zstd")
createrepoCmd.Flags().String("tree-filename", installer.TREE_TARBALL, "Repository tree filename")
createrepoCmd.Flags().String("meta-compression", "none", "Compression alg: none, gzip, zstd")
createrepoCmd.Flags().String("meta-filename", installer.REPOSITORY_METAFILE+".tar", "Repository metadata filename")
createrepoCmd.Flags().Bool("from-repositories", false, "Consume the user-defined repositories to pull specfiles from")
RootCmd.AddCommand(createrepoCmd)
}
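
The rewrite above replaces the old positional GenerateRepository call with a list of installer.RepositoryOption values applied by installer.GenerateRepository(opts...). For readers unfamiliar with the functional-options pattern, here is a tiny self-contained sketch of the idea; the types and option names below are invented purely for illustration and do not mirror luet's internals.

package main

import "fmt"

// repo and option are illustrative stand-ins, not luet types.
type repo struct {
	name, repoType string
	urls           []string
	priority       int
}

type option func(*repo)

func withName(n string) option    { return func(r *repo) { r.name = n } }
func withType(t string) option    { return func(r *repo) { r.repoType = t } }
func withUrls(u ...string) option { return func(r *repo) { r.urls = append(r.urls, u...) } }

func generateRepository(opts ...option) *repo {
	// Defaults first, then every option mutates the struct in order,
	// which is why the command can append options conditionally.
	r := &repo{repoType: "disk", priority: 1}
	for _, o := range opts {
		o(r)
	}
	return r
}

func main() {
	r := generateRepository(withName("foo"), withType("docker"), withUrls("quay.io/org/repo"))
	fmt.Printf("%+v\n", r)
}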

42
cmd/database.go Normal file
View File

@@ -0,0 +1,42 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
. "github.com/mudler/luet/cmd/database"
"github.com/spf13/cobra"
)
var databaseGroupCmd = &cobra.Command{
Use: "database [command] [OPTIONS]",
Short: "Manage system database (dangerous commands ahead!)",
Long: `Allows manipulating Luet's internal database of installed packages. Use with caution!
Removing packages by hand from the database can result in a broken system, and thus it is not recommended.
`,
}
func init() {
RootCmd.AddCommand(databaseGroupCmd)
databaseGroupCmd.AddCommand(
NewDatabaseCreateCommand(),
NewDatabaseGetCommand(),
NewDatabaseRemoveCommand(),
)
}

89
cmd/database/create.go Normal file
View File

@@ -0,0 +1,89 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd_database
import (
"io/ioutil"
"github.com/mudler/luet/cmd/util"
artifact "github.com/mudler/luet/pkg/api/core/types/artifact"
pkg "github.com/mudler/luet/pkg/package"
"github.com/spf13/cobra"
)
func NewDatabaseCreateCommand() *cobra.Command {
var ans = &cobra.Command{
Use: "create <artifact_metadata1.yaml> <artifact_metadata1.yaml>",
Short: "Insert a package in the system DB",
Long: `Inserts a package in the system database:
$ luet database create foo.yaml
"luet database create" injects a package in the system database without actually installing it, use it with caution.
This commands takes multiple yaml input file representing package artifacts, that are usually generated while building packages.
The yaml must contain the package definition, and the file list at least.
For reference, inspect a "metadata.yaml" file generated while running "luet build"`,
Args: cobra.OnlyValidArgs,
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
},
Run: func(cmd *cobra.Command, args []string) {
util.SetSystemConfig(util.DefaultContext)
systemDB := util.DefaultContext.Config.GetSystemDB()
for _, a := range args {
dat, err := ioutil.ReadFile(a)
if err != nil {
util.DefaultContext.Fatal("Failed reading ", a, ": ", err.Error())
}
art, err := artifact.NewPackageArtifactFromYaml(dat)
if err != nil {
util.DefaultContext.Fatal("Failed reading yaml ", a, ": ", err.Error())
}
files := art.Files
// Check if the package is already present
if p, err := systemDB.FindPackage(art.CompileSpec.GetPackage()); err == nil && p.GetName() != "" {
util.DefaultContext.Fatal("Package", art.CompileSpec.GetPackage().HumanReadableString(),
" already present.")
}
if _, err := systemDB.CreatePackage(art.CompileSpec.GetPackage()); err != nil {
util.DefaultContext.Fatal("Failed to create ", a, ": ", err.Error())
}
if err := systemDB.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: art.CompileSpec.GetPackage().GetFingerPrint(), Files: files}); err != nil {
util.DefaultContext.Fatal("Failed setting package files for ", a, ": ", err.Error())
}
util.DefaultContext.Info(art.CompileSpec.GetPackage().HumanReadableString(), " created")
}
},
}
ans.Flags().String("system-dbpath", "", "System db path")
ans.Flags().String("system-target", "", "System rootpath")
ans.Flags().String("system-engine", "", "System DB engine")
return ans
}
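
The metadata files consumed here are the same ones "luet build" writes next to each artifact. As a rough illustration of what the command reads, the sketch below loads one such file with the artifact API used above and prints the package and its file count; the file name is hypothetical and the exact signatures are assumptions taken from this changeset.

package main

import (
	"fmt"
	"io/ioutil"

	artifact "github.com/mudler/luet/pkg/api/core/types/artifact"
)

func main() {
	// Hypothetical metadata file produced by "luet build".
	dat, err := ioutil.ReadFile("foo.metadata.yaml")
	if err != nil {
		panic(err)
	}

	art, err := artifact.NewPackageArtifactFromYaml(dat)
	if err != nil {
		panic(err)
	}

	// The same fields "luet database create" relies on: the package
	// definition and the list of files it ships.
	fmt.Println("package:", art.CompileSpec.GetPackage().HumanReadableString())
	fmt.Println("files shipped:", len(art.Files))
}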

85
cmd/database/get.go Normal file
View File

@@ -0,0 +1,85 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd_database
import (
"fmt"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"gopkg.in/yaml.v2"
"github.com/spf13/cobra"
)
func NewDatabaseGetCommand() *cobra.Command {
var c = &cobra.Command{
Use: "get <package>",
Short: "Get a package in the system DB as yaml",
Long: `Get a package in the system database in the YAML format:
$ luet database get system/foo
To return also files:
$ luet database get --files system/foo`,
Args: cobra.OnlyValidArgs,
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
},
Run: func(cmd *cobra.Command, args []string) {
showFiles, _ := cmd.Flags().GetBool("files")
util.SetSystemConfig(util.DefaultContext)
systemDB := util.DefaultContext.Config.GetSystemDB()
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
continue
}
ps, err := systemDB.FindPackages(pack)
if err != nil {
continue
}
for _, p := range ps {
y, err := p.Yaml()
if err != nil {
continue
}
fmt.Println(string(y))
if showFiles {
files, err := systemDB.GetPackageFiles(p)
if err != nil {
continue
}
b, err := yaml.Marshal(files)
if err != nil {
continue
}
fmt.Println("files:\n" + string(b))
}
}
}
},
}
c.Flags().Bool("files", false, "Show package files.")
c.Flags().String("system-dbpath", "", "System db path")
c.Flags().String("system-target", "", "System rootpath")
c.Flags().String("system-engine", "", "System DB engine")
return c
}

66
cmd/database/remove.go Normal file
View File

@@ -0,0 +1,66 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd_database
import (
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"github.com/spf13/cobra"
)
func NewDatabaseRemoveCommand() *cobra.Command {
var ans = &cobra.Command{
Use: "remove [package1] [package2] ...",
Short: "Remove a package from the system DB (forcefully - you normally don't want to do that)",
Long: `Removes a package in the system database without actually uninstalling it:
$ luet database remove foo/bar
This command takes multiple packages as arguments and prunes their entries from the system database.
`,
Args: cobra.OnlyValidArgs,
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
},
Run: func(cmd *cobra.Command, args []string) {
util.SetSystemConfig(util.DefaultContext)
systemDB := util.DefaultContext.Config.GetSystemDB()
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
if err := systemDB.RemovePackage(pack); err != nil {
util.DefaultContext.Fatal("Failed removing ", a, ": ", err.Error())
}
if err := systemDB.RemovePackageFiles(pack); err != nil {
util.DefaultContext.Fatal("Failed removing files for ", a, ": ", err.Error())
}
}
},
}
ans.Flags().String("system-dbpath", "", "System db path")
ans.Flags().String("system-target", "", "System rootpath")
ans.Flags().String("system-engine", "", "System DB engine")
return ans
}

86
cmd/exec.go Normal file
View File

@@ -0,0 +1,86 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"os"
b64 "encoding/base64"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/box"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var execCmd = &cobra.Command{
Use: "exec --rootfs /path [command]",
Short: "Execute a command in the rootfs context",
Long: `Uses the unshare technique and pivot_root to execute a command inside a folder containing a valid rootfs`,
PreRun: func(cmd *cobra.Command, args []string) {
},
// If you change this, look at pkg/box/exec that runs this command and adapt
Run: func(cmd *cobra.Command, args []string) {
stdin, _ := cmd.Flags().GetBool("stdin")
stdout, _ := cmd.Flags().GetBool("stdout")
stderr, _ := cmd.Flags().GetBool("stderr")
rootfs, _ := cmd.Flags().GetString("rootfs")
base, _ := cmd.Flags().GetBool("decode")
entrypoint, _ := cmd.Flags().GetString("entrypoint")
envs, _ := cmd.Flags().GetStringArray("env")
mounts, _ := cmd.Flags().GetStringArray("mount")
if base {
var ss []string
for _, a := range args {
sDec, _ := b64.StdEncoding.DecodeString(a)
ss = append(ss, string(sDec))
}
// If the command to run is complex, base64 encoding is used to avoid mangling the input
args = ss
}
util.DefaultContext.Info("Executing", args, "in", rootfs)
b := box.NewBox(entrypoint, args, mounts, envs, rootfs, stdin, stdout, stderr)
err := b.Exec()
if err != nil {
util.DefaultContext.Fatal(errors.Wrap(err, fmt.Sprintf("entrypoint: %s rootfs: %s", entrypoint, rootfs)))
}
},
}
func init() {
path, err := os.Getwd()
if err != nil {
util.DefaultContext.Fatal(err)
}
execCmd.Hidden = true
execCmd.Flags().String("rootfs", path, "Rootfs path")
execCmd.Flags().Bool("stdin", false, "Attach to stdin")
execCmd.Flags().Bool("stdout", false, "Attach to stdout")
execCmd.Flags().Bool("stderr", false, "Attach to stderr")
execCmd.Flags().Bool("decode", false, "Base64 decode")
execCmd.Flags().StringArrayP("env", "e", []string{}, "Environment settings")
execCmd.Flags().StringArrayP("mount", "m", []string{}, "List of paths to bind-mount from the host")
execCmd.Flags().String("entrypoint", "/bin/sh", "Entrypoint command (/bin/sh)")
RootCmd.AddCommand(execCmd)
}
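
Since each argument is base64-decoded independently in the loop above, a caller that wants to pass a complex command through --decode has to encode every argument on its own. A small standard-library sketch follows; the rootfs path and the command are placeholders chosen for illustration.

package main

import (
	b64 "encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Each argument is encoded on its own, mirroring the per-argument
	// DecodeString loop in cmd/exec.go above.
	command := []string{"/bin/sh", "-c", "echo hello > /tmp/out"}

	cmdline := []string{"luet", "exec", "--rootfs", "/path/to/rootfs", "--decode"}
	for _, a := range command {
		cmdline = append(cmdline, b64.StdEncoding.EncodeToString([]byte(a)))
	}

	fmt.Println(strings.Join(cmdline, " "))
}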

130
cmd/helpers/cli.go Normal file
View File

@@ -0,0 +1,130 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd_helpers
import (
"errors"
"fmt"
"regexp"
"strings"
_gentoo "github.com/Sabayon/pkgs-checker/pkg/gentoo"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
)
func CreateRegexArray(rgx []string) ([]*regexp.Regexp, error) {
ans := make([]*regexp.Regexp, len(rgx))
if len(rgx) > 0 {
for idx, reg := range rgx {
re, err := regexp.Compile(reg)
if err != nil {
return nil, errors.New("Invalid regex " + reg + "!")
}
ans[idx] = re
}
}
return ans, nil
}
func packageData(p string) (string, string) {
cat := ""
name := ""
if strings.Contains(p, "/") {
packagedata := strings.Split(p, "/")
cat = packagedata[0]
name = packagedata[1]
} else {
name = p
}
return cat, name
}
func packageHasGentooSelector(v string) bool {
return (strings.HasPrefix(v, "=") || strings.HasPrefix(v, ">") ||
strings.HasPrefix(v, "<"))
}
func gentooVersion(gp *_gentoo.GentooPackage) string {
condition := gp.Condition.String()
if condition == "=" {
condition = ""
}
pkgVersion := fmt.Sprintf("%s%s%s",
condition,
gp.Version,
gp.VersionSuffix,
)
if gp.VersionBuild != "" {
pkgVersion = fmt.Sprintf("%s%s%s+%s",
condition,
gp.Version,
gp.VersionSuffix,
gp.VersionBuild,
)
}
return pkgVersion
}
func ParsePackageStr(p string) (*pkg.DefaultPackage, error) {
if packageHasGentooSelector(p) {
gp, err := _gentoo.ParsePackageStr(p)
if err != nil {
return nil, err
}
if gp.Version == "" {
gp.Version = "0"
gp.Condition = _gentoo.PkgCondGreaterEqual
}
return &pkg.DefaultPackage{
Name: gp.Name,
Category: gp.Category,
Version: gentooVersion(gp),
Uri: make([]string, 0),
}, nil
}
ver := ">=0"
cat := ""
name := ""
if strings.Contains(p, "@") {
packageinfo := strings.Split(p, "@")
ver = packageinfo[1]
cat, name = packageData(packageinfo[0])
} else {
cat, name = packageData(p)
}
return &pkg.DefaultPackage{
Name: name,
Category: cat,
Version: ver,
Uri: make([]string, 0),
}, nil
}
func CheckErr(err error) {
if err != nil {
util.DefaultContext.Fatal(err)
}
}

View File

@@ -0,0 +1,28 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd_helpers_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestSolver(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "CLI helpers test Suite")
}

102
cmd/helpers/cli_test.go Normal file
View File

@@ -0,0 +1,102 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd_helpers_test
import (
. "github.com/mudler/luet/cmd/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("CLI Helpers", func() {
Context("Can parse package strings correctly", func() {
It("accept single package names", func() {
pack, err := ParsePackageStr("foo")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal(""))
Expect(pack.GetVersion()).To(Equal(">=0"))
})
It("accept unversioned packages with category", func() {
pack, err := ParsePackageStr("cat/foo")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal(">=0"))
})
It("accept versioned packages with category", func() {
pack, err := ParsePackageStr("cat/foo@1.1")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal("1.1"))
})
It("accept versioned ranges with category", func() {
pack, err := ParsePackageStr("cat/foo@>=1.1")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal(">=1.1"))
})
It("accept gentoo regex parsing without versions", func() {
pack, err := ParsePackageStr("=cat/foo")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal(">=0"))
})
It("accept gentoo regex parsing with versions", func() {
pack, err := ParsePackageStr("=cat/foo-1.2")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal("1.2"))
})
It("accept gentoo regex parsing with with condition", func() {
pack, err := ParsePackageStr(">=cat/foo-1.2")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal(">=1.2"))
})
It("accept gentoo regex parsing with with condition2", func() {
pack, err := ParsePackageStr("<cat/foo-1.2")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal("<1.2"))
})
It("accept gentoo regex parsing with with condition3", func() {
pack, err := ParsePackageStr(">cat/foo-1.2")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal(">1.2"))
})
It("accept gentoo regex parsing with with condition4", func() {
pack, err := ParsePackageStr("<=cat/foo-1.2")
Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("cat"))
Expect(pack.GetVersion()).To(Equal("<=1.2"))
})
})
})

View File

@@ -15,112 +15,122 @@
package cmd
import (
"os"
"path/filepath"
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/solver"
. "github.com/mudler/luet/pkg/config"
helpers "github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var installCmd = &cobra.Command{
Use: "install <pkg1> <pkg2> ...",
Short: "Install a package",
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
LuetCfg.Viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
LuetCfg.Viper.BindPFlag("download-only", cmd.Flags().Lookup("download-only"))
Long: `Installs one or more packages without asking questions:
$ luet install -y utils/busybox utils/yq ...
To install only deps of a package:
$ luet install --onlydeps utils/busybox ...
To not install deps of a package:
$ luet install --nodeps utils/busybox ...
To force install a package:
$ luet install --force utils/busybox ...
`,
Aliases: []string{"i"},
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Long: `Install packages in parallel`,
Run: func(cmd *cobra.Command, args []string) {
var toInstall []pkg.Package
var systemDB pkg.PackageDatabase
var toInstall pkg.Packages
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toInstall = append(toInstall, pack)
}
// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
repos := installer.Repositories{}
for _, repo := range LuetCfg.SystemRepositories {
if !repo.Enable {
continue
}
r := installer.NewSystemRepository(repo)
repos = append(repos, r)
force := viper.GetBool("force")
nodeps := viper.GetBool("nodeps")
onlydeps := viper.GetBool("onlydeps")
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
finalizerEnvs, _ := cmd.Flags().GetStringArray("finalizer-env")
relax, _ := cmd.Flags().GetBool("relax")
util.SetSystemConfig(util.DefaultContext)
util.SetSolverConfig(util.DefaultContext)
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
// Load config protect configs
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
// Load finalizer runtime environments
err := util.SetCliFinalizerEnvs(util.DefaultContext, finalizerEnvs)
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
force := LuetCfg.Viper.GetBool("force")
nodeps := LuetCfg.Viper.GetBool("nodeps")
onlydeps := LuetCfg.Viper.GetBool("onlydeps")
downloadOnly := LuetCfg.Viper.GetBool("download-only")
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
NoDeps: nodeps,
Force: force,
OnlyDeps: onlydeps,
PreserveSystemEssentialData: true,
DownloadOnly: downloadOnly,
Ask: !yes,
Relaxed: relax,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
})
inst.Repositories(repos)
if LuetCfg.GetSystem().DatabaseEngine == "boltdb" {
systemDB = pkg.NewBoltDatabase(
filepath.Join(LuetCfg.GetSystem().GetSystemRepoDatabaseDirPath(), "luet.db"))
} else {
systemDB = pkg.NewInMemoryDatabase(true)
}
system := &installer.System{Database: systemDB, Target: LuetCfg.GetSystem().Rootfs}
err := inst.Install(toInstall, system, downloadOnly)
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
err = inst.Install(toInstall, system)
if err != nil {
Fatal("Error: " + err.Error())
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
installCmd.Flags().String("system-dbpath", path, "System db path")
installCmd.Flags().String("system-target", path, "System rootpath")
installCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
installCmd.Flags().String("system-dbpath", "", "System db path")
installCmd.Flags().String("system-target", "", "System rootpath")
installCmd.Flags().String("system-engine", "", "System DB engine")
installCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
installCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
installCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
installCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
installCmd.Flags().Bool("nodeps", false, "Don't consider package dependencies (harmful!)")
installCmd.Flags().Bool("relax", false, "Relax installation constraints")
installCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
installCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
installCmd.Flags().Bool("download-only", false, "Download dependencies only")
installCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
installCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
installCmd.Flags().Bool("download-only", false, "Download only")
installCmd.Flags().StringArray("finalizer-env", []string{},
"Set finalizer environment in the format key=value.")
RootCmd.AddCommand(installCmd)
}
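
Stripped of flag parsing, the new install flow boils down to a handful of calls on the context: parse the package strings, build a LuetInstaller from LuetInstallerOptions, and run Install against a System backed by the configured database. A minimal sketch follows, reusing only identifiers that appear in this diff; treat the exact signatures as assumptions tied to this changeset, and note that the real CLI also initializes util.DefaultContext via util.InitContext in RootCmd before reaching this point.

package main

import (
	helpers "github.com/mudler/luet/cmd/helpers"
	"github.com/mudler/luet/cmd/util"
	installer "github.com/mudler/luet/pkg/installer"
	pkg "github.com/mudler/luet/pkg/package"
)

func main() {
	ctx := util.DefaultContext

	// Package selector in the same format the CLI accepts.
	pack, err := helpers.ParsePackageStr("utils/busybox")
	if err != nil {
		ctx.Fatal(err.Error())
	}

	inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
		Concurrency:         ctx.Config.GetGeneral().Concurrency,
		SolverOptions:       *ctx.Config.GetSolverOptions(),
		Ask:                 false, // equivalent to passing --yes
		PackageRepositories: ctx.Config.SystemRepositories,
		Context:             ctx,
	})

	system := &installer.System{
		Database: ctx.Config.GetSystemDB(),
		Target:   ctx.Config.GetSystem().Rootfs,
	}

	if err := inst.Install(pkg.Packages{pack}, system); err != nil {
		ctx.Fatal(err.Error())
	}
}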

140
cmd/oscheck.go Normal file
View File

@@ -0,0 +1,140 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"os"
"strings"
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
"github.com/mudler/luet/cmd/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var osCheckCmd = &cobra.Command{
Use: "oscheck",
Short: "Checks packages integrity",
Long: `List installed packages whose files are missing from the system.
$ luet oscheck
To reinstall packages in the list:
$ luet oscheck --reinstall
`,
Aliases: []string{"i"},
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Run: func(cmd *cobra.Command, args []string) {
force := viper.GetBool("force")
onlydeps := viper.GetBool("onlydeps")
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
util.SetSystemConfig(util.DefaultContext)
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
packs := system.OSCheck()
if !util.DefaultContext.Config.General.Quiet {
if len(packs) == 0 {
util.DefaultContext.Success("All good!")
os.Exit(0)
} else {
util.DefaultContext.Info("Following packages are missing files or are incomplete:")
for _, p := range packs {
util.DefaultContext.Info(p.HumanReadableString())
}
}
} else {
var s []string
for _, p := range packs {
s = append(s, p.HumanReadableString())
}
fmt.Println(strings.Join(s, " "))
}
reinstall, _ := cmd.Flags().GetBool("reinstall")
if reinstall {
// Strip version for reinstall
toInstall := pkg.Packages{}
for _, p := range packs {
new := p.Clone()
new.SetVersion(">=0")
toInstall = append(toInstall, new)
}
util.SetSolverConfig(util.DefaultContext)
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
// Load config protect configs
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
NoDeps: true,
Force: force,
OnlyDeps: onlydeps,
PreserveSystemEssentialData: true,
Ask: !yes,
DownloadOnly: downloadOnly,
Context: util.DefaultContext,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
})
err := inst.Swap(packs, toInstall, system)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
}
},
}
func init() {
osCheckCmd.Flags().String("system-dbpath", "", "System db path")
osCheckCmd.Flags().String("system-target", "", "System rootpath")
osCheckCmd.Flags().String("system-engine", "", "System DB engine")
osCheckCmd.Flags().Bool("reinstall", false, "reinstall")
osCheckCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
osCheckCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
osCheckCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
osCheckCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
osCheckCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
osCheckCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
osCheckCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
osCheckCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
osCheckCmd.Flags().Bool("download-only", false, "Download only")
RootCmd.AddCommand(osCheckCmd)
}

99
cmd/pack.go Normal file
View File

@@ -0,0 +1,99 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"os"
"path/filepath"
"time"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types/artifact"
"github.com/mudler/luet/pkg/compiler/types/compression"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var packCmd = &cobra.Command{
Use: "pack <package name>",
Short: "pack a custom package",
Long: `Pack creates a package from a directory, generating the metadata that a tree needs in order to generate a repository.
Pack can be used to manually replace what "luet build" does automatically by reading the packages' build.yaml files.
$ mkdir -p output/etc
$ echo "my config" > output/etc/foo
$ luet pack foo/bar@1.1 --source output
Afterwards, you can take the generated content and associate it with a tree and a corresponding definition.yaml file using "luet create-repo".
`,
PreRun: func(cmd *cobra.Command, args []string) {
viper.BindPFlag("destination", cmd.Flags().Lookup("destination"))
viper.BindPFlag("compression", cmd.Flags().Lookup("compression"))
viper.BindPFlag("source", cmd.Flags().Lookup("source"))
},
Run: func(cmd *cobra.Command, args []string) {
sourcePath := viper.GetString("source")
dst := viper.GetString("destination")
compressionType := viper.GetString("compression")
concurrency := util.DefaultContext.Config.GetGeneral().Concurrency
if len(args) != 1 {
util.DefaultContext.Fatal("You must specify a package name")
}
packageName := args[0]
p, err := helpers.ParsePackageStr(packageName)
if err != nil {
util.DefaultContext.Fatal("Invalid package string ", packageName, ": ", err.Error())
}
spec := &compilerspec.LuetCompilationSpec{Package: p}
a := artifact.NewPackageArtifact(filepath.Join(dst, p.GetFingerPrint()+".package.tar"))
a.CompressionType = compression.Implementation(compressionType)
err = a.Compress(sourcePath, concurrency)
if err != nil {
util.DefaultContext.Fatal("failed compressing ", packageName, ": ", err.Error())
}
a.CompileSpec = spec
filelist, err := a.FileList()
if err != nil {
util.DefaultContext.Fatal("failed generating file list for ", packageName, ": ", err.Error())
}
a.Files = filelist
a.CompileSpec.GetPackage().SetBuildTimestamp(time.Now().String())
err = a.WriteYAML(dst)
if err != nil {
util.DefaultContext.Fatal("failed writing metadata yaml file for ", packageName, ": ", err.Error())
}
},
}
func init() {
path, err := os.Getwd()
if err != nil {
util.DefaultContext.Fatal(err)
}
packCmd.Flags().String("source", path, "Source folder")
packCmd.Flags().String("destination", path, "Destination folder")
packCmd.Flags().String("compression", "gzip", "Compression alg: none, gzip")
RootCmd.AddCommand(packCmd)
}

70
cmd/reclaim.go Normal file
View File

@@ -0,0 +1,70 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"github.com/mudler/luet/cmd/util"
installer "github.com/mudler/luet/pkg/installer"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var reclaimCmd = &cobra.Command{
Use: "reclaim",
Short: "Reclaim packages to Luet database from available repositories",
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
},
Long: `Reclaim tries to find associations between packages in the online repositories and those present on the system.
$ luet reclaim
It scans the target file system and, if it finds a match with a package available in the repositories, marks it as installed in the system database.
`,
Run: func(cmd *cobra.Command, args []string) {
util.SetSystemConfig(util.DefaultContext)
force := viper.GetBool("force")
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
Force: force,
PreserveSystemEssentialData: true,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
})
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
err := inst.Reclaim(system)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
func init() {
reclaimCmd.Flags().String("system-dbpath", "", "System db path")
reclaimCmd.Flags().String("system-target", "", "System rootpath")
reclaimCmd.Flags().String("system-engine", "", "System DB engine")
reclaimCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
RootCmd.AddCommand(reclaimCmd)
}

126
cmd/reinstall.go Normal file
View File

@@ -0,0 +1,126 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/solver"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var reinstallCmd = &cobra.Command{
Use: "reinstall <pkg1> <pkg2> <pkg3>",
Short: "reinstall a set of packages",
Long: `Reinstall a group of packages in the system:
$ luet reinstall -y system/busybox shells/bash system/coreutils ...
`,
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("for", cmd.Flags().Lookup("for"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Run: func(cmd *cobra.Command, args []string) {
var toUninstall pkg.Packages
var toAdd pkg.Packages
force := viper.GetBool("force")
onlydeps := viper.GetBool("onlydeps")
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
installed, _ := cmd.Flags().GetBool("installed")
util.SetSystemConfig(util.DefaultContext)
util.SetSolverConfig(util.DefaultContext)
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
// Load config protect configs
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
NoDeps: true,
Force: force,
OnlyDeps: onlydeps,
PreserveSystemEssentialData: true,
Ask: !yes,
DownloadOnly: downloadOnly,
Context: util.DefaultContext,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
})
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
if installed {
for _, p := range system.Database.World() {
toUninstall = append(toUninstall, p)
c := p.Clone()
c.SetVersion(">=0")
toAdd = append(toAdd, c)
}
} else {
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toUninstall = append(toUninstall, pack)
toAdd = append(toAdd, pack)
}
}
err := inst.Swap(toUninstall, toAdd, system)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
func init() {
reinstallCmd.Flags().String("system-dbpath", "", "System db path")
reinstallCmd.Flags().String("system-target", "", "System rootpath")
reinstallCmd.Flags().String("system-engine", "", "System DB engine")
reinstallCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
reinstallCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
reinstallCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
reinstallCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
reinstallCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
reinstallCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
reinstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
reinstallCmd.Flags().Bool("installed", false, "Reinstall installed packages")
reinstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
reinstallCmd.Flags().Bool("download-only", false, "Download only")
RootCmd.AddCommand(reinstallCmd)
}

125
cmd/replace.go Normal file
View File

@@ -0,0 +1,125 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/solver"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var replaceCmd = &cobra.Command{
Use: "replace <pkg1> <pkg2> --for <pkg3> --for <pkg4> ...",
Short: "replace a set of packages",
Aliases: []string{"r"},
Long: `Replaces one or a group of packages without asking questions:
$ luet replace -y system/busybox ... --for shells/bash --for system/coreutils ...
`,
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("for", cmd.Flags().Lookup("for"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Run: func(cmd *cobra.Command, args []string) {
var toUninstall pkg.Packages
var toAdd pkg.Packages
f := viper.GetStringSlice("for")
force := viper.GetBool("force")
nodeps := viper.GetBool("nodeps")
onlydeps := viper.GetBool("onlydeps")
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
util.SetSystemConfig(util.DefaultContext)
util.SetSolverConfig(util.DefaultContext)
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toUninstall = append(toUninstall, pack)
}
for _, a := range f {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toAdd = append(toAdd, pack)
}
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
// Load config protect configs
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
NoDeps: nodeps,
Force: force,
OnlyDeps: onlydeps,
PreserveSystemEssentialData: true,
Ask: !yes,
DownloadOnly: downloadOnly,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
})
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
err := inst.Swap(toUninstall, toAdd, system)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
func init() {
replaceCmd.Flags().String("system-dbpath", "", "System db path")
replaceCmd.Flags().String("system-target", "", "System rootpath")
replaceCmd.Flags().String("system-engine", "", "System DB engine")
replaceCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
replaceCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
replaceCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
replaceCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
replaceCmd.Flags().Bool("nodeps", false, "Don't consider package dependencies (harmful!)")
replaceCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
replaceCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
replaceCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
replaceCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
replaceCmd.Flags().StringSlice("for", []string{}, "Packages that has to be installed in place of others")
replaceCmd.Flags().Bool("download-only", false, "Download only")
RootCmd.AddCommand(replaceCmd)
}

View File

@@ -22,10 +22,10 @@ import (
"strconv"
"time"
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/cmd/util"
installer "github.com/mudler/luet/pkg/installer"
"github.com/pterm/pterm"
. "github.com/logrusorgru/aurora"
"github.com/spf13/cobra"
)
@@ -43,7 +43,7 @@ func NewRepoListCommand() *cobra.Command {
quiet, _ := cmd.Flags().GetBool("quiet")
repoType, _ := cmd.Flags().GetString("type")
for _, repo := range LuetCfg.SystemRepositories {
for _, repo := range util.DefaultContext.Config.SystemRepositories {
if enable && !repo.Enable {
continue
}
@@ -58,26 +58,26 @@ func NewRepoListCommand() *cobra.Command {
fmt.Println(repo.Name)
} else {
if repo.Enable {
repoColor = Bold(BrightGreen(repo.Name)).String()
repoColor = pterm.LightGreen(repo.Name)
} else {
repoColor = Bold(BrightRed(repo.Name)).String()
repoColor = pterm.LightRed(repo.Name)
}
if repo.Description != "" {
repoText = Yellow(repo.Description).String()
repoText = pterm.LightYellow(repo.Description)
} else {
repoText = Yellow(repo.Urls[0]).String()
repoText = pterm.LightYellow(repo.Urls[0])
}
repobasedir := LuetCfg.GetSystem().GetRepoDatabaseDirPath(repo.Name)
repobasedir := util.DefaultContext.Config.GetSystem().GetRepoDatabaseDirPath(repo.Name)
if repo.Cached {
r := installer.NewSystemRepository(repo)
localRepo, _ := r.(*installer.LuetSystemRepository).ReadSpecFile(filepath.Join(repobasedir,
installer.REPOSITORY_SPECFILE), false)
localRepo, _ := r.ReadSpecFile(filepath.Join(repobasedir,
installer.REPOSITORY_SPECFILE))
if localRepo != nil {
tsec, _ := strconv.ParseInt(localRepo.GetLastUpdate(), 10, 64)
repoRevision = Bold(Red(localRepo.GetRevision())).String() +
" - " + Bold(Green(time.Unix(tsec, 0).String())).String()
repoRevision = pterm.LightRed(localRepo.GetRevision()) +
" - " + pterm.LightGreen(time.Unix(tsec, 0).String())
}
}

View File

@@ -17,9 +17,8 @@
package cmd_repo
import (
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/cmd/util"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
"github.com/spf13/cobra"
)
@@ -35,6 +34,7 @@ $> luet repo update
# Update only repo1 and repo2
$> luet repo update repo1 repo2
`,
Aliases: []string{"up"},
PreRun: func(cmd *cobra.Command, args []string) {
},
Run: func(cmd *cobra.Command, args []string) {
@@ -44,32 +44,28 @@ $> luet repo update repo1 repo2
if len(args) > 0 {
for _, rname := range args {
repo, err := LuetCfg.GetSystemRepository(rname)
repo, err := util.DefaultContext.Config.GetSystemRepository(rname)
if err != nil && !ignore {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
} else if err != nil {
continue
}
r := installer.NewSystemRepository(*repo)
Spinner(32)
_, err = r.Sync(force)
_, err = r.Sync(util.DefaultContext, force)
if err != nil && !ignore {
Fatal("Error on sync repository " + rname + ": " + err.Error())
util.DefaultContext.Fatal("Error on sync repository " + rname + ": " + err.Error())
}
SpinnerStop()
}
} else {
for _, repo := range LuetCfg.SystemRepositories {
if repo.Cached {
for _, repo := range util.DefaultContext.Config.SystemRepositories {
if repo.Cached && repo.Enable {
r := installer.NewSystemRepository(repo)
Spinner(32)
_, err := r.Sync(force)
_, err := r.Sync(util.DefaultContext, force)
if err != nil && !ignore {
Fatal("Error on sync repository " + r.GetName() + ": " + err.Error())
util.DefaultContext.Fatal("Error on sync repository " + r.GetName() + ": " + err.Error())
}
SpinnerStop()
}
}
}

View File

@@ -18,16 +18,9 @@ package cmd
import (
"fmt"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
"github.com/marcsauter/single"
config "github.com/mudler/luet/pkg/config"
helpers "github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
repo "github.com/mudler/luet/pkg/repository"
"github.com/mudler/luet/cmd/util"
bus "github.com/mudler/luet/pkg/api/core/bus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@@ -37,135 +30,104 @@ var cfgFile string
var Verbose bool
const (
LuetCLIVersion = "0.7.1"
LuetCLIVersion = "0.21.0"
LuetEnvPrefix = "LUET"
)
var license = []string{
"Luet Copyright (C) 2019-2021 Ettore Di Giacinto",
"This program comes with ABSOLUTELY NO WARRANTY.",
"This is free software, and you are welcome to redistribute it under certain conditions.",
}
// Build time and commit information.
//
// ⚠️ WARNING: should only be set by "-ldflags".
var (
BuildTime string
BuildCommit string
)
func version() string {
return fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime)
}
// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
Use: "luet",
Short: "Package manager for the XXth century!",
Long: `Package manager which uses containers to build packages`,
Version: LuetCLIVersion,
Use: "luet",
Short: "Container based package manager",
Long: `Luet is a single-binary package manager based on containers to build packages.
To install a package:
$ luet install package
To search for a package in the repositories:
$ luet search package
To list all packages installed in the system:
$ luet search --installed .
To show hidden packages:
$ luet search --hidden package
To build a package, from a tree definition:
$ luet build --tree tree/path package
`,
Version: version(),
PersistentPreRun: func(cmd *cobra.Command, args []string) {
err := LoadConfig(config.LuetCfg)
err := util.InitContext(util.DefaultContext)
if err != nil {
Fatal("failed to load configuration:", err.Error())
util.DefaultContext.Error("failed to load configuration:", err.Error())
}
util.DisplayVersionBanner(util.DefaultContext, util.IntroScreen, version, license)
// Initialize tmpdir prefix. TODO: Move this with LoadConfig
// directly on sub command to ensure the creation only when it's
// needed.
err = util.DefaultContext.Config.GetSystem().InitTmpDir()
if err != nil {
util.DefaultContext.Fatal("failed on init tmp basedir:", err.Error())
}
viper.BindPFlag("plugin", cmd.Flags().Lookup("plugin"))
plugin := viper.GetStringSlice("plugin")
bus.Manager.Initialize(util.DefaultContext, plugin...)
if len(bus.Manager.Plugins) != 0 {
util.DefaultContext.Info(":lollipop:Enabled plugins:")
for _, p := range bus.Manager.Plugins {
util.DefaultContext.Info(fmt.Sprintf("\t:arrow_right: %s (at %s)", p.Name, p.Executable))
}
}
},
PersistentPostRun: func(cmd *cobra.Command, args []string) {
// Cleanup all tmp directories used by luet
err := util.DefaultContext.Config.GetSystem().CleanupTmpDir()
if err != nil {
util.DefaultContext.Warning("failed on cleanup tmpdir:", err.Error())
}
},
SilenceErrors: true,
}
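The hooks above rely on cobra's ordering guarantees: PersistentPreRun fires before the selected subcommand's Run and PersistentPostRun after it, which is why context/config initialization and tmpdir cleanup are attached there. A minimal standalone sketch of that ordering (not luet code; names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{
		Use: "demo",
		PersistentPreRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("load config, init tmpdir") // runs first, for every subcommand
		},
		PersistentPostRun: func(cmd *cobra.Command, args []string) {
			fmt.Println("cleanup tmpdir") // runs last
		},
	}
	root.AddCommand(&cobra.Command{
		Use: "install",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("subcommand work") // runs in between
		},
	})
	root.Execute() // "demo install" prints the three lines in that order
}
```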
func LoadConfig(c *config.LuetConfig) error {
// If a config file is found, read it in.
if err := c.Viper.ReadInConfig(); err != nil {
Debug(err)
}
err := c.Viper.Unmarshal(&config.LuetCfg)
if err != nil {
return err
}
Debug("Using config file:", c.Viper.ConfigFileUsed())
NewSpinner()
if c.GetLogging().Path != "" {
// Init zap logger
err = ZapLogger()
if err != nil {
return err
}
}
// Load repositories
err = repo.LoadRepositories(c)
if err != nil {
return err
}
return nil
}
// Execute adds all child commands to the root command sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
// XXX: This is mostly from scratch images.
if os.Getenv("LUET_NOLOCK") != "true" {
s := single.New("luet")
if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
Fatal("another instance of the app is already running, exiting")
} else if err != nil {
// Another error occurred, might be worth handling it as well
Fatal("failed to acquire exclusive app lock:", err.Error())
}
defer s.TryUnlock()
}
util.HandleLock(util.DefaultContext)
if err := RootCmd.Execute(); err != nil {
if len(os.Args) > 0 {
for _, c := range RootCmd.Commands() {
if c.Name() == os.Args[1] {
os.Exit(-1) // Something failed
}
}
// Try to load a bin from path.
helpers.Exec("luet-"+os.Args[1], os.Args[1:], os.Environ())
}
fmt.Println(err)
os.Exit(-1)
}
}
func init() {
cobra.OnInitialize(initConfig)
pflags := RootCmd.PersistentFlags()
pflags.StringVar(&cfgFile, "config", "", "config file (default is $HOME/.luet.yaml)")
pflags.BoolP("debug", "d", false, "verbose output")
pflags.Bool("fatal", false, "Enables Warnings to exit")
u, err := user.Current()
if err != nil {
Fatal("failed to retrieve user identity:", err.Error())
}
sameOwner := false
if u.Uid == "0" {
sameOwner = true
}
pflags.Bool("same-owner", sameOwner, "Maintain same owner on uncompress.")
pflags.Int("concurrency", runtime.NumCPU(), "Concurrency")
config.LuetCfg.Viper.BindPFlag("general.same_owner", pflags.Lookup("same-owner"))
config.LuetCfg.Viper.BindPFlag("general.debug", pflags.Lookup("debug"))
config.LuetCfg.Viper.BindPFlag("general.concurrency", pflags.Lookup("concurrency"))
config.LuetCfg.Viper.BindPFlag("general.fatal_warnings", pflags.Lookup("fatal"))
}
// initConfig reads in config file and ENV variables if set.
func initConfig() {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
Error(err)
os.Exit(1)
}
viper.SetEnvPrefix(LuetEnvPrefix)
viper.SetConfigType("yaml")
viper.SetConfigName(".luet") // name of config file (without extension)
if cfgFile != "" { // enable ability to specify config file via flag
viper.SetConfigFile(cfgFile)
} else {
viper.AddConfigPath(dir)
viper.AddConfigPath(".")
viper.AddConfigPath("$HOME")
viper.AddConfigPath("/etc/luet")
}
viper.AutomaticEnv() // read in environment variables that match
// Create EnvKey Replacer for handle complex structure
replacer := strings.NewReplacer(".", "__")
viper.SetEnvKeyReplacer(replacer)
viper.SetTypeByDefaultValue(true)
util.InitViper(util.DefaultContext, RootCmd)
}
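The replacer removed above (and presumably re-created inside util.InitViper) is what lets LUET-prefixed environment variables override nested configuration keys, e.g. LUET_GENERAL__DEBUG for general.debug. A minimal sketch of that mapping with plain viper, assuming the same prefix and "__" separator:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	v := viper.New()
	v.SetEnvPrefix("LUET")                              // env vars must start with LUET_
	v.SetEnvKeyReplacer(strings.NewReplacer(".", "__")) // general.debug -> GENERAL__DEBUG
	v.AutomaticEnv()                                    // consult the environment on every Get

	// With LUET_GENERAL__DEBUG=true exported, the nested key resolves from the environment.
	fmt.Println(v.GetBool("general.debug"))
}
```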

View File

@@ -15,134 +15,390 @@
package cmd
import (
"os"
"path/filepath"
"fmt"
"strings"
. "github.com/mudler/luet/pkg/config"
"github.com/ghodss/yaml"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
type PackageResult struct {
Name string `json:"name"`
Category string `json:"category"`
Version string `json:"version"`
Repository string `json:"repository"`
Target string `json:"target"`
Hidden bool `json:"hidden"`
Files []string `json:"files"`
}
type Results struct {
Packages []PackageResult `json:"packages"`
}
func (r PackageResult) String() string {
return fmt.Sprintf("%s/%s-%s required for %s", r.Category, r.Name, r.Version, r.Target)
}
var rows []string = []string{"Package", "Category", "Name", "Version", "Repository", "License"}
func packageToRow(repo string, p pkg.Package) []string {
return []string{p.HumanReadableString(), p.GetCategory(), p.GetName(), p.GetVersion(), repo, p.GetLicense()}
}
func packageToList(l *util.ListWriter, repo string, p pkg.Package) {
l.AppendItem(pterm.BulletListItem{
Level: 0, Text: p.HumanReadableString(),
TextStyle: pterm.NewStyle(pterm.FgCyan), Bullet: ">", BulletStyle: pterm.NewStyle(pterm.FgYellow),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Category: %s", p.GetCategory()),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Name: %s", p.GetName()),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Version: %s", p.GetVersion()),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Description: %s", p.GetDescription()),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Repository: %s ", repo),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
l.AppendItem(pterm.BulletListItem{
Level: 1, Text: fmt.Sprintf("Uri: %s ", strings.Join(p.GetURI(), " ")),
Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray),
})
}
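packageToList only queues pterm.BulletListItem entries on util.ListWriter, a luet-side wrapper; rendering the same kind of items with pterm directly would look roughly like this sketch (the package fields shown are made up for illustration):

```go
package main

import "github.com/pterm/pterm"

func main() {
	items := []pterm.BulletListItem{
		{Level: 0, Text: "cat/foo-1.0", TextStyle: pterm.NewStyle(pterm.FgCyan), Bullet: ">", BulletStyle: pterm.NewStyle(pterm.FgYellow)},
		{Level: 1, Text: "Category: cat", Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray)},
		{Level: 1, Text: "Version: 1.0", Bullet: "->", BulletStyle: pterm.NewStyle(pterm.FgDarkGray)},
	}
	// DefaultBulletList prints the indented, styled entries to the terminal.
	pterm.DefaultBulletList.WithItems(items).Render()
}
```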
func searchLocally(term string, l *util.ListWriter, t *util.TableWriter, label, labelMatch, revdeps, hidden bool) Results {
var results Results
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
var err error
iMatches := pkg.Packages{}
if label {
iMatches, err = system.Database.FindPackageLabel(term)
} else if labelMatch {
iMatches, err = system.Database.FindPackageLabelMatch(term)
} else {
iMatches, err = system.Database.FindPackageMatch(term)
}
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
for _, pack := range iMatches {
if !revdeps {
if !pack.IsHidden() || pack.IsHidden() && hidden {
t.AppendRow(packageToRow("system", pack))
packageToList(l, "system", pack)
f, _ := system.Database.GetPackageFiles(pack)
results.Packages = append(results.Packages,
PackageResult{
Name: pack.GetName(),
Version: pack.GetVersion(),
Category: pack.GetCategory(),
Repository: "system",
Hidden: pack.IsHidden(),
Files: f,
})
}
} else {
packs, _ := system.Database.GetRevdeps(pack)
for _, revdep := range packs {
if !revdep.IsHidden() || revdep.IsHidden() && hidden {
t.AppendRow(packageToRow("system", revdep))
packageToList(l, "system", revdep)
f, _ := system.Database.GetPackageFiles(revdep)
results.Packages = append(results.Packages,
PackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Repository: "system",
Hidden: revdep.IsHidden(),
Files: f,
})
}
}
}
}
return results
}
func searchOnline(term string, l *util.ListWriter, t *util.TableWriter, label, labelMatch, revdeps, hidden bool) Results {
var results Results
inst := installer.NewLuetInstaller(
installer.LuetInstallerOptions{
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
},
)
synced, err := inst.SyncRepositories()
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
util.DefaultContext.Info("--- Search results (" + term + "): ---")
matches := []installer.PackageMatch{}
if label {
matches = synced.SearchLabel(term)
} else if labelMatch {
matches = synced.SearchLabelMatch(term)
} else {
matches = synced.Search(term)
}
for _, m := range matches {
if !revdeps {
if !m.Package.IsHidden() || m.Package.IsHidden() && hidden {
t.AppendRow(packageToRow(m.Repo.GetName(), m.Package))
packageToList(l, m.Repo.GetName(), m.Package)
r := &PackageResult{
Name: m.Package.GetName(),
Version: m.Package.GetVersion(),
Category: m.Package.GetCategory(),
Repository: m.Repo.GetName(),
Hidden: m.Package.IsHidden(),
}
if m.Artifact != nil {
r.Files = m.Artifact.Files
}
results.Packages = append(results.Packages, *r)
}
} else {
packs, _ := m.Repo.GetTree().GetDatabase().GetRevdeps(m.Package)
for _, revdep := range packs {
if !revdep.IsHidden() || revdep.IsHidden() && hidden {
t.AppendRow(packageToRow(m.Repo.GetName(), revdep))
packageToList(l, m.Repo.GetName(), revdep)
r := &PackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Repository: m.Repo.GetName(),
Hidden: revdep.IsHidden(),
}
if m.Artifact != nil {
r.Files = m.Artifact.Files
}
results.Packages = append(results.Packages, *r)
}
}
}
}
return results
}
func searchLocalFiles(term string, l *util.ListWriter, t *util.TableWriter) Results {
var results Results
util.DefaultContext.Info("--- Search results (" + term + "): ---")
matches, _ := util.DefaultContext.Config.GetSystemDB().FindPackageByFile(term)
for _, pack := range matches {
t.AppendRow(packageToRow("system", pack))
packageToList(l, "system", pack)
f, _ := util.DefaultContext.Config.GetSystemDB().GetPackageFiles(pack)
results.Packages = append(results.Packages,
PackageResult{
Name: pack.GetName(),
Version: pack.GetVersion(),
Category: pack.GetCategory(),
Repository: "system",
Hidden: pack.IsHidden(),
Files: f,
})
}
return results
}
func searchFiles(term string, l *util.ListWriter, t *util.TableWriter) Results {
var results Results
inst := installer.NewLuetInstaller(
installer.LuetInstallerOptions{
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
},
)
synced, err := inst.SyncRepositories()
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
util.DefaultContext.Info("--- Search results (" + term + "): ---")
matches := []installer.PackageMatch{}
matches = synced.SearchPackages(term, installer.FileSearch)
for _, m := range matches {
t.AppendRow(packageToRow(m.Repo.GetName(), m.Package))
packageToList(l, m.Repo.GetName(), m.Package)
results.Packages = append(results.Packages,
PackageResult{
Name: m.Package.GetName(),
Version: m.Package.GetVersion(),
Category: m.Package.GetCategory(),
Repository: m.Repo.GetName(),
Hidden: m.Package.IsHidden(),
Files: m.Artifact.Files,
})
}
return results
}
var searchCmd = &cobra.Command{
Use: "search <term>",
Short: "Search packages",
Long: `Search for installed and available packages`,
Long: `Search for installed and available packages
To search a package in the repositories:
$ luet search <regex>
To search a package and display results in a table (wide screens):
$ luet search --table <regex>
To look into the installed packages:
$ luet search --installed <regex>
Note: the regex argument is optional; if omitted, it implies "all"
To search a package by label:
$ luet search --by-label <label>
or by regex against the label:
$ luet search --by-label-regex <label>
It can also show a package revdeps by:
$ luet search --revdeps <regex>
Search can also return results in different formats: as terminal output, as JSON, or as YAML.
$ luet search --json <regex> # JSON output
$ luet search --yaml <regex> # YAML output
`,
Aliases: []string{"s"},
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
},
Run: func(cmd *cobra.Command, args []string) {
var systemDB pkg.PackageDatabase
if len(args) != 1 {
Fatal("Wrong number of arguments (expected 1)")
var results Results
if len(args) > 1 {
util.DefaultContext.Fatal("Wrong number of arguments (expected 1)")
} else if len(args) == 0 {
args = []string{"."}
}
installed := LuetCfg.Viper.GetBool("installed")
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
hidden, _ := cmd.Flags().GetBool("hidden")
installed := viper.GetBool("installed")
searchWithLabel, _ := cmd.Flags().GetBool("by-label")
searchWithLabelMatch, _ := cmd.Flags().GetBool("by-label-regex")
revdeps, _ := cmd.Flags().GetBool("revdeps")
tableMode, _ := cmd.Flags().GetBool("table")
files, _ := cmd.Flags().GetBool("files")
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
if !installed {
repos := installer.Repositories{}
for _, repo := range LuetCfg.SystemRepositories {
if !repo.Enable {
continue
}
r := installer.NewSystemRepository(repo)
repos = append(repos, r)
}
inst := installer.NewLuetInstaller(
installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
},
)
inst.Repositories(repos)
synced, err := inst.SyncRepositories(false)
if err != nil {
Fatal("Error: " + err.Error())
}
Info("--- Search results: ---")
matches := []installer.PackageMatch{}
if searchWithLabel {
matches = synced.SearchLabel(args[0])
} else if searchWithLabelMatch {
matches = synced.SearchLabelMatch(args[0])
} else {
matches = synced.Search(args[0])
}
for _, m := range matches {
Info(":package:", m.Package.GetCategory(), m.Package.GetName(),
m.Package.GetVersion(), "repository:", m.Repo.GetName())
}
} else {
if LuetCfg.GetSystem().DatabaseEngine == "boltdb" {
systemDB = pkg.NewBoltDatabase(
filepath.Join(LuetCfg.GetSystem().GetSystemRepoDatabaseDirPath(), "luet.db"))
} else {
systemDB = pkg.NewInMemoryDatabase(true)
}
system := &installer.System{Database: systemDB, Target: LuetCfg.GetSystem().Rootfs}
var err error
iMatches := []pkg.Package{}
if searchWithLabel {
iMatches, err = system.Database.FindPackageLabel(args[0])
} else if searchWithLabelMatch {
iMatches, err = system.Database.FindPackageLabelMatch(args[0])
} else {
iMatches, err = system.Database.FindPackageMatch(args[0])
}
if err != nil {
Fatal("Error: " + err.Error())
}
for _, pack := range iMatches {
Info(":package:", pack.GetCategory(), pack.GetName(), pack.GetVersion())
}
util.SetSystemConfig(util.DefaultContext)
util.SetSolverConfig(util.DefaultContext)
out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
util.DefaultContext.Config.GetLogging().SetLogLevel(types.FatalLevel)
}
l := &util.ListWriter{}
t := &util.TableWriter{}
t.AppendRow(rows)
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
switch {
case files && installed:
results = searchLocalFiles(args[0], l, t)
case files && !installed:
results = searchFiles(args[0], l, t)
case !installed:
results = searchOnline(args[0], l, t, searchWithLabel, searchWithLabelMatch, revdeps, hidden)
default:
results = searchLocally(args[0], l, t, searchWithLabel, searchWithLabelMatch, revdeps, hidden)
}
y, err := yaml.Marshal(results)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
switch out {
case "yaml":
fmt.Println(string(y))
case "json":
j2, err := yaml.YAMLToJSON(y)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(j2))
default:
if tableMode {
t.Render()
} else if util.DefaultContext.Config.General.Quiet {
for _, tt := range results.Packages {
fmt.Printf("%s/%s-%s\n", tt.Category, tt.Name, tt.Version)
}
} else {
l.Render()
}
}
},
}
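The --output handling above, like the equivalent code in the tree subcommands, marshals the result structs to YAML once and derives the JSON form from those bytes via ghodss/yaml. A self-contained sketch of that round-trip with a stand-in struct:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type result struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

func main() {
	r := []result{{Name: "foo", Version: "1.0"}}

	y, err := yaml.Marshal(r) // ghodss/yaml uses the json tags for field names
	if err != nil {
		panic(err)
	}
	fmt.Println(string(y)) // --output yaml

	j, err := yaml.YAMLToJSON(y) // same data re-encoded as JSON
	if err != nil {
		panic(err)
	}
	fmt.Println(string(j)) // --output json
}
```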
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
searchCmd.Flags().String("system-dbpath", path, "System db path")
searchCmd.Flags().String("system-target", path, "System rootpath")
searchCmd.Flags().String("system-dbpath", "", "System db path")
searchCmd.Flags().String("system-target", "", "System rootpath")
searchCmd.Flags().String("system-engine", "", "System DB engine")
searchCmd.Flags().Bool("installed", false, "Search between system packages")
searchCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
searchCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
searchCmd.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
searchCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
searchCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
searchCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
searchCmd.Flags().Bool("by-label", false, "Search packages through label")
searchCmd.Flags().Bool("by-label-regex", false, "Search packages through label regex")
searchCmd.Flags().Bool("revdeps", false, "Search package reverse dependencies")
searchCmd.Flags().Bool("hidden", false, "Include hidden packages")
searchCmd.Flags().Bool("table", false, "show output in a table (wider screens)")
searchCmd.Flags().Bool("files", false, "Search between packages files")
RootCmd.AddCommand(searchCmd)
}

View File

@@ -18,7 +18,7 @@ import (
"net/http"
"os"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/cmd/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
@@ -41,15 +41,15 @@ var serverepoCmd = &cobra.Command{
http.Handle("/", http.FileServer(http.Dir(dir)))
Info("Serving ", dir, " on HTTP port: ", port)
Fatal(http.ListenAndServe(address+":"+port, nil))
util.DefaultContext.Info("Serving ", dir, " on HTTP port: ", port)
util.DefaultContext.Fatal(http.ListenAndServe(address+":"+port, nil))
},
}
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
util.DefaultContext.Fatal(err)
}
serverepoCmd.Flags().String("dir", path, "Packages folder (output from build)")
serverepoCmd.Flags().String("port", "9090", "Listening port")

View File

@@ -34,5 +34,6 @@ func init() {
NewTreePkglistCommand(),
NewTreeValidateCommand(),
NewTreeBumpCommand(),
NewTreeImageCommand(),
)
}

View File

@@ -18,11 +18,11 @@ package cmd_tree
import (
"fmt"
//"os"
//"sort"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/cmd/util"
spectooling "github.com/mudler/luet/pkg/spectooling"
tree "github.com/mudler/luet/pkg/tree"
version "github.com/mudler/luet/pkg/versioner"
"github.com/spf13/cobra"
)
@@ -36,32 +36,53 @@ func NewTreeBumpCommand() *cobra.Command {
PreRun: func(cmd *cobra.Command, args []string) {
df, _ := cmd.Flags().GetString("definition-file")
if df == "" {
Fatal("Mandatory definition.yaml path missing.")
util.DefaultContext.Fatal("Mandatory definition.yaml path missing.")
}
},
Run: func(cmd *cobra.Command, args []string) {
spec, _ := cmd.Flags().GetString("definition-file")
toStdout, _ := cmd.Flags().GetBool("to-stdout")
pkgVersion, _ := cmd.Flags().GetString("pkg-version")
pack, err := tree.ReadDefinitionFile(spec)
if err != nil {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
}
// Retrieve version build section with Gentoo parser
err = pack.BumpBuildVersion()
if err != nil {
Fatal("Error on increment build version: " + err.Error())
if pkgVersion != "" {
validator := &version.WrappedVersioner{}
err := validator.Validate(pkgVersion)
if err != nil {
util.DefaultContext.Fatal("Invalid version string: " + err.Error())
}
pack.SetVersion(pkgVersion)
} else {
// Retrieve version build section with Gentoo parser
err = pack.BumpBuildVersion()
if err != nil {
util.DefaultContext.Fatal("Error on increment build version: " + err.Error())
}
}
if toStdout {
data, err := spectooling.NewDefaultPackageSanitized(&pack).Yaml()
if err != nil {
util.DefaultContext.Fatal("Error on yaml conversion: " + err.Error())
}
fmt.Println(string(data))
} else {
err = tree.WriteDefinitionFile(&pack, spec)
if err != nil {
Fatal("Error on write definition file: " + err.Error())
err = tree.WriteDefinitionFile(&pack, spec)
if err != nil {
util.DefaultContext.Fatal("Error on write definition file: " + err.Error())
}
fmt.Printf("Bumped package %s/%s-%s.\n", pack.Category, pack.Name, pack.Version)
}
fmt.Printf("Bumped package %s/%s-%s.\n", pack.Category, pack.Name, pack.Version)
},
}
ans.Flags().StringP("pkg-version", "p", "", "Set a specific package version")
ans.Flags().StringP("definition-file", "f", "", "Path of the definition to bump.")
ans.Flags().BoolP("to-stdout", "o", false, "Bump package to output.")
return ans
}

cmd/tree/images.go (new file, 160 lines)
View File

@@ -0,0 +1,160 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd_tree
import (
"fmt"
"os"
"github.com/ghodss/yaml"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/options"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
tree "github.com/mudler/luet/pkg/tree"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
func NewTreeImageCommand() *cobra.Command {
var ans = &cobra.Command{
Use: "images [OPTIONS]",
Short: "List of the images of a package",
PreRun: func(cmd *cobra.Command, args []string) {
t, _ := cmd.Flags().GetStringArray("tree")
if len(t) == 0 {
util.DefaultContext.Fatal("Mandatory tree param missing.")
}
if len(args) != 1 {
util.DefaultContext.Fatal("Expects one package as parameter")
}
util.BindValuesFlags(cmd)
viper.BindPFlag("image-repository", cmd.Flags().Lookup("image-repository"))
},
Run: func(cmd *cobra.Command, args []string) {
var results TreeResults
treePath, _ := cmd.Flags().GetStringArray("tree")
imageRepository := viper.GetString("image-repository")
pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")
values := util.ValuesFlags()
out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
util.DefaultContext.Config.GetLogging().SetLogLevel("error")
}
reciper := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
for _, t := range treePath {
err := reciper.Load(t)
if err != nil {
util.DefaultContext.Fatal("Error on load tree ", err)
}
}
compilerBackend := backend.NewSimpleDockerBackend(util.DefaultContext)
opts := *util.DefaultContext.Config.GetSolverOptions()
opts.Options = solver.Options{Type: solver.SingleCoreSimple, Concurrency: 1}
luetCompiler := compiler.NewLuetCompiler(
compilerBackend,
reciper.GetDatabase(),
options.WithBuildValues(values),
options.WithContext(util.DefaultContext),
options.WithPushRepository(imageRepository),
options.WithPullRepositories(pullRepo),
options.WithTemplateFolder(util.TemplateFolders(util.DefaultContext, false, treePath)),
options.WithSolverOptions(opts),
)
a := args[0]
pack, err := helpers.ParsePackageStr(a)
if err != nil {
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
spec, err := luetCompiler.FromPackage(pack)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
ht := compiler.NewHashTree(reciper.GetDatabase())
copy, err := compiler.CompilerFinalImages(luetCompiler)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
hashtree, err := ht.Query(copy, spec)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
for _, assertion := range hashtree.Solution { //highly dependent on the order
//buildImageHash := imageRepository + ":" + assertion.Hash.BuildHash
currentPackageImageHash := imageRepository + ":" + assertion.Hash.PackageHash
results.Packages = append(results.Packages, TreePackageResult{
Name: assertion.Package.GetName(),
Version: assertion.Package.GetVersion(),
Category: assertion.Package.GetCategory(),
Image: currentPackageImageHash,
})
}
y, err := yaml.Marshal(results)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
switch out {
case "yaml":
fmt.Println(string(y))
case "json":
j2, err := yaml.YAMLToJSON(y)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(j2))
default:
for _, p := range results.Packages {
fmt.Println(fmt.Sprintf("%s/%s-%s: %s", p.Category, p.Name, p.Version, p.Image))
}
}
},
}
path, err := os.Getwd()
if err != nil {
util.DefaultContext.Fatal(err)
}
ans.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
ans.Flags().StringArrayP("tree", "t", []string{path}, "Path of the tree to use.")
ans.Flags().String("image-repository", "luet/cache", "Default base image string for generated image")
ans.Flags().StringArrayP("pull-repository", "p", []string{}, "A list of repositories to pull the cache from")
return ans
}

View File

@@ -18,17 +18,31 @@ package cmd_tree
import (
"fmt"
"os"
"sort"
//. "github.com/mudler/luet/pkg/config"
helpers "github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
"github.com/ghodss/yaml"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
tree "github.com/mudler/luet/pkg/tree"
"github.com/spf13/cobra"
)
type TreePackageResult struct {
Name string `json:"name"`
Category string `json:"category"`
Version string `json:"version"`
Path string `json:"path"`
Image string `json:"image"`
}
type TreeResults struct {
Packages []TreePackageResult `json:"packages"`
}
func pkgDetail(pkg pkg.Package) string {
ans := fmt.Sprintf(`
@@ Package: %s/%s-%s
@@ -55,31 +69,66 @@ func NewTreePkglistCommand() *cobra.Command {
var ans = &cobra.Command{
Use: "pkglist [OPTIONS]",
Short: "List of the packages found in tree.",
Args: cobra.OnlyValidArgs,
Args: cobra.NoArgs,
PreRun: func(cmd *cobra.Command, args []string) {
t, _ := cmd.Flags().GetString("tree")
if t == "" {
Fatal("Mandatory tree param missing.")
t, _ := cmd.Flags().GetStringArray("tree")
if len(t) == 0 {
util.DefaultContext.Fatal("Mandatory tree param missing.")
}
revdeps, _ := cmd.Flags().GetBool("revdeps")
deps, _ := cmd.Flags().GetBool("deps")
if revdeps && deps {
util.DefaultContext.Fatal("Both revdeps and deps option used. Choice only one.")
}
},
Run: func(cmd *cobra.Command, args []string) {
var results TreeResults
var depSolver solver.PackageSolver
treePath, _ := cmd.Flags().GetString("tree")
treePath, _ := cmd.Flags().GetStringArray("tree")
verbose, _ := cmd.Flags().GetBool("verbose")
buildtime, _ := cmd.Flags().GetBool("buildtime")
full, _ := cmd.Flags().GetBool("full")
reciper := tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))
err := reciper.Load(treePath)
if err != nil {
Fatal("Error on load tree ", err)
revdeps, _ := cmd.Flags().GetBool("revdeps")
deps, _ := cmd.Flags().GetBool("deps")
out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
util.DefaultContext.Config.GetLogging().SetLogLevel("error")
}
var reciper tree.Builder
if buildtime {
reciper = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
} else {
reciper = tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))
}
for _, t := range treePath {
err := reciper.Load(t)
if err != nil {
util.DefaultContext.Fatal("Error on load tree ", err)
}
}
if deps {
emptyInstallationDb := pkg.NewInMemoryDatabase(false)
depSolver = solver.NewSolver(solver.Options{Type: solver.SingleCoreSimple}, pkg.NewInMemoryDatabase(false),
reciper.GetDatabase(),
emptyInstallationDb)
}
regExcludes, err := helpers.CreateRegexArray(excludes)
if err != nil {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
}
regMatches, err := helpers.CreateRegexArray(matches)
if err != nil {
Fatal(err.Error())
util.DefaultContext.Fatal(err.Error())
}
plist := make([]string, 0)
@@ -89,7 +138,7 @@ func NewTreePkglistCommand() *cobra.Command {
if full {
pkgstr = pkgDetail(p)
} else if verbose {
pkgstr = fmt.Sprintf("%s/%s-%s", p.GetCategory(), p.GetName(), p.GetVersion())
pkgstr = p.HumanReadableString()
} else {
pkgstr = fmt.Sprintf("%s/%s", p.GetCategory(), p.GetName())
}
@@ -116,21 +165,119 @@ func NewTreePkglistCommand() *cobra.Command {
}
}
if addPkg {
if !addPkg {
continue
}
if revdeps {
packs, _ := reciper.GetDatabase().GetRevdeps(p)
for i := range packs {
revdep := packs[i]
if full {
pkgstr = pkgDetail(revdep)
} else if verbose {
pkgstr = revdep.HumanReadableString()
} else {
pkgstr = fmt.Sprintf("%s/%s", revdep.GetCategory(), revdep.GetName())
}
plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Path: revdep.GetPath(),
})
}
} else if deps {
solution, err := depSolver.Install(pkg.Packages{p})
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
ass := solution.SearchByName(p.GetPackageName())
solution, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
for _, pa := range solution {
if pa.Value {
// Exclude itself
if pa.Package.GetName() == p.GetName() && pa.Package.GetCategory() == p.GetCategory() {
continue
}
if full {
pkgstr = pkgDetail(pa.Package)
} else if verbose {
pkgstr = pa.Package.HumanReadableString()
} else {
pkgstr = fmt.Sprintf("%s/%s", pa.Package.GetCategory(), pa.Package.GetName())
}
plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: pa.Package.GetName(),
Version: pa.Package.GetVersion(),
Category: pa.Package.GetCategory(),
Path: pa.Package.GetPath(),
})
}
}
} else {
plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: p.GetName(),
Version: p.GetVersion(),
Category: p.GetCategory(),
Path: p.GetPath(),
})
}
}
y, err := yaml.Marshal(results)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
switch out {
case "yaml":
fmt.Println(string(y))
case "json":
j2, err := yaml.YAMLToJSON(y)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(j2))
default:
if !deps {
sort.Strings(plist)
}
for _, p := range plist {
fmt.Println(p)
}
}
sort.Strings(plist)
for _, p := range plist {
fmt.Println(p)
}
},
}
path, err := os.Getwd()
if err != nil {
util.DefaultContext.Fatal(err)
}
ans.Flags().BoolP("buildtime", "b", false, "Build time match")
ans.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
ans.Flags().Bool("revdeps", false, "Search package reverse dependencies")
ans.Flags().Bool("deps", false, "Search package dependencies")
ans.Flags().BoolP("verbose", "v", false, "Add package version")
ans.Flags().BoolP("full", "f", false, "Show package detail")
ans.Flags().StringP("tree", "t", "", "Path of the tree to use.")
ans.Flags().StringArrayP("tree", "t", []string{path}, "Path of the tree to use.")
ans.Flags().StringSliceVarP(&matches, "matches", "m", []string{},
"Include only matched packages from list. (Use string as regex).")
ans.Flags().StringSliceVarP(&excludes, "exclude", "e", []string{},

View File

@@ -17,13 +17,17 @@
package cmd_tree
import (
"errors"
"fmt"
"os"
"regexp"
"sort"
"strconv"
"sync"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
//. "github.com/mudler/luet/pkg/config"
helpers "github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
tree "github.com/mudler/luet/pkg/tree"
@@ -31,177 +35,461 @@ import (
"github.com/spf13/cobra"
)
type ValidateOpts struct {
WithSolver bool
OnlyRuntime bool
OnlyBuildtime bool
RegExcludes []*regexp.Regexp
RegMatches []*regexp.Regexp
Excludes []string
Matches []string
// Runtime validate stuff
RuntimeCacheDeps *pkg.InMemoryDatabase
RuntimeReciper *tree.InstallerRecipe
// Buildtime validate stuff
BuildtimeCacheDeps *pkg.InMemoryDatabase
BuildtimeReciper *tree.CompilerRecipe
Mutex sync.Mutex
BrokenPkgs int
BrokenDeps int
Errors []error
}
func (o *ValidateOpts) IncrBrokenPkgs() {
o.Mutex.Lock()
defer o.Mutex.Unlock()
o.BrokenPkgs++
}
func (o *ValidateOpts) IncrBrokenDeps() {
o.Mutex.Lock()
defer o.Mutex.Unlock()
o.BrokenDeps++
}
func (o *ValidateOpts) AddError(err error) {
o.Mutex.Lock()
defer o.Mutex.Unlock()
o.Errors = append(o.Errors, err)
}
func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, reciper tree.Builder, cacheDeps *pkg.InMemoryDatabase) error {
var errstr string
var ans error
var depSolver solver.PackageSolver
if opts.WithSolver {
emptyInstallationDb := pkg.NewInMemoryDatabase(false)
depSolver = solver.NewSolver(solver.Options{Type: solver.SingleCoreSimple}, pkg.NewInMemoryDatabase(false),
reciper.GetDatabase(),
emptyInstallationDb)
}
found, err := reciper.GetDatabase().FindPackages(
&pkg.DefaultPackage{
Name: p.GetName(),
Category: p.GetCategory(),
Version: ">=0",
},
)
if err != nil || len(found) < 1 {
if err != nil {
errstr = err.Error()
} else {
errstr = "No packages"
}
util.DefaultContext.Error(fmt.Sprintf("[%9s] %s/%s-%s: Broken. No versions could be found by database %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
errstr,
))
opts.IncrBrokenDeps()
return errors.New(
fmt.Sprintf("[%9s] %s/%s-%s: Broken. No versions could be found by database %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
errstr,
))
}
// Ensure that we use the right package from the right reciper for deps
pReciper, err := reciper.GetDatabase().FindPackage(
&pkg.DefaultPackage{
Name: p.GetName(),
Category: p.GetCategory(),
Version: p.GetVersion(),
},
)
if err != nil {
errstr = fmt.Sprintf("[%9s] %s/%s-%s: Error on retrieve package - %s.",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
err.Error(),
)
util.DefaultContext.Error(errstr)
return errors.New(errstr)
}
p = pReciper
pkgstr := fmt.Sprintf("%s/%s-%s", p.GetCategory(), p.GetName(),
p.GetVersion())
validpkg := true
if len(opts.Matches) > 0 {
matched := false
for _, rgx := range opts.RegMatches {
if rgx.MatchString(pkgstr) {
matched = true
break
}
}
if !matched {
return nil
}
}
if len(opts.Excludes) > 0 {
excluded := false
for _, rgx := range opts.RegExcludes {
if rgx.MatchString(pkgstr) {
excluded = true
break
}
}
if excluded {
return nil
}
}
util.DefaultContext.Info(fmt.Sprintf("[%9s] Checking package ", checkType)+
fmt.Sprintf("%s/%s-%s", p.GetCategory(), p.GetName(), p.GetVersion()),
"with", len(p.GetRequires()), "dependencies and", len(p.GetConflicts()), "conflicts.")
all := p.GetRequires()
all = append(all, p.GetConflicts()...)
for idx, r := range all {
var deps pkg.Packages
var err error
if r.IsSelector() {
deps, err = reciper.GetDatabase().FindPackages(
&pkg.DefaultPackage{
Name: r.GetName(),
Category: r.GetCategory(),
Version: r.GetVersion(),
},
)
} else {
deps = append(deps, r)
}
if err != nil || len(deps) < 1 {
if err != nil {
errstr = err.Error()
} else {
errstr = "No packages"
}
util.DefaultContext.Error(fmt.Sprintf("[%9s] %s/%s-%s: Broken Dep %s/%s-%s - %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
errstr,
))
opts.IncrBrokenDeps()
ans = errors.New(
fmt.Sprintf("[%9s] %s/%s-%s: Broken Dep %s/%s-%s - %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
errstr))
validpkg = false
} else {
util.DefaultContext.Debug(fmt.Sprintf("[%9s] Find packages for dep", checkType),
fmt.Sprintf("%s/%s-%s", r.GetCategory(), r.GetName(), r.GetVersion()))
if opts.WithSolver {
util.DefaultContext.Info(fmt.Sprintf("[%9s] :soap: [%2d/%2d] %s/%s-%s: %s/%s-%s",
checkType,
idx+1, len(all),
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
))
// Check if the solver has already been run for this dep
_, err := cacheDeps.Get(r.HashFingerprint(""))
if err == nil {
util.DefaultContext.Debug(fmt.Sprintf("[%9s] :direct_hit: Cache Hit for dep", checkType),
fmt.Sprintf("%s/%s-%s", r.GetCategory(), r.GetName(), r.GetVersion()))
continue
}
util.DefaultContext.Spinner()
solution, err := depSolver.Install(pkg.Packages{r})
ass := solution.SearchByName(r.GetPackageName())
util.DefaultContext.SpinnerStop()
if err == nil {
if ass == nil {
ans = errors.New(
fmt.Sprintf("[%9s] %s/%s-%s: solution doesn't retrieve package %s/%s-%s.",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
))
if util.DefaultContext.Config.GetGeneral().Debug {
for idx, pa := range solution {
fmt.Println(fmt.Sprintf("[%9s] %s/%s-%s: solution %d: %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(), idx,
pa.Package.GetPackageName()))
}
}
util.DefaultContext.Error(ans.Error())
opts.IncrBrokenDeps()
validpkg = false
} else {
_, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
}
}
if err != nil {
util.DefaultContext.Error(fmt.Sprintf("[%9s] %s/%s-%s: solver broken for dep %s/%s-%s - %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
err.Error(),
))
ans = errors.New(
fmt.Sprintf("[%9s] %s/%s-%s: solver broken for Dep %s/%s-%s - %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
err.Error()))
opts.IncrBrokenDeps()
validpkg = false
}
// Register the key
cacheDeps.Set(r.HashFingerprint(""), "1")
}
}
}
if !validpkg {
opts.IncrBrokenPkgs()
}
return ans
}
func validateWorker(i int,
wg *sync.WaitGroup,
c <-chan pkg.Package,
opts *ValidateOpts) {
defer wg.Done()
for p := range c {
if opts.OnlyBuildtime {
// Check buildtime compiler/deps
err := validatePackage(p, "buildtime", opts, opts.BuildtimeReciper, opts.BuildtimeCacheDeps)
if err != nil {
opts.AddError(err)
continue
}
} else if opts.OnlyRuntime {
// Check runtime installer/deps
err := validatePackage(p, "runtime", opts, opts.RuntimeReciper, opts.RuntimeCacheDeps)
if err != nil {
opts.AddError(err)
continue
}
} else {
// Check runtime installer/deps
err := validatePackage(p, "runtime", opts, opts.RuntimeReciper, opts.RuntimeCacheDeps)
if err != nil {
opts.AddError(err)
continue
}
// Check buildtime compiler/deps
err = validatePackage(p, "buildtime", opts, opts.BuildtimeReciper, opts.BuildtimeCacheDeps)
if err != nil {
opts.AddError(err)
}
}
}
}
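validateWorker is one of Concurrency goroutines draining a shared channel of packages; the validate command further down in this diff feeds the channel, closes it, and waits on the WaitGroup. A stripped-down sketch of that fan-out pattern, with the package type replaced by a string:

```go
package main

import (
	"fmt"
	"sync"
)

func worker(id int, wg *sync.WaitGroup, jobs <-chan string) {
	defer wg.Done()
	for p := range jobs { // returns when the channel is closed and drained
		fmt.Printf("worker %d validating %s\n", id, p)
	}
}

func main() {
	concurrency := 4
	jobs := make(chan string)

	wg := new(sync.WaitGroup)
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go worker(i, wg, jobs)
	}

	for _, p := range []string{"cat/a-1.0", "cat/b-2.0", "cat/c-0.1"} {
		jobs <- p // blocks until a worker is free
	}
	close(jobs) // lets the workers drain and return
	wg.Wait()   // all validations done
}
```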
func initOpts(opts *ValidateOpts, onlyRuntime, onlyBuildtime, withSolver bool, treePaths []string) {
var err error
opts.OnlyBuildtime = onlyBuildtime
opts.OnlyRuntime = onlyRuntime
opts.WithSolver = withSolver
opts.RuntimeReciper = nil
opts.BuildtimeReciper = nil
opts.BrokenPkgs = 0
opts.BrokenDeps = 0
if onlyBuildtime {
opts.BuildtimeReciper = (tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))).(*tree.CompilerRecipe)
} else if onlyRuntime {
opts.RuntimeReciper = (tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))).(*tree.InstallerRecipe)
} else {
opts.BuildtimeReciper = (tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))).(*tree.CompilerRecipe)
opts.RuntimeReciper = (tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))).(*tree.InstallerRecipe)
}
opts.RuntimeCacheDeps = pkg.NewInMemoryDatabase(false).(*pkg.InMemoryDatabase)
opts.BuildtimeCacheDeps = pkg.NewInMemoryDatabase(false).(*pkg.InMemoryDatabase)
for _, treePath := range treePaths {
util.DefaultContext.Info(fmt.Sprintf("Loading :deciduous_tree: %s...", treePath))
if opts.BuildtimeReciper != nil {
err = opts.BuildtimeReciper.Load(treePath)
if err != nil {
util.DefaultContext.Fatal("Error on load tree ", err)
}
}
if opts.RuntimeReciper != nil {
err = opts.RuntimeReciper.Load(treePath)
if err != nil {
util.DefaultContext.Fatal("Error on load tree ", err)
}
}
}
opts.RegExcludes, err = helpers.CreateRegexArray(opts.Excludes)
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
opts.RegMatches, err = helpers.CreateRegexArray(opts.Matches)
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
}
func NewTreeValidateCommand() *cobra.Command {
var excludes []string
var matches []string
var treePaths []string
var opts ValidateOpts
var ans = &cobra.Command{
Use: "validate [OPTIONS]",
Short: "Validate a tree or a list of packages",
Args: cobra.OnlyValidArgs,
PreRun: func(cmd *cobra.Command, args []string) {
onlyRuntime, _ := cmd.Flags().GetBool("only-runtime")
onlyBuildtime, _ := cmd.Flags().GetBool("only-buildtime")
if len(treePaths) < 1 {
Fatal("Mandatory tree param missing.")
util.DefaultContext.Fatal("Mandatory tree param missing.")
}
if onlyRuntime && onlyBuildtime {
util.DefaultContext.Fatal("Both --only-runtime and --only-buildtime options are not possibile.")
}
},
Run: func(cmd *cobra.Command, args []string) {
var depSolver solver.PackageSolver
var errstr string
var reciper tree.Builder
errors := make([]string, 0)
concurrency := util.DefaultContext.Config.GetGeneral().Concurrency
brokenPkgs := 0
brokenDeps := 0
withSolver, _ := cmd.Flags().GetBool("with-solver")
onlyRuntime, _ := cmd.Flags().GetBool("only-runtime")
onlyBuildtime, _ := cmd.Flags().GetBool("only-buildtime")
reciper := tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))
for _, treePath := range treePaths {
err := reciper.Load(treePath)
if err != nil {
Fatal("Error on load tree ", err)
}
opts.Excludes = excludes
opts.Matches = matches
initOpts(&opts, onlyRuntime, onlyBuildtime, withSolver, treePaths)
// We need at least one valid reciper to get the list of packages.
if onlyBuildtime {
reciper = opts.BuildtimeReciper
} else {
reciper = opts.RuntimeReciper
}
emptyInstallationDb := pkg.NewInMemoryDatabase(false)
if withSolver {
depSolver = solver.NewSolver(pkg.NewInMemoryDatabase(false),
reciper.GetDatabase(),
emptyInstallationDb)
}
all := make(chan pkg.Package)
regExcludes, err := helpers.CreateRegexArray(excludes)
if err != nil {
Fatal(err.Error())
}
regMatches, err := helpers.CreateRegexArray(matches)
if err != nil {
Fatal(err.Error())
}
var wg = new(sync.WaitGroup)
for i := 0; i < concurrency; i++ {
wg.Add(1)
go validateWorker(i, wg, all, &opts)
}
for _, p := range reciper.GetDatabase().World() {
pkgstr := fmt.Sprintf("%s/%s-%s", p.GetCategory(), p.GetName(),
p.GetVersion())
validpkg := true
if len(matches) > 0 {
matched := false
for _, rgx := range regMatches {
if rgx.MatchString(pkgstr) {
matched = true
break
}
}
if !matched {
continue
}
}
if len(excludes) > 0 {
excluded := false
for _, rgx := range regExcludes {
if rgx.MatchString(pkgstr) {
excluded = true
break
}
}
if excluded {
continue
}
}
Info("Checking package "+fmt.Sprintf("%s/%s-%s", p.GetCategory(), p.GetName(), p.GetVersion()), "with", len(p.GetRequires()), "dependencies.")
for _, r := range p.GetRequires() {
deps, err := reciper.GetDatabase().FindPackages(
&pkg.DefaultPackage{
Name: r.GetName(),
Category: r.GetCategory(),
Version: r.GetVersion(),
},
)
if err != nil || len(deps) < 1 {
if err != nil {
errstr = err.Error()
} else {
errstr = "No packages"
}
Error(fmt.Sprintf("%s/%s-%s: Broken Dep %s/%s-%s - %s",
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
errstr,
))
errors = append(errors,
fmt.Sprintf("%s/%s-%s: Broken Dep %s/%s-%s - %s",
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
errstr))
brokenDeps++
validpkg = false
} else {
Debug("Find packages for dep",
fmt.Sprintf("%s/%s-%s", r.GetCategory(), r.GetName(), r.GetVersion()))
if withSolver {
Spinner(32)
_, err := depSolver.Install([]pkg.Package{r})
SpinnerStop()
if err != nil {
Error(fmt.Sprintf("%s/%s-%s: solver broken for dep %s/%s-%s - %s",
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
err.Error(),
))
errors = append(errors,
fmt.Sprintf("%s/%s-%s: solver broken for Dep %s/%s-%s - %s",
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
err.Error()))
brokenDeps++
validpkg = false
}
}
}
}
if !validpkg {
brokenPkgs++
}
all <- p
}
close(all)
sort.Strings(errors)
for _, e := range errors {
// Wait separately and once done close the channel
go func() {
wg.Wait()
}()
stringerrs := []string{}
for _, e := range opts.Errors {
stringerrs = append(stringerrs, e.Error())
}
sort.Strings(stringerrs)
for _, e := range stringerrs {
fmt.Println(e)
}
fmt.Println("Broken packages:", brokenPkgs, "(", brokenDeps, "deps ).")
if brokenPkgs > 0 {
os.Exit(1)
// fmt.Println("Broken packages:", brokenPkgs, "(", brokenDeps, "deps ).")
if len(stringerrs) != 0 {
util.DefaultContext.Error(fmt.Sprintf("Found %d broken packages and %d broken deps.",
opts.BrokenPkgs, opts.BrokenDeps))
util.DefaultContext.Fatal("Errors: " + strconv.Itoa(len(stringerrs)))
} else {
util.DefaultContext.Info("All good! :white_check_mark:")
os.Exit(0)
}
},
}
path, err := os.Getwd()
if err != nil {
util.DefaultContext.Fatal(err)
}
ans.Flags().Bool("only-runtime", false, "Check only runtime dependencies.")
ans.Flags().Bool("only-buildtime", false, "Check only buildtime dependencies.")
ans.Flags().BoolP("with-solver", "s", false,
"Enable check of requires also with solver.")
ans.Flags().StringSliceVarP(&treePaths, "tree", "t", []string{},
ans.Flags().StringSliceVarP(&treePaths, "tree", "t", []string{path},
"Path of the tree to use.")
ans.Flags().StringSliceVarP(&excludes, "exclude", "e", []string{},
"Exclude matched packages from analysis. (Use string as regex).")

View File

@@ -15,91 +15,99 @@
package cmd
import (
"os"
"path/filepath"
. "github.com/mudler/luet/pkg/config"
helpers "github.com/mudler/luet/pkg/helpers"
helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var uninstallCmd = &cobra.Command{
Use: "uninstall <pkg> <pkg2> ...",
Short: "Uninstall a package or a list of packages",
Long: `Uninstall packages`,
Use: "uninstall <pkg> <pkg2> ...",
Short: "Uninstall a package or a list of packages",
Long: `Uninstall packages`,
Aliases: []string{"rm", "un"},
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Run: func(cmd *cobra.Command, args []string) {
var systemDB pkg.PackageDatabase
toRemove := []pkg.Package{}
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
Fatal("Invalid package string ", a, ": ", err.Error())
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toRemove = append(toRemove, pack)
}
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
force := LuetCfg.Viper.GetBool("force")
nodeps := LuetCfg.Viper.GetBool("nodeps")
force := viper.GetBool("force")
nodeps, _ := cmd.Flags().GetBool("nodeps")
full, _ := cmd.Flags().GetBool("full")
checkconflicts, _ := cmd.Flags().GetBool("conflictscheck")
fullClean, _ := cmd.Flags().GetBool("full-clean")
yes := viper.GetBool("yes")
keepProtected, _ := cmd.Flags().GetBool("keep-protected-files")
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
util.SetSystemConfig(util.DefaultContext)
util.SetSolverConfig(util.DefaultContext)
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
util.DefaultContext.Config.ConfigProtectSkip = !keepProtected
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
NoDeps: nodeps,
Force: force,
})
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
if LuetCfg.GetSystem().DatabaseEngine == "boltdb" {
systemDB = pkg.NewBoltDatabase(
filepath.Join(LuetCfg.GetSystem().GetSystemRepoDatabaseDirPath(), "luet.db"))
} else {
systemDB = pkg.NewInMemoryDatabase(true)
}
system := &installer.System{Database: systemDB, Target: LuetCfg.GetSystem().Rootfs}
err = inst.Uninstall(pack, system)
if err != nil {
Fatal("Error: " + err.Error())
}
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
// Load config protect configs
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
NoDeps: nodeps,
Force: force,
FullUninstall: full,
FullCleanUninstall: fullClean,
CheckConflicts: checkconflicts,
Ask: !yes,
PreserveSystemEssentialData: true,
Context: util.DefaultContext,
})
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
if err := inst.Uninstall(system, toRemove...); err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
uninstallCmd.Flags().String("system-dbpath", path, "System db path")
uninstallCmd.Flags().String("system-target", path, "System rootpath")
uninstallCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
uninstallCmd.Flags().String("system-dbpath", "", "System db path")
uninstallCmd.Flags().String("system-target", "", "System rootpath")
uninstallCmd.Flags().String("system-engine", "", "System DB engine")
uninstallCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
uninstallCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
uninstallCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
uninstallCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
uninstallCmd.Flags().Bool("nodeps", false, "Don't consider package dependencies (harmful!)")
uninstallCmd.Flags().Bool("nodeps", false, "Don't consider package dependencies (harmful! overrides checkconflicts and full!)")
uninstallCmd.Flags().Bool("force", false, "Force uninstall")
uninstallCmd.Flags().Bool("full", false, "Attempts to remove as much packages as possible which aren't required (slow)")
uninstallCmd.Flags().Bool("conflictscheck", true, "Check if the package marked for deletion is required by other packages")
uninstallCmd.Flags().Bool("full-clean", false, "(experimental) Uninstall packages and all the other deps/revdeps of it.")
uninstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
uninstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
uninstallCmd.Flags().BoolP("keep-protected-files", "k", false, "Keep package protected files around")
RootCmd.AddCommand(uninstallCmd)
}

View File

@@ -15,93 +15,92 @@
package cmd
import (
"os"
"path/filepath"
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var upgradeCmd = &cobra.Command{
Use: "upgrade",
Short: "Upgrades the system",
Use: "upgrade",
Short: "Upgrades the system",
Aliases: []string{"u"},
PreRun: func(cmd *cobra.Command, args []string) {
LuetCfg.Viper.BindPFlag("system.database_path", installCmd.Flags().Lookup("system-dbpath"))
LuetCfg.Viper.BindPFlag("system.rootfs", installCmd.Flags().Lookup("system-target"))
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Long: `Upgrades packages in parallel`,
Run: func(cmd *cobra.Command, args []string) {
var systemDB pkg.PackageDatabase
repos := installer.Repositories{}
for _, repo := range LuetCfg.SystemRepositories {
if !repo.Enable {
continue
}
force := viper.GetBool("force")
nodeps, _ := cmd.Flags().GetBool("nodeps")
full, _ := cmd.Flags().GetBool("full")
universe, _ := cmd.Flags().GetBool("universe")
clean, _ := cmd.Flags().GetBool("clean")
sync, _ := cmd.Flags().GetBool("sync")
osCheck, _ := cmd.Flags().GetBool("oscheck")
r := installer.NewSystemRepository(repo)
repos = append(repos, r)
}
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
rate := LuetCfg.Viper.GetFloat64("solver.rate")
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
force := LuetCfg.Viper.GetBool("force")
util.SetSystemConfig(util.DefaultContext)
opts := util.SetSolverConfig(util.DefaultContext)
LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
LuetCfg.GetSolverOptions().Discount = float32(discount)
LuetCfg.GetSolverOptions().MaxAttempts = attempts
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
Debug("Solver", LuetCfg.GetSolverOptions().String())
util.DefaultContext.Debug("Solver", opts.CompactString())
// Load config protect configs
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: LuetCfg.GetGeneral().Concurrency,
SolverOptions: *LuetCfg.GetSolverOptions(),
Force: force,
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
Force: force,
FullUninstall: full,
NoDeps: nodeps,
SolverUpgrade: universe,
RemoveUnavailableOnUpgrade: clean,
UpgradeNewRevisions: sync,
PreserveSystemEssentialData: true,
Ask: !yes,
AutoOSCheck: osCheck,
DownloadOnly: downloadOnly,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
})
inst.Repositories(repos)
_, err := inst.SyncRepositories(false)
if err != nil {
Fatal("Error: " + err.Error())
}
if LuetCfg.GetSystem().DatabaseEngine == "boltdb" {
systemDB = pkg.NewBoltDatabase(
filepath.Join(LuetCfg.GetSystem().GetSystemRepoDatabaseDirPath(), "luet.db"))
} else {
systemDB = pkg.NewInMemoryDatabase(true)
}
system := &installer.System{Database: systemDB, Target: LuetCfg.GetSystem().Rootfs}
err = inst.Upgrade(system)
if err != nil {
Fatal("Error: " + err.Error())
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
if err := inst.Upgrade(system); err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
},
}
func init() {
path, err := os.Getwd()
if err != nil {
Fatal(err)
}
upgradeCmd.Flags().String("system-dbpath", path, "System db path")
upgradeCmd.Flags().String("system-target", path, "System rootpath")
upgradeCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
upgradeCmd.Flags().String("system-dbpath", "", "System db path")
upgradeCmd.Flags().String("system-target", "", "System rootpath")
upgradeCmd.Flags().String("system-engine", "", "System DB engine")
upgradeCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
upgradeCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
upgradeCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
upgradeCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
upgradeCmd.Flags().Bool("force", false, "Force upgrade by ignoring errors")
upgradeCmd.Flags().Bool("nodeps", false, "Don't consider package dependencies (harmful! overrides checkconflicts and full!)")
upgradeCmd.Flags().Bool("full", false, "Attempts to remove as much packages as possible which aren't required (slow)")
upgradeCmd.Flags().Bool("universe", false, "Use ONLY the SAT solver to compute upgrades (experimental)")
upgradeCmd.Flags().Bool("clean", false, "Try to drop removed packages (experimental, only when --universe is enabled)")
upgradeCmd.Flags().Bool("sync", false, "Upgrade packages with new revisions (experimental)")
upgradeCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
upgradeCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
upgradeCmd.Flags().Bool("download-only", false, "Download only")
upgradeCmd.Flags().Bool("oscheck", false, "Perform automatically oschecks after upgrades")
RootCmd.AddCommand(upgradeCmd)
}

cmd/util.go Normal file

@@ -0,0 +1,161 @@
// Copyright © 2020-2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"os"
"path/filepath"
"runtime"
"github.com/docker/docker/api/types"
"github.com/docker/go-units"
"github.com/mudler/luet/pkg/api/core/image"
luettypes "github.com/mudler/luet/pkg/api/core/types"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
"github.com/pkg/errors"
"github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/helpers/docker"
"github.com/spf13/cobra"
)
func pack(ctx *luettypes.Context, p, dst, imageName, arch, OS string) error {
tempimage, err := ctx.Config.GetSystem().TempFile("tempimage")
if err != nil {
return errors.Wrap(err, "error met while creating tempdir for "+p)
}
defer os.RemoveAll(tempimage.Name()) // clean up
if err := image.CreateTar(p, tempimage.Name(), imageName, arch, OS); err != nil {
return errors.Wrap(err, "could not create image from tar")
}
return fileHelper.CopyFile(tempimage.Name(), dst)
}
func NewPackCommand() *cobra.Command {
c := &cobra.Command{
Use: "pack image src.tar dst.tar",
Short: "Pack a standard tar archive as a container image",
Long: `Pack creates a tarball which can be loaded as a container image (e.g. with docker load) from a standard flat tar archive.
It doesn't need the docker daemon to run, and allows overriding the default os/arch:
luet util pack --os arm64 image:tag src.tar dst.tar
`,
Args: cobra.MinimumNArgs(3),
Run: func(cmd *cobra.Command, args []string) {
image := args[0]
src := args[1]
dst := args[2]
arch, _ := cmd.Flags().GetString("arch")
os, _ := cmd.Flags().GetString("os")
err := pack(util.DefaultContext, src, dst, image, arch, os)
if err != nil {
util.DefaultContext.Fatal(err.Error())
}
util.DefaultContext.Info("Image packed as", image)
},
}
c.Flags().String("arch", runtime.GOARCH, "Image architecture")
c.Flags().String("os", runtime.GOOS, "Image OS")
return c
}
func NewUnpackCommand() *cobra.Command {
c := &cobra.Command{
Use: "unpack image path",
Short: "Unpack a docker image natively",
Long: `unpack doesn't need the docker daemon to run, and unpacks a docker image in the specified directory:
luet util unpack golang:alpine /alpine
`,
PreRun: func(cmd *cobra.Command, args []string) {
if len(args) != 2 {
util.DefaultContext.Fatal("Expects an image and a path")
}
},
Run: func(cmd *cobra.Command, args []string) {
image := args[0]
destination, err := filepath.Abs(args[1])
if err != nil {
util.DefaultContext.Error("Invalid path %s", destination)
os.Exit(1)
}
verify, _ := cmd.Flags().GetBool("verify")
user, _ := cmd.Flags().GetString("auth-username")
pass, _ := cmd.Flags().GetString("auth-password")
authType, _ := cmd.Flags().GetString("auth-type")
server, _ := cmd.Flags().GetString("auth-server-address")
identity, _ := cmd.Flags().GetString("auth-identity-token")
registryToken, _ := cmd.Flags().GetString("auth-registry-token")
util.DefaultContext.Info("Downloading", image, "to", destination)
auth := &types.AuthConfig{
Username: user,
Password: pass,
ServerAddress: server,
Auth: authType,
IdentityToken: identity,
RegistryToken: registryToken,
}
info, err := docker.DownloadAndExtractDockerImage(util.DefaultContext, image, destination, auth, verify)
if err != nil {
util.DefaultContext.Error(err.Error())
os.Exit(1)
}
util.DefaultContext.Info(fmt.Sprintf("Pulled: %s %s", info.Target.Digest, info.Name))
util.DefaultContext.Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.Target.Size))))
},
}
c.Flags().String("auth-username", "", "Username to authenticate to registry/notary")
c.Flags().String("auth-password", "", "Password to authenticate to registry")
c.Flags().String("auth-type", "", "Auth type")
c.Flags().String("auth-server-address", "", "Authentication server address")
c.Flags().String("auth-identity-token", "", "Authentication identity token")
c.Flags().String("auth-registry-token", "", "Authentication registry token")
c.Flags().Bool("verify", false, "Verify signed images to notary before to pull")
return c
}
var utilGroup = &cobra.Command{
Use: "util [command] [OPTIONS]",
Short: "General luet internal utilities exposed",
}
func init() {
RootCmd.AddCommand(utilGroup)
utilGroup.AddCommand(
NewUnpackCommand(),
NewPackCommand(),
)
}

cmd/util/cli.go Normal file

@@ -0,0 +1,170 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package util
import (
"errors"
"os"
"path/filepath"
"strings"
"github.com/marcsauter/single"
"github.com/pterm/pterm"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/installer"
)
var DefaultContext = types.NewContext()
var lockedCommands = []string{"install", "uninstall", "upgrade"}
var bannerCommands = []string{"install", "build", "uninstall", "upgrade"}
func BindSystemFlags(cmd *cobra.Command) {
viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
}
func BindSolverFlags(cmd *cobra.Command) {
viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
}
func BindValuesFlags(cmd *cobra.Command) {
viper.BindPFlag("values", cmd.Flags().Lookup("values"))
}
func ValuesFlags() []string {
return viper.GetStringSlice("values")
}
func SetSystemConfig(ctx *types.Context) {
dbpath := viper.GetString("system.database_path")
rootfs := viper.GetString("system.rootfs")
engine := viper.GetString("system.database_engine")
ctx.Config.System.DatabaseEngine = engine
ctx.Config.System.DatabasePath = dbpath
ctx.Config.System.SetRootFS(rootfs)
}
func SetSolverConfig(ctx *types.Context) (c *types.LuetSolverOptions) {
stype := viper.GetString("solver.type")
discount := viper.GetFloat64("solver.discount")
rate := viper.GetFloat64("solver.rate")
attempts := viper.GetInt("solver.max_attempts")
ctx.Config.GetSolverOptions().Type = stype
ctx.Config.GetSolverOptions().LearnRate = float32(rate)
ctx.Config.GetSolverOptions().Discount = float32(discount)
ctx.Config.GetSolverOptions().MaxAttempts = attempts
return &types.LuetSolverOptions{
Type: stype,
LearnRate: float32(rate),
Discount: float32(discount),
MaxAttempts: attempts,
}
}
func SetCliFinalizerEnvs(ctx *types.Context, finalizerEnvs []string) error {
if len(finalizerEnvs) > 0 {
for _, v := range finalizerEnvs {
idx := strings.Index(v, "=")
if idx < 0 {
return errors.New("Found invalid runtime finalizer environment: " + v)
}
ctx.Config.SetFinalizerEnv(v[0:idx], v[idx+1:])
}
}
return nil
}
// TemplateFolders returns the default folders which hold shared templates between packages in a given tree path
func TemplateFolders(ctx *types.Context, fromRepo bool, treePaths []string) []string {
templateFolders := []string{}
for _, t := range treePaths {
templateFolders = append(templateFolders, filepath.Join(t, "templates"))
}
if fromRepo {
for _, s := range installer.SystemRepositories(ctx.Config.SystemRepositories) {
templateFolders = append(templateFolders, filepath.Join(s.TreePath, "templates"))
}
}
return templateFolders
}
func IntroScreen() {
luetLogo, _ := pterm.DefaultBigText.WithLetters(
pterm.NewLettersFromStringWithStyle("LU", pterm.NewStyle(pterm.FgLightMagenta)),
pterm.NewLettersFromStringWithStyle("ET", pterm.NewStyle(pterm.FgLightBlue))).
Srender()
pterm.DefaultCenter.Print(luetLogo)
pterm.DefaultCenter.Print(pterm.DefaultHeader.WithFullWidth().WithBackgroundStyle(pterm.NewStyle(pterm.BgLightBlue)).WithMargin(10).Sprint("Luet - 0-deps container-based package manager"))
}
func HandleLock(c *types.Context) {
if os.Getenv("LUET_NOLOCK") != "true" {
if len(os.Args) > 1 {
for _, lockedCmd := range lockedCommands {
if os.Args[1] == lockedCmd {
s := single.New("luet")
if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
c.Fatal("another instance of the app is already running, exiting")
} else if err != nil {
// Another error occurred, might be worth handling it as well
c.Fatal("failed to acquire exclusive app lock:", err.Error())
}
defer s.TryUnlock()
break
}
}
}
}
}
func DisplayVersionBanner(c *types.Context, banner func(), version func() string, license []string) {
display := false
if len(os.Args) > 1 {
for _, c := range bannerCommands {
if os.Args[1] == c {
display = true
}
}
}
if display {
if c.Config.General.Quiet {
pterm.Info.Printf("Luet %s\n", version())
pterm.Info.Println(strings.Join(license, "\n"))
} else {
banner()
pterm.DefaultCenter.Print(version())
for _, l := range license {
pterm.DefaultCenter.Print(l)
}
}
}
}
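The upgrade and uninstall commands above show the intended pattern for these helpers. As a compact, hedged sketch (the command name is hypothetical, and the system-*/solver-* flags are assumed to be registered on it as in the init() functions above), a command binds flags in PreRun and materializes them into the shared context in Run:

package cmd

import (
	"github.com/mudler/luet/cmd/util"
	"github.com/spf13/cobra"
)

var exampleCmd = &cobra.Command{
	Use: "example",
	PreRun: func(cmd *cobra.Command, args []string) {
		// Bind system-dbpath/system-target/system-engine and the solver-* flags to viper.
		util.BindSystemFlags(cmd)
		util.BindSolverFlags(cmd)
	},
	Run: func(cmd *cobra.Command, args []string) {
		// Copy the bound viper values into the shared context before using it.
		util.SetSystemConfig(util.DefaultContext)
		opts := util.SetSolverConfig(util.DefaultContext)
		util.DefaultContext.Debug("Solver", opts.CompactString())
	},
}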

cmd/util/config.go Normal file

@@ -0,0 +1,210 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package util
import (
"fmt"
"os"
"os/user"
"path/filepath"
"runtime"
"strings"
extensions "github.com/mudler/cobra-extensions"
"github.com/mudler/luet/pkg/api/core/types"
helpers "github.com/mudler/luet/pkg/helpers"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
const (
LuetEnvPrefix = "LUET"
)
var cfgFile string
// initConfig reads in config file and ENV variables if set.
func initConfig() {
setDefaults(viper.GetViper())
// Luet supports these priorities when reading the configuration file:
// - command line option (if available)
// - $PWD/.luet.yaml
// - $HOME/.luet.yaml
// - /etc/luet/luet.yaml
//
// Note: currently a single viper instance support only one config name.
viper.SetEnvPrefix(LuetEnvPrefix)
viper.SetConfigType("yaml")
if cfgFile != "" { // enable ability to specify config file via flag
viper.SetConfigFile(cfgFile)
} else {
// Retrieve pwd directory
pwdDir, err := os.Getwd()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
homeDir := helpers.GetHomeDir()
if fileHelper.Exists(filepath.Join(pwdDir, ".luet.yaml")) || (homeDir != "" && fileHelper.Exists(filepath.Join(homeDir, ".luet.yaml"))) {
viper.AddConfigPath(".")
if homeDir != "" {
viper.AddConfigPath(homeDir)
}
viper.SetConfigName(".luet")
} else {
viper.SetConfigName("luet")
viper.AddConfigPath("/etc/luet")
}
}
viper.AutomaticEnv() // read in environment variables that match
// Create EnvKey Replacer for handle complex structure
replacer := strings.NewReplacer(".", "__")
viper.SetEnvKeyReplacer(replacer)
viper.SetTypeByDefaultValue(true)
// If a config file is found, read it in.
viper.ReadInConfig()
}
// InitContext inits the context by parsing the configurations from viper
// this is meant to be run before each command to be able to parse any override from
// the CLI/ENV
func InitContext(ctx *types.Context) (err error) {
err = viper.Unmarshal(&ctx.Config)
if err != nil {
return
}
// Inits the context with the configurations loaded
// It reads system repositories, sets logging, and all the
// context which is required to perform luet actions
err = ctx.Init()
if err != nil {
return
}
// no_spinner is not mapped in our configs
ctx.NoSpinner = viper.GetBool("no_spinner")
return
}
func setDefaults(viper *viper.Viper) {
viper.SetDefault("logging.level", "info")
viper.SetDefault("logging.enable_logfile", false)
viper.SetDefault("logging.path", "/var/log/luet.log")
viper.SetDefault("logging.json_format", false)
viper.SetDefault("logging.enable_emoji", true)
viper.SetDefault("logging.color", true)
viper.SetDefault("general.concurrency", runtime.NumCPU())
viper.SetDefault("general.debug", false)
viper.SetDefault("general.quiet", false)
viper.SetDefault("general.show_build_output", false)
viper.SetDefault("general.fatal_warnings", false)
viper.SetDefault("general.http_timeout", 360)
u, err := user.Current()
// os/user doesn't work in from scratch environments
if err != nil || (u != nil && u.Uid == "0") {
viper.SetDefault("general.same_owner", true)
} else {
viper.SetDefault("general.same_owner", false)
}
viper.SetDefault("system.database_engine", "boltdb")
viper.SetDefault("system.database_path", "/var/cache/luet")
viper.SetDefault("system.rootfs", "/")
viper.SetDefault("system.tmpdir_base", filepath.Join(os.TempDir(), "tmpluet"))
viper.SetDefault("system.pkgs_cache_path", "packages")
viper.SetDefault("repos_confdir", []string{"/etc/luet/repos.conf.d"})
viper.SetDefault("config_protect_confdir", []string{"/etc/luet/config.protect.d"})
viper.SetDefault("config_protect_skip", false)
// TODO: Set default to false when we are ready for migration.
viper.SetDefault("config_from_host", true)
viper.SetDefault("cache_repositories", []string{})
viper.SetDefault("system_repositories", []string{})
viper.SetDefault("finalizer_envs", make(map[string]string))
viper.SetDefault("solver.type", "")
viper.SetDefault("solver.rate", 0.7)
viper.SetDefault("solver.discount", 1.0)
viper.SetDefault("solver.max_attempts", 9000)
}
// InitViper inits a new viper
// this is meant to be run just once at beginning to setup the root command
func InitViper(ctx *types.Context, RootCmd *cobra.Command) {
cobra.OnInitialize(initConfig)
pflags := RootCmd.PersistentFlags()
pflags.StringVar(&cfgFile, "config", "", "config file (default is $HOME/.luet.yaml)")
pflags.BoolP("debug", "d", false, "debug output")
pflags.BoolP("quiet", "q", false, "quiet output")
pflags.Bool("fatal", false, "Enables Warnings to exit")
pflags.Bool("enable-logfile", false, "Enable log to file")
pflags.Bool("no-spinner", false, "Disable spinner.")
pflags.Bool("color", ctx.Config.GetLogging().Color, "Enable/Disable color.")
pflags.Bool("emoji", ctx.Config.GetLogging().EnableEmoji, "Enable/Disable emoji.")
pflags.Bool("skip-config-protect", ctx.Config.ConfigProtectSkip,
"Disable config protect analysis.")
pflags.StringP("logfile", "l", ctx.Config.GetLogging().Path,
"Logfile path. Empty value disable log to file.")
pflags.StringSlice("plugin", []string{}, "A list of runtime plugins to load")
// os/user doesn't work in from scratch environments.
// Check if we can retrieve user information.
_, err := user.Current()
if err != nil {
ctx.Warning("failed to retrieve user identity:", err.Error())
}
pflags.Bool("same-owner", ctx.Config.GetGeneral().SameOwner, "Maintain same owner on uncompress.")
pflags.Int("concurrency", runtime.NumCPU(), "Concurrency")
pflags.Int("http-timeout", ctx.Config.General.HTTPTimeout, "Default timeout for http(s) requests")
viper.BindPFlag("logging.color", pflags.Lookup("color"))
viper.BindPFlag("logging.enable_emoji", pflags.Lookup("emoji"))
viper.BindPFlag("logging.enable_logfile", pflags.Lookup("enable-logfile"))
viper.BindPFlag("logging.path", pflags.Lookup("logfile"))
viper.BindPFlag("general.concurrency", pflags.Lookup("concurrency"))
viper.BindPFlag("general.debug", pflags.Lookup("debug"))
viper.BindPFlag("general.quiet", pflags.Lookup("quiet"))
viper.BindPFlag("general.fatal_warnings", pflags.Lookup("fatal"))
viper.BindPFlag("general.same_owner", pflags.Lookup("same-owner"))
viper.BindPFlag("plugin", pflags.Lookup("plugin"))
viper.BindPFlag("general.http_timeout", pflags.Lookup("http-timeout"))
// Currently I maintain this only from cli.
viper.BindPFlag("no_spinner", pflags.Lookup("no-spinner"))
viper.BindPFlag("config_protect_skip", pflags.Lookup("skip-config-protect"))
// Extensions must be binaries with the "luet-" prefix to be shown in the help.
// We also accept extensions in the relative path where luet is started, "extensions/"
exts := extensions.Discover("luet", "extensions")
for _, ex := range exts {
cobraCmd := ex.CobraCommand()
RootCmd.AddCommand(cobraCmd)
}
}
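How the root command consumes these helpers is not shown in this diff; a hedged sketch (the PersistentPreRun hook and error message are assumptions for illustration) would register the persistent flags once and re-parse the configuration into the shared context before each command:

package cmd

import (
	"github.com/mudler/luet/cmd/util"
	"github.com/spf13/cobra"
)

var RootCmd = &cobra.Command{
	Use: "luet",
	PersistentPreRun: func(cmd *cobra.Command, args []string) {
		// Re-read viper (config file, env, bound flags) into the shared context
		// so every subcommand sees the merged configuration.
		if err := util.InitContext(util.DefaultContext); err != nil {
			util.DefaultContext.Fatal("failed to initialize context:", err.Error())
		}
	},
}

func init() {
	// Registers persistent flags, binds them to viper and discovers "luet-" extensions.
	util.InitViper(util.DefaultContext, RootCmd)
}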

cmd/util/search.go Normal file

@@ -0,0 +1,42 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package util
import "github.com/pterm/pterm"
type TableWriter struct {
td pterm.TableData
}
func (l *TableWriter) AppendRow(item []string) {
l.td = append(l.td, item)
}
func (l *TableWriter) Render() {
pterm.DefaultTable.WithHasHeader().WithData(l.td).Render()
}
type ListWriter struct {
bb []pterm.BulletListItem
}
func (l *ListWriter) AppendItem(item pterm.BulletListItem) {
l.bb = append(l.bb, item)
}
func (l *ListWriter) Render() {
pterm.DefaultBulletList.WithItems(l.bb).Render()
}
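These writers are thin wrappers over pterm; a minimal usage sketch (the row contents are made up for illustration):

package main

import (
	"github.com/mudler/luet/cmd/util"
	"github.com/pterm/pterm"
)

func main() {
	t := &util.TableWriter{}
	t.AppendRow([]string{"Package", "Category", "Version"}) // pterm renders the first row as the header
	t.AppendRow([]string{"alpine", "seed", "1.0"})
	t.Render()

	l := &util.ListWriter{}
	l.AppendItem(pterm.BulletListItem{Level: 0, Text: "seed/alpine@1.0"})
	l.Render()
}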

contrib/config/config.protect.d/01_etc.yml.example

@@ -0,0 +1,3 @@
name: "etc_conf"
dirs:
- "/etc/"

contrib/config/get_luet_root.sh Executable file

@@ -0,0 +1,43 @@
#!/bin/bash
if [ $(id -u) -ne 0 ]
then echo "Please run the installer with sudo/as root"
exit
fi
set -ex
export LUET_NOLOCK=true
LUET_VERSION=$(curl -s https://api.github.com/repos/mudler/luet/releases/latest | grep tag_name | awk '{ print $2 }' | sed -e 's/\"//g' -e 's/,//g' || echo "0.9.24" )
LUET_ROOTFS=${LUET_ROOTFS:-/}
LUET_DATABASE_PATH=${LUET_DATABASE_PATH:-/var/luet/db}
LUET_DATABASE_ENGINE=${LUET_DATABASE_ENGINE:-boltdb}
LUET_CONFIG_PROTECT=${LUET_CONFIG_PROTECT:-1}
curl -L https://github.com/mudler/luet/releases/download/${LUET_VERSION}/luet-${LUET_VERSION}-linux-amd64 --output luet
chmod +x luet
mkdir -p /etc/luet/repos.conf.d || true
mkdir -p $LUET_DATABASE_PATH || true
mkdir -p /var/tmp/luet || true
if [ "${LUET_CONFIG_PROTECT}" = "1" ] ; then
mkdir -p /etc/luet/config.protect.d || true
curl -L https://raw.githubusercontent.com/mudler/luet/master/contrib/config/config.protect.d/01_etc.yml.example --output /etc/luet/config.protect.d/01_etc.yml
fi
curl -L https://raw.githubusercontent.com/mocaccinoOS/repository-index/master/packages/mocaccino-repository-index.yml --output /etc/luet/repos.conf.d/mocaccino-repository-index.yml
cat > /etc/luet/luet.yaml <<EOF
general:
debug: false
system:
rootfs: ${LUET_ROOTFS}
database_path: "${LUET_DATABASE_PATH}"
database_engine: "${LUET_DATABASE_ENGINE}"
tmpdir_base: "/var/tmp/luet"
EOF
./luet install -y repository/luet repository/mocaccino-repository-index
./luet install -y system/luet system/luet-extensions
rm -rf luet

contrib/config/luet.yaml

@@ -4,8 +4,11 @@
# Logging configuration section:
# ---------------------------------------------
# logging:
# # Enable logging to file (if path is not empty)
# enable_logfile: false
#
# Leave empty to skip logging to file.
# path: ""
# path: "/var/log/luet.log"
#
# Set logging level: error|warning|info|debug
# level: "info"
@@ -13,6 +16,12 @@
# Enable JSON log format instead of console mode.
# json_format: false.
#
# Disable/Enable color
# color: true
#
# Enable/Disable emoji
# enable_emoji: true
#
# ---------------------------------------------
# General configuration section:
# ---------------------------------------------
@@ -56,6 +65,11 @@
# The path is appended to the rootfs option path.
# database_path: "/var/cache/luet"
#
# Define the tmpdir base directory where luet stores temporary files.
# Default $TMPDIR/tmpluet
# tmpdir_base: "/tmp/tmpluet"
#
#
# ---------------------------------------------
# Repositories configurations directories.
# ---------------------------------------------
@@ -66,7 +80,33 @@
# - /etc/luet/repos.conf.d
#
#
# ---------------------------------------------
# ------------------------------------------------
# Config protect configuration files directories.
# -----------------------------------------------
# Define the list of directories from which to load
# configuration files with the list of config
# protect paths.
# config_protect_confdir:
# - /etc/luet/config.protect.d
#
# Permits ignoring rules defined in the
# config protect confdir and in package
# annotations.
# config_protect_skip: false
#
# The paths used to load repositories and config
# protect files are based on the host rootfs.
# If set to false, the rootfs path is used as prefix.
# config_from_host: true
#
#
# ------------------------------------------------
# Finalizer Environment Variables
# -----------------------------------------------
# finalizer_envs:
# - key: "BUILD_ISO"
# value: "1"
#
# System repositories
# ---------------------------------------------
# In alternative to define repositories files
@@ -96,8 +136,11 @@
# packages. By default caching is disabled.
# cached: false
#
# Path where the tree of specifications is stored. Default path is $database_path/repos/$repo_name
# tree_path: "/var/cache/luet/repos/local"
# Path where the tree of specifications is stored. Default path is $database_path/repos/$repo_name/treefs
# tree_path: "/var/cache/luet/repos/local/treefs"
#
# Path where repository metadata is stored. Default path is $database_path/repos/$repo_name/meta
# meta_path: "/var/cache/luet/repos/local/meta"
#
# Define the list of the URL where retrieve tree and packages.
# urls:
@@ -125,4 +168,4 @@
#
# Number of overall attempts that the solver has available before bailing out.
# max_attempts: 9000
#
#

go.mod

@@ -1,50 +1,77 @@
module github.com/mudler/luet
go 1.12
go 1.16
require (
github.com/DataDog/zstd v1.4.4 // indirect
github.com/MottainaiCI/simplestreams-builder v0.0.0-20190710131531-efb382161f56 // indirect
github.com/Sabayon/pkgs-checker v0.6.2-0.20200315232328-b6efed54b4b1
github.com/DataDog/zstd v1.4.5 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.1.1 // indirect
github.com/Sabayon/pkgs-checker v0.8.4
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef
github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154
github.com/briandowns/spinner v1.7.0
github.com/cavaliercoder/grab v2.0.0+incompatible
github.com/crillab/gophersat v1.1.9-0.20200211102949-9a8bf7f2f0a3
github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23
github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
github.com/containerd/containerd v1.5.2
github.com/crillab/gophersat v1.3.2-0.20210701121804-72b19f5b6b38
github.com/docker/cli v20.10.7+incompatible
github.com/docker/distribution v2.7.1+incompatible
github.com/docker/docker v20.10.7+incompatible
github.com/docker/go-units v0.4.0
github.com/ecooper/qlearning v0.0.0-20160612200101-3075011a69fd
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/ghodss/yaml v1.0.0
github.com/go-yaml/yaml v2.1.0+incompatible // indirect
github.com/hashicorp/go-version v1.2.0
github.com/go-sql-driver/mysql v1.6.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/go-containerregistry v0.6.0
github.com/google/renameio v1.0.0
github.com/google/uuid v1.3.0 // indirect
github.com/gookit/color v1.5.0
github.com/gorilla/mux v1.7.4 // indirect
github.com/hashicorp/go-multierror v1.0.0
github.com/hashicorp/go-version v1.3.0
github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
github.com/klauspost/pgzip v1.2.1
github.com/kr/pretty v0.2.0 // indirect
github.com/klauspost/compress v1.13.0
github.com/klauspost/pgzip v1.2.5
github.com/knqyf263/go-deb-version v0.0.0-20190517075300-09fca494f03d
github.com/kyokomi/emoji v2.1.0+incompatible
github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e
github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0
github.com/mattn/go-isatty v0.0.10 // indirect
github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect
github.com/mudler/docker-companion v0.4.6-0.20191110154655-b8b364100616
github.com/onsi/ginkgo v1.10.1
github.com/onsi/gomega v1.7.0
github.com/otiai10/copy v1.0.2
github.com/pelletier/go-toml v1.6.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.1
github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/moby/moby v20.10.9+incompatible
github.com/moby/sys/mount v0.2.0 // indirect
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290
github.com/onsi/ginkgo v1.16.4
github.com/onsi/gomega v1.16.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.1
github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible
github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
github.com/pkg/errors v0.8.1
github.com/spf13/afero v1.2.2 // indirect
github.com/spf13/cobra v0.0.5
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.5.0
github.com/stevenle/topsort v0.0.0-20130922064739-8130c1d7596b
go.etcd.io/bbolt v1.3.3
go.uber.org/atomic v1.5.1 // indirect
go.uber.org/multierr v1.4.0 // indirect
go.uber.org/zap v1.13.0
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876 // indirect
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f // indirect
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
golang.org/x/sys v0.0.0-20200102141924-c96a22e43c9c // indirect
golang.org/x/tools v0.0.0-20200102200121-6de373a2766c // indirect
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect
gopkg.in/yaml.v2 v2.2.7
mvdan.cc/sh/v3 v3.0.0-beta1
github.com/pkg/errors v0.9.1
github.com/pterm/pterm v0.12.32-0.20211002183613-ada9ef6790c3
github.com/rancher-sandbox/gofilecache v0.0.0-20210330135715-becdeff5df15
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.8.1
github.com/theupdateframework/notary v0.7.0
go.etcd.io/bbolt v1.3.5
go.uber.org/multierr v1.6.0
go.uber.org/zap v1.17.0
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
golang.org/x/mod v0.4.2
golang.org/x/oauth2 v0.0.0-20210810183815-faf39c7919d5 // indirect
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
google.golang.org/genproto v0.0.0-20210811021853-ddbe55d93216 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/ini.v1 v1.63.2 // indirect
gopkg.in/yaml.v2 v2.4.0
helm.sh/helm/v3 v3.3.4
)

go.sum
File diff suppressed because it is too large.


@@ -0,0 +1,28 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package client_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestClient(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Client API Suite")
}

pkg/api/client/search.go Normal file

@@ -0,0 +1,131 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package client
import (
"encoding/json"
"fmt"
"strings"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/mudler/luet/pkg/api/client/utils"
)
func TreePackages(treedir string) (searchResult SearchResult, err error) {
var res []byte
res, err = utils.RunSHOUT("tree", fmt.Sprintf("luet tree pkglist --tree %s --output json", treedir))
if err != nil {
fmt.Println(string(res))
return
}
json.Unmarshal(res, &searchResult)
return
}
func imageAvailable(image string) bool {
_, err := crane.Digest(image)
return err == nil
}
type SearchResult struct {
Packages []Package
}
type Package struct {
Name, Category, Version, Path string
}
func (p Package) String() string {
return fmt.Sprintf("%s/%s@%s", p.Category, p.Name, p.Version)
}
func (p Package) Image(repository string) string {
return fmt.Sprintf("%s:%s-%s-%s", repository, p.Name, p.Category, strings.ReplaceAll(p.Version, "+", "-"))
}
func (p Package) ImageTag() string {
// ${name}-${category}-${version//+/-}
return fmt.Sprintf("%s-%s-%s", p.Name, p.Category, strings.ReplaceAll(p.Version, "+", "-"))
}
func (p Package) ImageMetadata(repository string) string {
return fmt.Sprintf("%s.metadata.yaml", p.Image(repository))
}
func (p Package) ImageAvailable(repository string) bool {
return imageAvailable(p.Image(repository))
}
func (p Package) Equal(pp Package) bool {
if p.Name == pp.Name && p.Category == pp.Category && p.Version == pp.Version {
return true
}
return false
}
func (p Package) EqualS(s string) bool {
if s == fmt.Sprintf("%s/%s", p.Category, p.Name) {
return true
}
return false
}
func (p Package) EqualSV(s string) bool {
if s == fmt.Sprintf("%s/%s@%s", p.Category, p.Name, p.Version) {
return true
}
return false
}
func (p Package) EqualNoV(pp Package) bool {
if p.Name == pp.Name && p.Category == pp.Category {
return true
}
return false
}
func (s SearchResult) FilterByCategory(cat string) SearchResult {
new := SearchResult{Packages: []Package{}}
for _, r := range s.Packages {
if r.Category == cat {
new.Packages = append(new.Packages, r)
}
}
return new
}
func (s SearchResult) FilterByName(name string) SearchResult {
new := SearchResult{Packages: []Package{}}
for _, r := range s.Packages {
if !strings.Contains(r.Name, name) {
new.Packages = append(new.Packages, r)
}
}
return new
}
type Packages []Package
func (p Packages) Exist(pp Package) bool {
for _, pi := range p {
if pp.Equal(pi) {
return true
}
}
return false
}
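A short, hedged sketch of how this client API can be combined (the tree path, category and repository name are placeholders): list the packages of a tree and report which ones do not yet have a final image pushed:

package main

import (
	"fmt"
	"log"

	client "github.com/mudler/luet/pkg/api/client"
)

func main() {
	// TreePackages shells out to `luet tree pkglist --output json` for the given tree.
	res, err := client.TreePackages("./packages")
	if err != nil {
		log.Fatal(err)
	}
	repo := "quay.io/example/repository" // placeholder repository
	for _, p := range res.FilterByCategory("system").Packages {
		if !p.ImageAvailable(repo) {
			fmt.Println("no final image yet for", p.String(), "->", p.Image(repo))
		}
	}
}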


@@ -0,0 +1,47 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package client_test
import (
. "github.com/mudler/luet/pkg/api/client"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Client CLI API", func() {
Context("Reads a package tree from the luet CLI", func() {
It("Correctly detect packages", func() {
t, err := TreePackages("../../../tests/fixtures/alpine")
Expect(err).ToNot(HaveOccurred())
Expect(t).ToNot(BeNil())
Expect(len(t.Packages)).To(Equal(1))
Expect(t.Packages[0].Name).To(Equal("alpine"))
Expect(t.Packages[0].Category).To(Equal("seed"))
Expect(t.Packages[0].Version).To(Equal("1.0"))
Expect(t.Packages[0].ImageAvailable("foo")).To(BeFalse())
Expect(t.Packages[0].Equal(t.Packages[0])).To(BeTrue())
Expect(t.Packages[0].Equal(Package{})).To(BeFalse())
Expect(t.Packages[0].EqualNoV(Package{Name: "alpine", Category: "seed"})).To(BeTrue())
Expect(t.Packages[0].EqualS("seed/alpine")).To(BeTrue())
Expect(t.Packages[0].EqualS("seed/alpinev")).To(BeFalse())
Expect(t.Packages[0].EqualSV("seed/alpine@1.0")).To(BeTrue())
Expect(t.Packages[0].Image("foo")).To(Equal("foo:alpine-seed-1.0"))
Expect(Packages(t.Packages).Exist(t.Packages[0])).To(BeTrue())
Expect(Packages(t.Packages).Exist(Package{})).To(BeFalse())
})
})
})


@@ -0,0 +1,37 @@
package utils
import (
"log"
"os"
"os/exec"
"strings"
)
func RunSHOUT(stepName, bashFragment string) ([]byte, error) {
cmd := exec.Command("sh", "-s")
cmd.Stdin = strings.NewReader(bashWrap(bashFragment))
cmd.Env = os.Environ()
// log.Printf("Running in background: %v", stepName)
return cmd.CombinedOutput()
}
func RunSH(stepName, bashFragment string) error {
cmd := exec.Command("sh", "-s")
cmd.Stdin = strings.NewReader(bashWrap(bashFragment))
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.Env = os.Environ()
log.Printf("Running: %v (%v)", stepName, bashFragment)
return cmd.Run()
}
func bashWrap(cmd string) string {
return `
set -o errexit
set -o nounset
` + cmd + `
`
}
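Both helpers run the given fragment through `sh -s` with errexit/nounset enabled by bashWrap; RunSH streams output to the terminal while RunSHOUT captures the combined output. A small, hedged example (the shell fragments are illustrative):

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/api/client/utils"
)

func main() {
	if err := utils.RunSH("hello", "echo hello from a wrapped sh fragment"); err != nil {
		fmt.Println("step failed:", err)
		return
	}

	out, err := utils.RunSHOUT("hostname", "hostname")
	if err != nil {
		fmt.Println(string(out)) // combined stdout/stderr helps debugging the failed fragment
		return
	}
	fmt.Println("captured:", string(out))
}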

pkg/api/core/bus/events.go Normal file

@@ -0,0 +1,118 @@
package bus
import (
"fmt"
"github.com/mudler/go-pluggable"
"github.com/mudler/luet/pkg/api/core/types"
)
var (
// Package events
// EventPackageInstall is the event fired when a new package is being installed
EventPackageInstall pluggable.EventType = "package.install"
// EventPackageUnInstall is the event fired when a new package is being uninstalled
EventPackageUnInstall pluggable.EventType = "package.uninstall"
// EventPreUpgrade is the event fired before an upgrade is attempted
EventPreUpgrade pluggable.EventType = "package.pre.upgrade"
// EventPostUpgrade is the event fired after an upgrade is done
EventPostUpgrade pluggable.EventType = "package.post.upgrade"
// Package build
// EventPackagePreBuild is the event fired before a package is being built
EventPackagePreBuild pluggable.EventType = "package.pre.build"
// EventPackagePreBuildArtifact is the event fired before a package tarball is being generated
EventPackagePreBuildArtifact pluggable.EventType = "package.pre.build_artifact"
// EventPackagePostBuildArtifact is the event fired after a package tarball is generated
EventPackagePostBuildArtifact pluggable.EventType = "package.post.build_artifact"
// EventPackagePostBuild is the event fired after a package was built
EventPackagePostBuild pluggable.EventType = "package.post.build"
// Image build
// EventImagePreBuild is the event fired before an image is being built
EventImagePreBuild pluggable.EventType = "image.pre.build"
// EventImagePrePull is the event fired before an image is being pulled
EventImagePrePull pluggable.EventType = "image.pre.pull"
// EventImagePrePush is the event fired before an image is being pushed
EventImagePrePush pluggable.EventType = "image.pre.push"
// EventImagePostBuild is the event fired after an image is being built
EventImagePostBuild pluggable.EventType = "image.post.build"
// EventImagePostPull is the event fired after an image is being pulled
EventImagePostPull pluggable.EventType = "image.post.pull"
// EventImagePostPush is the event fired after an image is being pushed
EventImagePostPush pluggable.EventType = "image.post.push"
// Repository events
// EventRepositoryPreBuild is the event fired before a repository is being built
EventRepositoryPreBuild pluggable.EventType = "repository.pre.build"
// EventRepositoryPostBuild is the event fired after a repository was built
EventRepositoryPostBuild pluggable.EventType = "repository.post.build"
// Image unpack
// EventImagePreUnPack is the event fired before unpacking an image to a local dir
EventImagePreUnPack pluggable.EventType = "image.pre.unpack"
// EventImagePostUnPack is the event fired after unpacking an image to a local dir
EventImagePostUnPack pluggable.EventType = "image.post.unpack"
)
// Manager is the bus instance manager, which subscribes plugins to events emitted by Luet
var Manager *Bus = &Bus{
Manager: pluggable.NewManager(
[]pluggable.EventType{
EventPackageInstall,
EventPackageUnInstall,
EventPackagePreBuild,
EventPreUpgrade,
EventPostUpgrade,
EventPackagePreBuildArtifact,
EventPackagePostBuildArtifact,
EventPackagePostBuild,
EventRepositoryPreBuild,
EventRepositoryPostBuild,
EventImagePreBuild,
EventImagePrePull,
EventImagePrePush,
EventImagePostBuild,
EventImagePostPull,
EventImagePostPush,
EventImagePreUnPack,
EventImagePostUnPack,
},
),
}
type Bus struct {
*pluggable.Manager
}
func (b *Bus) Initialize(ctx *types.Context, plugin ...string) {
b.Manager.Load(plugin...).Register()
for _, e := range b.Manager.Events {
b.Manager.Response(e, func(p *pluggable.Plugin, r *pluggable.EventResponse) {
ctx.Debug(
"plugin_event",
"received from",
p.Name,
"at",
p.Executable,
r,
)
if r.Errored() {
err := fmt.Sprintf("Plugin %s at %s had an error: %s", p.Name, p.Executable, r.Error)
ctx.Fatal(err)
} else {
if r.State != "" {
message := fmt.Sprintf(":lollipop: Plugin %s at %s succeeded, state reported:", p.Name, p.Executable)
ctx.Success(message)
ctx.Info(r.State)
}
}
})
}
}
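A hedged sketch of how the bus is expected to be wired from the CLI (the plugin name is a placeholder; in practice plugin names come from the --plugin flag bound above):

package main

import (
	"github.com/mudler/luet/pkg/api/core/bus"
	"github.com/mudler/luet/pkg/api/core/types"
)

func main() {
	ctx := types.NewContext()
	// Load and register runtime plugins; they will receive the events declared above
	// (package.install, image.post.push, ...).
	bus.Manager.Initialize(ctx, "luet-example-plugin")
}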


@@ -0,0 +1,108 @@
// Copyright © 2019-2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package config
import (
"fmt"
"path/filepath"
"strings"
)
type ConfigProtectConfFile struct {
Filename string
Name string `mapstructure:"name" yaml:"name" json:"name"`
Directories []string `mapstructure:"dirs" yaml:"dirs" json:"dirs"`
}
func NewConfigProtectConfFile(filename string) *ConfigProtectConfFile {
return &ConfigProtectConfFile{
Filename: filename,
Name: "",
Directories: []string{},
}
}
func (c *ConfigProtectConfFile) String() string {
return fmt.Sprintf("[%s] filename: %s, dirs: %s", c.Name, c.Filename,
c.Directories)
}
type ConfigProtect struct {
AnnotationDir string
MapProtected map[string]bool
}
func NewConfigProtect(annotationDir string) *ConfigProtect {
if len(annotationDir) > 0 && annotationDir[0:1] != "/" {
annotationDir = "/" + annotationDir
}
return &ConfigProtect{
AnnotationDir: annotationDir,
MapProtected: make(map[string]bool),
}
}
func (c *ConfigProtect) Map(files []string, protected []ConfigProtectConfFile) {
for _, file := range files {
if file[0:1] != "/" {
file = "/" + file
}
if len(protected) > 0 {
for _, conf := range protected {
for _, dir := range conf.Directories {
// Note: on unpack, file paths come without a leading /
if strings.HasPrefix(file, filepath.Clean(dir)) {
// The docker archive modifier works with paths without a leading /.
c.MapProtected[file] = true
goto nextFile
}
}
}
}
if c.AnnotationDir != "" && strings.HasPrefix(file, filepath.Clean(c.AnnotationDir)) {
c.MapProtected[file] = true
}
nextFile:
}
}
func (c *ConfigProtect) Protected(file string) bool {
if file[0:1] != "/" {
file = "/" + file
}
_, ans := c.MapProtected[file]
return ans
}
func (c *ConfigProtect) GetProtectFiles(withSlash bool) []string {
ans := []string{}
for key := range c.MapProtected {
if withSlash {
ans = append(ans, key)
} else {
ans = append(ans, key[1:])
}
}
return ans
}
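A hedged sketch of how ConfigProtect is meant to be driven (file list and protected directories are illustrative; the tests below exercise the same flow): map the files shipped by a package against the protected directories, then query them during unpack:

package main

import (
	"fmt"

	config "github.com/mudler/luet/pkg/api/core/config"
)

func main() {
	files := []string{"etc/foo/my.conf", "usr/bin/foo"} // paths as they appear in the archive (no leading /)
	protected := []config.ConfigProtectConfFile{
		{Name: "etc_conf", Directories: []string{"/etc/"}},
	}

	cp := config.NewConfigProtect("") // optional per-package annotation dir
	cp.Map(files, protected)

	fmt.Println(cp.Protected("etc/foo/my.conf")) // true
	fmt.Println(cp.GetProtectFiles(true))        // [/etc/foo/my.conf]
}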


@@ -0,0 +1,121 @@
// Copyright © 2019-2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package config_test
import (
config "github.com/mudler/luet/pkg/api/core/config"
"github.com/mudler/luet/pkg/api/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Config", func() {
Context("Test config protect", func() {
It("Protect1", func() {
ctx := types.NewContext()
files := []string{
"etc/foo/my.conf",
"usr/bin/foo",
"usr/share/doc/foo.md",
}
cp := config.NewConfigProtect("/etc")
cp.Map(files, ctx.Config.ConfigProtectConfFiles)
Expect(cp.Protected("etc/foo/my.conf")).To(BeTrue())
Expect(cp.Protected("/etc/foo/my.conf")).To(BeTrue())
Expect(cp.Protected("usr/bin/foo")).To(BeFalse())
Expect(cp.Protected("/usr/bin/foo")).To(BeFalse())
Expect(cp.Protected("/usr/share/doc/foo.md")).To(BeFalse())
Expect(cp.GetProtectFiles(false)).To(Equal(
[]string{
"etc/foo/my.conf",
},
))
Expect(cp.GetProtectFiles(true)).To(Equal(
[]string{
"/etc/foo/my.conf",
},
))
})
It("Protect2", func() {
ctx := types.NewContext()
files := []string{
"etc/foo/my.conf",
"usr/bin/foo",
"usr/share/doc/foo.md",
}
cp := config.NewConfigProtect("")
cp.Map(files, ctx.Config.ConfigProtectConfFiles)
Expect(cp.Protected("etc/foo/my.conf")).To(BeFalse())
Expect(cp.Protected("/etc/foo/my.conf")).To(BeFalse())
Expect(cp.Protected("usr/bin/foo")).To(BeFalse())
Expect(cp.Protected("/usr/bin/foo")).To(BeFalse())
Expect(cp.Protected("/usr/share/doc/foo.md")).To(BeFalse())
Expect(cp.GetProtectFiles(false)).To(Equal(
[]string{},
))
Expect(cp.GetProtectFiles(true)).To(Equal(
[]string{},
))
})
It("Protect3: Annotation dir without initial slash", func() {
ctx := types.NewContext()
files := []string{
"etc/foo/my.conf",
"usr/bin/foo",
"usr/share/doc/foo.md",
}
cp := config.NewConfigProtect("etc")
cp.Map(files, ctx.Config.ConfigProtectConfFiles)
Expect(cp.Protected("etc/foo/my.conf")).To(BeTrue())
Expect(cp.Protected("/etc/foo/my.conf")).To(BeTrue())
Expect(cp.Protected("usr/bin/foo")).To(BeFalse())
Expect(cp.Protected("/usr/bin/foo")).To(BeFalse())
Expect(cp.Protected("/usr/share/doc/foo.md")).To(BeFalse())
Expect(cp.GetProtectFiles(false)).To(Equal(
[]string{
"etc/foo/my.conf",
},
))
Expect(cp.GetProtectFiles(true)).To(Equal(
[]string{
"/etc/foo/my.conf",
},
))
})
})
})


@@ -0,0 +1,29 @@
// Copyright © 2019-2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package config_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestSolver(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Config Suite")
}

pkg/api/core/image/cache.go Normal file

@@ -0,0 +1,168 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image
import (
"encoding/json"
"os"
"strings"
"github.com/peterbourgon/diskv"
)
// Cache represents a key-value store which is capable of being offloaded to disk when it
// reaches a pre-defined threshold.
type Cache struct {
store *diskv.Diskv
memory map[string]string
dir string
onDisk bool
maxmemorySize, maxItemSize int
}
// NewCache creates a new key-value cache.
// The cache acts in memory as long as maxItemsize is not reached.
// Once the threshold is met, the cache is offloaded to disk automatically,
// keeping a buffer of maxmemorySize in memory.
func NewCache(path string, maxmemorySize, maxItemsize int) *Cache {
disk := diskv.New(diskv.Options{
BasePath: path,
CacheSizeMax: uint64(maxmemorySize), // 500MB
})
return &Cache{
memory: make(map[string]string),
store: disk,
dir: path,
maxmemorySize: maxmemorySize,
maxItemSize: maxItemsize,
}
}
// This is needed as the disk cache is stored as separate files,
// so keys must not contain the path separator.
// XXX: This is inconvenient because, while iterating over results, we can no
// longer rely on the original key name.
// We don't do any hashing to avoid any performance impact
func cleanKey(s string) string {
return strings.ReplaceAll(s, string(os.PathSeparator), "_")
}
// Count returns the number of items in the cache.
// If the cache is on disk, this might be an expensive call.
func (c *Cache) Count() int {
if !c.onDisk {
return len(c.memory)
}
count := 0
for range c.store.Keys(nil) {
count++
}
return count
}
// Get attempts to retrieve a value for a key
func (c *Cache) Get(key string) (value string, found bool) {
if !c.onDisk {
v, ok := c.memory[key]
return v, ok
}
v, err := c.store.Read(cleanKey(key))
if err == nil {
found = true
}
value = string(v)
return
}
func (c *Cache) flushToDisk() {
for k, v := range c.memory {
c.store.Write(cleanKey(k), []byte(v))
}
c.memory = make(map[string]string)
c.onDisk = true
}
// Set updates or inserts a new value
func (c *Cache) Set(key, value string) error {
if !c.onDisk && c.Count() >= c.maxItemSize && c.maxItemSize != 0 {
c.flushToDisk()
}
if c.onDisk {
return c.store.Write(cleanKey(key), []byte(value))
}
c.memory[key] = value
return nil
}
// SetValue updates or inserts a new value by marshalling it into JSON.
func (c *Cache) SetValue(key string, value interface{}) error {
dat, err := json.Marshal(value)
if err != nil {
return err
}
return c.Set(cleanKey(key), string(dat))
}
// CacheResult represents the key/value result when
// iterating over the cache.
type CacheResult struct {
key, value string
}
// Value returns the underlying value
func (c CacheResult) Value() string {
return c.value
}
// Key returns the cache result key
func (c CacheResult) Key() string {
return c.key
}
// Unmarshal the result into the interface. Use it to retrieve data
// set with SetValue
func (c CacheResult) Unmarshal(i interface{}) error {
return json.Unmarshal([]byte(c.Value()), i)
}
// All iterates over the cache by key.
func (c *Cache) All(fn func(CacheResult)) {
if !c.onDisk {
for k, v := range c.memory {
fn(CacheResult{key: k, value: v})
}
return
}
for key := range c.store.Keys(nil) {
val, _ := c.store.Read(key)
fn(CacheResult{key: key, value: string(val)})
}
}
// Clean the cache
func (c *Cache) Clean() {
c.memory = make(map[string]string)
c.onDisk = false
os.RemoveAll(c.dir)
}
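A compact, hedged usage sketch of the cache (path, sizes and values are illustrative; the test that follows exercises the same behavior):

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/api/core/image"
)

func main() {
	// Spills to disk once more than 100 items are held in memory.
	c := image.NewCache("/tmp/luet-cache", 10*1024*1024, 100)
	defer c.Clean()

	_ = c.SetValue("foo", map[string]string{"digest": "sha256:abcd"})
	c.All(func(r image.CacheResult) {
		m := map[string]string{}
		_ = r.Unmarshal(&m)
		fmt.Println(r.Key(), m["digest"])
	})
}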


@@ -0,0 +1,98 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image_test
import (
"path/filepath"
. "github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Cache", func() {
ctx := types.NewContext()
Context("used as k/v store", func() {
cache := &Cache{}
var dir string
BeforeEach(func() {
ctx = types.NewContext()
var err error
dir, err = ctx.Config.System.TempDir("foo")
Expect(err).ToNot(HaveOccurred())
cache = NewCache(dir, 10*1024*1024, 1) // 10MB Cache when upgrading to files. Max volatile memory of 1 row.
})
AfterEach(func() {
cache.Clean()
})
It("does handle automatically memory upgrade", func() {
cache.Set("foo", "bar")
v, found := cache.Get("foo")
Expect(found).To(BeTrue())
Expect(v).To(Equal("bar"))
Expect(file.Exists(filepath.Join(dir, "foo"))).To(BeFalse())
cache.Set("baz", "bar")
Expect(file.Exists(filepath.Join(dir, "foo"))).To(BeTrue())
Expect(file.Exists(filepath.Join(dir, "baz"))).To(BeTrue())
v, found = cache.Get("foo")
Expect(found).To(BeTrue())
Expect(v).To(Equal("bar"))
Expect(cache.Count()).To(Equal(2))
})
It("does CRUD", func() {
cache.Set("foo", "bar")
v, found := cache.Get("foo")
Expect(found).To(BeTrue())
Expect(v).To(Equal("bar"))
hit := false
cache.All(func(c CacheResult) {
hit = true
Expect(c.Key()).To(Equal("foo"))
Expect(c.Value()).To(Equal("bar"))
})
Expect(hit).To(BeTrue())
})
It("Unmarshals values", func() {
type testStruct struct {
Test string
}
cache.SetValue("foo", &testStruct{Test: "baz"})
n := &testStruct{}
cache.All(func(cr CacheResult) {
err := cr.Unmarshal(n)
Expect(err).ToNot(HaveOccurred())
})
Expect(n.Test).To(Equal("baz"))
})
})
})


@@ -0,0 +1,97 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image
import (
"io"
"os"
containerdCompression "github.com/containerd/containerd/archive/compression"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/empty"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/pkg/errors"
)
func imageFromTar(imagename, architecture, OS string, opener func() (io.ReadCloser, error)) (name.Reference, v1.Image, error) {
newRef, err := name.ParseReference(imagename)
if err != nil {
return nil, nil, err
}
layer, err := tarball.LayerFromOpener(opener)
if err != nil {
return nil, nil, err
}
baseImage := empty.Image
cfg, err := baseImage.ConfigFile()
if err != nil {
return nil, nil, err
}
cfg.Architecture = architecture
cfg.OS = OS
baseImage, err = mutate.ConfigFile(baseImage, cfg)
if err != nil {
return nil, nil, err
}
img, err := mutate.Append(baseImage, mutate.Addendum{
Layer: layer,
History: v1.History{
CreatedBy: "luet",
Comment: "Custom image",
},
})
if err != nil {
return nil, nil, err
}
return newRef, img, nil
}
// CreateTar builds an OCI image tarball from a standard tarball
func CreateTar(srctar, dstimageTar, imagename, architecture, OS string) error {
dstFile, err := os.Create(dstimageTar)
if err != nil {
return errors.Wrap(err, "Cannot create "+dstimageTar)
}
defer dstFile.Close()
newRef, img, err := imageFromTar(imagename, architecture, OS, func() (io.ReadCloser, error) {
f, err := os.Open(srctar)
if err != nil {
return nil, errors.Wrap(err, "Cannot open "+srctar)
}
decompressed, err := containerdCompression.DecompressStream(f)
if err != nil {
return nil, errors.Wrap(err, "Cannot open "+srctar)
}
return decompressed, nil
})
if err != nil {
return err
}
// NOTE: We might also stream that back to the daemon with daemon.Write(tag, img)
return tarball.Write(newRef, img, dstFile)
}
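A brief usage sketch of CreateTar (illustrative only, not part of the diff; paths and the image name are assumptions). It wraps a plain rootfs tarball into a single-layer image tarball that a container runtime can load, as the test that follows does with the docker backend:

package main

import (
	"log"
	"runtime"

	"github.com/mudler/luet/pkg/api/core/image"
)

func main() {
	// /tmp/rootfs.tar is a plain (optionally compressed) tarball of a filesystem tree.
	// The result is an image tarball containing that tree as a single layer.
	err := image.CreateTar("/tmp/rootfs.tar", "/tmp/image.tar", "example/rootfs:latest", runtime.GOARCH, runtime.GOOS)
	if err != nil {
		log.Fatal(err)
	}
	// /tmp/image.tar can now be loaded, e.g. with "docker load -i /tmp/image.tar".
}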


@@ -0,0 +1,80 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image_test
import (
"os"
"path/filepath"
"runtime"
. "github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/api/core/types/artifact"
"github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Create", func() {
Context("Creates an OCI image from a standard tar", func() {
It("creates an image which is loadable", func() {
ctx := types.NewContext()
dst, err := ctx.Config.System.TempFile("dst")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(dst.Name())
srcTar, err := ctx.Config.System.TempFile("srcTar")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(srcTar.Name())
b := backend.NewSimpleDockerBackend(ctx)
b.DownloadImage(backend.Options{ImageName: "alpine"})
img, err := b.ImageReference("alpine", false)
Expect(err).ToNot(HaveOccurred())
_, dir, err := Extract(ctx, img, nil)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(dir)
Expect(file.Touch(filepath.Join(dir, "test"))).ToNot(HaveOccurred())
Expect(file.Exists(filepath.Join(dir, "bin"))).To(BeTrue())
a := artifact.NewPackageArtifact(srcTar.Name())
a.Compress(dir, 1)
// Unfortunately there is no other easy way to test this
err = CreateTar(srcTar.Name(), dst.Name(), "testimage", runtime.GOARCH, runtime.GOOS)
Expect(err).ToNot(HaveOccurred())
b.LoadImage(dst.Name())
Expect(b.ImageExists("testimage")).To(BeTrue())
img, err = b.ImageReference("testimage", false)
Expect(err).ToNot(HaveOccurred())
_, dir, err = Extract(ctx, img, nil)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(dir)
Expect(file.Exists(filepath.Join(dir, "bin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(dir, "test"))).To(BeTrue())
})
})
})

111
pkg/api/core/image/delta.go Normal file

@@ -0,0 +1,111 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image
import (
"archive/tar"
"io"
"regexp"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/mutate"
)
func compileRegexes(regexes []string) []*regexp.Regexp {
var result []*regexp.Regexp
for _, i := range regexes {
r, e := regexp.Compile(i)
if e != nil {
continue
}
result = append(result, r)
}
return result
}
type ImageDiffNode struct {
Name string `json:"Name"`
Size int `json:"Size"`
}
type ImageDiff struct {
Additions []ImageDiffNode `json:"Adds"`
Deletions []ImageDiffNode `json:"Dels"`
Changes []ImageDiffNode `json:"Mods"`
}
func Delta(srcimg, dstimg v1.Image) (res ImageDiff, err error) {
srcReader := mutate.Extract(srcimg)
defer srcReader.Close()
dstReader := mutate.Extract(dstimg)
defer dstReader.Close()
filesSrc, filesDst := map[string]int64{}, map[string]int64{}
srcTar := tar.NewReader(srcReader)
dstTar := tar.NewReader(dstReader)
for {
var hdr *tar.Header
hdr, err = srcTar.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return
}
filesSrc[hdr.Name] = hdr.Size
}
for {
var hdr *tar.Header
hdr, err = dstTar.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return
}
filesDst[hdr.Name] = hdr.Size
}
err = nil
for f, size := range filesDst {
if size2, exist := filesSrc[f]; exist && size2 != size {
res.Changes = append(res.Changes, ImageDiffNode{
Name: f,
Size: int(size),
})
} else if !exist {
res.Additions = append(res.Additions, ImageDiffNode{
Name: f,
Size: int(size),
})
}
}
for f, size := range filesSrc {
if _, exist := filesDst[f]; !exist {
res.Deletions = append(res.Deletions, ImageDiffNode{
Name: f,
Size: int(size),
})
}
}
return
}
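As an illustration (not part of the diff; the image references are the same ones the tests that follow pull from the local daemon), Delta can be used to summarize what one image adds, removes, or changes relative to another:

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/daemon"

	"github.com/mudler/luet/pkg/api/core/image"
)

func main() {
	refA, _ := name.ParseReference("alpine")
	refB, _ := name.ParseReference("golang:alpine")

	imgA, err := daemon.Image(refA)
	if err != nil {
		log.Fatal(err)
	}
	imgB, err := daemon.Image(refB)
	if err != nil {
		log.Fatal(err)
	}

	diff, err := image.Delta(imgA, imgB)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("added=%d removed=%d changed=%d\n",
		len(diff.Additions), len(diff.Deletions), len(diff.Changes))
}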


@@ -0,0 +1,134 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image_test
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
daemon "github.com/google/go-containerregistry/pkg/v1/daemon"
. "github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Delta", func() {
Context("Generates deltas of images", func() {
It("computes delta", func() {
ref, err := name.ParseReference("alpine")
Expect(err).ToNot(HaveOccurred())
img, err := daemon.Image(ref)
Expect(err).ToNot(HaveOccurred())
layers, err := Delta(img, img)
Expect(err).ToNot(HaveOccurred())
Expect(len(layers.Changes)).To(Equal(0))
Expect(len(layers.Additions)).To(Equal(0))
Expect(len(layers.Deletions)).To(Equal(0))
})
Context("ExtractDeltaFiles", func() {
ctx := types.NewContext()
var tmpfile *os.File
var ref, ref2 name.Reference
var img, img2 v1.Image
var err error
ref, _ = name.ParseReference("alpine")
ref2, _ = name.ParseReference("golang:alpine")
img, _ = daemon.Image(ref)
img2, _ = daemon.Image(ref2)
BeforeEach(func() {
ctx = types.NewContext()
tmpfile, err = ioutil.TempFile("", "delta")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpfile.Name()) // clean up
})
It("Extract all deltas", func() {
f, err := ExtractDeltaAdditionsFiles(ctx, img, []string{}, []string{})
Expect(err).ToNot(HaveOccurred())
_, tmpdir, err := Extract(
ctx,
img2,
f,
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "home"))).To(BeFalse())
Expect(file.Exists(filepath.Join(tmpdir, "root", ".cache"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "bin", "sh"))).To(BeFalse())
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go", "bin"))).To(BeTrue())
})
It("Extract deltas and excludes /usr/local/go", func() {
f, err := ExtractDeltaAdditionsFiles(ctx, img, []string{}, []string{"usr/local/go"})
Expect(err).ToNot(HaveOccurred())
Expect(err).ToNot(HaveOccurred())
_, tmpdir, err := Extract(
ctx,
img2,
f,
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go"))).To(BeFalse())
})
It("Extract deltas and excludes /usr/local/go/bin, but includes /usr/local/go", func() {
f, err := ExtractDeltaAdditionsFiles(ctx, img, []string{"usr/local/go"}, []string{"usr/local/go/bin"})
Expect(err).ToNot(HaveOccurred())
_, tmpdir, err := Extract(
ctx,
img2,
f,
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go", "bin"))).To(BeFalse())
})
It("Extract deltas and includes /usr/local/go", func() {
f, err := ExtractDeltaAdditionsFiles(ctx, img, []string{"usr/local/go"}, []string{})
Expect(err).ToNot(HaveOccurred())
_, tmpdir, err := Extract(
ctx,
img2,
f,
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "local", "go"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "root", ".cache"))).To(BeFalse())
})
})
})
})


@@ -0,0 +1,227 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image
import (
"archive/tar"
"context"
"io"
"os"
"path/filepath"
"strings"
containerdarchive "github.com/containerd/containerd/archive"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/pkg/errors"
)
// ExtractDeltaAdditionsFiles returns a filter constructed from a source image and
// include/exclude lists. It keeps only the files that are additions with respect to
// the source image (i.e. not present in it), and further filters them with the
// regexes in the include/exclude lists.
func ExtractDeltaAdditionsFiles(
ctx *types.Context,
srcimg v1.Image,
includes []string, excludes []string,
) (func(h *tar.Header) (bool, error), error) {
includeRegexp := compileRegexes(includes)
excludeRegexp := compileRegexes(excludes)
srcfilesd, err := ctx.Config.System.TempDir("srcfiles")
if err != nil {
return nil, err
}
filesSrc := NewCache(srcfilesd, 50*1024*1024, 10000)
srcReader := mutate.Extract(srcimg)
defer srcReader.Close()
srcTar := tar.NewReader(srcReader)
for {
var hdr *tar.Header
hdr, err := srcTar.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return nil, err
}
switch hdr.Typeflag {
case tar.TypeDir:
filesSrc.Set(filepath.Dir(hdr.Name), "")
default:
filesSrc.Set(hdr.Name, "")
}
}
return func(h *tar.Header) (bool, error) {
fileName := filepath.Join(string(os.PathSeparator), h.Name)
_, exists := filesSrc.Get(h.Name)
if exists {
return false, nil
}
switch {
case len(includes) == 0 && len(excludes) != 0:
for _, i := range excludeRegexp {
if i.MatchString(filepath.Join(string(os.PathSeparator), h.Name)) &&
fileName == filepath.Join(string(os.PathSeparator), h.Name) {
return false, nil
}
}
ctx.Debug("Adding name", fileName)
return true, nil
case len(includes) > 0 && len(excludes) == 0:
for _, i := range includeRegexp {
if i.MatchString(filepath.Join(string(os.PathSeparator), h.Name)) && fileName == filepath.Join(string(os.PathSeparator), h.Name) {
ctx.Debug("Adding name", fileName)
return true, nil
}
}
return false, nil
case len(includes) != 0 && len(excludes) != 0:
for _, i := range includeRegexp {
if i.MatchString(filepath.Join(string(os.PathSeparator), h.Name)) && fileName == filepath.Join(string(os.PathSeparator), h.Name) {
for _, e := range excludeRegexp {
if e.MatchString(fileName) {
return false, nil
}
}
ctx.Debug("Adding name", fileName)
return true, nil
}
}
return false, nil
default:
ctx.Debug("Adding name", fileName)
return true, nil
}
}, nil
}
// ExtractFiles returns a filter that extracts files under the given prefix path (if not empty).
// It then filters files by include and exclude lists,
// both of which are interpreted as regexes.
func ExtractFiles(
ctx *types.Context,
prefixPath string,
includes []string, excludes []string,
) func(h *tar.Header) (bool, error) {
includeRegexp := compileRegexes(includes)
excludeRegexp := compileRegexes(excludes)
return func(h *tar.Header) (bool, error) {
fileName := filepath.Join(string(os.PathSeparator), h.Name)
switch {
case len(includes) == 0 && len(excludes) != 0:
for _, i := range excludeRegexp {
if i.MatchString(filepath.Join(prefixPath, fileName)) {
return false, nil
}
}
if prefixPath != "" {
return strings.HasPrefix(fileName, prefixPath), nil
}
ctx.Debug("Adding name", fileName)
return true, nil
case len(includes) > 0 && len(excludes) == 0:
for _, i := range includeRegexp {
if i.MatchString(filepath.Join(prefixPath, fileName)) {
if prefixPath != "" {
return strings.HasPrefix(fileName, prefixPath), nil
}
ctx.Debug("Adding name", fileName)
return true, nil
}
}
return false, nil
case len(includes) != 0 && len(excludes) != 0:
for _, i := range includeRegexp {
if i.MatchString(filepath.Join(prefixPath, fileName)) {
for _, e := range excludeRegexp {
if e.MatchString(filepath.Join(prefixPath, fileName)) {
return false, nil
}
}
if prefixPath != "" {
return strings.HasPrefix(fileName, prefixPath), nil
}
ctx.Debug("Adding name", fileName)
return true, nil
}
}
return false, nil
default:
if prefixPath != "" {
return strings.HasPrefix(fileName, prefixPath), nil
}
return true, nil
}
}
}
// ExtractReader performs the extraction of the given io.ReadCloser
// into the output directory. It accepts an optional filter
// and additional containerd ApplyOpts.
func ExtractReader(ctx *types.Context, reader io.ReadCloser, output string, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
defer reader.Close()
// If no filter is specified, grab all.
if filter == nil {
filter = func(h *tar.Header) (bool, error) { return true, nil }
}
opts = append(opts, containerdarchive.WithFilter(filter))
// Handle the extraction
c, err := containerdarchive.Apply(context.Background(), output, reader, opts...)
if err != nil {
return 0, "", err
}
return c, output, nil
}
// Extract is syntactic sugar around ExtractReader. It extracts an image into a temporary directory
func Extract(ctx *types.Context, img v1.Image, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
tmpdiffs, err := ctx.Config.GetSystem().TempDir("extraction")
if err != nil {
return 0, "", errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
return ExtractReader(ctx, mutate.Extract(img), tmpdiffs, filter, opts...)
}
// ExtractTo is syntactic sugar around ExtractReader. It extracts an image into the given output directory
func ExtractTo(ctx *types.Context, img v1.Image, output string, filter func(h *tar.Header) (bool, error), opts ...containerdarchive.ApplyOpt) (int64, string, error) {
return ExtractReader(ctx, mutate.Extract(img), output, filter, opts...)
}
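Putting the pieces above together, a hedged sketch (not part of the diff; image names mirror the delta tests earlier in this changeset) of extracting only the additions of one image over another, with an exclude filter:

package main

import (
	"fmt"
	"log"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/daemon"

	"github.com/mudler/luet/pkg/api/core/image"
	"github.com/mudler/luet/pkg/api/core/types"
)

func main() {
	ctx := types.NewContext()

	base, _ := name.ParseReference("alpine")
	full, _ := name.ParseReference("golang:alpine")
	baseImg, err := daemon.Image(base)
	if err != nil {
		log.Fatal(err)
	}
	fullImg, err := daemon.Image(full)
	if err != nil {
		log.Fatal(err)
	}

	// Keep only files that are not already in the base image,
	// excluding anything under usr/local/go.
	filter, err := image.ExtractDeltaAdditionsFiles(ctx, baseImg, []string{}, []string{"usr/local/go"})
	if err != nil {
		log.Fatal(err)
	}

	_, dir, err := image.Extract(ctx, fullImg, filter)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("delta extracted to", dir)
}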


@@ -0,0 +1,111 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image_test
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
daemon "github.com/google/go-containerregistry/pkg/v1/daemon"
. "github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Extract", func() {
Context("extract files from images", func() {
Context("ExtractFiles", func() {
ctx := types.NewContext()
var tmpfile *os.File
var ref name.Reference
var img v1.Image
var err error
BeforeEach(func() {
ctx = types.NewContext()
tmpfile, err = ioutil.TempFile("", "extract")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpfile.Name()) // clean up
ref, err = name.ParseReference("alpine")
Expect(err).ToNot(HaveOccurred())
img, err = daemon.Image(ref)
Expect(err).ToNot(HaveOccurred())
})
It("Extract all files", func() {
_, tmpdir, err := Extract(
ctx,
img,
ExtractFiles(ctx, "", []string{}, []string{}),
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "bin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "bin", "sh"))).To(BeTrue())
})
It("Extract specific dir", func() {
_, tmpdir, err := Extract(
ctx,
img,
ExtractFiles(ctx, "/usr", []string{}, []string{}),
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "sbin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "usr", "bin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "bin", "sh"))).To(BeFalse())
})
It("Extract a dir with includes/excludes", func() {
_, tmpdir, err := Extract(
ctx,
img,
ExtractFiles(ctx, "/usr", []string{"bin"}, []string{"sbin"}),
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "bin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "bin", "sh"))).To(BeFalse())
Expect(file.Exists(filepath.Join(tmpdir, "usr", "sbin"))).To(BeFalse())
})
It("Extract with includes/excludes", func() {
_, tmpdir, err := Extract(
ctx,
img,
ExtractFiles(ctx, "", []string{"/usr|/usr/bin"}, []string{"^/bin"}),
)
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
Expect(file.Exists(filepath.Join(tmpdir, "usr", "bin"))).To(BeTrue())
Expect(file.Exists(filepath.Join(tmpdir, "bin", "sh"))).To(BeFalse())
})
})
})
})


@@ -0,0 +1,34 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package image_test
import (
"testing"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler/backend"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestMutator(t *testing.T) {
RegisterFailHandler(Fail)
b := backend.NewSimpleDockerBackend(types.NewContext())
b.DownloadImage(backend.Options{ImageName: "alpine"})
b.DownloadImage(backend.Options{ImageName: "golang:alpine"})
RunSpecs(t, "Mutator API Suite")
}


@@ -0,0 +1,604 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact
import (
"archive/tar"
"bufio"
"bytes"
"crypto/sha1"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"github.com/docker/docker/pkg/pools"
v1 "github.com/google/go-containerregistry/pkg/v1"
zstd "github.com/klauspost/compress/zstd"
gzip "github.com/klauspost/pgzip"
//"strconv"
"strings"
containerdCompression "github.com/containerd/containerd/archive/compression"
bus "github.com/mudler/luet/pkg/api/core/bus"
config "github.com/mudler/luet/pkg/api/core/config"
"github.com/mudler/luet/pkg/api/core/image"
types "github.com/mudler/luet/pkg/api/core/types"
backend "github.com/mudler/luet/pkg/compiler/backend"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/mudler/luet/pkg/helpers"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
)
// When compiling, we also write a fingerprint.metadata.yaml file describing the PackageArtifact. This allows a separate command to create the repository,
// which then consists of just a repository.yaml: the repository structure plus the list of package artifacts.
// In this way a generic client can fetch the packages and, after unpacking the tree, perform queries to install packages.
type PackageArtifact struct {
Path string `json:"path"`
Dependencies []*PackageArtifact `json:"dependencies"`
CompileSpec *compilerspec.LuetCompilationSpec `json:"compilationspec"`
Checksums Checksums `json:"checksums"`
SourceAssertion solver.PackagesAssertions `json:"-"`
CompressionType compression.Implementation `json:"compressiontype"`
Files []string `json:"files"`
PackageCacheImage string `json:"package_cacheimage"`
Runtime *pkg.DefaultPackage `json:"runtime,omitempty"`
}
func ImageToArtifact(ctx *types.Context, img v1.Image, t compression.Implementation, output string, filter func(h *tar.Header) (bool, error)) (*PackageArtifact, error) {
_, tmpdiffs, err := image.Extract(ctx, img, filter)
if err != nil {
return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(tmpdiffs) // clean up
a := NewPackageArtifact(output)
a.CompressionType = t
err = a.Compress(tmpdiffs, 1)
if err != nil {
return nil, errors.Wrap(err, "Error met while creating package archive")
}
return a, nil
}
func (p *PackageArtifact) ShallowCopy() *PackageArtifact {
copy := *p
return &copy
}
func NewPackageArtifact(path string) *PackageArtifact {
return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: compression.None}
}
func NewPackageArtifactFromYaml(data []byte) (*PackageArtifact, error) {
p := &PackageArtifact{Checksums: Checksums{}}
err := yaml.Unmarshal(data, &p)
if err != nil {
return p, err
}
return p, err
}
func (a *PackageArtifact) Hash() error {
return a.Checksums.Generate(a)
}
func (a *PackageArtifact) Verify() error {
sum := Checksums{}
if err := sum.Generate(a); err != nil {
return err
}
if err := sum.Compare(a.Checksums); err != nil {
return err
}
return nil
}
func (a *PackageArtifact) WriteYAML(dst string) error {
// First compute the artifact checksum. When we write the YAML we want to write up-to-date information.
err := a.Hash()
if err != nil {
return errors.Wrap(err, "Failed generating checksums for artifact")
}
// Update runtime package information
if a.CompileSpec != nil && a.CompileSpec.Package != nil {
runtime, err := a.CompileSpec.Package.GetRuntimePackage()
if err != nil {
return errors.Wrapf(err, "getting runtime package for '%s'", a.CompileSpec.Package.HumanReadableString())
}
a.Runtime = runtime
}
data, err := yaml.Marshal(a)
if err != nil {
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
}
bus.Manager.Publish(bus.EventPackagePreBuildArtifact, a)
mangle, err := NewPackageArtifactFromYaml(data)
if err != nil {
return errors.Wrap(err, "Generated invalid artifact")
}
//p := a.CompileSpec.GetPackage().GetPath()
mangle.CompileSpec.GetPackage().SetPath("")
for _, ass := range mangle.CompileSpec.GetSourceAssertion() {
ass.Package.SetPath("")
}
data, err = yaml.Marshal(mangle)
if err != nil {
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
}
err = ioutil.WriteFile(filepath.Join(dst, a.CompileSpec.GetPackage().GetMetadataFilePath()), data, os.ModePerm)
if err != nil {
return errors.Wrap(err, "While writing PackageArtifact YAML")
}
//a.CompileSpec.GetPackage().SetPath(p)
bus.Manager.Publish(bus.EventPackagePostBuildArtifact, a)
return nil
}
func (a *PackageArtifact) GetFileName() string {
return path.Base(a.Path)
}
// CreateArtifactForFile creates a new artifact from the given file
func CreateArtifactForFile(ctx *types.Context, s string, opts ...func(*PackageArtifact)) (*PackageArtifact, error) {
if _, err := os.Stat(s); os.IsNotExist(err) {
return nil, errors.Wrap(err, "artifact path doesn't exist")
}
fileName := path.Base(s)
archive, err := ctx.Config.GetSystem().TempDir("archive")
if err != nil {
return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
}
defer os.RemoveAll(archive) // clean up
dst := filepath.Join(archive, fileName)
if err := fileHelper.CopyFile(s, dst); err != nil {
return nil, errors.Wrapf(err, "error while copying %s to %s", s, dst)
}
artifact, err := ctx.Config.GetSystem().TempDir("artifact")
if err != nil {
return nil, errors.Wrap(err, "error met while creating tempdir for "+s)
}
a := &PackageArtifact{Path: filepath.Join(artifact, fileName)}
for _, o := range opts {
o(a)
}
return a, a.Compress(archive, 1)
}
type ImageBuilder interface {
BuildImage(backend.Options) error
LoadImage(path string) error
}
// GenerateFinalImage takes an artifact and builds a Docker image with its content
func (a *PackageArtifact) GenerateFinalImage(ctx *types.Context, imageName string, b ImageBuilder, keepPerms bool) error {
tempimage, err := ctx.Config.GetSystem().TempFile("tempimage")
if err != nil {
return errors.Wrap(err, "error met while creating tempdir for "+a.Path)
}
defer os.RemoveAll(tempimage.Name()) // clean up
if err := image.CreateTar(a.Path, tempimage.Name(), imageName, runtime.GOARCH, runtime.GOOS); err != nil {
return errors.Wrap(err, "could not create image from tar")
}
if err := b.LoadImage(tempimage.Name()); err != nil {
return errors.Wrap(err, "while loading image")
}
return nil
}
// Compress archives and compresses the given source into the artifact Path.
// It accepts a source path, which is the content to be archived/compressed,
// and a concurrency parameter.
func (a *PackageArtifact) Compress(src string, concurrency int) error {
switch a.CompressionType {
case compression.Zstandard:
err := helpers.Tar(src, a.Path)
if err != nil {
return err
}
original, err := os.Open(a.Path)
if err != nil {
return err
}
defer original.Close()
zstdFile := a.getCompressedName()
bufferedReader := bufio.NewReader(original)
// Open a file for writing.
dst, err := os.Create(zstdFile)
if err != nil {
return err
}
enc, err := zstd.NewWriter(dst)
if err != nil {
return err
}
_, err = io.Copy(enc, bufferedReader)
if err != nil {
enc.Close()
return err
}
if err := enc.Close(); err != nil {
return err
}
os.RemoveAll(a.Path) // Remove original
// Debug("Removed artifact", a.Path)
a.Path = zstdFile
return nil
case compression.GZip:
err := helpers.Tar(src, a.Path)
if err != nil {
return err
}
original, err := os.Open(a.Path)
if err != nil {
return err
}
defer original.Close()
gzipfile := a.getCompressedName()
bufferedReader := bufio.NewReader(original)
// Open a file for writing.
dst, err := os.Create(gzipfile)
if err != nil {
return err
}
// Create gzip writer.
w := gzip.NewWriter(dst)
w.SetConcurrency(1<<20, concurrency)
defer w.Close()
defer dst.Close()
_, err = io.Copy(w, bufferedReader)
if err != nil {
return err
}
w.Close()
os.RemoveAll(a.Path) // Remove original
// Debug("Removed artifact", a.Path)
// a.CompressedPath = gzipfile
a.Path = gzipfile
return nil
//a.Path = gzipfile
// Defaults to tar only (covers when "none" is supplied)
default:
return helpers.Tar(src, a.getCompressedName())
}
}
func (a *PackageArtifact) getCompressedName() string {
switch a.CompressionType {
case compression.Zstandard:
return a.Path + ".zst"
case compression.GZip:
return a.Path + ".gz"
}
return a.Path
}
// GetUncompressedName returns the artifact path without the extension suffix
func (a *PackageArtifact) GetUncompressedName() string {
switch a.CompressionType {
case compression.Zstandard, compression.GZip:
return strings.TrimSuffix(a.Path, filepath.Ext(a.Path))
}
return a.Path
}
func hashContent(bv []byte) string {
hasher := sha1.New()
hasher.Write(bv)
sha := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
return sha
}
func hashFileContent(path string) (string, error) {
f, err := os.Open(path)
if err != nil {
return "", err
}
defer f.Close()
h := sha1.New()
if _, err := io.Copy(h, f); err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}
func replaceFileTarWrapper(dst string, inputTarStream io.ReadCloser, mods []string, fn func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error)) io.ReadCloser {
pipeReader, pipeWriter := io.Pipe()
go func() {
tarReader := tar.NewReader(inputTarStream)
tarWriter := tar.NewWriter(pipeWriter)
defer inputTarStream.Close()
defer tarWriter.Close()
modify := func(name string, original *tar.Header, tarReader io.Reader) error {
header, data, err := fn(dst, name, original, tarReader)
switch {
case err != nil:
return err
case header == nil:
return nil
}
if header.Name == "" {
header.Name = name
}
header.Size = int64(len(data))
if err := tarWriter.WriteHeader(header); err != nil {
return err
}
if len(data) != 0 {
if _, err := tarWriter.Write(data); err != nil {
return err
}
}
return nil
}
// var remaining []string
var err error
var originalHeader *tar.Header
for {
originalHeader, err = tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
pipeWriter.CloseWithError(err)
return
}
if !helpers.Contains(mods, originalHeader.Name) {
// No modifiers for this file, copy the header and data
if err := tarWriter.WriteHeader(originalHeader); err != nil {
pipeWriter.CloseWithError(err)
return
}
if _, err := pools.Copy(tarWriter, tarReader); err != nil {
pipeWriter.CloseWithError(err)
return
}
continue
}
if err := modify(originalHeader.Name, originalHeader, tarReader); err != nil {
pipeWriter.CloseWithError(err)
return
}
}
// Apply the modifiers that haven't matched any files in the archive
pipeWriter.Close()
}()
return pipeReader
}
func tarModifierWrapperFunc(ctx *types.Context) func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
return func(dst, path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) {
// If the destination path already exists, the target file name is renamed with a postfix.
var destPath string
// Read data. TODO: we need to change the archive callback to permit returning a Reader
buffer := bytes.Buffer{}
if content != nil {
if _, err := buffer.ReadFrom(content); err != nil {
return nil, nil, err
}
}
tarHash := hashContent(buffer.Bytes())
// If the file is not present in the archive but is defined in mods,
// we still receive the callback. Guard against a nil header.
if header != nil {
switch header.Typeflag {
case tar.TypeReg:
destPath = filepath.Join(dst, path)
default:
// Nothing to do: return the original content unchanged
return header, buffer.Bytes(), nil
}
existingHash := ""
f, err := os.Lstat(destPath)
if err == nil {
ctx.Debug("File exists already, computing hash for", destPath)
hash, herr := hashFileContent(destPath)
if herr == nil {
existingHash = hash
}
}
ctx.Debug(destPath, "- existing file hash: ", existingHash, "Tar file hashsum: ", tarHash)
if fileHelper.Exists(destPath) {
ctx.Debug(destPath, "already exists")
}
// We want to protect the file only if the file hashes differ OR the file sizes differ
differs := (existingHash != "" && existingHash != tarHash) || (err != nil && f != nil && header.Size != f.Size())
// Check if exists
if fileHelper.Exists(destPath) && differs {
ctx.Debug(destPath, "already exists and differs")
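// For example: with dst "/" and an archive entry etc/hosts whose content differs
// from the /etc/hosts already on disk, the loop below writes the incoming file as
// etc/._cfg0001_hosts (or the first free ._cfgNNNN_ slot) instead of overwriting it.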
for i := 1; i < 1000; i++ {
name := filepath.Join(filepath.Join(filepath.Dir(path),
fmt.Sprintf("._cfg%04d_%s", i, filepath.Base(path))))
if fileHelper.Exists(name) {
continue
}
ctx.Info(fmt.Sprintf("Found protected file %s. Creating %s.", destPath,
filepath.Join(dst, name)))
return &tar.Header{
Mode: header.Mode,
Typeflag: header.Typeflag,
PAXRecords: header.PAXRecords,
Name: name,
}, buffer.Bytes(), nil
}
}
}
return header, buffer.Bytes(), nil
}
}
func (a *PackageArtifact) GetProtectFiles(ctx *types.Context) (res []string) {
annotationDir := ""
if !ctx.Config.ConfigProtectSkip {
// a.CompileSpec could be nil when artifact.Unpack is used for tree tarball
if a.CompileSpec != nil &&
a.CompileSpec.GetPackage().HasAnnotation(string(pkg.ConfigProtectAnnnotation)) {
dir, ok := a.CompileSpec.GetPackage().GetAnnotations()[string(pkg.ConfigProtectAnnnotation)]
if ok {
annotationDir = dir
}
}
// TODO: check if skip this if we have a.CompileSpec nil
cp := config.NewConfigProtect(annotationDir)
cp.Map(a.Files, ctx.Config.GetConfigProtectConfFiles())
// NOTE: for unpack we need file paths without the initial /
res = cp.GetProtectFiles(false)
}
return
}
// Unpack untars and decompresses the artifact to the given path
func (a *PackageArtifact) Unpack(ctx *types.Context, dst string, keepPerms bool) error {
if !strings.HasPrefix(dst, string(os.PathSeparator)) {
return errors.New("destination must be an absolute path")
}
// Create
protectedFiles := a.GetProtectFiles(ctx)
mod := tarModifierWrapperFunc(ctx)
//tarModifier := helpers.NewTarModifierWrapper(dst, mod)
archiveFile, err := os.Open(a.Path)
if err != nil {
return errors.Wrap(err, "Cannot open "+a.Path)
}
defer archiveFile.Close()
decompressed, err := containerdCompression.DecompressStream(archiveFile)
if err != nil {
return errors.Wrap(err, "Cannot open "+a.Path)
}
replacerArchive := replaceFileTarWrapper(dst, decompressed, protectedFiles, mod)
// or with filter?
// func(header *tar.Header) (bool, error) {
// if helpers.Contains(protectedFiles, header.Name) {
// newHead, _, err := mod(dst, header.Name, header, decompressed)
// if err != nil {
// return false, err
// }
// header.Name = newHead.Name
// // Override target path
// //target = filepath.Join(dest, header.Name)
// }
// // tarModifier.Modifier()
// return true, nil
// },
_, _, err = image.ExtractReader(ctx, replacerArchive, dst, nil)
return err
}
// FileList generates the list of files of a package from the local archive
func (a *PackageArtifact) FileList() ([]string, error) {
var files []string
archiveFile, err := os.Open(a.Path)
if err != nil {
return files, errors.Wrap(err, "Cannot open "+a.Path)
}
defer archiveFile.Close()
decompressed, err := containerdCompression.DecompressStream(archiveFile)
if err != nil {
return files, errors.Wrap(err, "Cannot open "+a.Path)
}
defer decompressed.Close()
tr := tar.NewReader(decompressed)
// untar each segment
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return []string{}, err
}
// determine proper file path info
finfo := hdr.FileInfo()
fileName := hdr.Name
if finfo.Mode().IsDir() {
continue
}
files = append(files, fileName)
}
return files, nil
}
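For orientation, a condensed sketch of the PackageArtifact lifecycle defined above (illustrative only, not part of the diff; /tmp/rootfs is assumed to contain the files to package and all paths are hypothetical):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/mudler/luet/pkg/api/core/types"
	"github.com/mudler/luet/pkg/api/core/types/artifact"
	"github.com/mudler/luet/pkg/compiler/types/compression"
)

func main() {
	ctx := types.NewContext()

	a := artifact.NewPackageArtifact("/tmp/foo.package.tar")
	a.CompressionType = compression.GZip

	if err := a.Compress("/tmp/rootfs", 1); err != nil { // a.Path now points to the .gz
		log.Fatal(err)
	}
	if err := a.Hash(); err != nil { // fill in Checksums
		log.Fatal(err)
	}
	if err := a.Verify(); err != nil { // recompute and compare
		log.Fatal(err)
	}

	files, err := a.FileList()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("shipped files:", files)

	// Unpack requires an absolute destination path.
	dst := "/tmp/unpacked"
	if err := os.MkdirAll(dst, 0755); err != nil {
		log.Fatal(err)
	}
	if err := a.Unpack(ctx, dst, false); err != nil {
		log.Fatal(err)
	}
}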


@@ -0,0 +1,28 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestArtifact(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Artifact Suite")
}


@@ -0,0 +1,233 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact_test
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types"
. "github.com/mudler/luet/pkg/api/core/types/artifact"
. "github.com/mudler/luet/pkg/compiler/backend"
backend "github.com/mudler/luet/pkg/compiler/backend"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
. "github.com/mudler/luet/pkg/compiler"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/tree"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Artifact", func() {
Context("Simple package build definition", func() {
ctx := types.NewContext()
It("Generates a verified delta", func() {
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../../../../tests/fixtures/buildtree")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
cc := NewLuetCompiler(nil, generalRecipe.GetDatabase(), options.WithContext(types.NewContext()))
lspec, err := cc.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
Expect(err).ToNot(HaveOccurred())
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
Expect(lspec.Image).To(Equal("luet/base"))
Expect(lspec.Seed).To(Equal("alpine"))
tmpdir, err := ioutil.TempDir(os.TempDir(), "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpdir2, err := ioutil.TempDir(os.TempDir(), "tree2")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir2) // clean up
unpacked, err := ioutil.TempDir(os.TempDir(), "unpacked")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(unpacked) // clean up
rootfs, err := ioutil.TempDir(os.TempDir(), "rootfs")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(rootfs) // clean up
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM alpine
COPY . /luetbuild
WORKDIR /luetbuild
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin`))
b := NewSimpleDockerBackend(ctx)
opts := backend.Options{
ImageName: "luet/base",
SourcePath: tmpdir,
DockerFileName: "Dockerfile",
Destination: filepath.Join(tmpdir2, "output1.tar"),
}
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM luet/base
COPY . /luetbuild
WORKDIR /luetbuild
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin
RUN echo foo > /test
RUN echo bar > /test2`))
opts2 := backend.Options{
ImageName: "test",
SourcePath: tmpdir,
DockerFileName: "LuetDockerfile",
Destination: filepath.Join(tmpdir, "output2.tar"),
}
Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
})
It("Generates packages images", func() {
b := NewSimpleDockerBackend(ctx)
imageprefix := "foo/"
testString := []byte(`funky test data`)
tmpdir, err := ioutil.TempDir(os.TempDir(), "artifact")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpWork, err := ioutil.TempDir(os.TempDir(), "artifact2")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpWork) // clean up
Expect(os.MkdirAll(filepath.Join(tmpdir, "foo", "bar"), os.ModePerm)).ToNot(HaveOccurred())
err = ioutil.WriteFile(filepath.Join(tmpdir, "test"), testString, 0644)
Expect(err).ToNot(HaveOccurred())
err = ioutil.WriteFile(filepath.Join(tmpdir, "foo", "bar", "test"), testString, 0644)
Expect(err).ToNot(HaveOccurred())
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}}
err = a.Compress(tmpdir, 1)
Expect(err).ToNot(HaveOccurred())
resultingImage := imageprefix + "foo--1.0"
err = a.GenerateFinalImage(ctx, resultingImage, b, false)
Expect(err).ToNot(HaveOccurred())
Expect(b.ImageExists(resultingImage)).To(BeTrue())
result, err := ioutil.TempDir(os.TempDir(), "result")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(result) // clean up
img, err := b.ImageReference(resultingImage, true)
Expect(err).ToNot(HaveOccurred())
_, _, err = image.ExtractTo(
ctx,
img,
result,
nil,
)
Expect(err).ToNot(HaveOccurred())
content, err := ioutil.ReadFile(filepath.Join(result, "test"))
Expect(err).ToNot(HaveOccurred())
Expect(content).To(Equal(testString))
content, err = ioutil.ReadFile(filepath.Join(result, "foo", "bar", "test"))
Expect(err).ToNot(HaveOccurred())
Expect(content).To(Equal(testString))
})
It("Generates empty packages images", func() {
b := NewSimpleDockerBackend(ctx)
imageprefix := "foo/"
tmpdir, err := ioutil.TempDir(os.TempDir(), "artifact")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpWork, err := ioutil.TempDir(os.TempDir(), "artifact2")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpWork) // clean up
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Version: "1.0"}}
err = a.Compress(tmpdir, 1)
Expect(err).ToNot(HaveOccurred())
resultingImage := imageprefix + "foo--1.0"
err = a.GenerateFinalImage(ctx, resultingImage, b, false)
Expect(err).ToNot(HaveOccurred())
Expect(b.ImageExists(resultingImage)).To(BeTrue())
result, err := ioutil.TempDir(os.TempDir(), "result")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(result) // clean up
img, err := b.ImageReference(resultingImage, false)
Expect(err).ToNot(HaveOccurred())
_, _, err = image.ExtractTo(
ctx,
img,
result,
nil,
)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.DirectoryIsEmpty(result)).To(BeTrue())
})
It("Retrieves uncompressed name", func() {
a := NewPackageArtifact("foo.tar.gz")
a.CompressionType = (compression.GZip)
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
a = NewPackageArtifact("foo.tar.zst")
a.CompressionType = compression.Zstandard
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
a = NewPackageArtifact("foo.tar")
a.CompressionType = compression.None
Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
})
})
})


@@ -0,0 +1,63 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact
import (
"crypto/sha512"
"fmt"
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/rancher-sandbox/gofilecache"
)
type ArtifactCache struct {
gofilecache.Cache
}
func NewCache(dir string) *ArtifactCache {
return &ArtifactCache{Cache: *gofilecache.InitCache(dir)}
}
func (c *ArtifactCache) cacheID(a *PackageArtifact) [64]byte {
fingerprint := filepath.Base(a.Path)
if a.CompileSpec != nil && a.CompileSpec.Package != nil {
fingerprint = a.CompileSpec.Package.GetFingerPrint()
}
if len(a.Checksums) > 0 {
for _, cs := range a.Checksums.List() {
t := cs[0]
result := cs[1]
fingerprint += fmt.Sprintf("+%s:%s", t, result)
}
}
return sha512.Sum512([]byte(fingerprint))
}
func (c *ArtifactCache) Get(a *PackageArtifact) (string, error) {
fileName, _, err := c.Cache.GetFile(c.cacheID(a))
return fileName, err
}
func (c *ArtifactCache) Put(a *PackageArtifact) (gofilecache.OutputID, int64, error) {
file, err := os.Open(a.Path)
if err != nil {
return [64]byte{}, 0, errors.Wrapf(err, "failed opening %s", a.Path)
}
defer file.Close()
return c.Cache.Put(c.cacheID(a), file)
}


@@ -0,0 +1,90 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package artifact_test
import (
"io/ioutil"
"os"
"path/filepath"
types "github.com/mudler/luet/pkg/api/core/types"
. "github.com/mudler/luet/pkg/api/core/types/artifact"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Cache", func() {
Context("CacheID", func() {
It("Get and retrieve files", func() {
tmpdir, err := ioutil.TempDir(os.TempDir(), "test")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpdirartifact, err := ioutil.TempDir(os.TempDir(), "testartifact")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdirartifact) // clean up
err = ioutil.WriteFile(filepath.Join(tmpdirartifact, "foo"), []byte(string("foo")), os.ModePerm)
Expect(err).ToNot(HaveOccurred())
a := NewPackageArtifact(filepath.Join(tmpdir, "foo.tar.gz"))
err = a.Compress(tmpdirartifact, 1)
Expect(err).ToNot(HaveOccurred())
cache := NewCache(tmpdir)
// Put an artifact in the cache and retrieve it later.
// The artifact is NOT hashed, so it is referenced in the cache just by its path.
_, _, err = cache.Put(a)
Expect(err).ToNot(HaveOccurred())
path, err := cache.Get(a)
Expect(err).ToNot(HaveOccurred())
b := NewPackageArtifact(path)
ctx := types.NewContext()
err = b.Unpack(ctx, tmpdir, false)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(tmpdir, "foo"))).To(BeTrue())
bb, err := ioutil.ReadFile(filepath.Join(tmpdir, "foo"))
Expect(err).ToNot(HaveOccurred())
Expect(string(bb)).To(Equal("foo"))
// After the artifact is hashed, the fingerprint changes, so the cache no longer sees a hit
// and the lookup that succeeded above now fails, as we expect.
a.Hash()
_, err = cache.Get(a)
Expect(err).To(HaveOccurred())
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "bar"}}
_, _, err = cache.Put(a)
Expect(err).ToNot(HaveOccurred())
c := NewPackageArtifact(filepath.Join(tmpdir, "foo.tar.gz"))
c.Hash()
c.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &pkg.DefaultPackage{Name: "foo", Category: "bar"}}
_, err = cache.Get(c)
Expect(err).ToNot(HaveOccurred())
})
})
})


@@ -13,7 +13,7 @@
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler
package artifact
import (
@@ -24,6 +24,7 @@ import (
"hash"
"io"
"os"
"sort"
// . "github.com/mudler/luet/pkg/logger"
"github.com/pkg/errors"
@@ -42,8 +43,20 @@ type HashOptions struct {
Type HashImplementation
}
func (c Checksums) List() (res [][]string) {
keys := make([]string, 0)
for k, _ := range c {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
res = append(res, []string{k, c[k]})
}
return
}
// Generate generates all Checksums supported for the artifact
func (c *Checksums) Generate(a Artifact) error {
func (c *Checksums) Generate(a *PackageArtifact) error {
return c.generateSHA256(a)
}
@@ -56,13 +69,13 @@ func (c Checksums) Compare(d Checksums) error {
return nil
}
func (c *Checksums) generateSHA256(a Artifact) error {
func (c *Checksums) generateSHA256(a *PackageArtifact) error {
return c.generateSum(a, HashOptions{Hasher: sha256.New(), Type: SHA256})
}
func (c *Checksums) generateSum(a Artifact, opts HashOptions) error {
func (c *Checksums) generateSum(a *PackageArtifact, opts HashOptions) error {
f, err := os.Open(a.GetPath())
f, err := os.Open(a.Path)
if err != nil {
return err
}


@@ -13,13 +13,13 @@
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler_test
package artifact_test
import (
"io/ioutil"
"os"
. "github.com/mudler/luet/pkg/compiler"
. "github.com/mudler/luet/pkg/api/core/types/artifact"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -40,13 +40,13 @@ var _ = Describe("Checksum", func() {
Expect(len(definitionsum)).To(Equal(0))
Expect(len(definitionsum2)).To(Equal(0))
err = buildsum.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/build.yaml"))
err = buildsum.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/build.yaml"))
Expect(err).ToNot(HaveOccurred())
err = definitionsum.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/definition.yaml"))
err = definitionsum.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/definition.yaml"))
Expect(err).ToNot(HaveOccurred())
err = definitionsum2.Generate(NewPackageArtifact("../../tests/fixtures/layers/alpine/definition.yaml"))
err = definitionsum2.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/definition.yaml"))
Expect(err).ToNot(HaveOccurred())
Expect(len(buildsum)).To(Equal(1))


@@ -0,0 +1,475 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
// 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"strings"
"github.com/mudler/luet/pkg/api/core/config"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/package"
solver "github.com/mudler/luet/pkg/solver"
"github.com/pkg/errors"
"gopkg.in/yaml.v2"
)
var AvailableResolvers = strings.Join([]string{solver.QLearningResolverType}, " ")
type LuetLoggingConfig struct {
// Path of the logfile
Path string `mapstructure:"path"`
// Enable/Disable logging to file
EnableLogFile bool `mapstructure:"enable_logfile"`
// Enable JSON format logging in file
JsonFormat bool `mapstructure:"json_format"`
// Log level
Level LogLevel `mapstructure:"level"`
// Enable emoji
EnableEmoji bool `mapstructure:"enable_emoji"`
// Enable/Disable color in logging
Color bool `mapstructure:"color"`
}
type LuetGeneralConfig struct {
SameOwner bool `yaml:"same_owner,omitempty" mapstructure:"same_owner"`
Concurrency int `yaml:"concurrency,omitempty" mapstructure:"concurrency"`
Debug bool `yaml:"debug,omitempty" mapstructure:"debug"`
ShowBuildOutput bool `yaml:"show_build_output,omitempty" mapstructure:"show_build_output"`
FatalWarns bool `yaml:"fatal_warnings,omitempty" mapstructure:"fatal_warnings"`
HTTPTimeout int `yaml:"http_timeout,omitempty" mapstructure:"http_timeout"`
Quiet bool `yaml:"quiet" mapstructure:"quiet"`
}
type LuetSolverOptions struct {
solver.Options `yaml:"options,omitempty"`
Type string `yaml:"type,omitempty" mapstructure:"type"`
LearnRate float32 `yaml:"rate,omitempty" mapstructure:"rate"`
Discount float32 `yaml:"discount,omitempty" mapstructure:"discount"`
MaxAttempts int `yaml:"max_attempts,omitempty" mapstructure:"max_attempts"`
Implementation solver.SolverType `yaml:"implementation,omitempty" mapstructure:"implementation"`
}
func (opts LuetSolverOptions) ResolverIsSet() bool {
switch opts.Type {
case solver.QLearningResolverType:
return true
default:
return false
}
}
func (opts LuetSolverOptions) Resolver() solver.PackageResolver {
switch opts.Type {
case solver.QLearningResolverType:
if opts.LearnRate != 0.0 {
return solver.NewQLearningResolver(opts.LearnRate, opts.Discount, opts.MaxAttempts, 999999)
}
return solver.SimpleQLearningSolver()
}
return &solver.Explainer{}
}
func (opts *LuetSolverOptions) CompactString() string {
return fmt.Sprintf("type: %s rate: %f, discount: %f, attempts: %d, initialobserved: %d",
opts.Type, opts.LearnRate, opts.Discount, opts.MaxAttempts, 999999)
}
type LuetSystemConfig struct {
DatabaseEngine string `yaml:"database_engine" mapstructure:"database_engine"`
DatabasePath string `yaml:"database_path" mapstructure:"database_path"`
Rootfs string `yaml:"rootfs" mapstructure:"rootfs"`
PkgsCachePath string `yaml:"pkgs_cache_path" mapstructure:"pkgs_cache_path"`
TmpDirBase string `yaml:"tmpdir_base" mapstructure:"tmpdir_base"`
}
func (s *LuetSystemConfig) SetRootFS(path string) error {
p, err := fileHelper.Rel2Abs(path)
if err != nil {
return err
}
s.Rootfs = p
return nil
}
func (sc *LuetSystemConfig) GetRepoDatabaseDirPath(name string) string {
dbpath := filepath.Join(sc.Rootfs, sc.DatabasePath)
dbpath = filepath.Join(dbpath, "repos/"+name)
err := os.MkdirAll(dbpath, os.ModePerm)
if err != nil {
panic(err)
}
return dbpath
}
func (sc *LuetSystemConfig) GetSystemRepoDatabaseDirPath() string {
dbpath := filepath.Join(sc.Rootfs,
sc.DatabasePath)
err := os.MkdirAll(dbpath, os.ModePerm)
if err != nil {
panic(err)
}
return dbpath
}
func (sc *LuetSystemConfig) GetSystemPkgsCacheDirPath() (p string) {
var cachepath string
if sc.PkgsCachePath != "" {
cachepath = sc.PkgsCachePath
} else {
// Create dynamic cache for test suites
cachepath, _ = ioutil.TempDir(os.TempDir(), "cachepkgs")
}
if filepath.IsAbs(cachepath) {
p = cachepath
} else {
p = filepath.Join(sc.GetSystemRepoDatabaseDirPath(), cachepath)
}
sc.PkgsCachePath = cachepath // Be consistent with the path we set
return
}
func (sc *LuetSystemConfig) GetRootFsAbs() (string, error) {
return filepath.Abs(sc.Rootfs)
}
type LuetKV struct {
Key string `json:"key" yaml:"key" mapstructure:"key"`
Value string `json:"value" yaml:"value" mapstructure:"value"`
}
type LuetConfig struct {
Logging LuetLoggingConfig `yaml:"logging,omitempty" mapstructure:"logging"`
General LuetGeneralConfig `yaml:"general,omitempty" mapstructure:"general"`
System LuetSystemConfig `yaml:"system" mapstructure:"system"`
Solver LuetSolverOptions `yaml:"solver,omitempty" mapstructure:"solver"`
RepositoriesConfDir []string `yaml:"repos_confdir,omitempty" mapstructure:"repos_confdir"`
ConfigProtectConfDir []string `yaml:"config_protect_confdir,omitempty" mapstructure:"config_protect_confdir"`
ConfigProtectSkip bool `yaml:"config_protect_skip,omitempty" mapstructure:"config_protect_skip"`
ConfigFromHost bool `yaml:"config_from_host,omitempty" mapstructure:"config_from_host"`
SystemRepositories LuetRepositories `yaml:"repositories,omitempty" mapstructure:"repositories"`
FinalizerEnvs []LuetKV `json:"finalizer_envs,omitempty" yaml:"finalizer_envs,omitempty" mapstructure:"finalizer_envs,omitempty"`
ConfigProtectConfFiles []config.ConfigProtectConfFile `yaml:"-" mapstructure:"-"`
}
func (c *LuetConfig) GetSystemDB() pkg.PackageDatabase {
switch c.GetSystem().DatabaseEngine {
case "boltdb":
return pkg.NewBoltDatabase(
filepath.Join(c.GetSystem().GetSystemRepoDatabaseDirPath(), "luet.db"))
default:
return pkg.NewInMemoryDatabase(true)
}
}
func (c *LuetConfig) AddSystemRepository(r LuetRepository) {
c.SystemRepositories = append(c.SystemRepositories, r)
}
func (c *LuetConfig) GetFinalizerEnvsMap() map[string]string {
ans := make(map[string]string)
for _, kv := range c.FinalizerEnvs {
ans[kv.Key] = kv.Value
}
return ans
}
func (c *LuetConfig) SetFinalizerEnv(k, v string) {
keyPresent := false
envs := []LuetKV{}
for _, kv := range c.FinalizerEnvs {
if kv.Key == k {
keyPresent = true
envs = append(envs, LuetKV{Key: kv.Key, Value: v})
} else {
envs = append(envs, kv)
}
}
if !keyPresent {
envs = append(envs, LuetKV{Key: k, Value: v})
}
c.FinalizerEnvs = envs
}
func (c *LuetConfig) GetFinalizerEnvs() []string {
ans := []string{}
for _, kv := range c.FinalizerEnvs {
ans = append(ans, fmt.Sprintf("%s=%s", kv.Key, kv.Value))
}
return ans
}
func (c *LuetConfig) GetFinalizerEnv(k string) (string, error) {
keyNotPresent := true
ans := ""
for _, kv := range c.FinalizerEnvs {
if kv.Key == k {
keyNotPresent = false
ans = kv.Value
}
}
if keyNotPresent {
return "", errors.New("Finalizer key " + k + " not found")
}
return ans, nil
}
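A minimal, hypothetical sketch of how the finalizer environment accessors above fit together; the key and value are illustrative only.
package main

import (
    "fmt"

    types "github.com/mudler/luet/pkg/api/core/types"
)

func main() {
    cfg := &types.LuetConfig{}
    cfg.SetFinalizerEnv("INSTALL_PREFIX", "/usr/local") // adds the key, or overwrites it if present
    fmt.Println(cfg.GetFinalizerEnvs())                 // [INSTALL_PREFIX=/usr/local]
    v, err := cfg.GetFinalizerEnv("INSTALL_PREFIX")
    fmt.Println(v, err)                                 // /usr/local <nil>
}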
func (c *LuetConfig) GetLogging() *LuetLoggingConfig {
return &c.Logging
}
func (c *LuetConfig) GetGeneral() *LuetGeneralConfig {
return &c.General
}
func (c *LuetConfig) GetSystem() *LuetSystemConfig {
return &c.System
}
func (c *LuetConfig) GetSolverOptions() *LuetSolverOptions {
return &c.Solver
}
func (c *LuetConfig) YAML() ([]byte, error) {
return yaml.Marshal(c)
}
func (c *LuetConfig) GetConfigProtectConfFiles() []config.ConfigProtectConfFile {
return c.ConfigProtectConfFiles
}
func (c *LuetConfig) AddConfigProtectConfFile(file *config.ConfigProtectConfFile) {
if c.ConfigProtectConfFiles == nil {
c.ConfigProtectConfFiles = []config.ConfigProtectConfFile{*file}
} else {
c.ConfigProtectConfFiles = append(c.ConfigProtectConfFiles, *file)
}
}
func (c *LuetConfig) LoadRepositories(ctx *Context) error {
var regexRepo = regexp.MustCompile(`.yml$|.yaml$`)
var err error
rootfs := ""
// Respect the rootfs param on read repositories
if !c.ConfigFromHost {
rootfs, err = c.GetSystem().GetRootFsAbs()
if err != nil {
return err
}
}
for _, rdir := range c.RepositoriesConfDir {
rdir = filepath.Join(rootfs, rdir)
ctx.Debug("Parsing Repository Directory", rdir, "...")
files, err := ioutil.ReadDir(rdir)
if err != nil {
ctx.Debug("Skip dir", rdir, ":", err.Error())
continue
}
for _, file := range files {
if file.IsDir() {
continue
}
if !regexRepo.MatchString(file.Name()) {
ctx.Debug("File", file.Name(), "skipped.")
continue
}
content, err := ioutil.ReadFile(path.Join(rdir, file.Name()))
if err != nil {
ctx.Warning("On read file", file.Name(), ":", err.Error())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
r, err := LoadRepository(content)
if err != nil {
ctx.Warning("On parse file", file.Name(), ":", err.Error())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
if r.Name == "" || len(r.Urls) == 0 || r.Type == "" {
ctx.Warning("Invalid repository ", file.Name())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
c.AddSystemRepository(*r)
}
}
return nil
}
func (c *LuetConfig) GetSystemRepository(name string) (*LuetRepository, error) {
var ans *LuetRepository = nil
for idx, repo := range c.SystemRepositories {
if repo.Name == name {
ans = &c.SystemRepositories[idx]
break
}
}
if ans == nil {
return nil, errors.New("Repository " + name + " not found")
}
return ans, nil
}
func (c *LuetConfig) LoadConfigProtect(ctx *Context) error {
var regexConfs = regexp.MustCompile(`.yml$`)
var err error
rootfs := ""
// Respect the rootfs param on read repositories
if !c.ConfigFromHost {
rootfs, err = c.GetSystem().GetRootFsAbs()
if err != nil {
return err
}
}
for _, cdir := range c.ConfigProtectConfDir {
cdir = filepath.Join(rootfs, cdir)
ctx.Debug("Parsing Config Protect Directory", cdir, "...")
files, err := ioutil.ReadDir(cdir)
if err != nil {
ctx.Debug("Skip dir", cdir, ":", err.Error())
continue
}
for _, file := range files {
if file.IsDir() {
continue
}
if !regexConfs.MatchString(file.Name()) {
ctx.Debug("File", file.Name(), "skipped.")
continue
}
content, err := ioutil.ReadFile(path.Join(cdir, file.Name()))
if err != nil {
ctx.Warning("On read file", file.Name(), ":", err.Error())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
r, err := loadConfigProtectConFile(file.Name(), content)
if err != nil {
ctx.Warning("On parse file", file.Name(), ":", err.Error())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
if r.Name == "" || len(r.Directories) == 0 {
ctx.Warning("Invalid config protect file", file.Name())
ctx.Warning("File", file.Name(), "skipped.")
continue
}
c.AddConfigProtectConfFile(r)
}
}
return nil
}
func loadConfigProtectConFile(filename string, data []byte) (*config.ConfigProtectConfFile, error) {
ans := config.NewConfigProtectConfFile(filename)
err := yaml.Unmarshal(data, &ans)
if err != nil {
return nil, err
}
return ans, nil
}
func (c *LuetLoggingConfig) SetLogLevel(s LogLevel) {
c.Level = s
}
func (c *LuetSystemConfig) InitTmpDir() error {
if !filepath.IsAbs(c.TmpDirBase) {
abs, err := fileHelper.Rel2Abs(c.TmpDirBase)
if err != nil {
return errors.Wrap(err, "while converting relative path to absolute path")
}
c.TmpDirBase = abs
}
if _, err := os.Stat(c.TmpDirBase); err != nil {
if os.IsNotExist(err) {
err = os.MkdirAll(c.TmpDirBase, os.ModePerm)
if err != nil {
return err
}
}
}
return nil
}
func (c *LuetSystemConfig) CleanupTmpDir() error {
return os.RemoveAll(c.TmpDirBase)
}
func (c *LuetSystemConfig) TempDir(pattern string) (string, error) {
err := c.InitTmpDir()
if err != nil {
return "", err
}
return ioutil.TempDir(c.TmpDirBase, pattern)
}
func (c *LuetSystemConfig) TempFile(pattern string) (*os.File, error) {
err := c.InitTmpDir()
if err != nil {
return nil, err
}
return ioutil.TempFile(c.TmpDirBase, pattern)
}

View File

@@ -0,0 +1,28 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types_test
import (
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestAPITypes(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Types Suite")
}

View File

@@ -0,0 +1,89 @@
// Copyright © 2019-2020 Ettore Di Giacinto <mudler@gentoo.org>
// Daniele Rondina <geaaru@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types_test
import (
"os"
"path/filepath"
"strings"
types "github.com/mudler/luet/pkg/api/core/types"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Config", func() {
Context("Load Repository1", func() {
ctx := types.NewContext()
ctx.Config.RepositoriesConfDir = []string{
"../../../../tests/fixtures/repos.conf.d",
}
err := ctx.Config.LoadRepositories(ctx)
It("Check Load Repository 1", func() {
Expect(err).Should(BeNil())
Expect(len(ctx.Config.SystemRepositories)).Should(Equal(2))
Expect(ctx.Config.SystemRepositories[0].Name).Should(Equal("test1"))
Expect(ctx.Config.SystemRepositories[0].Priority).Should(Equal(999))
Expect(ctx.Config.SystemRepositories[0].Type).Should(Equal("disk"))
Expect(len(ctx.Config.SystemRepositories[0].Urls)).Should(Equal(1))
Expect(ctx.Config.SystemRepositories[0].Urls[0]).Should(Equal("tests/repos/test1"))
})
It("Chec Load Repository 2", func() {
Expect(err).Should(BeNil())
Expect(len(ctx.Config.SystemRepositories)).Should(Equal(2))
Expect(ctx.Config.SystemRepositories[1].Name).Should(Equal("test2"))
Expect(ctx.Config.SystemRepositories[1].Priority).Should(Equal(1000))
Expect(ctx.Config.SystemRepositories[1].Type).Should(Equal("disk"))
Expect(len(ctx.Config.SystemRepositories[1].Urls)).Should(Equal(1))
Expect(ctx.Config.SystemRepositories[1].Urls[0]).Should(Equal("tests/repos/test2"))
})
})
Context("Simple temporary directory creation", func() {
It("Create Temporary directory", func() {
ctx := types.NewContext()
ctx.Config.GetSystem().TmpDirBase = os.TempDir() + "/tmpluet"
tmpDir, err := ctx.Config.GetSystem().TempDir("test1")
Expect(err).ToNot(HaveOccurred())
Expect(strings.HasPrefix(tmpDir, filepath.Join(os.TempDir(), "tmpluet"))).To(BeTrue())
Expect(fileHelper.Exists(tmpDir)).To(BeTrue())
defer os.RemoveAll(tmpDir)
})
It("Create Temporary file", func() {
ctx := types.NewContext()
ctx.Config.GetSystem().TmpDirBase = os.TempDir() + "/tmpluet"
tmpFile, err := ctx.Config.GetSystem().TempFile("testfile1")
Expect(err).ToNot(HaveOccurred())
Expect(strings.HasPrefix(tmpFile.Name(), filepath.Join(os.TempDir(), "tmpluet"))).To(BeTrue())
Expect(fileHelper.Exists(tmpFile.Name())).To(BeTrue())
defer os.Remove(tmpFile.Name())
})
})
})

View File

@@ -0,0 +1,431 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types
import (
"context"
"fmt"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync"
"github.com/kyokomi/emoji"
"github.com/mudler/luet/pkg/helpers/terminal"
"github.com/pkg/errors"
"github.com/pterm/pterm"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/term"
)
const (
ErrorLevel LogLevel = "error"
WarningLevel LogLevel = "warning"
InfoLevel LogLevel = "info"
SuccessLevel LogLevel = "success"
FatalLevel LogLevel = "fatal"
)
type Context struct {
context.Context
Config *LuetConfig
IsTerminal bool
NoSpinner bool
name string
s *pterm.SpinnerPrinter
spinnerLock *sync.Mutex
z *zap.Logger
ProgressBar *pterm.ProgressbarPrinter
}
func NewContext() *Context {
return &Context{
spinnerLock: &sync.Mutex{},
IsTerminal: terminal.IsTerminal(os.Stdout),
Config: &LuetConfig{
ConfigFromHost: true,
Logging: LuetLoggingConfig{},
General: LuetGeneralConfig{},
System: LuetSystemConfig{
DatabasePath: filepath.Join("var", "db", "packages"),
TmpDirBase: filepath.Join(os.TempDir(), "tmpluet")},
Solver: LuetSolverOptions{},
},
s: pterm.DefaultSpinner.WithShowTimer(false).WithRemoveWhenDone(true),
}
}
func (c *Context) WithName(name string) *Context {
newc := c.Copy()
newc.name = name
return newc
}
func (c *Context) Copy() *Context {
configCopy := *c.Config
configCopy.System = *c.Config.GetSystem()
configCopy.General = *c.Config.GetGeneral()
configCopy.Logging = *c.Config.GetLogging()
ctx := *c
ctxCopy := &ctx
ctxCopy.Config = &configCopy
return ctxCopy
}
// GetTerminalSize returns the width and the height of the active terminal.
func (c *Context) GetTerminalSize() (width, height int, err error) {
w, h, err := term.GetSize(int(os.Stdout.Fd()))
if w <= 0 {
w = 0
}
if h <= 0 {
h = 0
}
if err != nil {
err = errors.New("size not detectable")
}
return w, h, err
}
func (c *Context) Init() (err error) {
if c.IsTerminal {
if !c.Config.Logging.Color {
c.Debug("Disabling colors")
c.NoColor()
}
} else {
c.Debug("Not a terminal, disabling colors")
c.NoColor()
}
if c.Config.General.Quiet {
c.NoColor()
pterm.DisableStyling()
}
c.Debug("Colors", c.Config.GetLogging().Color)
c.Debug("Logging level", c.Config.GetLogging().Level)
c.Debug("Debug mode", c.Config.GetGeneral().Debug)
if c.Config.GetLogging().EnableLogFile && c.Config.GetLogging().Path != "" {
// Init zap logger
err = c.InitZap()
if err != nil {
return
}
}
// Load repositories
err = c.Config.LoadRepositories(c)
if err != nil {
return
}
return
}
func (c *Context) NoColor() {
pterm.DisableColor()
}
func (c *Context) Ask() bool {
var input string
c.Info("Do you want to continue with this operation? [y/N]: ")
_, err := fmt.Scanln(&input)
if err != nil {
return false
}
input = strings.ToLower(input)
if input == "y" || input == "yes" {
return true
}
return false
}
func (c *Context) InitZap() error {
var err error
if c.z == nil {
// TODO: test permissions for opening the logfile.
cfg := zap.NewProductionConfig()
cfg.OutputPaths = []string{c.Config.GetLogging().Path}
cfg.Level = c.Config.GetLogging().Level.ZapLevel()
cfg.ErrorOutputPaths = []string{}
if c.Config.GetLogging().JsonFormat {
cfg.Encoding = "json"
} else {
cfg.Encoding = "console"
}
cfg.DisableCaller = true
cfg.DisableStacktrace = true
cfg.EncoderConfig.TimeKey = "time"
cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
c.z, err = cfg.Build()
if err != nil {
fmt.Fprint(os.Stderr, "Error on initialize file logger: "+err.Error()+"\n")
return err
}
}
return nil
}
// Spinner starts the spinner
func (c *Context) Spinner() {
if !c.IsTerminal || c.NoSpinner {
return
}
c.spinnerLock.Lock()
defer c.spinnerLock.Unlock()
var confLevel int
if c.Config.GetGeneral().Debug {
confLevel = 3
} else {
confLevel = c.Config.GetLogging().Level.ToNumber()
}
if 2 > confLevel {
return
}
if !c.s.IsActive {
c.s, _ = c.s.Start()
}
}
func (c *Context) Screen(text string) {
pterm.DefaultHeader.WithBackgroundStyle(pterm.NewStyle(pterm.BgLightBlue)).WithMargin(2).Println(text)
//pterm.DefaultCenter.Print(pterm.DefaultHeader.WithFullWidth().WithBackgroundStyle(pterm.NewStyle(pterm.BgLightBlue)).WithMargin(10).Sprint(text))
}
func (c *Context) SpinnerText(suffix, prefix string) {
if !c.IsTerminal || c.NoSpinner {
return
}
c.spinnerLock.Lock()
defer c.spinnerLock.Unlock()
if c.Config.GetGeneral().Debug {
fmt.Printf("%s %s\n",
suffix, prefix,
)
} else {
c.s.UpdateText(suffix + prefix)
}
}
func (c *Context) SpinnerStop() {
if !c.IsTerminal {
return
}
c.spinnerLock.Lock()
defer c.spinnerLock.Unlock()
var confLevel int
if c.Config.GetGeneral().Debug {
confLevel = 3
} else {
confLevel = c.Config.GetLogging().Level.ToNumber()
}
if 2 > confLevel {
return
}
if c.s != nil {
c.s.Success()
}
}
func (c *Context) log2File(level LogLevel, msg string) {
switch level {
case FatalLevel:
c.z.Fatal(msg)
case ErrorLevel:
c.z.Error(msg)
case WarningLevel:
c.z.Warn(msg)
case InfoLevel, SuccessLevel:
c.z.Info(msg)
default:
c.z.Debug(msg)
}
}
func (c *Context) Msg(level LogLevel, ln bool, msg ...interface{}) {
var message string
var confLevel, msgLevel int
if c.Config.GetGeneral().Debug {
confLevel = 3
pterm.EnableDebugMessages()
} else {
confLevel = c.Config.GetLogging().Level.ToNumber()
}
msgLevel = level.ToNumber()
if msgLevel > confLevel {
return
}
for _, m := range msg {
message += " " + fmt.Sprintf("%v", m)
}
// Color message
levelMsg := message
if c.Config.GetLogging().Color {
switch level {
case WarningLevel:
levelMsg = pterm.LightYellow(":construction: warning" + message)
case InfoLevel:
levelMsg = message
case SuccessLevel:
levelMsg = pterm.LightGreen(message)
case ErrorLevel:
levelMsg = pterm.Red(message)
default:
levelMsg = pterm.Blue(message)
}
}
// Render emoji when enabled and on a terminal, otherwise strip emoji codes
if c.Config.GetLogging().EnableEmoji && c.IsTerminal {
levelMsg = emoji.Sprint(levelMsg)
} else {
re := regexp.MustCompile(`[:][\w]+[:]`)
levelMsg = re.ReplaceAllString(levelMsg, "")
}
if c.name != "" {
levelMsg = fmt.Sprintf("[%s] %s", c.name, levelMsg)
}
if c.z != nil {
c.log2File(level, message)
}
// Print the message based on the level
switch level {
case SuccessLevel:
if ln {
pterm.Success.Println(levelMsg)
} else {
pterm.Success.Print(levelMsg)
}
case InfoLevel:
if ln {
pterm.Info.Println(levelMsg)
} else {
pterm.Info.Print(levelMsg)
}
case WarningLevel:
if ln {
pterm.Warning.Println(levelMsg)
} else {
pterm.Warning.Print(levelMsg)
}
case ErrorLevel:
if ln {
pterm.Error.Println(levelMsg)
} else {
pterm.Error.Print(levelMsg)
}
case FatalLevel:
if ln {
pterm.Fatal.Println(levelMsg)
} else {
pterm.Fatal.Print(levelMsg)
}
default:
if ln {
pterm.Debug.Println(levelMsg)
} else {
pterm.Debug.Print(levelMsg)
}
}
}
func (c *Context) Warning(mess ...interface{}) {
c.Msg("warning", true, mess...)
if c.Config.GetGeneral().FatalWarns {
os.Exit(2)
}
}
func (c *Context) Debug(mess ...interface{}) {
pc, file, line, ok := runtime.Caller(1)
if ok {
mess = append([]interface{}{fmt.Sprintf("(%s:#%d:%v)",
path.Base(file), line, runtime.FuncForPC(pc).Name())}, mess...)
}
c.Msg("debug", true, mess...)
}
func (c *Context) Info(mess ...interface{}) {
c.Msg("info", true, mess...)
}
func (c *Context) Success(mess ...interface{}) {
c.Msg("success", true, mess...)
}
func (c *Context) Error(mess ...interface{}) {
c.Msg("error", true, mess...)
}
func (c *Context) Fatal(mess ...interface{}) {
c.Error(mess...)
os.Exit(1)
}
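A small usage sketch, assuming only the API above: WithName returns a copy of the Context whose messages carry a "[name]" prefix, which is the point of the contextual logging accessors.
package main

import types "github.com/mudler/luet/pkg/api/core/types"

func main() {
    ctx := types.NewContext()
    installer := ctx.WithName("installer")
    installer.Info("starting")                  // printed prefixed with "[installer]"
    installer.Debug("only shown in debug mode") // suppressed unless debug or the log level allows it
}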
type LogLevel string
func (level LogLevel) ToNumber() int {
switch level {
case ErrorLevel, FatalLevel:
return 0
case WarningLevel:
return 1
case InfoLevel, SuccessLevel:
return 2
default: // debug
return 3
}
}
func (level LogLevel) ZapLevel() zap.AtomicLevel {
switch level {
case FatalLevel:
return zap.NewAtomicLevelAt(zap.FatalLevel)
case ErrorLevel:
return zap.NewAtomicLevelAt(zap.ErrorLevel)
case WarningLevel:
return zap.NewAtomicLevelAt(zap.WarnLevel)
case InfoLevel, SuccessLevel:
return zap.NewAtomicLevelAt(zap.InfoLevel)
default:
return zap.NewAtomicLevelAt(zap.DebugLevel)
}
}

View File

@@ -0,0 +1,125 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types_test
import (
"io"
"io/ioutil"
"os"
"github.com/gookit/color"
types "github.com/mudler/luet/pkg/api/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func captureStdout(f func(w io.Writer)) string {
originalStdout := os.Stdout
r, w, _ := os.Pipe()
os.Stdout = w
color.SetOutput(w)
f(w)
_ = w.Close()
out, _ := ioutil.ReadAll(r)
os.Stdout = originalStdout
color.SetOutput(os.Stdout)
_ = r.Close()
return string(out)
}
var _ = Describe("Context and logging", func() {
ctx := types.NewContext()
BeforeEach(func() {
ctx = types.NewContext()
})
Context("LogLevel", func() {
It("converts it correctly to number and zaplog", func() {
Expect(types.ErrorLevel.ToNumber()).To(Equal(0))
Expect(types.InfoLevel.ToNumber()).To(Equal(2))
Expect(types.WarningLevel.ToNumber()).To(Equal(1))
Expect(types.LogLevel("foo").ToNumber()).To(Equal(3))
Expect(types.WarningLevel.ZapLevel().String()).To(Equal("warn"))
Expect(types.InfoLevel.ZapLevel().String()).To(Equal("info"))
Expect(types.ErrorLevel.ZapLevel().String()).To(Equal("error"))
Expect(types.FatalLevel.ZapLevel().String()).To(Equal("fatal"))
Expect(types.LogLevel("foo").ZapLevel().String()).To(Equal("debug"))
})
})
Context("Context", func() {
It("detect if is a terminal", func() {
Expect(captureStdout(func(w io.Writer) {
_, _, err := ctx.GetTerminalSize()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("size not detectable"))
os.Stdout.Write([]byte(err.Error()))
})).To(ContainSubstring("size not detectable"))
})
It("respects loglevel", func() {
ctx.Config.GetGeneral().Debug = false
Expect(captureStdout(func(w io.Writer) {
ctx.Debug("")
})).To(Equal(""))
ctx.Config.GetGeneral().Debug = true
Expect(captureStdout(func(w io.Writer) {
ctx.Debug("foo")
})).To(ContainSubstring("foo"))
})
It("logs to file", func() {
ctx.NoColor()
t, err := ioutil.TempFile("", "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(t.Name()) // clean up
ctx.Config.GetLogging().EnableLogFile = true
ctx.Config.GetLogging().Path = t.Name()
ctx.Init()
Expect(captureStdout(func(w io.Writer) {
ctx.Info("foot")
})).To(And(ContainSubstring("INFO"), ContainSubstring("foot")))
Expect(captureStdout(func(w io.Writer) {
ctx.Success("test")
})).To(And(ContainSubstring("SUCCESS"), ContainSubstring("test")))
Expect(captureStdout(func(w io.Writer) {
ctx.Error("foobar")
})).To(And(ContainSubstring("ERROR"), ContainSubstring("foobar")))
Expect(captureStdout(func(w io.Writer) {
ctx.Warning("foowarn")
})).To(And(ContainSubstring("WARNING"), ContainSubstring("foowarn")))
l, err := ioutil.ReadFile(t.Name())
Expect(err).ToNot(HaveOccurred())
logs := string(l)
Expect(logs).To(ContainSubstring("foot"))
Expect(logs).To(ContainSubstring("test"))
Expect(logs).To(ContainSubstring("foowarn"))
Expect(logs).To(ContainSubstring("foobar"))
})
})
})

View File

@@ -0,0 +1,96 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types
import (
"fmt"
"runtime"
"gopkg.in/yaml.v2"
)
type LuetRepository struct {
Name string `json:"name" yaml:"name" mapstructure:"name"`
Description string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description"`
Urls []string `json:"urls" yaml:"urls" mapstructure:"urls"`
Type string `json:"type" yaml:"type" mapstructure:"type"`
Mode string `json:"mode,omitempty" yaml:"mode,omitempty" mapstructure:"mode,omitempty"`
Priority int `json:"priority,omitempty" yaml:"priority,omitempty" mapstructure:"priority"`
Enable bool `json:"enable" yaml:"enable" mapstructure:"enable"`
Cached bool `json:"cached,omitempty" yaml:"cached,omitempty" mapstructure:"cached,omitempty"`
Authentication map[string]string `json:"auth,omitempty" yaml:"auth,omitempty" mapstructure:"auth,omitempty"`
TreePath string `json:"treepath,omitempty" yaml:"treepath,omitempty" mapstructure:"treepath"`
MetaPath string `json:"metapath,omitempty" yaml:"metapath,omitempty" mapstructure:"metapath"`
Verify bool `json:"verify,omitempty" yaml:"verify,omitempty" mapstructure:"verify"`
Arch string `json:"arch,omitempty" yaml:"arch,omitempty" mapstructure:"arch"`
ReferenceID string `json:"reference,omitempty" yaml:"reference,omitempty" mapstructure:"reference"`
// Incremented value that identify revision of the repository in a user-friendly way.
Revision int `json:"revision,omitempty" yaml:"-" mapstructure:"-"`
// Epoch time in seconds
LastUpdate string `json:"last_update,omitempty" yaml:"-" mapstructure:"-"`
}
func (r *LuetRepository) String() string {
return fmt.Sprintf("[%s] prio: %d, type: %s, enable: %t, cached: %t",
r.Name, r.Priority, r.Type, r.Enable, r.Cached)
}
// Enabled returns a boolean indicating if the repository should be considered enabled or not
func (r *LuetRepository) Enabled() bool {
return r.Arch != "" && r.Arch == runtime.GOARCH && !r.Enable || r.Enable
}
type LuetRepositories []LuetRepository
func (l LuetRepositories) Enabled() (res LuetRepositories) {
for _, r := range l {
if r.Enabled() {
res = append(res, r)
}
}
return
}
func NewLuetRepository(name, t, descr string, urls []string, priority int, enable, cached bool) *LuetRepository {
return &LuetRepository{
Name: name,
Description: descr,
Urls: urls,
Type: t,
Priority: priority,
Enable: enable,
Cached: cached,
Authentication: make(map[string]string),
}
}
func NewEmptyLuetRepository() *LuetRepository {
return &LuetRepository{
Priority: 9999,
Authentication: make(map[string]string),
}
}
func LoadRepository(data []byte) (*LuetRepository, error) {
ans := NewEmptyLuetRepository()
err := yaml.Unmarshal(data, &ans)
if err != nil {
return nil, err
}
return ans, nil
}
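For reference, a minimal sketch of parsing a single repository definition with LoadRepository; the values are made up but follow the yaml tags of LuetRepository above.
package main

import (
    "fmt"

    types "github.com/mudler/luet/pkg/api/core/types"
)

func main() {
    data := []byte(`
name: "test1"
description: "local test repository"
type: "disk"
enable: true
priority: 999
urls:
  - "tests/repos/test1"
`)
    repo, err := types.LoadRepository(data)
    if err != nil {
        panic(err)
    }
    fmt.Println(repo.String()) // [test1] prio: 999, type: disk, enable: true, cached: false
}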

View File

@@ -0,0 +1,49 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package types_test
import (
"runtime"
types "github.com/mudler/luet/pkg/api/core/types"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Types", func() {
Context("Repository detects underlying arch", func() {
It("is enabled if arch is matching", func() {
r := types.LuetRepository{Arch: runtime.GOARCH}
Expect(r.Enabled()).To(BeTrue())
})
It("is disabled if arch is NOT matching", func() {
r := types.LuetRepository{Arch: "foo"}
Expect(r.Enabled()).To(BeFalse())
})
It("is enabled if arch is NOT matching and enabled is true", func() {
r := types.LuetRepository{Arch: "foo", Enable: true}
Expect(r.Enabled()).To(BeTrue())
})
It("enabled is true", func() {
r := types.LuetRepository{Enable: true}
Expect(r.Enabled()).To(BeTrue())
})
It("enabled is false", func() {
r := types.LuetRepository{Enable: false}
Expect(r.Enabled()).To(BeFalse())
})
})
})

pkg/box/exec.go
View File

@@ -0,0 +1,185 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package box
import (
b64 "encoding/base64"
"fmt"
"os"
"os/exec"
"strings"
"syscall"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
"github.com/pkg/errors"
)
type Box interface {
Run() error
Exec() error
}
type DefaultBox struct {
Name string
Root string
Env []string
Cmd string
Args []string
HostMounts []string
Stdin, Stdout, Stderr bool
}
func NewBox(cmd string, args, hostmounts, env []string, rootfs string, stdin, stdout, stderr bool) Box {
return &DefaultBox{
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
Cmd: cmd,
Args: args,
Root: rootfs,
HostMounts: hostmounts,
Env: env,
}
}
func (b *DefaultBox) Exec() error {
if err := mountProc(b.Root); err != nil {
return errors.Wrap(err, "Failed mounting proc on rootfs")
}
if err := mountDev(b.Root); err != nil {
return errors.Wrap(err, "Failed mounting dev on rootfs")
}
for _, hostMount := range b.HostMounts {
target := hostMount
if strings.Contains(hostMount, ":") {
dest := strings.Split(hostMount, ":")
if len(dest) != 2 {
return errors.New("Invalid arguments for mount, it can be: fullpath, or source:target")
}
hostMount = dest[0]
target = dest[1]
}
if err := mountBind(hostMount, b.Root, target); err != nil {
return errors.Wrap(err, fmt.Sprintf("Failed mounting %s on rootfs", hostMount))
}
}
if err := PivotRoot(b.Root); err != nil {
return errors.Wrap(err, "Failed switching pivot on rootfs")
}
cmd := exec.Command(b.Cmd, b.Args...)
if b.Stdin {
cmd.Stdin = os.Stdin
}
if b.Stderr {
cmd.Stderr = os.Stderr
}
if b.Stdout {
cmd.Stdout = os.Stdout
}
cmd.Env = b.Env
if err := cmd.Run(); err != nil {
return errors.Wrap(err, fmt.Sprintf("Error running the %s command in box.Exec", b.Cmd))
}
return nil
}
func (b *DefaultBox) Run() error {
if !fileHelper.Exists(b.Root) {
return errors.New(b.Root + " does not exist")
}
// This matches with exec CLI command in luet
// TODO: Pass by env var as well
execCmd := []string{"exec", "--rootfs", b.Root, "--entrypoint", b.Cmd}
if b.Stdin {
execCmd = append(execCmd, "--stdin")
}
if b.Stderr {
execCmd = append(execCmd, "--stderr")
}
if b.Stdout {
execCmd = append(execCmd, "--stdout")
}
// Encode the arguments in base64 to avoid issues with arbitrary characters in the given args
execCmd = append(execCmd, "--decode")
for _, m := range b.HostMounts {
execCmd = append(execCmd, "--mount")
execCmd = append(execCmd, m)
}
for _, e := range b.Env {
execCmd = append(execCmd, "--env")
execCmd = append(execCmd, e)
}
for _, a := range b.Args {
execCmd = append(execCmd, b64.StdEncoding.EncodeToString([]byte(a)))
}
cmd := exec.Command("/proc/self/exe", execCmd...)
if b.Stdin {
cmd.Stdin = os.Stdin
}
if b.Stderr {
cmd.Stderr = os.Stderr
}
if b.Stdout {
cmd.Stdout = os.Stdout
}
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWNS |
syscall.CLONE_NEWUTS |
syscall.CLONE_NEWIPC |
syscall.CLONE_NEWPID |
syscall.CLONE_NEWNET |
syscall.CLONE_NEWUSER,
UidMappings: []syscall.SysProcIDMap{
{
ContainerID: 0,
HostID: os.Getuid(),
Size: 1,
},
},
GidMappings: []syscall.SysProcIDMap{
{
ContainerID: 0,
HostID: os.Getgid(),
Size: 1,
},
},
}
if err := cmd.Run(); err != nil {
return errors.Wrap(err, "Failed running Box command in box.Run")
}
return nil
}
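Run re-executes the current binary ("/proc/self/exe") in fresh namespaces and passes the user arguments base64-encoded together with --decode. A hypothetical sketch of the receiving side (the actual exec CLI wiring is not part of this diff) could decode them like this before handing them to Exec:
package main

import (
    b64 "encoding/base64"
    "fmt"
)

// decodeArgs reverses the base64 encoding applied by DefaultBox.Run.
func decodeArgs(encoded []string) ([]string, error) {
    args := make([]string, 0, len(encoded))
    for _, e := range encoded {
        raw, err := b64.StdEncoding.DecodeString(e)
        if err != nil {
            return nil, fmt.Errorf("invalid base64 argument %q: %w", e, err)
        }
        args = append(args, string(raw))
    }
    return args, nil
}

func main() {
    fmt.Println(decodeArgs([]string{"bHMgLWxh"})) // [ls -la] <nil>
}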

pkg/box/rootfs.go
View File

@@ -0,0 +1,94 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package box
import (
"os"
"path/filepath"
"syscall"
)
func PivotRoot(newroot string) error {
putold := filepath.Join(newroot, "/.pivot_root")
// bind mount newroot to itself - this is a slight hack needed to satisfy the
// pivot_root requirement that newroot and putold must not be on the same
// filesystem as the current root
if err := syscall.Mount(newroot, newroot, "", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
return err
}
// create putold directory
if err := os.MkdirAll(putold, 0700); err != nil {
return err
}
// call pivot_root
if err := syscall.PivotRoot(newroot, putold); err != nil {
return err
}
// ensure current working directory is set to new root
if err := os.Chdir("/"); err != nil {
return err
}
// umount putold, which now lives at /.pivot_root
putold = "/.pivot_root"
if err := syscall.Unmount(putold, syscall.MNT_DETACH); err != nil {
return err
}
// remove putold
if err := os.RemoveAll(putold); err != nil {
return err
}
return nil
}
func mountProc(newroot string) error {
source := "proc"
target := filepath.Join(newroot, "/proc")
fstype := "proc"
flags := 0
data := ""
os.MkdirAll(target, 0755)
if err := syscall.Mount(source, target, fstype, uintptr(flags), data); err != nil {
return err
}
return nil
}
func mountBind(hostfolder, newroot, dst string) error {
source := hostfolder
target := filepath.Join(newroot, dst)
fstype := "bind"
data := ""
os.MkdirAll(target, 0755)
if err := syscall.Mount(source, target, fstype, syscall.MS_BIND|syscall.MS_REC, data); err != nil {
return err
}
return nil
}
func mountDev(newroot string) error {
return mountBind("/dev", newroot, "/dev")
}

View File

@@ -1,504 +0,0 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler
import (
"archive/tar"
"bufio"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
gzip "github.com/klauspost/pgzip"
//"strconv"
"strings"
"sync"
. "github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
"github.com/mudler/luet/pkg/solver"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
)
type CompressionImplementation string
const (
None CompressionImplementation = "none" // e.g. tar for standard packages
GZip CompressionImplementation = "gzip"
)
type ArtifactIndex []Artifact
func (i ArtifactIndex) CleanPath() ArtifactIndex {
newIndex := ArtifactIndex{}
for _, n := range i {
art := n.(*PackageArtifact)
// FIXME: This is a dup and makes it difficult to add attributes to artifacts
newIndex = append(newIndex, &PackageArtifact{
Path: path.Base(n.GetPath()),
SourceAssertion: art.SourceAssertion,
CompileSpec: art.CompileSpec,
Dependencies: art.Dependencies,
CompressionType: art.CompressionType,
Checksums: art.Checksums,
})
}
return newIndex
//Update if exists, otherwise just create
}
// When compiling, we also write a <fingerprint>.metadata.yaml file containing the PackageArtifact. This allows another command to create the repository,
// which then consists of just a repository.yaml describing the repository structure with the list of package artifacts.
// A generic client can fetch the packages and, after unpacking the tree, perform queries to install packages.
type PackageArtifact struct {
Path string `json:"path"`
Dependencies []*PackageArtifact `json:"dependencies"`
CompileSpec *LuetCompilationSpec `json:"compilationspec"`
Checksums Checksums `json:"checksums"`
SourceAssertion solver.PackagesAssertions `json:"-"`
CompressionType CompressionImplementation `json:"compressiontype"`
}
func NewPackageArtifact(path string) Artifact {
return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: None}
}
func NewPackageArtifactFromYaml(data []byte) (Artifact, error) {
p := &PackageArtifact{Checksums: Checksums{}}
err := yaml.Unmarshal(data, &p)
if err != nil {
return p, err
}
return p, err
}
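To make the metadata round-trip above concrete, a minimal, hypothetical sketch that reads a previously written <fingerprint>.metadata.yaml back into a PackageArtifact using the pre-refactor compiler API shown in this file; the file name is illustrative.
package main

import (
    "fmt"
    "io/ioutil"

    compiler "github.com/mudler/luet/pkg/compiler"
)

func main() {
    // File name is hypothetical; it follows the <fingerprint>.metadata.yaml convention.
    data, err := ioutil.ReadFile("enman-app-admin-1.4.0.metadata.yaml")
    if err != nil {
        panic(err)
    }
    art, err := compiler.NewPackageArtifactFromYaml(data)
    if err != nil {
        panic(err)
    }
    fmt.Println(art.GetPath(), art.GetChecksums())
}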
func LoadArtifactFromYaml(spec CompilationSpec) (Artifact, error) {
metaFile := spec.GetPackage().GetFingerPrint() + ".metadata.yaml"
dat, err := ioutil.ReadFile(spec.Rel(metaFile))
if err != nil {
return nil, errors.Wrap(err, "Error reading file "+metaFile)
}
art, err := NewPackageArtifactFromYaml(dat)
if err != nil {
return nil, errors.Wrap(err, "Error writing file "+metaFile)
}
// It is relative, set it back to abs
art.SetPath(spec.Rel(art.GetPath()))
return art, nil
}
func (a *PackageArtifact) SetCompressionType(t CompressionImplementation) {
a.CompressionType = t
}
func (a *PackageArtifact) GetChecksums() Checksums {
return a.Checksums
}
func (a *PackageArtifact) SetChecksums(c Checksums) {
a.Checksums = c
}
func (a *PackageArtifact) Hash() error {
return a.Checksums.Generate(a)
}
func (a *PackageArtifact) Verify() error {
sum := Checksums{}
err := sum.Generate(a)
if err != nil {
return err
}
err = sum.Compare(a.Checksums)
if err != nil {
return err
}
return nil
}
func (a *PackageArtifact) WriteYaml(dst string) error {
// First compute the checksum of the artifact. When we write the yaml we want to write up-to-date information.
err := a.Hash()
if err != nil {
return errors.Wrap(err, "Failed generating checksums for artifact")
}
//p := a.CompileSpec.GetPackage().GetPath()
//a.CompileSpec.GetPackage().SetPath("")
// for _, ass := range a.CompileSpec.GetSourceAssertion() {
// ass.Package.SetPath("")
// }
data, err := yaml.Marshal(a)
if err != nil {
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
}
mangle, err := NewPackageArtifactFromYaml(data)
if err != nil {
return errors.Wrap(err, "Generated invalid artifact")
}
//p := a.CompileSpec.GetPackage().GetPath()
mangle.GetCompileSpec().GetPackage().SetPath("")
for _, ass := range mangle.GetCompileSpec().GetSourceAssertion() {
ass.Package.SetPath("")
}
data, err = yaml.Marshal(mangle)
if err != nil {
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
}
err = ioutil.WriteFile(filepath.Join(dst, a.GetCompileSpec().GetPackage().GetFingerPrint()+".metadata.yaml"), data, os.ModePerm)
if err != nil {
return errors.Wrap(err, "While writing PackageArtifact YAML")
}
//a.CompileSpec.GetPackage().SetPath(p)
return nil
}
func (a *PackageArtifact) GetSourceAssertion() solver.PackagesAssertions {
return a.SourceAssertion
}
func (a *PackageArtifact) SetCompileSpec(as CompilationSpec) {
a.CompileSpec = as.(*LuetCompilationSpec)
}
func (a *PackageArtifact) GetCompileSpec() CompilationSpec {
return a.CompileSpec
}
func (a *PackageArtifact) SetSourceAssertion(as solver.PackagesAssertions) {
a.SourceAssertion = as
}
func (a *PackageArtifact) GetDependencies() []Artifact {
ret := []Artifact{}
for _, d := range a.Dependencies {
ret = append(ret, d)
}
return ret
}
func (a *PackageArtifact) SetDependencies(d []Artifact) {
ret := []*PackageArtifact{}
for _, dd := range d {
ret = append(ret, dd.(*PackageArtifact))
}
a.Dependencies = ret
}
func (a *PackageArtifact) GetPath() string {
return a.Path
}
func (a *PackageArtifact) SetPath(p string) {
a.Path = p
}
// Compress archives the src path and writes the result, optionally gzip-compressed, to the artifact path
func (a *PackageArtifact) Compress(src string, concurrency int) error {
switch a.CompressionType {
case GZip:
err := helpers.Tar(src, a.Path)
if err != nil {
return err
}
original, err := os.Open(a.Path)
if err != nil {
return err
}
defer original.Close()
gzipfile := a.Path + ".gz"
bufferedReader := bufio.NewReader(original)
// Open a file for writing.
dst, err := os.Create(gzipfile)
if err != nil {
return err
}
// Create gzip writer.
w := gzip.NewWriter(dst)
w.SetConcurrency(concurrency, 10)
defer w.Close()
defer dst.Close()
_, err = io.Copy(w, bufferedReader)
if err != nil {
return err
}
w.Close()
os.RemoveAll(a.Path) // Remove original
// a.CompressedPath = gzipfile
a.Path = gzipfile
return nil
//a.Path = gzipfile
// Defaults to tar only (covers when "none" is supplied)
default:
return helpers.Tar(src, a.Path)
}
return errors.New("Compression type must be supplied")
}
// Unpack untars and, if needed, decompresses the artifact to the given path
func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
switch a.CompressionType {
case GZip:
// Create the uncompressed archive
archive, err := os.Create(a.GetPath() + ".uncompressed")
if err != nil {
return err
}
defer os.RemoveAll(a.GetPath() + ".uncompressed")
defer archive.Close()
original, err := os.Open(a.Path)
if err != nil {
return errors.Wrap(err, "Cannot open "+a.Path)
}
defer original.Close()
bufferedReader := bufio.NewReader(original)
r, err := gzip.NewReader(bufferedReader)
if err != nil {
return err
}
defer r.Close()
_, err = io.Copy(archive, r)
if err != nil {
return errors.Wrap(err, "Cannot copy to "+a.GetPath()+".uncompressed")
}
err = helpers.Untar(a.GetPath()+".uncompressed", dst,
LuetCfg.GetGeneral().SameOwner)
if err != nil {
return err
}
return nil
// Defaults to tar only (covers when "none" is supplied)
default:
return helpers.Untar(a.GetPath(), dst, LuetCfg.GetGeneral().SameOwner)
}
return errors.New("Compression type must be supplied")
}
func (a *PackageArtifact) FileList() ([]string, error) {
var tr *tar.Reader
switch a.CompressionType {
case GZip:
// Create the uncompressed archive
archive, err := os.Create(a.GetPath() + ".uncompressed")
if err != nil {
return []string{}, err
}
defer os.RemoveAll(a.GetPath() + ".uncompressed")
defer archive.Close()
original, err := os.Open(a.Path)
if err != nil {
return []string{}, errors.Wrap(err, "Cannot open "+a.Path)
}
defer original.Close()
bufferedReader := bufio.NewReader(original)
r, err := gzip.NewReader(bufferedReader)
if err != nil {
return []string{}, err
}
defer r.Close()
tr = tar.NewReader(r)
// Defaults to tar only (covers when "none" is supplied)
default:
tarFile, err := os.Open(a.GetPath())
if err != nil {
return []string{}, errors.Wrap(err, "Could not open package archive")
}
defer tarFile.Close()
tr = tar.NewReader(tarFile)
}
var files []string
// untar each segment
for {
hdr, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
return []string{}, err
}
// determine proper file path info
finfo := hdr.FileInfo()
fileName := hdr.Name
if finfo.Mode().IsDir() {
continue
}
files = append(files, fileName)
// if a dir, create it, then go to next segment
}
return files, nil
}
type CopyJob struct {
Src, Dst string
Artifact string
}
func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
defer wg.Done()
for job := range s {
//Info("#"+strconv.Itoa(i), "copying", job.Src, "to", job.Dst)
// if dir, err := helpers.IsDirectory(job.Src); err == nil && dir {
// err = helpers.CopyDir(job.Src, job.Dst)
// if err != nil {
// Warning("Error copying dir", job, err)
// }
// continue
// }
if !helpers.Exists(job.Dst) {
if err := helpers.CopyFile(job.Src, job.Dst); err != nil {
Warning("Error copying", job, err)
}
}
}
}
// ExtractArtifactFromDelta extracts deltas from ArtifactLayer from an image in tar format
func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, t CompressionImplementation) (Artifact, error) {
archive, err := ioutil.TempDir(os.TempDir(), "archive")
if err != nil {
return nil, errors.Wrap(err, "Error met while creating tempdir for archive")
}
defer os.RemoveAll(archive) // clean up
if strings.HasSuffix(src, ".tar") {
rootfs, err := ioutil.TempDir(os.TempDir(), "rootfs")
if err != nil {
return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(rootfs) // clean up
err = helpers.Untar(src, rootfs, keepPerms)
if err != nil {
return nil, errors.Wrap(err, "Error met while unpacking rootfs")
}
src = rootfs
}
toCopy := make(chan CopyJob)
var wg = new(sync.WaitGroup)
for i := 0; i < concurrency; i++ {
wg.Add(1)
go worker(i, wg, toCopy)
}
// Handle includes in the spec. If specified, they filter what ends up in the package
if len(includes) > 0 {
var includeRegexp []*regexp.Regexp
for _, i := range includes {
r, e := regexp.Compile(i)
if e != nil {
Warning("Failed compiling regex:", e)
continue
}
includeRegexp = append(includeRegexp, r)
}
for _, l := range layers {
// Consider d.Additions (and d.Changes? - warn at least) only
ADDS:
for _, a := range l.Diffs.Additions {
for _, i := range includeRegexp {
if i.MatchString(a.Name) {
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
continue ADDS
}
}
}
}
} else {
// Otherwise just grab all
for _, l := range layers {
// Consider d.Additions (and d.Changes? - warn at least) only
for _, a := range l.Diffs.Additions {
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
}
}
}
close(toCopy)
wg.Wait()
a := NewPackageArtifact(dst)
a.SetCompressionType(t)
err = a.Compress(archive, concurrency)
if err != nil {
return nil, errors.Wrap(err, "Error met while creating package archive")
}
return a, nil
}
func ComputeArtifactLayerSummary(diffs []ArtifactLayer) ArtifactLayersSummary {
ans := ArtifactLayersSummary{
Layers: make([]ArtifactLayerSummary, 0),
}
for _, layer := range diffs {
sum := ArtifactLayerSummary{
FromImage: layer.FromImage,
ToImage: layer.ToImage,
AddFiles: 0,
AddSizes: 0,
DelFiles: 0,
DelSizes: 0,
ChangeFiles: 0,
ChangeSizes: 0,
}
for _, a := range layer.Diffs.Additions {
sum.AddFiles++
sum.AddSizes += int64(a.Size)
}
for _, d := range layer.Diffs.Deletions {
sum.DelFiles++
sum.DelSizes += int64(d.Size)
}
for _, c := range layer.Diffs.Changes {
sum.ChangeFiles++
sum.ChangeSizes += int64(c.Size)
}
ans.Layers = append(ans.Layers, sum)
}
return ans
}

View File

@@ -1,153 +0,0 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler_test
import (
"io/ioutil"
"os"
"path/filepath"
. "github.com/mudler/luet/pkg/compiler/backend"
. "github.com/mudler/luet/pkg/compiler"
helpers "github.com/mudler/luet/pkg/helpers"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/tree"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Artifact", func() {
Context("Simple package build definition", func() {
It("Generates a verified delta", func() {
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../tests/fixtures/buildtree")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase(), NewDefaultCompilerOptions())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
Expect(err).ToNot(HaveOccurred())
lspec, ok := spec.(*LuetCompilationSpec)
Expect(ok).To(BeTrue())
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
Expect(lspec.Image).To(Equal("luet/base"))
Expect(lspec.Seed).To(Equal("alpine"))
tmpdir, err := ioutil.TempDir(os.TempDir(), "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
tmpdir2, err := ioutil.TempDir(os.TempDir(), "tree2")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir2) // clean up
unpacked, err := ioutil.TempDir(os.TempDir(), "unpacked")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(unpacked) // clean up
rootfs, err := ioutil.TempDir(os.TempDir(), "rootfs")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(rootfs) // clean up
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM alpine
COPY . /luetbuild
WORKDIR /luetbuild
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin`))
b := NewSimpleDockerBackend()
opts := CompilerBackendOptions{
ImageName: "luet/base",
SourcePath: tmpdir,
DockerFileName: "Dockerfile",
Destination: filepath.Join(tmpdir2, "output1.tar"),
}
Expect(b.ImageDefinitionToTar(opts)).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err = helpers.Read(filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM luet/base
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin
RUN echo foo > /test
RUN echo bar > /test2`))
opts = CompilerBackendOptions{
ImageName: "test",
SourcePath: tmpdir,
DockerFileName: "LuetDockerfile",
Destination: filepath.Join(tmpdir, "output2.tar"),
}
Expect(b.ImageDefinitionToTar(opts)).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
diffs, err := b.Changes(filepath.Join(tmpdir2, "output1.tar"), filepath.Join(tmpdir, "output2.tar"))
Expect(err).ToNot(HaveOccurred())
Expect(diffs).To(Equal(
[]ArtifactLayer{{
FromImage: filepath.Join(tmpdir2, "output1.tar"),
ToImage: filepath.Join(tmpdir, "output2.tar"),
Diffs: ArtifactDiffs{
Additions: []ArtifactNode{
{Name: "/test", Size: 4},
{Name: "/test2", Size: 4},
},
},
}}))
err = b.ExtractRootfs(CompilerBackendOptions{SourcePath: filepath.Join(tmpdir, "output2.tar"), Destination: rootfs}, false)
Expect(err).ToNot(HaveOccurred())
artifact, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, None)
Expect(err).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir, "package.tar"))).To(BeTrue())
err = helpers.Untar(artifact.GetPath(), unpacked, false)
Expect(err).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(unpacked, "test"))).To(BeTrue())
Expect(helpers.Exists(filepath.Join(unpacked, "test2"))).To(BeTrue())
content1, err := helpers.Read(filepath.Join(unpacked, "test"))
Expect(err).ToNot(HaveOccurred())
Expect(content1).To(Equal("foo\n"))
content2, err := helpers.Read(filepath.Join(unpacked, "test2"))
Expect(err).ToNot(HaveOccurred())
Expect(content2).To(Equal("bar\n"))
err = artifact.Hash()
Expect(err).ToNot(HaveOccurred())
err = artifact.Verify()
Expect(err).ToNot(HaveOccurred())
Expect(helpers.CopyFile(filepath.Join(tmpdir, "output2.tar"), filepath.Join(tmpdir, "package.tar"))).ToNot(HaveOccurred())
err = artifact.Verify()
Expect(err).To(HaveOccurred())
})
})
})

pkg/compiler/backend.go
View File

@@ -0,0 +1,41 @@
package compiler
import (
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler/backend"
"github.com/pkg/errors"
)
func NewBackend(ctx *types.Context, s string) (CompilerBackend, error) {
var compilerBackend CompilerBackend
switch s {
case backend.ImgBackend:
compilerBackend = backend.NewSimpleImgBackend(ctx)
case backend.DockerBackend:
compilerBackend = backend.NewSimpleDockerBackend(ctx)
default:
return nil, errors.New("invalid backend. Unsupported")
}
return compilerBackend, nil
}
type CompilerBackend interface {
BuildImage(backend.Options) error
ExportImage(backend.Options) error
LoadImage(string) error
RemoveImage(backend.Options) error
ImageDefinitionToTar(backend.Options) error
CopyImage(string, string) error
DownloadImage(opts backend.Options) error
Push(opts backend.Options) error
ImageAvailable(string) bool
ImageReference(img1 string, ondisk bool) (v1.Image, error)
ImageExists(string) bool
}

View File

@@ -18,15 +18,11 @@ package backend_test
import (
"testing"
. "github.com/mudler/luet/cmd"
config "github.com/mudler/luet/pkg/config"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestSolver(t *testing.T) {
RegisterFailHandler(Fail)
LoadConfig(config.LuetCfg)
RunSpecs(t, "Backend Suite")
}


@@ -0,0 +1,81 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package backend
import (
"os/exec"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/pkg/errors"
)
const (
ImgBackend = "img"
DockerBackend = "docker"
)
func imageAvailable(image string) bool {
_, err := crane.Digest(image)
return err == nil
}
type Options struct {
ImageName string
SourcePath string
DockerFileName string
Destination string
Context string
BackendArgs []string
}
func runCommand(ctx *types.Context, cmd *exec.Cmd) error {
output := ""
buffered := !ctx.Config.GetGeneral().ShowBuildOutput
writer := NewBackendWriter(buffered, ctx)
cmd.Stdout = writer
cmd.Stderr = writer
if buffered {
ctx.Spinner()
defer ctx.SpinnerStop()
}
err := cmd.Start()
if err != nil {
return errors.Wrap(err, "Failed starting command")
}
err = cmd.Wait()
if err != nil {
output = writer.GetCombinedOutput()
return errors.Wrapf(err, "Failed running command: %s", output)
}
return nil
}
func genBuildCommand(opts Options) []string {
context := opts.Context
if context == "" {
context = "."
}
buildarg := append(opts.BackendArgs, "-f", opts.DockerFileName, "-t", opts.ImageName, context)
return append([]string{"build"}, buildarg...)
}
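A small sketch (not part of this diff) of the command line genBuildCommand assembles; since the helper is unexported it would have to live inside package backend, and the --no-cache flag is just an example of a pass-through BackendArgs value:

package backend

import "fmt"

// showBuildArgs is a hypothetical illustration, not a function in this change.
func showBuildArgs() {
	opts := Options{
		ImageName:      "luet/base",
		DockerFileName: "Dockerfile",
		BackendArgs:    []string{"--no-cache"}, // extra flags are prepended to the generated args
		// Context is left empty, so the build context defaults to ".".
	}
	// Prints: [build --no-cache -f Dockerfile -t luet/base .]
	fmt.Println(genBuildCommand(opts))
}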


@@ -1,4 +1,4 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
// Copyright © 2019-2021 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
@@ -16,104 +16,205 @@
package backend
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"io"
"os/exec"
"path/filepath"
"strings"
capi "github.com/mudler/docker-companion/api"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/daemon"
"github.com/google/go-containerregistry/pkg/v1/tarball"
bus "github.com/mudler/luet/pkg/api/core/bus"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/config"
"github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/pkg/errors"
)
type SimpleDocker struct{}
type SimpleDocker struct {
ctx *types.Context
}
func NewSimpleDockerBackend() compiler.CompilerBackend {
return &SimpleDocker{}
func NewSimpleDockerBackend(ctx *types.Context) *SimpleDocker {
return &SimpleDocker{ctx: ctx}
}
// TODO: Still missing: labels and build-args expansion
func (*SimpleDocker) BuildImage(opts compiler.CompilerBackendOptions) error {
func (s *SimpleDocker) BuildImage(opts Options) error {
name := opts.ImageName
path := opts.SourcePath
dockerfileName := opts.DockerFileName
buildarg := []string{"build", "-f", dockerfileName, "-t", name, "."}
bus.Manager.Publish(bus.EventImagePreBuild, opts)
Debug(":whale2: Building image " + name)
buildarg := genBuildCommand(opts)
s.ctx.Info(":whale2: Building image " + name)
cmd := exec.Command("docker", buildarg...)
cmd.Dir = path
out, err := cmd.CombinedOutput()
cmd.Dir = opts.SourcePath
err := runCommand(s.ctx, cmd)
if err != nil {
return errors.Wrap(err, "Failed building image: "+string(out))
return err
}
Info(":whale: Building image " + name + " done")
if config.LuetCfg.GetGeneral().ShowBuildOutput {
Info(string(out))
} else {
Debug(string(out))
}
s.ctx.Success(":whale: Building image " + name + " done")
bus.Manager.Publish(bus.EventImagePostBuild, opts)
return nil
}
func (*SimpleDocker) CopyImage(src, dst string) error {
Debug(":whale: Tagging image:", src, "->", dst)
func (s *SimpleDocker) CopyImage(src, dst string) error {
s.ctx.Debug(":whale: Tagging image:", src, "->", dst)
cmd := exec.Command("docker", "tag", src, dst)
out, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed tagging image: "+string(out))
}
Info(":whale: Tagged image:", src, "->", dst)
s.ctx.Success(":whale: Tagged image:", src, "->", dst)
return nil
}
func (*SimpleDocker) DownloadImage(opts compiler.CompilerBackendOptions) error {
func (s *SimpleDocker) LoadImage(path string) error {
s.ctx.Debug(":whale: Loading image:", path)
cmd := exec.Command("docker", "load", "-i", path)
out, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed loading image: "+string(out))
}
s.ctx.Success(":whale: Loaded image:", path)
return nil
}
func (s *SimpleDocker) DownloadImage(opts Options) error {
name := opts.ImageName
bus.Manager.Publish(bus.EventImagePrePull, opts)
buildarg := []string{"pull", name}
Debug(":whale: Downloading image " + name)
s.ctx.Debug(":whale: Downloading image " + name)
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
cmd := exec.Command("docker", buildarg...)
out, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed building image: "+string(out))
return errors.Wrap(err, "Failed pulling image: "+string(out))
}
Info(":whale: Downloaded image:", name)
s.ctx.Success(":whale: Downloaded image:", name)
bus.Manager.Publish(bus.EventImagePostPull, opts)
return nil
}
func (*SimpleDocker) RemoveImage(opts compiler.CompilerBackendOptions) error {
func (s *SimpleDocker) ImageExists(imagename string) bool {
buildarg := []string{"inspect", "--type=image", imagename}
s.ctx.Debug(":whale: Checking existance of docker image: " + imagename)
cmd := exec.Command("docker", buildarg...)
out, err := cmd.CombinedOutput()
if err != nil {
s.ctx.Debug("Image not present")
s.ctx.Debug(string(out))
return false
}
return true
}
func (*SimpleDocker) ImageAvailable(imagename string) bool {
return imageAvailable(imagename)
}
func (s *SimpleDocker) RemoveImage(opts Options) error {
name := opts.ImageName
buildarg := []string{"rmi", name}
out, err := exec.Command("docker", buildarg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed removing image: "+string(out))
}
Info(":whale: Removed image:", name)
s.ctx.Success(":whale: Removed image:", name)
//Info(string(out))
return nil
}
func (*SimpleDocker) Push(opts compiler.CompilerBackendOptions) error {
func (s *SimpleDocker) Push(opts Options) error {
name := opts.ImageName
pusharg := []string{"push", name}
bus.Manager.Publish(bus.EventImagePrePush, opts)
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("docker", pusharg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed pushing image: "+string(out))
}
Info(":whale: Pushed image:", name)
s.ctx.Success(":whale: Pushed image:", name)
bus.Manager.Publish(bus.EventImagePostPush, opts)
//Info(string(out))
return nil
}
func (s *SimpleDocker) ImageDefinitionToTar(opts compiler.CompilerBackendOptions) error {
func (s *SimpleDocker) imagefromDaemon(a string) (v1.Image, error) {
ref, err := name.ParseReference(a)
if err != nil {
return nil, err
}
img, err := daemon.Image(ref, daemon.WithUnbufferedOpener())
if err != nil {
return nil, err
}
return img, nil
}
// TODO: Make it possible to use this optionally?
// It might be less safe, as it relies on the pipe.
// imageFromCLIPipe returns a new image from a tarball by providing a reader from the docker stdout pipe.
// See also the daemon.Image-based imagefromDaemon above for an alternative (which returns the tarball stream
// from the HTTP API endpoint instead).
func (s *SimpleDocker) imageFromCLIPipe(a string) (v1.Image, error) {
return tarball.Image(func() (io.ReadCloser, error) {
buildarg := []string{"save", a}
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
c := exec.Command("docker", buildarg...)
p, err := c.StdoutPipe()
if err != nil {
return nil, err
}
err = c.Start()
if err != nil {
return nil, err
}
go func() { c.Wait() }()
return p, nil
}, nil)
}
func (s *SimpleDocker) imageFromDisk(a string) (v1.Image, error) {
f, err := s.ctx.Config.GetSystem().TempFile("snapshot")
if err != nil {
return nil, err
}
buildarg := []string{"save", a, "-o", f.Name()}
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("docker", buildarg...).CombinedOutput()
if err != nil {
return nil, errors.Wrap(err, "Failed saving image: "+string(out))
}
return crane.Load(f.Name())
}
func (s *SimpleDocker) ImageReference(a string, ondisk bool) (v1.Image, error) {
if ondisk {
return s.imageFromDisk(a)
}
return s.imagefromDaemon(a)
}
func (s *SimpleDocker) ImageDefinitionToTar(opts Options) error {
if err := s.BuildImage(opts); err != nil {
return errors.Wrap(err, "Failed building image")
}
@@ -126,149 +227,25 @@ func (s *SimpleDocker) ImageDefinitionToTar(opts compiler.CompilerBackendOptions
return nil
}
func (*SimpleDocker) ExportImage(opts compiler.CompilerBackendOptions) error {
func (s *SimpleDocker) ExportImage(opts Options) error {
name := opts.ImageName
path := opts.Destination
buildarg := []string{"save", name, "-o", path}
Debug(":whale: Saving image " + name)
s.ctx.Debug(":whale: Saving image " + name)
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("docker", buildarg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed exporting image: "+string(out))
}
Info(":whale: Exported image:", name)
s.ctx.Debug(":whale: Exported image:", name)
return nil
}
type ManifestEntry struct {
Layers []string `json:"Layers"`
}
func (*SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms bool) error {
src := opts.SourcePath
dst := opts.Destination
rootfs, err := ioutil.TempDir(dst, "tmprootfs")
if err != nil {
return errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(rootfs) // clean up
// TODO: Following as option if archive as output?
// archive, err := ioutil.TempDir(os.TempDir(), "archive")
// if err != nil {
// return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
// }
// defer os.RemoveAll(archive) // clean up
err = helpers.Untar(src, rootfs, keepPerms)
if err != nil {
return errors.Wrap(err, "Error met while unpacking rootfs")
}
manifest, err := helpers.Read(filepath.Join(rootfs, "manifest.json"))
if err != nil {
return errors.Wrap(err, "Error met while reading image manifest")
}
// Unpack all layers
var manifestData []ManifestEntry
if err := json.Unmarshal([]byte(manifest), &manifestData); err != nil {
return errors.Wrap(err, "Error met while unmarshalling manifest")
}
layers_sha := []string{}
for _, data := range manifestData {
for _, l := range data.Layers {
if strings.Contains(l, "layer.tar") {
layers_sha = append(layers_sha, strings.Replace(l, "/layer.tar", "", -1))
}
}
}
export, err := capi.CreateExport(rootfs)
if err != nil {
return err
}
err = export.UnPackLayers(layers_sha, dst, "")
if err != nil {
return err
}
// err = helpers.Tar(archive, dst)
// if err != nil {
// return nil, errors.Wrap(err, "Error met while creating package archive")
// }
return nil
}
// container-diff diff daemon://luet/base alpine --type=file -j
// [
// {
// "Image1": "luet/base",
// "Image2": "alpine",
// "DiffType": "File",
// "Diff": {
// "Adds": null,
// "Dels": [
// {
// "Name": "/luetbuild",
// "Size": 5830706
// },
// {
// "Name": "/luetbuild/Dockerfile",
// "Size": 50
// },
// {
// "Name": "/luetbuild/output1",
// "Size": 5830656
// }
// ],
// "Mods": null
// }
// }
// ]
// Changes uses container-diff (https://github.com/GoogleContainerTools/container-diff) for retrieving out layer diffs
func (*SimpleDocker) Changes(fromImage, toImage string) ([]compiler.ArtifactLayer, error) {
tmpdiffs, err := ioutil.TempDir(os.TempDir(), "tmpdiffs")
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(tmpdiffs) // clean up
diffargs := []string{"diff", fromImage, toImage, "-v", "error", "-q", "--type=file", "-j", "-n", "-c", tmpdiffs}
out, err := exec.Command("container-diff", diffargs...).Output()
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Failed Resolving layer diffs: "+string(out))
}
if config.LuetCfg.GetGeneral().ShowBuildOutput {
Info(string(out))
}
var diffs []compiler.ArtifactLayer
err = json.Unmarshal(out, &diffs)
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Failed unmarshalling json response: "+string(out))
}
if config.LuetCfg.GetLogging().Level == "debug" {
summary := compiler.ComputeArtifactLayerSummary(diffs)
for _, l := range summary.Layers {
Debug(fmt.Sprintf("Diff %s -> %s: add %d (%d bytes), del %d (%d bytes), change %d (%d bytes)",
l.FromImage, l.ToImage,
l.AddFiles, l.AddSizes,
l.DelFiles, l.DelSizes,
l.ChangeFiles, l.ChangeSizes))
}
}
return diffs, nil
}
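For context, a minimal sketch (not part of this diff) of consuming the ImageReference accessor added above; the "alpine" reference is illustrative only:

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/api/core/types"
	"github.com/mudler/luet/pkg/compiler/backend"
)

func main() {
	ctx := types.NewContext()
	b := backend.NewSimpleDockerBackend(ctx)

	// ondisk=true snapshots the image with `docker save` into a temp file and
	// loads it with crane; ondisk=false streams it from the daemon API instead.
	img, err := b.ImageReference("alpine", true)
	if err != nil {
		panic(err)
	}

	// The returned v1.Image exposes the usual go-containerregistry accessors.
	digest, err := img.Digest()
	if err != nil {
		panic(err)
	}
	fmt.Println(digest.String())
}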


@@ -16,14 +16,16 @@
package backend_test
import (
"github.com/mudler/luet/pkg/api/core/types"
. "github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/backend"
. "github.com/mudler/luet/pkg/compiler/backend"
fileHelper "github.com/mudler/luet/pkg/helpers/file"
"io/ioutil"
"os"
"path/filepath"
helpers "github.com/mudler/luet/pkg/helpers"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/tree"
. "github.com/onsi/ginkgo"
@@ -32,6 +34,7 @@ import (
var _ = Describe("Docker backend", func() {
Context("Simple Docker backend satisfies main interface functionalities", func() {
ctx := types.NewContext()
It("Builds and generate tars", func() {
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
@@ -40,13 +43,10 @@ var _ = Describe("Docker backend", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase(), NewDefaultCompilerOptions())
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
cc := NewLuetCompiler(nil, generalRecipe.GetDatabase())
lspec, err := cc.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
Expect(err).ToNot(HaveOccurred())
lspec, ok := spec.(*LuetCompilationSpec)
Expect(ok).To(BeTrue())
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
Expect(lspec.Image).To(Equal("luet/base"))
Expect(lspec.Seed).To(Equal("alpine"))
@@ -60,7 +60,7 @@ var _ = Describe("Docker backend", func() {
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
dockerfile, err := fileHelper.Read(filepath.Join(tmpdir, "Dockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM alpine
@@ -69,50 +69,49 @@ WORKDIR /luetbuild
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin`))
b := NewSimpleDockerBackend()
opts := CompilerBackendOptions{
b := NewSimpleDockerBackend(ctx)
opts := backend.Options{
ImageName: "luet/base",
SourcePath: tmpdir,
DockerFileName: "Dockerfile",
Destination: filepath.Join(tmpdir2, "output1.tar"),
}
Expect(b.ImageDefinitionToTar(opts)).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
dockerfile, err = helpers.Read(filepath.Join(tmpdir, "LuetDockerfile"))
dockerfile, err = fileHelper.Read(filepath.Join(tmpdir, "LuetDockerfile"))
Expect(err).ToNot(HaveOccurred())
Expect(dockerfile).To(Equal(`
FROM luet/base
COPY . /luetbuild
WORKDIR /luetbuild
ENV PACKAGE_NAME=enman
ENV PACKAGE_VERSION=1.4.0
ENV PACKAGE_CATEGORY=app-admin
RUN echo foo > /test
RUN echo bar > /test2`))
opts = CompilerBackendOptions{
opts2 := backend.Options{
ImageName: "test",
SourcePath: tmpdir,
DockerFileName: "LuetDockerfile",
Destination: filepath.Join(tmpdir, "output2.tar"),
}
Expect(b.ImageDefinitionToTar(opts)).ToNot(HaveOccurred())
Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
Expect(b.Changes(filepath.Join(tmpdir2, "output1.tar"), filepath.Join(tmpdir, "output2.tar"))).To(Equal(
[]ArtifactLayer{{
FromImage: filepath.Join(tmpdir2, "output1.tar"),
ToImage: filepath.Join(tmpdir, "output2.tar"),
Diffs: ArtifactDiffs{
Additions: []ArtifactNode{
{Name: "/test", Size: 4},
{Name: "/test2", Size: 4},
},
},
}}))
Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
})
It("Detects available images", func() {
b := NewSimpleDockerBackend(ctx)
Expect(b.ImageAvailable("quay.io/mocaccino/extra")).To(BeTrue())
Expect(b.ImageAvailable("ubuntu:20.10")).To(BeTrue())
Expect(b.ImageAvailable("igjo5ijgo25nho52")).To(BeFalse())
})
})
})


@@ -16,88 +16,142 @@
package backend
import (
"os"
"os/exec"
"strings"
"github.com/mudler/luet/pkg/compiler"
. "github.com/mudler/luet/pkg/logger"
"github.com/google/go-containerregistry/pkg/crane"
v1 "github.com/google/go-containerregistry/pkg/v1"
bus "github.com/mudler/luet/pkg/api/core/bus"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/pkg/errors"
)
type SimpleImg struct{}
type SimpleImg struct {
ctx *types.Context
}
func NewSimpleImgBackend() compiler.CompilerBackend {
return &SimpleImg{}
func NewSimpleImgBackend(ctx *types.Context) *SimpleImg {
return &SimpleImg{ctx: ctx}
}
func (s *SimpleImg) LoadImage(string) error {
return errors.New("Not supported")
}
// TODO: Still missing: labels and build-args expansion
func (*SimpleImg) BuildImage(opts compiler.CompilerBackendOptions) error {
func (s *SimpleImg) BuildImage(opts Options) error {
name := opts.ImageName
path := opts.SourcePath
dockerfileName := opts.DockerFileName
bus.Manager.Publish(bus.EventImagePreBuild, opts)
buildarg := genBuildCommand(opts)
s.ctx.Info(":tea: Building image " + name)
buildarg := []string{"build", "-f", dockerfileName, "-t", name, "."}
Spinner(22)
defer SpinnerStop()
Debug(":tea: Building image " + name)
cmd := exec.Command("img", buildarg...)
cmd.Dir = path
out, err := cmd.CombinedOutput()
cmd.Dir = opts.SourcePath
err := runCommand(s.ctx, cmd)
if err != nil {
return errors.Wrap(err, "Failed building image: "+string(out))
return err
}
Info(":tea: Building image " + name + " done")
bus.Manager.Publish(bus.EventImagePostBuild, opts)
s.ctx.Info(":tea: Building image " + name + " done")
return nil
}
func (*SimpleImg) RemoveImage(opts compiler.CompilerBackendOptions) error {
func (s *SimpleImg) RemoveImage(opts Options) error {
name := opts.ImageName
buildarg := []string{"rm", name}
Spinner(22)
defer SpinnerStop()
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("img", buildarg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed building image: "+string(out))
return errors.Wrap(err, "Failed removing image: "+string(out))
}
Info(":tea: Image " + name + " removed")
s.ctx.Info(":tea: Image " + name + " removed")
return nil
}
func (*SimpleImg) DownloadImage(opts compiler.CompilerBackendOptions) error {
func (s *SimpleImg) ImageReference(a string, ondisk bool) (v1.Image, error) {
f, err := s.ctx.Config.GetSystem().TempFile("snapshot")
if err != nil {
return nil, err
}
buildarg := []string{"save", a, "-o", f.Name()}
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("img", buildarg...).CombinedOutput()
if err != nil {
return nil, errors.Wrap(err, "Failed saving image: "+string(out))
}
img, err := crane.Load(f.Name())
if err != nil {
return nil, err
}
return img, nil
}
func (s *SimpleImg) DownloadImage(opts Options) error {
name := opts.ImageName
buildarg := []string{"pull", name}
bus.Manager.Publish(bus.EventImagePrePull, opts)
buildarg := []string{"pull", name}
s.ctx.Debug(":tea: Downloading image " + name)
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
Debug(":tea: Downloading image " + name)
cmd := exec.Command("img", buildarg...)
out, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed building image: "+string(out))
return errors.Wrap(err, "Failed downloading image: "+string(out))
}
Info(":tea: Image " + name + " downloaded")
s.ctx.Info(":tea: Image " + name + " downloaded")
bus.Manager.Publish(bus.EventImagePostPull, opts)
return nil
}
func (*SimpleImg) CopyImage(src, dst string) error {
Spinner(22)
defer SpinnerStop()
func (s *SimpleImg) CopyImage(src, dst string) error {
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
Debug(":tea: Tagging image", src, dst)
s.ctx.Debug(":tea: Tagging image", src, dst)
cmd := exec.Command("img", "tag", src, dst)
out, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed tagging image: "+string(out))
}
Info(":tea: Image " + dst + " tagged")
s.ctx.Info(":tea: Image " + dst + " tagged")
return nil
}
func (s *SimpleImg) ImageDefinitionToTar(opts compiler.CompilerBackendOptions) error {
func (s *SimpleImg) ImageAvailable(imagename string) bool {
return imageAvailable(imagename)
}
// ImageExists checks whether the given image is available locally
func (*SimpleImg) ImageExists(imagename string) bool {
cmd := exec.Command("img", "ls")
out, err := cmd.Output()
if err != nil {
return false
}
if strings.Contains(string(out), imagename) {
return true
}
return false
}
func (s *SimpleImg) ImageDefinitionToTar(opts Options) error {
if err := s.BuildImage(opts); err != nil {
return errors.Wrap(err, "Failed building image")
}
@@ -110,50 +164,35 @@ func (s *SimpleImg) ImageDefinitionToTar(opts compiler.CompilerBackendOptions) e
return nil
}
func (*SimpleImg) ExportImage(opts compiler.CompilerBackendOptions) error {
func (s *SimpleImg) ExportImage(opts Options) error {
name := opts.ImageName
path := opts.Destination
buildarg := []string{"save", "-o", path, name}
Debug(":tea: Saving image " + name)
s.ctx.Debug(":tea: Saving image " + name)
s.ctx.Spinner()
defer s.ctx.SpinnerStop()
out, err := exec.Command("img", buildarg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed building image: "+string(out))
return errors.Wrap(err, "Failed exporting image: "+string(out))
}
Info(":tea: Image " + name + " saved")
s.ctx.Info(":tea: Image " + name + " saved")
return nil
}
// TODO: Dup in docker, refactor common code in helpers for shared parts
func (*SimpleImg) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms bool) error {
func (s *SimpleImg) Push(opts Options) error {
name := opts.ImageName
path := opts.Destination
bus.Manager.Publish(bus.EventImagePrePush, opts)
os.RemoveAll(path)
buildarg := []string{"unpack", "-o", path, name}
Debug(":tea: Extracting image " + name)
out, err := exec.Command("img", buildarg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed extracting image: "+string(out))
}
Info(":tea: Image " + name + " extracted")
return nil
//return NewSimpleDockerBackend().ExtractRootfs(opts, keepPerms)
}
// TODO: Use container-diff (https://github.com/GoogleContainerTools/container-diff) for checking out layer diffs
// Changes uses container-diff (https://github.com/GoogleContainerTools/container-diff) for retrieving out layer diffs
func (*SimpleImg) Changes(fromImage, toImage string) ([]compiler.ArtifactLayer, error) {
return NewSimpleDockerBackend().Changes(fromImage, toImage)
}
func (*SimpleImg) Push(opts compiler.CompilerBackendOptions) error {
name := opts.ImageName
pusharg := []string{"push", name}
out, err := exec.Command("img", pusharg...).CombinedOutput()
if err != nil {
return errors.Wrap(err, "Failed pushing image: "+string(out))
}
Info(":tea: Pushed image:", name)
//Info(string(out))
s.ctx.Info(":tea: Pushed image:", name)
bus.Manager.Publish(bus.EventImagePostPush, opts)
//s.ctx.Info(string(out))
return nil
}


@@ -0,0 +1,50 @@
// Copyright © 2021 Daniele Rondina <geaaru@sabayonlinux.org>
// Ettore Di Giacinto <mudler@sabayonlinux.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package backend
import (
"bytes"
"github.com/mudler/luet/pkg/api/core/types"
)
type BackendWriter struct {
BufferedOutput bool
Buffer *bytes.Buffer
ctx *types.Context
}
func NewBackendWriter(buffered bool, ctx *types.Context) *BackendWriter {
return &BackendWriter{
BufferedOutput: buffered,
Buffer: &bytes.Buffer{},
ctx: ctx,
}
}
func (b *BackendWriter) Write(p []byte) (int, error) {
if b.BufferedOutput {
return b.Buffer.Write(p)
}
b.ctx.Msg("info", false, (string(p)))
return len(p), nil
}
func (b *BackendWriter) Close() error { return nil }
func (b *BackendWriter) GetCombinedOutput() string { return b.Buffer.String() }
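A minimal sketch (not part of this diff) of the two writer modes; the docker-style output lines are invented for illustration:

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/api/core/types"
	"github.com/mudler/luet/pkg/compiler/backend"
)

func main() {
	ctx := types.NewContext()

	// Buffered: output is held back and only surfaced later via
	// GetCombinedOutput, which is how runCommand reports failures.
	quiet := backend.NewBackendWriter(true, ctx)
	quiet.Write([]byte("Step 1/3 : FROM alpine\n"))
	fmt.Print(quiet.GetCombinedOutput())

	// Unbuffered: every chunk is forwarded to the context logger as it arrives.
	live := backend.NewBackendWriter(false, ctx)
	live.Write([]byte("Step 2/3 : RUN echo foo > /test\n"))
}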

pkg/compiler/buildtree.go (new file, 140 lines)

@@ -0,0 +1,140 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler
import (
"encoding/json"
"sort"
)
func rankMapStringInt(values map[string]int) []string {
type kv struct {
Key string
Value int
}
var ss []kv
for k, v := range values {
ss = append(ss, kv{k, v})
}
sort.Slice(ss, func(i, j int) bool {
return ss[i].Value > ss[j].Value
})
ranked := make([]string, len(values))
for i, kv := range ss {
ranked[i] = kv.Key
}
return ranked
}
type BuildTree struct {
order map[string]int
}
func (bt *BuildTree) Increase(s string) {
if bt.order == nil {
bt.order = make(map[string]int)
}
if _, ok := bt.order[s]; !ok {
bt.order[s] = 0
}
bt.order[s]++
}
func (bt *BuildTree) Reset(s string) {
if bt.order == nil {
bt.order = make(map[string]int)
}
bt.order[s] = 0
}
func (bt *BuildTree) Level(s string) int {
return bt.order[s]
}
func ints(input []int) []int {
u := make([]int, 0, len(input))
m := make(map[int]bool)
for _, val := range input {
if _, ok := m[val]; !ok {
m[val] = true
u = append(u, val)
}
}
return u
}
func (bt *BuildTree) AllInLevel(l int) []string {
var all []string
for k, v := range bt.order {
if v == l {
all = append(all, k)
}
}
return all
}
func (bt *BuildTree) Order(compilationTree map[string]map[string]interface{}) {
sentinel := false
for !sentinel {
sentinel = true
LEVEL:
for _, l := range bt.AllLevels() {
for _, j := range bt.AllInLevel(l) {
for _, k := range bt.AllInLevel(l) {
if j == k {
continue
}
if _, ok := compilationTree[j][k]; ok {
if bt.Level(j) == bt.Level(k) {
bt.Increase(j)
sentinel = false
break LEVEL
}
}
}
}
}
}
}
func (bt *BuildTree) AllLevels() []int {
var all []int
for _, v := range bt.order {
all = append(all, v)
}
sort.Sort(sort.IntSlice(all))
return ints(all)
}
func (bt *BuildTree) JSON() (string, error) {
type buildjob struct {
Jobs []string `json:"packages"`
}
result := []buildjob{}
for _, l := range bt.AllLevels() {
result = append(result, buildjob{Jobs: bt.AllInLevel(l)})
}
dat, err := json.Marshal(&result)
return string(dat), err
}
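A small sketch (not part of this diff) of the level bookkeeping BuildTree exposes; the package names are made up and Order's full dependency-map handling is left out:

package main

import (
	"fmt"

	"github.com/mudler/luet/pkg/compiler"
)

func main() {
	bt := &compiler.BuildTree{}

	// Levels are plain per-name counters.
	bt.Reset("a")    // "a" stays at level 0
	bt.Increase("b") // "b" moves to level 1
	bt.Increase("c")
	bt.Increase("c") // "c" moves to level 2

	fmt.Println(bt.AllLevels())   // [0 1 2]
	fmt.Println(bt.AllInLevel(1)) // [b]

	// JSON emits one {"packages": [...]} entry per level, in ascending order.
	out, err := bt.JSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // [{"packages":["a"]},{"packages":["b"]},{"packages":["c"]}]
}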

Some files were not shown because too many files have changed in this diff.