Mirror of https://github.com/mudler/luet.git (synced 2025-09-03 08:14:46 +00:00)

Compare commits (42 commits)
Commits (SHA1):
6f623ae016, bd80f9acd2, 045d25bb28, 908b6d2bd4, a3ada624a7, 09c7609a7f,
a1acab0e52, 93187182e5, 5e7cd183be, 9c0f0e3457, 1120b1ee59, 4010033e0c,
a076613f66, c184b4b3bc, 40d1f1785b, 11944f4b8c, 6f41f8bd8d, 95b125cb91,
f676b50735, 0c0401847e, 02a506a5c5, 6f0b657e69, 51378bdfb6, 66513955c7,
694d8656d9, c339e0fed2, e30bb056d5, 052a551c0c, ffa6fc3829, 07a1058ac1,
3af9109b99, 6e3650d3af, 5dcf77c987, ee0e70ed3d, 364b5648b4, e28a4753f8,
d1d7f5aa74, e2260b6956, 764a09ce0c, 910f1ad3fe, 16e9d7b20c, 6088664887
.github/workflows/release.yml (vendored, new file, 23 lines)
@@ -0,0 +1,23 @@
on: push
name: Build and release on push
jobs:
  release:
    name: Test and Release
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.14.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Tests
        run: sudo -E env "PATH=$PATH" make deps multiarch-build test-integration test-coverage
      - name: Build
        run: sudo -E env "PATH=$PATH" make multiarch-build && sudo chmod -R 777 release/
      - name: Release
        uses: fnkr/github-action-ghr@v1
        if: startsWith(github.ref, 'refs/tags/')
        env:
          GHR_PATH: release/
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/test.yml (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@

on: pull_request
name: Build and Test
jobs:
  test:
    strategy:
      matrix:
        go-version: [1.14.x]
        platform: [ubuntu-latest]
    runs-on: ${{ matrix.platform }}
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Checkout code
        uses: actions/checkout@v2
      - name: setup-docker
        uses: docker-practice/actions-setup-docker@0.0.1
      - name: Tests
        run: sudo -E env "PATH=$PATH" make deps multiarch-build test-integration test-coverage
.travis.yml (20 lines)
@@ -6,14 +6,14 @@ go:
env:
  - "GO15VENDOREXPERIMENT=1"
before_install:
  - make deps
  - curl -LO https://storage.googleapis.com/container-diff/latest/container-diff-linux-amd64 && chmod +x container-diff-linux-amd64 && mkdir -p $HOME/bin && export PATH=$PATH:$HOME/bin && mv container-diff-linux-amd64 $HOME/bin/container-diff
  - sudo -E env "PATH=$PATH" apt-get install -y libcap2-bin
  - sudo -E env "PATH=$PATH" make deps
script:
  - make multiarch-build test-integration test-coverage
after_success:
  - |
    if [ -n "$TRAVIS_TAG" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]; then
      git config --global user.name "Deployer" && git config --global user.email foo@bar.com
      go get github.com/tcnksm/ghr
      ghr -u mudler -r luet --replace $TRAVIS_TAG release/
    fi
  - sudo -E env "PATH=$PATH" make multiarch-build test-integration test-coverage
#after_success:
#  - |
#    if [ -n "$TRAVIS_TAG" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]; then
#      sudo -E env "PATH=$PATH" git config --global user.name "Deployer" && git config --global user.email foo@bar.com
#      sudo -E env "PATH=$PATH" go get github.com/tcnksm/ghr
#      sudo -E env "PATH=$PATH" ghr -u mudler -r luet --replace $TRAVIS_TAG release/
#    fi
Makefile (6 lines)
@@ -74,10 +74,6 @@ build-small:
image:
	docker build --rm -t luet/base .

.PHONY: gox-build
gox-build:
	CGO_ENABLED=0 gox $(BUILD_PLATFORMS) -ldflags '$(LDFLAGS)' -output="release/$(NAME)-$(VERSION)-{{.OS}}-{{.Arch}}"

.PHONY: lint
lint:
	golint ./... | grep -v "be unexported"
@@ -94,4 +90,4 @@ test-docker:

.PHONY: multiarch-build
multiarch-build:
	gox $(BUILD_PLATFORMS) -ldflags '$(LDFLAGS)' -output="release/$(NAME)-$(VERSION)-{{.OS}}-{{.Arch}}"
	CGO_ENABLED=0 gox $(BUILD_PLATFORMS) -ldflags '$(LDFLAGS)' -output="release/$(NAME)-$(VERSION)-{{.OS}}-{{.Arch}}"
@@ -79,6 +79,7 @@ var buildCmd = &cobra.Command{
onlydeps := viper.GetBool("onlydeps")
keepExportedImages := viper.GetBool("keep-exported-images")
full, _ := cmd.Flags().GetBool("full")
skip, _ := cmd.Flags().GetBool("skip-if-metadata-exists")

compilerSpecs := compiler.NewLuetCompilationspecs()
var compilerBackend compiler.CompilerBackend
@@ -143,6 +144,7 @@ var buildCmd = &cobra.Command{
opts.OnlyDeps = onlydeps
opts.NoDeps = nodeps
opts.KeepImageExport = keepExportedImages
opts.SkipIfMetadataExists = skip

luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(), opts)
luetCompiler.SetConcurrency(concurrency)
@@ -231,6 +233,7 @@ func init() {
buildCmd.Flags().Bool("nodeps", false, "Build only the target packages, skipping deps (it works only if you already built the deps locally, or by using --pull) ")
buildCmd.Flags().Bool("onlydeps", false, "Build only package dependencies")
buildCmd.Flags().Bool("keep-exported-images", false, "Keep exported images used during building")
buildCmd.Flags().Bool("skip-if-metadata-exists", false, "Skip package if metadata exists")

buildCmd.Flags().String("solver-type", "", "Solver strategy")
buildCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
cmd/root.go (11 lines)
@@ -38,7 +38,7 @@ var Verbose bool
var LockedCommands = []string{"install", "uninstall", "upgrade"}

const (
LuetCLIVersion = "0.8.0"
LuetCLIVersion = "0.8.5"
LuetEnvPrefix = "LUET"
)

@@ -88,8 +88,12 @@ func LoadConfig(c *config.LuetConfig) error {
return err
}

noSpinner := c.Viper.GetBool("no_spinner")

InitAurora()
NewSpinner()
if !noSpinner {
NewSpinner()
}

Debug("Using config file:", c.Viper.ConfigFileUsed())

@@ -145,6 +149,7 @@ func init() {
pflags.BoolP("debug", "d", false, "verbose output")
pflags.Bool("fatal", false, "Enables Warnings to exit")
pflags.Bool("enable-logfile", false, "Enable log to file")
pflags.Bool("no-spinner", false, "Disable spinner.")
pflags.Bool("color", config.LuetCfg.GetLogging().Color, "Enable/Disable color.")
pflags.Bool("emoji", config.LuetCfg.GetLogging().EnableEmoji, "Enable/Disable emoji.")
pflags.StringP("logfile", "l", config.LuetCfg.GetLogging().Path,
@@ -168,6 +173,8 @@ func init() {
config.LuetCfg.Viper.BindPFlag("general.debug", pflags.Lookup("debug"))
config.LuetCfg.Viper.BindPFlag("general.fatal_warnings", pflags.Lookup("fatal"))
config.LuetCfg.Viper.BindPFlag("general.same_owner", pflags.Lookup("same-owner"))
// Currently I maintain this only from cli.
config.LuetCfg.Viper.BindPFlag("no_spinner", pflags.Lookup("no-spinner"))

// Extensions must be binary with the "luet-" prefix to be able to be shown in the help.
// we also accept extensions in the relative path where luet is being started, "extensions/"
@@ -32,6 +32,7 @@ type PackageResult struct {
Category string `json:"category"`
Version string `json:"version"`
Repository string `json:"repository"`
Hidden bool `json:"hidden"`
}

type Results struct {
@@ -58,6 +59,9 @@ var searchCmd = &cobra.Command{
if len(args) != 1 {
Fatal("Wrong number of arguments (expected 1)")
}

hidden, _ := cmd.Flags().GetBool("hidden")

installed := LuetCfg.Viper.GetBool("installed")
stype := LuetCfg.Viper.GetString("solver.type")
discount := LuetCfg.Viper.GetFloat64("solver.discount")
@@ -114,25 +118,31 @@ var searchCmd = &cobra.Command{
}
for _, m := range matches {
if !revdeps {
Info(fmt.Sprintf(":file_folder:%s", m.Repo.GetName()), fmt.Sprintf(":package:%s", m.Package.HumanReadableString()))
results.Packages = append(results.Packages,
PackageResult{
Name: m.Package.GetName(),
Version: m.Package.GetVersion(),
Category: m.Package.GetCategory(),
Repository: m.Repo.GetName(),
})
if !m.Package.IsHidden() || m.Package.IsHidden() && hidden {
Info(fmt.Sprintf(":file_folder:%s", m.Repo.GetName()), fmt.Sprintf(":package:%s", m.Package.HumanReadableString()))
results.Packages = append(results.Packages,
PackageResult{
Name: m.Package.GetName(),
Version: m.Package.GetVersion(),
Category: m.Package.GetCategory(),
Repository: m.Repo.GetName(),
Hidden: m.Package.IsHidden(),
})
}
} else {
visited := make(map[string]interface{})
for _, revdep := range m.Package.ExpandedRevdeps(m.Repo.GetTree().GetDatabase(), visited) {
Info(fmt.Sprintf(":file_folder:%s", m.Repo.GetName()), fmt.Sprintf(":package:%s", revdep.HumanReadableString()))
results.Packages = append(results.Packages,
PackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Repository: m.Repo.GetName(),
})
if !revdep.IsHidden() || revdep.IsHidden() && hidden {
Info(fmt.Sprintf(":file_folder:%s", m.Repo.GetName()), fmt.Sprintf(":package:%s", revdep.HumanReadableString()))
results.Packages = append(results.Packages,
PackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Repository: m.Repo.GetName(),
Hidden: revdep.IsHidden(),
})
}
}
}
}
@@ -162,26 +172,32 @@ var searchCmd = &cobra.Command{

for _, pack := range iMatches {
if !revdeps {
Info(fmt.Sprintf(":package:%s", pack.HumanReadableString()))
results.Packages = append(results.Packages,
PackageResult{
Name: pack.GetName(),
Version: pack.GetVersion(),
Category: pack.GetCategory(),
Repository: "system",
})
if !pack.IsHidden() || pack.IsHidden() && hidden {
Info(fmt.Sprintf(":package:%s", pack.HumanReadableString()))
results.Packages = append(results.Packages,
PackageResult{
Name: pack.GetName(),
Version: pack.GetVersion(),
Category: pack.GetCategory(),
Repository: "system",
Hidden: pack.IsHidden(),
})
}
} else {
visited := make(map[string]interface{})

for _, revdep := range pack.ExpandedRevdeps(system.Database, visited) {
Info(fmt.Sprintf(":package:%s", pack.HumanReadableString()))
results.Packages = append(results.Packages,
PackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Repository: "system",
})
if !revdep.IsHidden() || revdep.IsHidden() && hidden {
Info(fmt.Sprintf(":package:%s", pack.HumanReadableString()))
results.Packages = append(results.Packages,
PackageResult{
Name: revdep.GetName(),
Version: revdep.GetVersion(),
Category: revdep.GetCategory(),
Repository: "system",
Hidden: revdep.IsHidden(),
})
}
}
}
}
@@ -223,5 +239,7 @@ func init() {
searchCmd.Flags().Bool("by-label", false, "Search packages through label")
searchCmd.Flags().Bool("by-label-regex", false, "Search packages through label regex")
searchCmd.Flags().Bool("revdeps", false, "Search package reverse dependencies")
searchCmd.Flags().Bool("hidden", false, "Include hidden packages")

RootCmd.AddCommand(searchCmd)
}
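Editor's note: the new filter above lists a match when `!IsHidden() || IsHidden() && hidden`, i.e. the package is not hidden, or the --hidden flag was passed. The following is a minimal, self-contained sketch (not part of the diff) showing the equivalent predicate:

```go
package main

import "fmt"

// shouldList mirrors the filter used in the search command above:
// `!pkgHidden || (pkgHidden && hiddenFlag)` simplifies to `!pkgHidden || hiddenFlag`.
func shouldList(pkgHidden, hiddenFlag bool) bool {
	return !pkgHidden || hiddenFlag
}

func main() {
	fmt.Println(shouldList(false, false)) // true: visible packages are always listed
	fmt.Println(shouldList(true, false))  // false: hidden packages are skipped by default
	fmt.Println(shouldList(true, true))   // true: --hidden includes them
}
```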
@@ -18,12 +18,11 @@ package cmd_tree

import (
"fmt"
//"os"
//"sort"

. "github.com/mudler/luet/pkg/logger"
spectooling "github.com/mudler/luet/pkg/spectooling"
tree "github.com/mudler/luet/pkg/tree"
version "github.com/mudler/luet/pkg/versioner"

"github.com/spf13/cobra"
)
@@ -43,17 +42,26 @@ func NewTreeBumpCommand() *cobra.Command {
Run: func(cmd *cobra.Command, args []string) {
spec, _ := cmd.Flags().GetString("definition-file")
toStdout, _ := cmd.Flags().GetBool("to-stdout")
pkgVersion, _ := cmd.Flags().GetString("pkg-version")
pack, err := tree.ReadDefinitionFile(spec)
if err != nil {
Fatal(err.Error())
}

// Retrieve version build section with Gentoo parser
err = pack.BumpBuildVersion()
if err != nil {
Fatal("Error on increment build version: " + err.Error())
if pkgVersion != "" {
validator := &version.WrappedVersioner{}
err := validator.Validate(pkgVersion)
if err != nil {
Fatal("Invalid version string: " + err.Error())
}
pack.SetVersion(pkgVersion)
} else {
// Retrieve version build section with Gentoo parser
err = pack.BumpBuildVersion()
if err != nil {
Fatal("Error on increment build version: " + err.Error())
}
}

if toStdout {
data, err := spectooling.NewDefaultPackageSanitized(&pack).Yaml()
if err != nil {
@@ -72,6 +80,7 @@ func NewTreeBumpCommand() *cobra.Command {
},
}

ans.Flags().StringP("pkg-version", "p", "", "Set a specific package version")
ans.Flags().StringP("definition-file", "f", "", "Path of the definition to bump.")
ans.Flags().BoolP("to-stdout", "o", false, "Bump package to output.")
@@ -26,6 +26,7 @@ import (
. "github.com/mudler/luet/pkg/config"
. "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
tree "github.com/mudler/luet/pkg/tree"

"github.com/spf13/cobra"
@@ -74,15 +75,24 @@ func NewTreePkglistCommand() *cobra.Command {
if len(t) == 0 {
Fatal("Mandatory tree param missing.")
}

revdeps, _ := cmd.Flags().GetBool("revdeps")
deps, _ := cmd.Flags().GetBool("deps")
if revdeps && deps {
Fatal("Both revdeps and deps option used. Choice only one.")
}

},
Run: func(cmd *cobra.Command, args []string) {
var results TreeResults
var depSolver solver.PackageSolver

treePath, _ := cmd.Flags().GetStringArray("tree")
verbose, _ := cmd.Flags().GetBool("verbose")
buildtime, _ := cmd.Flags().GetBool("buildtime")
full, _ := cmd.Flags().GetBool("full")
revdeps, _ := cmd.Flags().GetBool("revdeps")
deps, _ := cmd.Flags().GetBool("deps")

out, _ := cmd.Flags().GetString("output")
if out != "terminal" {
@@ -95,6 +105,7 @@ func NewTreePkglistCommand() *cobra.Command {
} else {
reciper = tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))
}

for _, t := range treePath {
err := reciper.Load(t)
if err != nil {
@@ -102,6 +113,15 @@ func NewTreePkglistCommand() *cobra.Command {
}
}

if deps {
emptyInstallationDb := pkg.NewInMemoryDatabase(false)

depSolver = solver.NewSolver(pkg.NewInMemoryDatabase(false),
reciper.GetDatabase(),
emptyInstallationDb)

}

regExcludes, err := helpers.CreateRegexArray(excludes)
if err != nil {
Fatal(err.Error())
@@ -146,15 +166,8 @@ func NewTreePkglistCommand() *cobra.Command {
}

if addPkg {
if !revdeps {
plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: p.GetName(),
Version: p.GetVersion(),
Category: p.GetCategory(),
Path: p.GetPath(),
})
} else {
if revdeps {

visited := make(map[string]interface{})
for _, revdep := range p.ExpandedRevdeps(reciper.GetDatabase(), visited) {
if full {
@@ -172,7 +185,58 @@ func NewTreePkglistCommand() *cobra.Command {
Path: revdep.GetPath(),
})
}
} else if deps {

Spinner(32)
solution, err := depSolver.Install(pkg.Packages{p})
if err != nil {
Fatal(err.Error())
}
ass := solution.SearchByName(p.GetPackageName())
solution, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
if err != nil {
Fatal(err.Error())
}
SpinnerStop()

for _, pa := range solution {

if pa.Value {
// Exclude itself
if pa.Package.GetName() == p.GetName() && pa.Package.GetCategory() == p.GetCategory() {
continue
}

if full {
pkgstr = pkgDetail(pa.Package)
} else if verbose {
pkgstr = pa.Package.HumanReadableString()
} else {
pkgstr = fmt.Sprintf("%s/%s", pa.Package.GetCategory(), pa.Package.GetName())
}
plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: pa.Package.GetName(),
Version: pa.Package.GetVersion(),
Category: pa.Package.GetCategory(),
Path: pa.Package.GetPath(),
})
}

}

} else {

plist = append(plist, pkgstr)
results.Packages = append(results.Packages, TreePackageResult{
Name: p.GetName(),
Version: p.GetVersion(),
Category: p.GetCategory(),
Path: p.GetPath(),
})

}

}
}

@@ -192,7 +256,9 @@ func NewTreePkglistCommand() *cobra.Command {
}
fmt.Println(string(j2))
default:
sort.Strings(plist)
if !deps {
sort.Strings(plist)
}
for _, p := range plist {
fmt.Println(p)
}
@@ -204,6 +270,7 @@ func NewTreePkglistCommand() *cobra.Command {
ans.Flags().BoolP("buildtime", "b", false, "Build time match")
ans.Flags().StringP("output", "o", "terminal", "Output format ( Defaults: terminal, available: json,yaml )")
ans.Flags().Bool("revdeps", false, "Search package reverse dependencies")
ans.Flags().Bool("deps", false, "Search package dependencies")

ans.Flags().BoolP("verbose", "v", false, "Add package version")
ans.Flags().BoolP("full", "f", false, "Show package detail")
@@ -35,248 +35,398 @@ import (
"github.com/spf13/cobra"
)

func validateWorker(i int,
wg *sync.WaitGroup,
c <-chan pkg.Package,
reciper tree.Builder,
withSolver bool,
regExcludes, regMatches []*regexp.Regexp,
excludes, matches []string,
errs chan error) {
type ValidateOpts struct {
WithSolver bool
OnlyRuntime bool
OnlyBuildtime bool
RegExcludes []*regexp.Regexp
RegMatches []*regexp.Regexp
Excludes []string
Matches []string

defer wg.Done()
// Runtime validate stuff
RuntimeCacheDeps *pkg.InMemoryDatabase
RuntimeReciper *tree.InstallerRecipe

// Buildtime validate stuff
BuildtimeCacheDeps *pkg.InMemoryDatabase
BuildtimeReciper *tree.CompilerRecipe

Mutex sync.Mutex
BrokenPkgs int
BrokenDeps int

Errors []error
}

func (o *ValidateOpts) IncrBrokenPkgs() {
o.Mutex.Lock()
defer o.Mutex.Unlock()
o.BrokenPkgs++
}

func (o *ValidateOpts) IncrBrokenDeps() {
o.Mutex.Lock()
defer o.Mutex.Unlock()
o.BrokenDeps++
}

func (o *ValidateOpts) AddError(err error) {
o.Mutex.Lock()
defer o.Mutex.Unlock()
o.Errors = append(o.Errors, err)
}

func validatePackage(p pkg.Package, checkType string, opts *ValidateOpts, reciper tree.Builder, cacheDeps *pkg.InMemoryDatabase) error {
var errstr string
var ans error

var depSolver solver.PackageSolver
var cacheDeps *pkg.InMemoryDatabase
brokenPkgs := 0
brokenDeps := 0
var errstr string

emptyInstallationDb := pkg.NewInMemoryDatabase(false)
if withSolver {
if opts.WithSolver {
emptyInstallationDb := pkg.NewInMemoryDatabase(false)
depSolver = solver.NewSolver(pkg.NewInMemoryDatabase(false),
reciper.GetDatabase(),
emptyInstallationDb)

// Use Singleton in memory cache for speedup dependencies
// analysis
cacheDeps = pkg.NewInMemoryDatabase(true).(*pkg.InMemoryDatabase)
}

for p := range c {
found, err := reciper.GetDatabase().FindPackages(
&pkg.DefaultPackage{
Name: p.GetName(),
Category: p.GetCategory(),
Version: ">=0",
},
)

found, err := reciper.GetDatabase().FindPackages(
&pkg.DefaultPackage{
Name: p.GetName(),
Category: p.GetCategory(),
Version: ">=0",
},
if err != nil || len(found) < 1 {
if err != nil {
errstr = err.Error()
} else {
errstr = "No packages"
}
Error(fmt.Sprintf("[%9s] %s/%s-%s: Broken. No versions could be found by database %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
errstr,
))

opts.IncrBrokenDeps()

return errors.New(
fmt.Sprintf("[%9s] %s/%s-%s: Broken. No versions could be found by database %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
errstr,
))
}

// Ensure that we use the right package from right recipier for deps
pReciper, err := reciper.GetDatabase().FindPackage(
&pkg.DefaultPackage{
Name: p.GetName(),
Category: p.GetCategory(),
Version: p.GetVersion(),
},
)
if err != nil {
errstr = fmt.Sprintf("[%9s] %s/%s-%s: Error on retrieve package - %s.",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
err.Error(),
)
Error(errstr)

if err != nil || len(found) < 1 {
return errors.New(errstr)
}
p = pReciper

pkgstr := fmt.Sprintf("%s/%s-%s", p.GetCategory(), p.GetName(),
p.GetVersion())

validpkg := true

if len(opts.Matches) > 0 {
matched := false
for _, rgx := range opts.RegMatches {
if rgx.MatchString(pkgstr) {
matched = true
break
}
}

if !matched {
return nil
}
}

if len(opts.Excludes) > 0 {
excluded := false
for _, rgx := range opts.RegExcludes {
if rgx.MatchString(pkgstr) {
excluded = true
break
}
}

if excluded {
return nil
}
}

Info(fmt.Sprintf("[%9s] Checking package ", checkType)+
fmt.Sprintf("%s/%s-%s", p.GetCategory(), p.GetName(), p.GetVersion()),
"with", len(p.GetRequires()), "dependencies and", len(p.GetConflicts()), "conflicts.")

all := p.GetRequires()
all = append(all, p.GetConflicts()...)
for idx, r := range all {

var deps pkg.Packages
var err error
if r.IsSelector() {
deps, err = reciper.GetDatabase().FindPackages(
&pkg.DefaultPackage{
Name: r.GetName(),
Category: r.GetCategory(),
Version: r.GetVersion(),
},
)
} else {
deps = append(deps, r)
}

if err != nil || len(deps) < 1 {
if err != nil {
errstr = err.Error()
} else {
errstr = "No packages"
}
Error(fmt.Sprintf("%s/%s-%s: Broken. No versions could be found by database %s",
Error(fmt.Sprintf("[%9s] %s/%s-%s: Broken Dep %s/%s-%s - %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
errstr,
))

errs <- errors.New(
fmt.Sprintf("%s/%s-%s: Broken. No versions could be found by database %s",
p.GetCategory(), p.GetName(), p.GetVersion(),
errstr,
))
opts.IncrBrokenDeps()

brokenPkgs++
}

pkgstr := fmt.Sprintf("%s/%s-%s", p.GetCategory(), p.GetName(),
p.GetVersion())

validpkg := true

if len(matches) > 0 {
matched := false
for _, rgx := range regMatches {
if rgx.MatchString(pkgstr) {
matched = true
break
}
}

if !matched {
continue
}
}

if len(excludes) > 0 {
excluded := false
for _, rgx := range regExcludes {
if rgx.MatchString(pkgstr) {
excluded = true
break
}
}

if excluded {
continue
}
}

Info("Checking package "+
fmt.Sprintf("%s/%s-%s", p.GetCategory(), p.GetName(), p.GetVersion()),
"with", len(p.GetRequires()), "dependencies and", len(p.GetConflicts()), "conflicts.")

all := p.GetRequires()
all = append(all, p.GetConflicts()...)
for idx, r := range all {

var deps pkg.Packages
var err error
if r.IsSelector() {
deps, err = reciper.GetDatabase().FindPackages(
&pkg.DefaultPackage{
Name: r.GetName(),
Category: r.GetCategory(),
Version: r.GetVersion(),
},
)
} else {
deps = append(deps, r)
}

if err != nil || len(deps) < 1 {
if err != nil {
errstr = err.Error()
} else {
errstr = "No packages"
}
Error(fmt.Sprintf("%s/%s-%s: Broken Dep %s/%s-%s - %s",
ans = errors.New(
fmt.Sprintf("[%9s] %s/%s-%s: Broken Dep %s/%s-%s - %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
errstr))

validpkg = false

} else {

Debug(fmt.Sprintf("[%9s] Find packages for dep", checkType),
fmt.Sprintf("%s/%s-%s", r.GetCategory(), r.GetName(), r.GetVersion()))

if opts.WithSolver {

Info(fmt.Sprintf("[%9s] :soap: [%2d/%2d] %s/%s-%s: %s/%s-%s",
checkType,
idx+1, len(all),
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
errstr,
))

errs <- errors.New(
fmt.Sprintf("%s/%s-%s: Broken Dep %s/%s-%s - %s",
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
errstr))

brokenDeps++

validpkg = false

} else {

Debug("Find packages for dep",
fmt.Sprintf("%s/%s-%s", r.GetCategory(), r.GetName(), r.GetVersion()))

if withSolver {

Info(fmt.Sprintf(" :soap: [%2d/%2d] %s/%s-%s: %s/%s-%s",
idx+1, len(all),
// Check if the solver is already been done for the deep
_, err := cacheDeps.Get(r.HashFingerprint(""))
if err == nil {
Debug(fmt.Sprintf("[%9s] :direct_hit: Cache Hit for dep", checkType),
fmt.Sprintf("%s/%s-%s", r.GetCategory(), r.GetName(), r.GetVersion()))
continue
}

Spinner(32)
solution, err := depSolver.Install(pkg.Packages{r})
ass := solution.SearchByName(r.GetPackageName())
if err == nil {
_, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
}
SpinnerStop()

if err != nil {

Error(fmt.Sprintf("[%9s] %s/%s-%s: solver broken for dep %s/%s-%s - %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
err.Error(),
))

// Check if the solver is already been done for the deep
_, err := cacheDeps.Get(r.HashFingerprint())
if err == nil {
Debug(" :direct_hit: Cache Hit for dep",
fmt.Sprintf("%s/%s-%s", r.GetCategory(), r.GetName(), r.GetVersion()))
continue
}

Spinner(32)
solution, err := depSolver.Install(pkg.Packages{r})
ass := solution.SearchByName(r.GetPackageName())
if err == nil {
_, err = solution.Order(reciper.GetDatabase(), ass.Package.GetFingerPrint())
}
SpinnerStop()

if err != nil {

Error(fmt.Sprintf("%s/%s-%s: solver broken for dep %s/%s-%s - %s",
ans = errors.New(
fmt.Sprintf("[%9s] %s/%s-%s: solver broken for Dep %s/%s-%s - %s",
checkType,
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
err.Error(),
))

errs <- errors.New(
fmt.Sprintf("%s/%s-%s: solver broken for Dep %s/%s-%s - %s",
p.GetCategory(), p.GetName(), p.GetVersion(),
r.GetCategory(), r.GetName(), r.GetVersion(),
err.Error()))

brokenDeps++
validpkg = false
}

// Register the key
cacheDeps.Set(r.HashFingerprint(), "1")
err.Error()))

opts.IncrBrokenDeps()
validpkg = false
}

// Register the key
cacheDeps.Set(r.HashFingerprint(""), "1")

}
}

}

if !validpkg {
opts.IncrBrokenPkgs()
}

return ans
}

func validateWorker(i int,
wg *sync.WaitGroup,
c <-chan pkg.Package,
opts *ValidateOpts) {

defer wg.Done()

for p := range c {

if opts.OnlyBuildtime {
// Check buildtime compiler/deps
err := validatePackage(p, "buildtime", opts, opts.BuildtimeReciper, opts.BuildtimeCacheDeps)
if err != nil {
opts.AddError(err)
continue
}
} else if opts.OnlyRuntime {

// Check runtime installer/deps
err := validatePackage(p, "runtime", opts, opts.RuntimeReciper, opts.RuntimeCacheDeps)
if err != nil {
opts.AddError(err)
continue
}

} else {

// Check runtime installer/deps
err := validatePackage(p, "runtime", opts, opts.RuntimeReciper, opts.RuntimeCacheDeps)
if err != nil {
opts.AddError(err)
continue
}

// Check buildtime compiler/deps
err = validatePackage(p, "buildtime", opts, opts.BuildtimeReciper, opts.BuildtimeCacheDeps)
if err != nil {
opts.AddError(err)
}

}

if !validpkg {
brokenPkgs++
}
}

func initOpts(opts *ValidateOpts, onlyRuntime, onlyBuildtime, withSolver bool, treePaths []string) {
var err error

opts.OnlyBuildtime = onlyBuildtime
opts.OnlyRuntime = onlyRuntime
opts.WithSolver = withSolver
opts.RuntimeReciper = nil
opts.BuildtimeReciper = nil
opts.BrokenPkgs = 0
opts.BrokenDeps = 0

if onlyBuildtime {
opts.BuildtimeReciper = (tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))).(*tree.CompilerRecipe)
} else if onlyRuntime {
opts.RuntimeReciper = (tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))).(*tree.InstallerRecipe)
} else {
opts.BuildtimeReciper = (tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))).(*tree.CompilerRecipe)
opts.RuntimeReciper = (tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))).(*tree.InstallerRecipe)
}

opts.RuntimeCacheDeps = pkg.NewInMemoryDatabase(false).(*pkg.InMemoryDatabase)
opts.BuildtimeCacheDeps = pkg.NewInMemoryDatabase(false).(*pkg.InMemoryDatabase)

for _, treePath := range treePaths {
Info(fmt.Sprintf("Loading :deciduous_tree: %s...", treePath))
if opts.BuildtimeReciper != nil {
err = opts.BuildtimeReciper.Load(treePath)
if err != nil {
Fatal("Error on load tree ", err)
}
}
if opts.RuntimeReciper != nil {
err = opts.RuntimeReciper.Load(treePath)
if err != nil {
Fatal("Error on load tree ", err)
}
}
}

opts.RegExcludes, err = helpers.CreateRegexArray(opts.Excludes)
if err != nil {
Fatal(err.Error())
}
opts.RegMatches, err = helpers.CreateRegexArray(opts.Matches)
if err != nil {
Fatal(err.Error())
}

}

func NewTreeValidateCommand() *cobra.Command {
var excludes []string
var matches []string
var treePaths []string
var opts ValidateOpts

var ans = &cobra.Command{
Use: "validate [OPTIONS]",
Short: "Validate a tree or a list of packages",
Args: cobra.OnlyValidArgs,
PreRun: func(cmd *cobra.Command, args []string) {
onlyRuntime, _ := cmd.Flags().GetBool("only-runtime")
onlyBuildtime, _ := cmd.Flags().GetBool("only-buildtime")

if len(treePaths) < 1 {
Fatal("Mandatory tree param missing.")
}
if onlyRuntime && onlyBuildtime {
Fatal("Both --only-runtime and --only-buildtime options are not possibile.")
}
},
Run: func(cmd *cobra.Command, args []string) {
var reciper tree.Builder

concurrency := LuetCfg.GetGeneral().Concurrency

withSolver, _ := cmd.Flags().GetBool("with-solver")
onlyRuntime, _ := cmd.Flags().GetBool("only-runtime")
onlyBuildtime, _ := cmd.Flags().GetBool("only-buildtime")

reciper := tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))
for _, treePath := range treePaths {
err := reciper.Load(treePath)
if err != nil {
Fatal("Error on load tree ", err)
}
}
opts.Excludes = excludes
opts.Matches = matches
initOpts(&opts, onlyRuntime, onlyBuildtime, withSolver, treePaths)

regExcludes, err := helpers.CreateRegexArray(excludes)
if err != nil {
Fatal(err.Error())
}
regMatches, err := helpers.CreateRegexArray(matches)
if err != nil {
Fatal(err.Error())
// We need at least one valid reciper for get list of the packages.
if onlyBuildtime {
reciper = opts.BuildtimeReciper
} else {
reciper = opts.RuntimeReciper
}

all := make(chan pkg.Package)
errs := make(chan error)

var wg = new(sync.WaitGroup)

for i := 0; i < concurrency; i++ {
wg.Add(1)
go validateWorker(i, wg, all,
reciper, withSolver, regExcludes, regMatches, excludes, matches,
errs)
go validateWorker(i, wg, all, &opts)
}
for _, p := range reciper.GetDatabase().World() {
all <- p
@@ -286,11 +436,10 @@ func NewTreeValidateCommand() *cobra.Command {
// Wait separately and once done close the channel
go func() {
wg.Wait()
close(errs)
}()

stringerrs := []string{}
for e := range errs {
for _, e := range opts.Errors {
stringerrs = append(stringerrs, e.Error())
}
sort.Strings(stringerrs)
@@ -300,9 +449,9 @@ func NewTreeValidateCommand() *cobra.Command {

// fmt.Println("Broken packages:", brokenPkgs, "(", brokenDeps, "deps ).")
if len(stringerrs) != 0 {
Error(fmt.Sprintf("Found %d broken packages and %d broken deps.",
opts.BrokenPkgs, opts.BrokenDeps))
Fatal("Errors: " + strconv.Itoa(len(stringerrs)))
// if brokenPkgs > 0 {
//os.Exit(1)
} else {
Info("All good! :white_check_mark:")
os.Exit(0)
@@ -310,6 +459,8 @@ func NewTreeValidateCommand() *cobra.Command {
},
}

ans.Flags().Bool("only-runtime", false, "Check only runtime dependencies.")
ans.Flags().Bool("only-buildtime", false, "Check only buildtime dependencies.")
ans.Flags().BoolP("with-solver", "s", false,
"Enable check of requires also with solver.")
ans.Flags().StringSliceVarP(&treePaths, "tree", "t", []string{},
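Editor's note: the refactor above replaces the old per-worker counters and error channel with a shared ValidateOpts whose counters and error list are guarded by a mutex. The snippet below is an illustrative, self-contained sketch of that pattern (hypothetical names, not code from the repository): a pool of goroutines drains a channel of work items and records failures through a mutex-protected accumulator.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// opts mirrors the shape of ValidateOpts: shared state protected by a mutex,
// so concurrent workers can report results safely.
type opts struct {
	mu         sync.Mutex
	brokenPkgs int
	errs       []error
}

func (o *opts) addError(err error) {
	o.mu.Lock()
	defer o.mu.Unlock()
	o.brokenPkgs++
	o.errs = append(o.errs, err)
}

// worker stands in for validateWorker: it consumes package names from a channel
// and records a failure for any item that does not validate.
func worker(wg *sync.WaitGroup, jobs <-chan string, o *opts) {
	defer wg.Done()
	for name := range jobs {
		if name == "broken" { // stand-in for validatePackage returning an error
			o.addError(errors.New(name + ": validation failed"))
		}
	}
}

func main() {
	jobs := make(chan string)
	o := &opts{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go worker(&wg, jobs, o)
	}
	for _, p := range []string{"ok", "broken", "ok"} {
		jobs <- p
	}
	close(jobs)
	wg.Wait()
	fmt.Println(o.brokenPkgs, "broken packages,", len(o.errs), "errors")
}
```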
@@ -62,6 +62,7 @@ var upgradeCmd = &cobra.Command{
full, _ := cmd.Flags().GetBool("full")
universe, _ := cmd.Flags().GetBool("universe")
clean, _ := cmd.Flags().GetBool("clean")
sync, _ := cmd.Flags().GetBool("sync")

LuetCfg.GetSolverOptions().Type = stype
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
@@ -78,6 +79,7 @@ var upgradeCmd = &cobra.Command{
NoDeps: nodeps,
SolverUpgrade: universe,
RemoveUnavailableOnUpgrade: clean,
UpgradeNewRevisions: sync,
})
inst.Repositories(repos)
_, err := inst.SyncRepositories(false)
@@ -115,6 +117,7 @@ func init() {
upgradeCmd.Flags().Bool("full", true, "Attempts to remove as much packages as possible which aren't required (slow)")
upgradeCmd.Flags().Bool("universe", false, "Use ONLY the SAT solver to compute upgrades (experimental)")
upgradeCmd.Flags().Bool("clean", false, "Try to drop removed packages (experimental, only when --universe is enabled)")
upgradeCmd.Flags().Bool("sync", false, "Upgrade packages with new revisions (experimental)")

RootCmd.AddCommand(upgradeCmd)
}
@@ -16,6 +16,12 @@
# Enable JSON log format instead of console mode.
# json_format: false.
#
# Disable/Enable color
# color: true
#
# Enable/Disable emoji
# enable_emoji: true
#
# ---------------------------------------------
# General configuration section:
# ---------------------------------------------
@@ -27,6 +27,7 @@ import (
"path/filepath"
"regexp"

system "github.com/docker/docker/pkg/system"
gzip "github.com/klauspost/pgzip"

//"strconv"
@@ -477,6 +478,27 @@ type CopyJob struct {
Artifact string
}

func copyXattr(srcPath, dstPath, attr string) error {
data, err := system.Lgetxattr(srcPath, attr)
if err != nil {
return err
}
if data != nil {
if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
return err
}
}
return nil
}

func doCopyXattrs(srcPath, dstPath string) error {
if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
return err
}

return copyXattr(srcPath, dstPath, "trusted.overlay.opaque")
}

func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
defer wg.Done()

@@ -490,10 +512,13 @@ func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
// continue
// }

if !helpers.Exists(job.Dst) {
_, err := os.Lstat(job.Dst)
if err != nil {
fmt.Println("Copying ", job.Src)
if err := helpers.CopyFile(job.Src, job.Dst); err != nil {
Warning("Error copying", job, err)
}
doCopyXattrs(job.Src, job.Dst)
}
}
}
@@ -550,19 +575,33 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
}
}
}
for _, a := range l.Diffs.Changes {
Debug("File ", a.Name, " changed")
}
for _, a := range l.Diffs.Deletions {
Debug("File ", a.Name, " deleted")
}
}
} else {
// Otherwise just grab all
for _, l := range layers {
// Consider d.Additions (and d.Changes? - warn at least) only
for _, a := range l.Diffs.Additions {
Debug("File ", a.Name, " added")
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
}
for _, a := range l.Diffs.Changes {
Debug("File ", a.Name, " changed")
}
for _, a := range l.Diffs.Deletions {
Debug("File ", a.Name, " deleted")
}
}
}

close(toCopy)
wg.Wait()

a := NewPackageArtifact(dst)
a.SetCompressionType(t)
err = a.Compress(archive, concurrency)
pkg/compiler/backend/diff.go (new file, 199 lines)
@@ -0,0 +1,199 @@
// Copyright © 2020 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package backend

import (
"io/ioutil"
"os"
"path/filepath"
"strings"

"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/config"
"github.com/pkg/errors"
)

// GenerateChanges generates changes between two images using a backend by leveraging export/extractrootfs methods
// example of json return: [
//   {
//     "Image1": "luet/base",
//     "Image2": "alpine",
//     "DiffType": "File",
//     "Diff": {
//       "Adds": null,
//       "Dels": [
//         {
//           "Name": "/luetbuild",
//           "Size": 5830706
//         },
//         {
//           "Name": "/luetbuild/Dockerfile",
//           "Size": 50
//         },
//         {
//           "Name": "/luetbuild/output1",
//           "Size": 5830656
//         }
//       ],
//       "Mods": null
//     }
//   }
// ]
func GenerateChanges(b compiler.CompilerBackend, srcImage, dstImage string) ([]compiler.ArtifactLayer, error) {

res := compiler.ArtifactLayer{FromImage: srcImage, ToImage: dstImage}

tmpdiffs, err := config.LuetCfg.GetSystem().TempDir("extraction")
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(tmpdiffs) // clean up

srcRootFS, err := ioutil.TempDir(tmpdiffs, "src")
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(srcRootFS) // clean up

dstRootFS, err := ioutil.TempDir(tmpdiffs, "dst")
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(dstRootFS) // clean up

// Handle both files (.tar) or images. If parameters are beginning with / , don't export the images
if !strings.HasPrefix(srcImage, "/") {
srcImageTar, err := ioutil.TempFile(tmpdiffs, "srctar")
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}

defer os.Remove(srcImageTar.Name()) // clean up
srcImageExport := compiler.CompilerBackendOptions{
ImageName: srcImage,
Destination: srcImageTar.Name(),
}
err = b.ExportImage(srcImageExport)
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while exporting src image "+srcImage)
}
srcImage = srcImageTar.Name()
}

srcImageExtract := compiler.CompilerBackendOptions{
SourcePath: srcImage,
Destination: srcRootFS,
}
err = b.ExtractRootfs(srcImageExtract, false) // No need to keep permissions as we just collect file diffs
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking src image "+srcImage)
}

// Handle both files (.tar) or images. If parameters are beginning with / , don't export the images
if !strings.HasPrefix(dstImage, "/") {
dstImageTar, err := ioutil.TempFile(tmpdiffs, "dsttar")
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}

defer os.Remove(dstImageTar.Name()) // clean up
dstImageExport := compiler.CompilerBackendOptions{
ImageName: dstImage,
Destination: dstImageTar.Name(),
}
err = b.ExportImage(dstImageExport)
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while exporting dst image "+dstImage)
}
dstImage = dstImageTar.Name()
}

dstImageExtract := compiler.CompilerBackendOptions{
SourcePath: dstImage,
Destination: dstRootFS,
}
err = b.ExtractRootfs(dstImageExtract, false)
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking dst image "+dstImage)
}

// Get Additions/Changes. dst -> src
err = filepath.Walk(dstRootFS, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}

realpath := strings.Replace(path, dstRootFS, "", -1)
fileInfo, err := os.Lstat(filepath.Join(srcRootFS, realpath))
if err == nil {
var sizeA, sizeB int64
sizeA = fileInfo.Size()

if s, err := os.Lstat(filepath.Join(dstRootFS, realpath)); err == nil {
sizeB = s.Size()
}

if sizeA != sizeB {
// fmt.Println("File changed", path, filepath.Join(srcRootFS, realpath))
res.Diffs.Changes = append(res.Diffs.Changes, compiler.ArtifactNode{
Name: filepath.Join("/", realpath),
Size: int(sizeB),
})
} else {
// fmt.Println("File already exists", path, filepath.Join(srcRootFS, realpath))
}
} else {
var sizeB int64

if s, err := os.Lstat(filepath.Join(dstRootFS, realpath)); err == nil {
sizeB = s.Size()
}
res.Diffs.Additions = append(res.Diffs.Additions, compiler.ArtifactNode{
Name: filepath.Join("/", realpath),
Size: int(sizeB),
})

// fmt.Println("File created", path, filepath.Join(srcRootFS, realpath))
}

return nil
})
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image destination")
}

// Get deletions. src -> dst
err = filepath.Walk(srcRootFS, func(path string, info os.FileInfo, err error) error {
if info.IsDir() {
return nil
}

realpath := strings.Replace(path, srcRootFS, "", -1)
if _, err = os.Lstat(filepath.Join(dstRootFS, realpath)); err != nil {
// fmt.Println("File deleted", path, filepath.Join(srcRootFS, realpath))
res.Diffs.Deletions = append(res.Diffs.Deletions, compiler.ArtifactNode{
Name: filepath.Join("/", realpath),
})
}

return nil
})
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while walking image source")
}

return []compiler.ArtifactLayer{res}, nil
}
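Editor's note: GenerateChanges above classifies files by walking the two extracted root filesystems and comparing Lstat sizes. The standalone sketch below (illustrative only, with hypothetical directory paths) applies the same size-based classification to two plain directories: files only in dst are additions, size differences are changes, files only in src are deletions.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// diffTrees classifies files between two directory trees the same way
// GenerateChanges classifies files between two extracted image rootfs.
func diffTrees(src, dst string) (adds, changes, dels []string, err error) {
	err = filepath.Walk(dst, func(path string, info os.FileInfo, werr error) error {
		if werr != nil || info.IsDir() {
			return werr
		}
		rel := strings.TrimPrefix(path, dst)
		if s, serr := os.Lstat(filepath.Join(src, rel)); serr != nil {
			adds = append(adds, rel) // not in src: addition
		} else if s.Size() != info.Size() {
			changes = append(changes, rel) // size differs: change
		}
		return nil
	})
	if err != nil {
		return
	}
	err = filepath.Walk(src, func(path string, info os.FileInfo, werr error) error {
		if werr != nil || info.IsDir() {
			return werr
		}
		rel := strings.TrimPrefix(path, src)
		if _, serr := os.Lstat(filepath.Join(dst, rel)); serr != nil {
			dels = append(dels, rel) // not in dst: deletion
		}
		return nil
	})
	return
}

func main() {
	// hypothetical paths, e.g. two unpacked rootfs directories
	adds, changes, dels, err := diffTrees("/tmp/rootfs-old", "/tmp/rootfs-new")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("additions:", adds, "changes:", changes, "deletions:", dels)
}
```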
pkg/compiler/backend/diff_test.go (new file, 73 lines)
@@ -0,0 +1,73 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package backend_test

import (
"github.com/mudler/luet/pkg/compiler"
. "github.com/mudler/luet/pkg/compiler/backend"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

var _ = Describe("Docker image diffs", func() {
var b compiler.CompilerBackend

BeforeEach(func() {
b = NewSimpleDockerBackend()
})

Context("Generate diffs from docker images", func() {
It("Detect no changes", func() {
err := b.DownloadImage(compiler.CompilerBackendOptions{
ImageName: "alpine:latest",
})
Expect(err).ToNot(HaveOccurred())

layers, err := GenerateChanges(b, "alpine:latest", "alpine:latest")
Expect(err).ToNot(HaveOccurred())
Expect(len(layers)).To(Equal(1))
Expect(len(layers[0].Diffs.Additions)).To(Equal(0))
Expect(len(layers[0].Diffs.Changes)).To(Equal(0))
Expect(len(layers[0].Diffs.Deletions)).To(Equal(0))
})

It("Detects additions and changed files", func() {
err := b.DownloadImage(compiler.CompilerBackendOptions{
ImageName: "quay.io/mocaccino/micro",
})
Expect(err).ToNot(HaveOccurred())
err = b.DownloadImage(compiler.CompilerBackendOptions{
ImageName: "quay.io/mocaccino/extra",
})
Expect(err).ToNot(HaveOccurred())

layers, err := GenerateChanges(b, "quay.io/mocaccino/micro", "quay.io/mocaccino/extra")
Expect(err).ToNot(HaveOccurred())
Expect(len(layers)).To(Equal(1))

Expect(len(layers[0].Diffs.Changes) > 0).To(BeTrue())
Expect(len(layers[0].Diffs.Changes[0].Name) > 0).To(BeTrue())
Expect(layers[0].Diffs.Changes[0].Size > 0).To(BeTrue())

Expect(len(layers[0].Diffs.Additions) > 0).To(BeTrue())
Expect(len(layers[0].Diffs.Additions[0].Name) > 0).To(BeTrue())
Expect(layers[0].Diffs.Additions[0].Size > 0).To(BeTrue())

Expect(len(layers[0].Diffs.Deletions)).To(Equal(0))
})
})
})
@@ -16,7 +16,6 @@
package backend

import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
@@ -222,64 +221,9 @@ func (*SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPer
return nil
}

// container-diff diff daemon://luet/base alpine --type=file -j
// [
//   {
//     "Image1": "luet/base",
//     "Image2": "alpine",
//     "DiffType": "File",
//     "Diff": {
//       "Adds": null,
//       "Dels": [
//         {
//           "Name": "/luetbuild",
//           "Size": 5830706
//         },
//         {
//           "Name": "/luetbuild/Dockerfile",
//           "Size": 50
//         },
//         {
//           "Name": "/luetbuild/output1",
//           "Size": 5830656
//         }
//       ],
//       "Mods": null
//     }
//   }
// ]
// Changes uses container-diff (https://github.com/GoogleContainerTools/container-diff) for retrieving out layer diffs
func (*SimpleDocker) Changes(fromImage, toImage string) ([]compiler.ArtifactLayer, error) {
tmpdiffs, err := config.LuetCfg.GetSystem().TempDir("tmpdiffs")
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
}
defer os.RemoveAll(tmpdiffs) // clean up
var errorBuffer bytes.Buffer

diffargs := []string{"diff", fromImage, toImage, "-v", "error", "-q", "--type=file", "-j", "-n", "-c", tmpdiffs}
cmd := exec.Command("container-diff", diffargs...)
cmd.Stderr = &errorBuffer
out, err := cmd.Output()

if string(errorBuffer.Bytes()) != "" {
Warning("container-diff errored with: " + string(errorBuffer.Bytes()))
}

if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Failed Resolving layer diffs: "+string(out))
}

if config.LuetCfg.GetGeneral().ShowBuildOutput {
Info(string(out))
}

var diffs []compiler.ArtifactLayer

err = json.Unmarshal(out, &diffs)
if err != nil {
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Failed unmarshalling json response: "+string(out))
}
// Changes retrieves changes between image layers
func (d *SimpleDocker) Changes(fromImage, toImage string) ([]compiler.ArtifactLayer, error) {
diffs, err := GenerateChanges(d, fromImage, toImage)

if config.LuetCfg.GetGeneral().Debug {
summary := compiler.ComputeArtifactLayerSummary(diffs)
@@ -292,5 +236,5 @@ func (*SimpleDocker) Changes(fromImage, toImage string) ([]compiler.ArtifactLaye
}
}

return diffs, nil
return diffs, err
}
@@ -149,8 +149,8 @@ func (*SimpleImg) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms

// TODO: Use container-diff (https://github.com/GoogleContainerTools/container-diff) for checking out layer diffs
// Changes uses container-diff (https://github.com/GoogleContainerTools/container-diff) for retrieving out layer diffs
func (*SimpleImg) Changes(fromImage, toImage string) ([]compiler.ArtifactLayer, error) {
return NewSimpleDockerBackend().Changes(fromImage, toImage)
func (i *SimpleImg) Changes(fromImage, toImage string) ([]compiler.ArtifactLayer, error) {
return GenerateChanges(i, fromImage, toImage)
}

func (*SimpleImg) Push(opts compiler.CompilerBackendOptions) error {
@@ -23,6 +23,7 @@ import (
"regexp"
"strings"
"sync"
"time"

"github.com/mudler/luet/pkg/helpers"
. "github.com/mudler/luet/pkg/logger"
@@ -229,21 +230,37 @@ func (cs *LuetCompiler) stripIncludesFromRootfs(includes []string, rootfs string
}

func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage string, concurrency int, keepPermissions, keepImg bool, p CompilationSpec) (Artifact, error) {
fp := p.GetPackage().HashFingerprint()

pkgTag := ":package: " + p.GetPackage().GetName()

// Use packageImage as salt into the fp being used
// so the hash is unique also in cases where
// some package deps does have completely different
// depgraphs
// TODO: As the salt contains the packageImage ( in registry/organization/imagename:tag format)
// the images hashes are broken with registry mirrors.
// We should use the image tag, or pass by the package assertion hash which is unique
// and identifies the deptree of the package.

fp := p.GetPackage().HashFingerprint(packageImage)

if buildertaggedImage == "" {
buildertaggedImage = cs.ImageRepository + "-" + fp + "-builder"
Debug(pkgTag, "Creating intermediary image", buildertaggedImage, "from", image)
}

// TODO: Cleanup, not actually hit
if packageImage == "" {
packageImage = cs.ImageRepository + "-" + fp
}

if !cs.Clean {
exists := cs.Backend.ImageExists(buildertaggedImage) && cs.Backend.ImageExists(packageImage)
if art, err := LoadArtifactFromYaml(p); err == nil && exists {
if art, err := LoadArtifactFromYaml(p); err == nil && (cs.Options.SkipIfMetadataExists || exists) {
Debug("Artifact reloaded. Skipping build")
return art, err
}
}
pkgTag := ":package: " + p.GetPackage().GetName()

p.SetSeedImage(image) // In this case, we ignore the build deps as we suppose that the image has them - otherwise we recompose the tree with a solver,
// and we build all the images first.
@@ -355,7 +372,6 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
}
// }

var diffs []ArtifactLayer
var artifact Artifact
unpack := p.ImageUnpack()

@@ -365,14 +381,6 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
unpack = true
}

if !unpack {
// we have to get diffs only if spec is not unpacked
diffs, err = cs.Backend.Changes(p.Rel(p.GetPackage().GetFingerPrint()+"-builder.image.tar"), p.Rel(p.GetPackage().GetFingerPrint()+".image.tar"))
if err != nil {
return nil, errors.Wrap(err, "Could not generate changes from layers")
}
}

rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
if err != nil {
return nil, errors.Wrap(err, "Could not create tempdir")
@@ -415,6 +423,7 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
}
artifact = NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
artifact.SetCompressionType(cs.CompressionType)

err = artifact.Compress(rootfs, concurrency)
if err != nil {
return nil, errors.Wrap(err, "Error met while creating package archive")
@@ -423,7 +432,10 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
artifact.SetCompileSpec(p)
} else {
Info(pkgTag, "Generating delta")

diffs, err := cs.Backend.Changes(p.Rel(p.GetPackage().GetFingerPrint()+"-builder.image.tar"), p.Rel(p.GetPackage().GetFingerPrint()+".image.tar"))
if err != nil {
return nil, errors.Wrap(err, "Could not generate changes from layers")
}
artifact, err = ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes(), cs.CompressionType)
if err != nil {
return nil, errors.Wrap(err, "Could not generate deltas")
@@ -439,6 +451,8 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage

artifact.SetFiles(filelist)

artifact.GetCompileSpec().GetPackage().SetBuildTimestamp(time.Now().String())

err = artifact.WriteYaml(p.GetOutputPath())
if err != nil {
return artifact, errors.Wrap(err, "Failed while writing metadata file")
@@ -513,7 +527,6 @@ func (cs *LuetCompiler) ComputeDepTree(p CompilationSpec) (solver.PackagesAssert
for _, assertion := range dependencies { //highly dependent on the order
if assertion.Value {
nthsolution := dependencies.Cut(assertion.Package)

assertion.Hash = solver.PackageHash{
BuildHash: nthsolution.HashFrom(assertion.Package),
PackageHash: nthsolution.AssertionHash(),
@@ -540,7 +553,8 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila

if len(p.GetPackage().GetRequires()) == 0 && p.GetImage() == "" {
Error("Package with no deps and no seed image supplied, bailing out")
return nil, errors.New("Package " + p.GetPackage().GetFingerPrint() + "with no deps and no seed image supplied, bailing out")
return nil, errors.New("Package " + p.GetPackage().GetFingerPrint() +
" with no deps and no seed image supplied, bailing out")
}

targetAssertion := p.GetSourceAssertion().Search(p.GetPackage().GetFingerPrint())
@@ -52,9 +52,10 @@ type CompilerOptions struct {
Clean bool
KeepImageExport bool

OnlyDeps bool
NoDeps bool
SolverOptions config.LuetSolverOptions
OnlyDeps bool
NoDeps bool
SolverOptions config.LuetSolverOptions
SkipIfMetadataExists bool
}

func NewDefaultCompilerOptions() *CompilerOptions {
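A condensed sketch of how the new SkipIfMetadataExists option interacts with the existing image check in compileWithImage; the types and helper below are illustrative stand-ins, not the actual luet compiler API:

package main

import "fmt"

// options is an illustrative stand-in for CompilerOptions with only the
// fields relevant here.
type options struct {
	Clean                bool
	SkipIfMetadataExists bool
}

// shouldSkipBuild condenses the check above: with Clean disabled, a build is
// skipped when the artifact metadata could be loaded and either both images
// still exist or SkipIfMetadataExists is set.
func shouldSkipBuild(opts options, metadataLoaded, builderExists, packageExists bool) bool {
	if opts.Clean {
		return false
	}
	exists := builderExists && packageExists
	return metadataLoaded && (opts.SkipIfMetadataExists || exists)
}

func main() {
	fmt.Println(shouldSkipBuild(options{SkipIfMetadataExists: true}, true, false, false)) // true
	fmt.Println(shouldSkipBuild(options{}, true, true, false))                            // false
}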
@@ -36,15 +36,15 @@ import (
)

type LuetInstallerOptions struct {
SolverOptions config.LuetSolverOptions
Concurrency int
NoDeps bool
OnlyDeps bool
Force bool
PreserveSystemEssentialData bool
FullUninstall, FullCleanUninstall bool
CheckConflicts bool
SolverUpgrade, RemoveUnavailableOnUpgrade bool
SolverOptions config.LuetSolverOptions
Concurrency int
NoDeps bool
OnlyDeps bool
Force bool
PreserveSystemEssentialData bool
FullUninstall, FullCleanUninstall bool
CheckConflicts bool
SolverUpgrade, RemoveUnavailableOnUpgrade, UpgradeNewRevisions bool
}

type LuetInstaller struct {
@@ -90,12 +90,18 @@ func (l *LuetInstaller) Upgrade(s *System) error {
}
}

Info("Marked for uninstall")
if len(uninstall) > 0 {
Info("Packages marked for uninstall:")
}

for _, p := range uninstall {
Info(fmt.Sprintf("- %s", p.HumanReadableString()))
}

Info("Marked for upgrade")
if len(solution) > 0 {
Info("Packages marked for upgrade:")
}

toInstall := pkg.Packages{}
for _, assertion := range solution {
// Be sure to filter from solutions packages already installed in the system
@@ -105,6 +111,37 @@ func (l *LuetInstaller) Upgrade(s *System) error {
}
}

if l.Options.UpgradeNewRevisions {
Info("Checking packages with new revisions available")
for _, p := range s.Database.World() {
matches := syncedRepos.PackageMatches(pkg.Packages{p})
if len(matches) == 0 {
// Package missing. the user should run luet upgrade --universe
Info("Installed packages seems to be missing from remote repositories.")
Info("It is suggested to run 'luet upgrade --universe'")
continue
}
for _, artefact := range matches[0].Repo.GetIndex() {
if artefact.GetCompileSpec().GetPackage() == nil {
return errors.New("Package in compilespec empty")

}
if artefact.GetCompileSpec().GetPackage().Matches(p) && artefact.GetCompileSpec().GetPackage().GetBuildTimestamp() != p.GetBuildTimestamp() {
toInstall = append(toInstall, matches[0].Package).Unique()
uninstall = append(uninstall, p).Unique()
Info(
fmt.Sprintf("- %s ( %s vs %s ) repo: %s (date: %s)",
p.HumanReadableString(),
artefact.GetCompileSpec().GetPackage().GetBuildTimestamp(),
p.GetBuildTimestamp(),
matches[0].Repo.GetName(),
matches[0].Repo.GetLastUpdate(),
))
}
}
}
}

return l.swap(syncedRepos, uninstall, toInstall, s)
}
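The revision check above boils down to comparing build timestamps for the same package; a toy sketch of that idea (the types here are simplified stand-ins, not the luet installer API):

package main

import "fmt"

// candidate is a toy stand-in for an installed package and its repository
// counterpart; only the fields relevant to the revision check are kept.
type candidate struct {
	Name           string
	BuildTimestamp string
}

// needsRevisionUpgrade reports whether the repository carries the same
// package with a different build timestamp, which is the condition the
// UpgradeNewRevisions path reacts to.
func needsRevisionUpgrade(installed, inRepo candidate) bool {
	return installed.Name == inRepo.Name &&
		installed.BuildTimestamp != inRepo.BuildTimestamp
}

func main() {
	installed := candidate{Name: "test/b", BuildTimestamp: "2020-05-01 10:00:00"}
	rebuilt := candidate{Name: "test/b", BuildTimestamp: "2020-05-02 12:00:00"}
	fmt.Println(needsRevisionUpgrade(installed, rebuilt)) // true: same package, new revision
}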
@@ -349,6 +386,7 @@ func (l *LuetInstaller) install(syncedRepos Repositories, cp pkg.Packages, s *Sy

}
if matches[0].Package.Matches(artefact.GetCompileSpec().GetPackage()) {
currentPack.SetBuildTimestamp(artefact.GetCompileSpec().GetPackage().GetBuildTimestamp())
// Filter out already installed
if _, err := s.Database.FindPackage(currentPack); err != nil {
toInstall[currentPack.GetFingerPrint()] = ArtifactMatch{Package: currentPack, Artifact: artefact, Repository: matches[0].Repo}
@@ -78,23 +78,25 @@ func Spinner(i int) {
i = 43
}

if !s.Active() {
if s != nil && !s.Active() {
// s.UpdateCharSet(spinner.CharSets[i])
s.Start() // Start the spinner
}
}

func SpinnerText(suffix, prefix string) {
s.Lock()
defer s.Unlock()
if LuetCfg.GetGeneral().Debug {
fmt.Println(fmt.Sprintf("%s %s",
Bold(Cyan(prefix)).String(),
Bold(Magenta(suffix)).BgBlack().String(),
))
} else {
s.Suffix = Bold(Magenta(suffix)).BgBlack().String()
s.Prefix = Bold(Cyan(prefix)).String()
if s != nil {
s.Lock()
defer s.Unlock()
if LuetCfg.GetGeneral().Debug {
fmt.Println(fmt.Sprintf("%s %s",
Bold(Cyan(prefix)).String(),
Bold(Magenta(suffix)).BgBlack().String(),
))
} else {
s.Suffix = Bold(Magenta(suffix)).BgBlack().String()
s.Prefix = Bold(Cyan(prefix)).String()
}
}
}
@@ -108,7 +110,9 @@ func SpinnerStop() {
if 2 > confLevel {
return
}
s.Stop()
if s != nil {
s.Stop()
}
}

func level2Number(level string) int {
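A toy illustration of the nil guards added around the shared spinner; the spinner type below is a stand-in, not the actual spinner library used by luet:

package main

import "fmt"

// fakeSpinner is a stand-in for the terminal spinner; the real one lives in
// a third-party library and is stored in a package-level variable that may
// never be initialized (for example in debug or non-interactive runs).
type fakeSpinner struct{ active bool }

func (f *fakeSpinner) Start() { f.active = true }
func (f *fakeSpinner) Stop()  { f.active = false }

var s *fakeSpinner // may legitimately stay nil

// spinnerStop shows the shape of the guard added above: every call site
// checks for nil before touching the spinner, avoiding a panic.
func spinnerStop() {
	if s != nil {
		s.Stop()
	}
}

func main() {
	spinnerStop() // safe even though s was never created
	fmt.Println("no panic")
}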
@@ -42,8 +42,7 @@ type Package interface {
Encode(PackageDatabase) (string, error)

BuildFormula(PackageDatabase, PackageDatabase) ([]bf.Formula, error)
IsFlagged(bool) Package
Flagged() bool

GetFingerPrint() string
GetPackageName() string
Requires([]*DefaultPackage) Package
@@ -65,6 +64,7 @@ type Package interface {
GetCategory() string

GetVersion() string
SetVersion(string)
RequiresContains(PackageDatabase, Package) (bool, error)
Matches(m Package) bool
BumpBuildVersion() error
@@ -99,13 +99,17 @@ type Package interface {
HasAnnotation(string) bool
MatchAnnotation(*regexp.Regexp) bool

IsHidden() bool
IsSelector() bool
VersionMatchSelector(string, version.Versioner) (bool, error)
SelectorMatchVersion(string, version.Versioner) (bool, error)

String() string
HumanReadableString() string
HashFingerprint() string
HashFingerprint(string) string

SetBuildTimestamp(s string)
GetBuildTimestamp() string

Clone() Package
}
@@ -161,8 +165,8 @@ type DefaultPackage struct {
State State `json:"state,omitempty"`
PackageRequires []*DefaultPackage `json:"requires"` // Affects YAML field names too.
PackageConflicts []*DefaultPackage `json:"conflicts"` // Affects YAML field names too.
IsSet bool `json:"set,omitempty"` // Affects YAML field names too.
Provides []*DefaultPackage `json:"provides,omitempty"` // Affects YAML field names too.
Hidden bool `json:"hidden,omitempty"` // Affects YAML field names too.

// Annotations are used for core features/options
Annotations map[string]string `json:"annotations,omitempty"` // Affects YAML field names too
@@ -170,9 +174,10 @@ type DefaultPackage struct {
// Path is set only internally when tree is loaded from disk
Path string `json:"path,omitempty"`

Description string `json:"description,omitempty"`
Uri []string `json:"uri,omitempty"`
License string `json:"license,omitempty"`
Description string `json:"description,omitempty"`
Uri []string `json:"uri,omitempty"`
License string `json:"license,omitempty"`
BuildTimestamp string `json:"buildtimestamp,omitempty"`

Labels map[string]string `json:"labels,omitempty"` // Affects YAML field names too.
}
@@ -205,9 +210,9 @@ func (p *DefaultPackage) GetFingerPrint() string {
return fmt.Sprintf("%s-%s-%s", p.Name, p.Category, p.Version)
}

func (p *DefaultPackage) HashFingerprint() string {
func (p *DefaultPackage) HashFingerprint(salt string) string {
h := md5.New()
io.WriteString(h, p.GetFingerPrint())
io.WriteString(h, fmt.Sprintf("%s-%s", p.GetFingerPrint(), salt))
return fmt.Sprintf("%x", h.Sum(nil))
}
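A self-contained sketch of the salted fingerprint hashing shown above, using only the standard library; the fingerprint and salt values are made up for illustration:

package main

import (
	"crypto/md5"
	"fmt"
	"io"
)

// hashFingerprint mirrors the salted variant above: the salt is appended to
// the fingerprint before hashing, so the same package hashes differently
// when built against a different package image.
func hashFingerprint(fingerprint, salt string) string {
	h := md5.New()
	io.WriteString(h, fmt.Sprintf("%s-%s", fingerprint, salt))
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	fp := "a-test-1.0" // hypothetical fingerprint, not taken from the repository
	fmt.Println(hashFingerprint(fp, ""))                           // unsalted
	fmt.Println(hashFingerprint(fp, "registry.org/org/image:tag")) // salted: different hash
}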
@@ -229,6 +234,16 @@ func (p *DefaultPackage) GetPackageName() string {
return fmt.Sprintf("%s-%s", p.Name, p.Category)
}

// GetBuildTimestamp returns the package build timestamp
func (p *DefaultPackage) GetBuildTimestamp() string {
return p.BuildTimestamp
}

// SetBuildTimestamp sets the package Build timestamp
func (p *DefaultPackage) SetBuildTimestamp(s string) {
p.BuildTimestamp = s
}

// GetPath returns the path where the definition file was found
func (p *DefaultPackage) GetPath() string {
return p.Path
@@ -246,6 +261,10 @@ func (p *DefaultPackage) IsSelector() bool {
return strings.ContainsAny(p.GetVersion(), "<>=")
}

func (p *DefaultPackage) IsHidden() bool {
return p.Hidden
}

func (p *DefaultPackage) HasLabel(label string) bool {
return helpers.MapHasKey(&p.Labels, label)
}
@@ -302,15 +321,6 @@ func (p *DefaultPackage) Yaml() ([]byte, error) {
return y, nil
}

func (p *DefaultPackage) IsFlagged(b bool) Package {
p.IsSet = b
return p
}

func (p *DefaultPackage) Flagged() bool {
return p.IsSet
}

func (p *DefaultPackage) GetName() string {
return p.Name
}
@@ -318,6 +328,9 @@ func (p *DefaultPackage) GetName() string {
func (p *DefaultPackage) GetVersion() string {
return p.Version
}
func (p *DefaultPackage) SetVersion(v string) {
p.Version = v
}
func (p *DefaultPackage) GetDescription() string {
return p.Description
}
@@ -749,7 +762,6 @@ func (p *DefaultPackage) Explain() {
fmt.Println("Name: ", p.GetName())
fmt.Println("Category: ", p.GetCategory())
fmt.Println("Version: ", p.GetVersion())
fmt.Println("Installed: ", p.IsSet)

for _, req := range p.GetRequires() {
fmt.Println("\t-> ", req)
@@ -36,8 +36,8 @@ var _ = Describe("Package", func() {
})

It("Generates packages fingerprint's hashes", func() {
Expect(a.HashFingerprint()).ToNot(Equal(a1.HashFingerprint()))
Expect(a.HashFingerprint()).To(Equal("c64caa391b79adb598ad98e261aa79a0"))
Expect(a.HashFingerprint("")).ToNot(Equal(a1.HashFingerprint("")))
Expect(a.HashFingerprint("")).To(Equal("76972ef6991ec6102f33b401105c1351"))
})
})

@@ -314,7 +314,6 @@ var _ = Describe("Package", func() {

Expect(p.GetVersion()).To(Equal(a.GetVersion()))
Expect(p.GetName()).To(Equal(a.GetName()))
Expect(p.Flagged()).To(Equal(a.Flagged()))
Expect(p.GetFingerPrint()).To(Equal(a.GetFingerPrint()))
Expect(len(p.GetConflicts())).To(Equal(len(a.GetConflicts())))
Expect(len(p.GetRequires())).To(Equal(len(a.GetRequires())))
@@ -374,7 +373,6 @@ var _ = Describe("Package", func() {
a2 := a.Clone()
Expect(a2.GetVersion()).To(Equal(a.GetVersion()))
Expect(a2.GetName()).To(Equal(a.GetName()))
Expect(a2.Flagged()).To(Equal(a.Flagged()))
Expect(a2.GetFingerPrint()).To(Equal(a.GetFingerPrint()))
Expect(len(a2.GetConflicts())).To(Equal(len(a.GetConflicts())))
Expect(len(a2.GetRequires())).To(Equal(len(a.GetRequires())))
@@ -28,7 +28,7 @@ import (
)

func LoadRepositories(c *LuetConfig) error {
var regexRepo = regexp.MustCompile(`.yml$`)
var regexRepo = regexp.MustCompile(`.yml$|.yaml$`)

for _, rdir := range c.RepositoriesConfDir {
Debug("Parsing Repository Directory", rdir, "...")
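The updated pattern now accepts both extensions; a quick standalone check of the regular expression from the change above:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as the change above: repository configuration files may
	// now end in either .yml or .yaml.
	regexRepo := regexp.MustCompile(`.yml$|.yaml$`)

	for _, name := range []string{"repo-test1.yml", "repo-test2.yaml", "notes.txt"} {
		fmt.Println(name, regexRepo.MatchString(name))
	}
}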
@@ -35,12 +35,22 @@ var _ = Describe("Repository", func() {

It("Chec Load Repository 1", func() {
Expect(err).Should(BeNil())
Expect(len(cfg.SystemRepositories)).Should(Equal(1))
Expect(len(cfg.SystemRepositories)).Should(Equal(2))
Expect(cfg.SystemRepositories[0].Name).Should(Equal("test1"))
Expect(cfg.SystemRepositories[0].Priority).Should(Equal(999))
Expect(cfg.SystemRepositories[0].Type).Should(Equal("disk"))
Expect(len(cfg.SystemRepositories[0].Urls)).Should(Equal(1))
Expect(cfg.SystemRepositories[0].Urls[0]).Should(Equal("tests/repos/test1"))
})

It("Chec Load Repository 2", func() {
Expect(err).Should(BeNil())
Expect(len(cfg.SystemRepositories)).Should(Equal(2))
Expect(cfg.SystemRepositories[1].Name).Should(Equal("test2"))
Expect(cfg.SystemRepositories[1].Priority).Should(Equal(1000))
Expect(cfg.SystemRepositories[1].Type).Should(Equal("disk"))
Expect(len(cfg.SystemRepositories[1].Urls)).Should(Equal(1))
Expect(cfg.SystemRepositories[1].Urls[0]).Should(Equal("tests/repos/test2"))
})
})
})
@@ -532,7 +532,7 @@ func (s *Solver) Uninstall(c pkg.Package, checkconflicts, full bool) (pkg.Packag
for _, a := range asserts {
if a.Value {
if !checkconflicts {
res = append(res, a.Package.IsFlagged(false))
res = append(res, a.Package)
continue
}

@@ -543,7 +543,7 @@ func (s *Solver) Uninstall(c pkg.Package, checkconflicts, full bool) (pkg.Packag

// If doesn't conflict with installed we just consider it for removal and look for the next one
if !c {
res = append(res, a.Package.IsFlagged(false))
res = append(res, a.Package)
continue
}

@@ -553,7 +553,7 @@ func (s *Solver) Uninstall(c pkg.Package, checkconflicts, full bool) (pkg.Packag
return nil, err
}
if !c {
res = append(res, a.Package.IsFlagged(false))
res = append(res, a.Package)
}

}
@@ -596,7 +596,7 @@ var _ = Describe("Solver", func() {
solution, err := s.Uninstall(A, true, true)
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(A))

// Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
Expect(len(solution)).To(Equal(1))
@@ -622,7 +622,7 @@ var _ = Describe("Solver", func() {
solution, err := s.Uninstall(&pkg.DefaultPackage{Name: "A", Version: ">1.0"}, true, true)
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(A))

// Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
Expect(len(solution)).To(Equal(1))
@@ -646,7 +646,7 @@ var _ = Describe("Solver", func() {
solution, err := s.Uninstall(A, true, true)
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(A))

// Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
Expect(len(solution)).To(Equal(1))
@@ -670,7 +670,7 @@ var _ = Describe("Solver", func() {
solution, err := s.Uninstall(A, true, true)
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(A))

Expect(len(solution)).To(Equal(1))
})
@@ -693,8 +693,8 @@ var _ = Describe("Solver", func() {
solution, err := s.Uninstall(A, true, true)
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).ToNot(ContainElement(B.IsFlagged(false)))
Expect(solution).To(ContainElement(A))
Expect(solution).ToNot(ContainElement(B))

Expect(len(solution)).To(Equal(1))
})
@@ -718,8 +718,8 @@ var _ = Describe("Solver", func() {
solution, err := s.Uninstall(A, true, true)
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(C.IsFlagged(false)))
Expect(solution).To(ContainElement(A))
Expect(solution).To(ContainElement(C))

Expect(len(solution)).To(Equal(2))
})
@@ -744,9 +744,9 @@ var _ = Describe("Solver", func() {
solution, err := s.Uninstall(A, true, true)
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(B.IsFlagged(false)))
Expect(solution).To(ContainElement(D.IsFlagged(false)))
Expect(solution).To(ContainElement(A))
Expect(solution).To(ContainElement(B))
Expect(solution).To(ContainElement(D))

Expect(len(solution)).To(Equal(3))
@@ -773,7 +773,7 @@ var _ = Describe("Solver", func() {
solution, err := s.UninstallUniverse(pkg.Packages{A})
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(A))

// Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
Expect(len(solution)).To(Equal(1))
@@ -800,7 +800,7 @@ var _ = Describe("Solver", func() {
&pkg.DefaultPackage{Name: "A", Version: ">1.0"}})
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(A))

// Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
Expect(len(solution)).To(Equal(1))
@@ -824,7 +824,7 @@ var _ = Describe("Solver", func() {
solution, err := s.UninstallUniverse(pkg.Packages{A})
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(A))

// Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
Expect(len(solution)).To(Equal(1))
@@ -848,8 +848,8 @@ var _ = Describe("Solver", func() {
solution, err := s.UninstallUniverse(pkg.Packages{A})
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(B.IsFlagged(false)))
Expect(solution).To(ContainElement(A))
Expect(solution).To(ContainElement(B))

Expect(len(solution)).To(Equal(2))
})
@@ -874,9 +874,9 @@ var _ = Describe("Solver", func() {
solution, err := s.UninstallUniverse(pkg.Packages{A})
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(B.IsFlagged(false)))
Expect(solution).To(ContainElement(C.IsFlagged(false)))
Expect(solution).To(ContainElement(A))
Expect(solution).To(ContainElement(B))
Expect(solution).To(ContainElement(C))

Expect(len(solution)).To(Equal(3))
})
@@ -900,8 +900,8 @@ var _ = Describe("Solver", func() {
solution, err := s.UninstallUniverse(pkg.Packages{A})
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(C.IsFlagged(false)))
Expect(solution).To(ContainElement(A))
Expect(solution).To(ContainElement(C))

Expect(len(solution)).To(Equal(2))
})
@@ -926,9 +926,9 @@ var _ = Describe("Solver", func() {
solution, err := s.UninstallUniverse(pkg.Packages{A})
Expect(err).ToNot(HaveOccurred())

Expect(solution).To(ContainElement(A.IsFlagged(false)))
Expect(solution).To(ContainElement(B.IsFlagged(false)))
Expect(solution).To(ContainElement(D.IsFlagged(false)))
Expect(solution).To(ContainElement(A))
Expect(solution).To(ContainElement(B))
Expect(solution).To(ContainElement(D))

Expect(len(solution)).To(Equal(3))
@@ -29,7 +29,6 @@ type DefaultPackageSanitized struct {
UseFlags []string `json:"use_flags,omitempty" yaml:"use_flags,omitempty"`
PackageRequires []*DefaultPackageSanitized `json:"requires,omitempty" yaml:"requires,omitempty"`
PackageConflicts []*DefaultPackageSanitized `json:"conflicts,omitempty" yaml:"conflicts,omitempty"`
IsSet bool `json:"set,omitempty" yaml:"set,omitempty"`
Provides []*DefaultPackageSanitized `json:"provides,omitempty" yaml:"provides,omitempty"`

Annotations map[string]string `json:"annotations,omitempty" yaml:"annotations,omitempty"`
@@ -40,6 +39,7 @@ type DefaultPackageSanitized struct {
Description string `json:"description,omitempty" yaml:"description,omitempty"`
Uri []string `json:"uri,omitempty" yaml:"uri,omitempty"`
License string `json:"license,omitempty" yaml:"license,omitempty"`
Hidden bool `json:"hidden,omitempty" yaml:"hidden,omitempty"`

Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
}
@@ -50,7 +50,7 @@ func NewDefaultPackageSanitized(p pkg.Package) *DefaultPackageSanitized {
Version: p.GetVersion(),
Category: p.GetCategory(),
UseFlags: p.GetUses(),
IsSet: p.Flagged(),
Hidden: p.IsHidden(),
Path: p.GetPath(),
Description: p.GetDescription(),
Uri: p.GetURI(),
@@ -68,6 +68,7 @@ func NewDefaultPackageSanitized(p pkg.Package) *DefaultPackageSanitized {
Name: r.Name,
Version: r.Version,
Category: r.Category,
Hidden: r.IsHidden(),
},
)
}
@@ -82,6 +83,7 @@ func NewDefaultPackageSanitized(p pkg.Package) *DefaultPackageSanitized {
Name: c.Name,
Version: c.Version,
Category: c.Category,
Hidden: c.IsHidden(),
},
)
}
@@ -96,6 +98,7 @@ func NewDefaultPackageSanitized(p pkg.Package) *DefaultPackageSanitized {
Name: prov.Name,
Version: prov.Version,
Category: prov.Category,
Hidden: prov.IsHidden(),
},
)
}
9
tests/fixtures/caps/pkgA/0.1/build.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
image: "alpine"
prelude:
- apk add libcap
unpack: true
includes:
- /file1
steps:
- echo "test" > /file1
- setcap cap_net_raw+ep /file1
3
tests/fixtures/caps/pkgA/0.1/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
category: "test"
name: "caps"
version: "0.1"
6
tests/fixtures/caps/pkgB/0.1/build.yaml
vendored
Normal file
@@ -0,0 +1,6 @@
image: "alpine"
prelude:
- apk add libcap
steps:
- echo "test" > /file2
- setcap cap_net_raw+ep /file2
3
tests/fixtures/caps/pkgB/0.1/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
category: "test"
name: "caps2"
version: "0.1"
9
tests/fixtures/repos.conf.d/repo-test2.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
name: "test2"
description: "Test2 Repository"
type: "disk"
urls:
- "tests/repos/test2"
priority: 1000
enable: true
# auth:
#   token: "xxxxx"
5
tests/fixtures/symlinks/pkgA/0.1/build.yaml
vendored
Normal file
@@ -0,0 +1,5 @@
image: "alpine"
prelude:
- echo "test" > /file2
steps:
- ln -s /file2 /file1
3
tests/fixtures/symlinks/pkgA/0.1/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
category: "test"
name: "pkgAsym"
version: "0.1"
8
tests/fixtures/symlinks/pkgB/0.1/build.yaml
vendored
Normal file
@@ -0,0 +1,8 @@
image: "alpine"
unpack: true
includes:
- /file3
prelude:
- echo "test" > /file2
steps:
- ln -s /file2 /file3
3
tests/fixtures/symlinks/pkgB/0.1/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
category: "test"
name: "pkgBsym"
version: "0.1"
10
tests/fixtures/upgrade_old_repo_revision/c/build.yaml
vendored
Normal file
@@ -0,0 +1,10 @@
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo c > /c
- echo c > /cd
requires:
- category: "test"
  name: "a"
  version: ">=1.0"
3
tests/fixtures/upgrade_old_repo_revision/c/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
category: "test"
name: "c"
version: "1.0"
11
tests/fixtures/upgrade_old_repo_revision/cat/a/a/build.yaml
vendored
Normal file
@@ -0,0 +1,11 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4
requires:
- category: "test"
  name: "b"
  version: "1.0"
8
tests/fixtures/upgrade_old_repo_revision/cat/a/a/definition.yaml
vendored
Normal file
@@ -0,0 +1,8 @@
category: "test"
name: "a"
version: "1.1"
requires:
- category: "test2"
  name: "b"
  version: "1.0"

9
tests/fixtures/upgrade_old_repo_revision/cat/b-1.1/build.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
- chmod +x generate.sh
steps:
- echo artifact5 > /newc
- echo artifact6 > /newnewc
- ./generate.sh
3
tests/fixtures/upgrade_old_repo_revision/cat/b-1.1/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"
1
tests/fixtures/upgrade_old_repo_revision/cat/b-1.1/generate.sh
vendored
Normal file
@@ -0,0 +1 @@
echo generated > /sonewc
@@ -60,19 +60,15 @@ testInstall() {
docker build --rm --no-cache -t luet:test .
docker rm luet-runtime-test || true
docker run --name luet-runtime-test \
-ti -v /tmp:/tmp \
-v /tmp:/tmp \
-v $tmpdir/luet.yaml:/etc/luet/luet.yaml:ro \
luet:test install seed/alpine
installst=$?
assertEquals 'install test successfully' "0" "$installst"

docker commit luet-runtime-test luet-runtime-test-image
test=$(docker run --rm -t --entrypoint /bin/sh luet-runtime-test-image -c 'echo "ftw"')
test=$(docker run --rm --entrypoint /bin/sh luet-runtime-test-image -c 'echo "ftw"')
assertContains 'generated image runs successfully' "$test" "ftw"
# docker rm luet-runtime-test || true
# docker rmi luet-runtime-test-image || true

# docker rmi luet:test || true
}

# Load shUnit2.
116
tests/integration/14_upgrade_revision.sh
Executable file
@@ -0,0 +1,116 @@
#!/bin/bash

export LUET_NOLOCK=true

oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
rm -rf "$tmpdir"
}

testBuild() {
mkdir $tmpdir/testbuild
luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_old_repo" --destination $tmpdir/testbuild --compression gzip --full --clean=true
buildst=$?
assertTrue 'create package B 1.0' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.gz' ]"
assertEquals 'builds successfully' "$buildst" "0"

mkdir $tmpdir/testbuild_revision
luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_old_repo_revision" --destination $tmpdir/testbuild_revision --compression gzip --full --clean=true
buildst=$?
assertTrue 'create package B 1.0' "[ -e '$tmpdir/testbuild_revision/b-test-1.0.package.tar.gz' ]"
assertEquals 'builds successfully' "$buildst" "0"
}

testRepo() {
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/upgrade_old_repo" \
--output $tmpdir/testbuild \
--packages $tmpdir/testbuild \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--type http

createst=$?
assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"

assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild_revision/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/upgrade_old_repo_revision" \
--output $tmpdir/testbuild_revision \
--packages $tmpdir/testbuild_revision \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--type http

createst=$?
assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild_revision/repository.yaml' ]"
}

testConfig() {
mkdir $tmpdir/testrootfs
cat <<EOF > $tmpdir/luet.yaml
general:
  debug: true
system:
  rootfs: $tmpdir/testrootfs
  database_path: "/"
  database_engine: "boltdb"
repositories:
  - name: "main"
    type: "disk"
    enable: true
    urls:
      - "$tmpdir/testbuild"
EOF
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"
}

testUpgrade() {
luet install --config $tmpdir/luet.yaml test/b-1.0
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/test5' ]"

cat <<EOF > $tmpdir/luet.yaml
general:
  debug: true
system:
  rootfs: $tmpdir/testrootfs
  database_path: "/"
  database_engine: "boltdb"
repositories:
  - name: "main"
    type: "disk"
    enable: true
    urls:
      - "$tmpdir/testbuild_revision"
EOF

luet cleanup --config $tmpdir/luet.yaml
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"

luet upgrade --sync --config $tmpdir/luet.yaml
installst=$?
assertEquals 'upgrade test successfully' "$installst" "0"
assertTrue 'package uninstalled B' "[ ! -e '$tmpdir/testrootfs/test5' ]"
assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/newc' ]"

content=$(luet upgrade --sync --config $tmpdir/luet.yaml)
installst=$?
assertNotContains 'didn not upgrade' "$content" "Uninstalling"
}


# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
71
tests/integration/15_symlinks.sh
Executable file
@@ -0,0 +1,71 @@
#!/bin/bash

export LUET_NOLOCK=true

oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
rm -rf "$tmpdir"
}

testBuild() {
mkdir $tmpdir/testbuild
luet build --tree "$ROOT_DIR/tests/fixtures/symlinks" --destination $tmpdir/testbuild --compression gzip --full --clean=true
buildst=$?
assertTrue 'create package pkgAsym 0.1' "[ -e '$tmpdir/testbuild/pkgAsym-test-0.1.package.tar.gz' ]"
assertEquals 'builds successfully' "$buildst" "0"
}

testRepo() {
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/symlinks" \
--output $tmpdir/testbuild \
--packages $tmpdir/testbuild \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--type http

createst=$?
assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}

testConfig() {
mkdir $tmpdir/testrootfs
cat <<EOF > $tmpdir/luet.yaml
general:
  debug: true
system:
  rootfs: $tmpdir/testrootfs
  database_path: "/"
  database_engine: "boltdb"
repositories:
  - name: "main"
    type: "disk"
    enable: true
    urls:
      - "$tmpdir/testbuild"
EOF
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"
}

testInstall() {
luet install --config $tmpdir/luet.yaml test/pkgAsym test/pkgBsym
installst=$?
assertEquals 'install test successfully' "$installst" "0"
ls -liah $tmpdir/testrootfs/
assertTrue 'package did not install file2' "[ ! -e '$tmpdir/testrootfs/file2' ]"
assertTrue 'package installed file3 as symlink' "[ -L '$tmpdir/testrootfs/file3' ]"
assertTrue 'package installed file1 as symlink' "[ -L '$tmpdir/testrootfs/file1' ]"

}


# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
72
tests/integration/16_caps.sh
Executable file
@@ -0,0 +1,72 @@
#!/bin/bash

export LUET_NOLOCK=true

oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
rm -rf "$tmpdir"
}

testBuild() {
mkdir $tmpdir/testbuild
luet build -d --tree "$ROOT_DIR/tests/fixtures/caps" --same-owner=true --destination $tmpdir/testbuild --compression gzip --full --clean=true
buildst=$?
assertTrue 'create package caps 0.1' "[ -e '$tmpdir/testbuild/caps-test-0.1.package.tar.gz' ]"
assertEquals 'builds successfully' "$buildst" "0"
}

testRepo() {
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/caps" \
--output $tmpdir/testbuild \
--packages $tmpdir/testbuild \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--type http

createst=$?
assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}

testConfig() {
mkdir $tmpdir/testrootfs
cat <<EOF > $tmpdir/luet.yaml
general:
  debug: true
system:
  rootfs: $tmpdir/testrootfs
  database_path: "/"
  database_engine: "boltdb"
repositories:
  - name: "main"
    type: "disk"
    enable: true
    urls:
      - "$tmpdir/testbuild"
EOF
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"
}

testInstall() {
$ROOT_DIR/tests/integration/bin/luet install --config $tmpdir/luet.yaml test/caps-0.1 test/caps2-0.1
installst=$?
assertEquals 'install test successfully' "$installst" "0"

assertTrue 'package installed file1' "[ -e '$tmpdir/testrootfs/file1' ]"
assertTrue 'package installed file2' "[ -e '$tmpdir/testrootfs/file2' ]"

assertContains 'caps' "$(getcap $tmpdir/testrootfs/file1)" "cap_net_raw+ep"
assertContains 'caps' "$(getcap $tmpdir/testrootfs/file2)" "cap_net_raw+ep"
}


# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2