mirror of
https://github.com/mudler/luet.git
synced 2025-09-02 15:54:39 +00:00
Compare commits
72 Commits
Author | SHA1 | Date | |
---|---|---|---|
|
228e4a17cd | ||
|
8cab02ad88 | ||
|
57181d7cbf | ||
|
556668fcc4 | ||
|
595c10f357 | ||
|
097f2ec827 | ||
|
adeaa3b867 | ||
|
2f9d12a248 | ||
|
1a584d0bfa | ||
|
58169770e1 | ||
|
ebf818ff08 | ||
|
55fa7265e5 | ||
|
ddfb4fb8e8 | ||
|
b33ddb7b99 | ||
|
e23e38b571 | ||
|
f4fff415f4 | ||
|
34996906e1 | ||
|
c7e1803540 | ||
|
848b270e81 | ||
|
30a7312911 | ||
|
ff3b322ea2 | ||
|
e31f8820e2 | ||
|
5d2d92e684 | ||
|
2c7391fcc9 | ||
|
d0c84d24a0 | ||
|
91b3daf180 | ||
|
4031a4ae81 | ||
|
fa46601638 | ||
|
f71cc5281e | ||
|
c9d93454b8 | ||
|
66cd6d1027 | ||
|
b5381e0248 | ||
|
da9e14fb45 | ||
|
6d25e5d881 | ||
|
393164da36 | ||
|
1cf5f2eb74 | ||
|
caca3fbf8c | ||
|
1e426b93a1 | ||
|
5915688830 | ||
|
a54040dcd3 | ||
|
9c5a349d5d | ||
|
d67742c28f | ||
|
a674c6515c | ||
|
08897b5105 | ||
|
62af9c81d5 | ||
|
7bbd6d5e68 | ||
|
820a157698 | ||
|
f3ba014593 | ||
|
c9c803a819 | ||
|
5c0971de2e | ||
|
b751b02830 | ||
|
914ac68eea | ||
|
a8b350fd8e | ||
|
0d7b2cf448 | ||
|
b1272392b5 | ||
|
a0d2f9cc12 | ||
|
3a7cfae9ae | ||
|
7d99d57ad5 | ||
|
d4bb0e346e | ||
|
3b9337a03b | ||
|
8f6393e157 | ||
|
e9c01b46a7 | ||
|
a879411c54 | ||
|
542d45a646 | ||
|
43ab851cb9 | ||
|
e664f4f2cf | ||
|
75c1b66ae3 | ||
|
dd0face86d | ||
|
a2f23d3bf5 | ||
|
df78308e98 | ||
|
d134690560 | ||
|
6cf6c2104d |
10
Dockerfile
Normal file
10
Dockerfile
Normal file
@@ -0,0 +1,10 @@
|
||||
FROM golang as builder
|
||||
ADD . /luet
|
||||
RUN cd /luet && make build
|
||||
|
||||
FROM scratch
|
||||
ENV LUET_NOLOCK=true
|
||||
COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
COPY --from=builder /luet/luet /usr/bin/luet
|
||||
|
||||
ENTRYPOINT ["/usr/bin/luet"]
|
10
Makefile
10
Makefile
@@ -49,16 +49,18 @@ deps:
|
||||
|
||||
.PHONY: build
|
||||
build:
|
||||
go build
|
||||
CGO_ENABLED=0 go build
|
||||
|
||||
.PHONY: image
|
||||
image:
|
||||
docker build --rm -t luet/base .
|
||||
|
||||
.PHONY: gox-build
|
||||
gox-build:
|
||||
# Building gitlab-ci-multi-runner for $(BUILD_PLATFORMS)
|
||||
gox $(BUILD_PLATFORMS) -output="release/$(NAME)-$(VERSION)-{{.OS}}-{{.Arch}}"
|
||||
CGO_ENABLED=0 gox $(BUILD_PLATFORMS) -output="release/$(NAME)-$(VERSION)-{{.OS}}-{{.Arch}}"
|
||||
|
||||
.PHONY: lint
|
||||
lint:
|
||||
# Checking project code style...
|
||||
golint ./... | grep -v "be unexported"
|
||||
|
||||
.PHONY: vendor
|
||||
|
18
README.md
18
README.md
@@ -1,9 +1,21 @@
|
||||
# luet - Package manager
|
||||
# luet - Container-based Package manager
|
||||
[](https://goreportcard.com/report/github.com/mudler/luet)
|
||||
[](https://travis-ci.org/mudler/luet)
|
||||
[](https://godoc.org/github.com/mudler/luet)
|
||||
[](https://codecov.io/gh/mudler/luet)
|
||||
|
||||
No dep hell for your own package manager !
|
||||
Luet is a Package Manager based off from containers - it uses Docker (and other tech) to sandbox your builds and generate packages from them. It has no dependencies and it is well suitable for "from scratch" environments.
|
||||
|
||||
Package manager that reuses Gentoo's Portage output binaries, but which doesn't depend on it.
|
||||
## In a glance
|
||||
|
||||
- Luet can reuse Gentoo's portage tree hierarchy, and it is heavily inspired from it.
|
||||
- It builds, installs, uninstalls and perform upgrades on machines
|
||||
- Installer doesn't depend on anything
|
||||
- Support for packages as "layers"
|
||||
- It uses SAT solving techniques to solve the deptree ( Inspired by [OPIUM](https://ranjitjhala.github.io/static/opium.pdf) )
|
||||
|
||||
## Status
|
||||
|
||||
Luet is not feature-complete yet, it can build, install/uninstall/upgrade packages - but it doesn't support yet all the features you would normally expect from a Package Manager nowadays.
|
||||
|
||||
Check out the [Wiki](https://github.com/mudler/luet/wiki) for more informations.
|
||||
|
41
cmd/build.go
41
cmd/build.go
@@ -34,10 +34,20 @@ var buildCmd = &cobra.Command{
|
||||
Use: "build <package name> <package name> <package name> ...",
|
||||
Short: "build a package or a tree",
|
||||
Long: `build packages or trees from luet tree definitions. Packages are in [category]/[name]-[version] form`,
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
viper.BindPFlag("tree", cmd.Flags().Lookup("tree"))
|
||||
viper.BindPFlag("destination", cmd.Flags().Lookup("destination"))
|
||||
viper.BindPFlag("backend", cmd.Flags().Lookup("backend"))
|
||||
viper.BindPFlag("concurrency", cmd.Flags().Lookup("concurrency"))
|
||||
viper.BindPFlag("privileged", cmd.Flags().Lookup("privileged"))
|
||||
viper.BindPFlag("database", cmd.Flags().Lookup("database"))
|
||||
viper.BindPFlag("revdeps", cmd.Flags().Lookup("revdeps"))
|
||||
viper.BindPFlag("all", cmd.Flags().Lookup("all"))
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
src := viper.GetString("tree")
|
||||
dst := viper.GetString("output")
|
||||
dst := viper.GetString("destination")
|
||||
concurrency := viper.GetInt("concurrency")
|
||||
backendType := viper.GetString("backend")
|
||||
privileged := viper.GetBool("privileged")
|
||||
@@ -58,28 +68,28 @@ var buildCmd = &cobra.Command{
|
||||
switch databaseType {
|
||||
case "memory":
|
||||
db = pkg.NewInMemoryDatabase(false)
|
||||
|
||||
case "boltdb":
|
||||
tmpdir, err := ioutil.TempDir("", "package")
|
||||
if err != nil {
|
||||
Fatal(err)
|
||||
}
|
||||
db = pkg.NewBoltDatabase(tmpdir)
|
||||
|
||||
}
|
||||
defer db.Clean()
|
||||
|
||||
generalRecipe := tree.NewCompilerRecipe(db)
|
||||
|
||||
Info("Loading", src)
|
||||
Info("Building in", dst)
|
||||
|
||||
err := generalRecipe.Load(src)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase())
|
||||
|
||||
err = luetCompiler.Prepare(concurrency)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
if !all {
|
||||
for _, a := range args {
|
||||
decodepackage, err := regexp.Compile(`^([<>]?\~?=?)((([^\/]+)\/)?(?U)(\S+))(-(\d+(\.\d+)*[a-z]?(_(alpha|beta|pre|rc|p)\d*)*(-r\d+)?))?$`)
|
||||
@@ -100,16 +110,15 @@ var buildCmd = &cobra.Command{
|
||||
compilerSpecs.Add(spec)
|
||||
}
|
||||
} else {
|
||||
w, e := generalRecipe.Tree().World()
|
||||
if e != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
w := generalRecipe.GetDatabase().World()
|
||||
|
||||
for _, p := range w {
|
||||
spec, err := luetCompiler.FromPackage(p)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
Info(":package: Selecting ", p.GetName(), p.GetVersion())
|
||||
spec.SetOutputPath(dst)
|
||||
compilerSpecs.Add(spec)
|
||||
}
|
||||
}
|
||||
@@ -141,21 +150,13 @@ func init() {
|
||||
Fatal(err)
|
||||
}
|
||||
buildCmd.Flags().String("tree", path, "Source luet tree")
|
||||
viper.BindPFlag("tree", buildCmd.Flags().Lookup("tree"))
|
||||
buildCmd.Flags().String("output", path, "Destination folder")
|
||||
viper.BindPFlag("output", buildCmd.Flags().Lookup("output"))
|
||||
buildCmd.Flags().String("backend", "docker", "backend used (docker,img)")
|
||||
viper.BindPFlag("backend", buildCmd.Flags().Lookup("backend"))
|
||||
buildCmd.Flags().Int("concurrency", runtime.NumCPU(), "Concurrency")
|
||||
viper.BindPFlag("concurrency", buildCmd.Flags().Lookup("concurrency"))
|
||||
buildCmd.Flags().Bool("privileged", false, "Privileged (Keep permissions)")
|
||||
viper.BindPFlag("privileged", buildCmd.Flags().Lookup("privileged"))
|
||||
buildCmd.Flags().String("database", "memory", "database used for solving (memory,boltdb)")
|
||||
viper.BindPFlag("database", buildCmd.Flags().Lookup("database"))
|
||||
buildCmd.Flags().Bool("revdeps", false, "Build with revdeps")
|
||||
viper.BindPFlag("revdeps", buildCmd.Flags().Lookup("revdeps"))
|
||||
|
||||
buildCmd.Flags().Bool("all", false, "Build all packages in the tree")
|
||||
viper.BindPFlag("all", buildCmd.Flags().Lookup("all"))
|
||||
buildCmd.Flags().String("destination", path, "Destination folder")
|
||||
|
||||
RootCmd.AddCommand(buildCmd)
|
||||
}
|
||||
|
@@ -31,6 +31,11 @@ var convertCmd = &cobra.Command{
|
||||
Use: "convert",
|
||||
Short: "convert other package manager tree into luet",
|
||||
Long: `Parses external PM and produces a luet parsable tree`,
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
viper.BindPFlag("type", cmd.Flags().Lookup("type"))
|
||||
viper.BindPFlag("concurrency", cmd.Flags().Lookup("concurrency"))
|
||||
viper.BindPFlag("database", cmd.Flags().Lookup("database"))
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
t := viper.GetString("type")
|
||||
@@ -71,13 +76,12 @@ var convertCmd = &cobra.Command{
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
defer packageTree.GetPackageSet().Clean()
|
||||
defer packageTree.Clean()
|
||||
Info("Tree generated")
|
||||
|
||||
generalRecipe := tree.NewGeneralRecipe(db)
|
||||
generalRecipe := tree.NewGeneralRecipe(packageTree)
|
||||
Info("Saving generated tree to " + output)
|
||||
|
||||
generalRecipe.WithTree(packageTree)
|
||||
err = generalRecipe.Save(output)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
@@ -87,11 +91,8 @@ var convertCmd = &cobra.Command{
|
||||
|
||||
func init() {
|
||||
convertCmd.Flags().String("type", "gentoo", "source type")
|
||||
viper.BindPFlag("type", convertCmd.Flags().Lookup("type"))
|
||||
convertCmd.Flags().Int("concurrency", runtime.NumCPU(), "Concurrency")
|
||||
viper.BindPFlag("concurrency", convertCmd.Flags().Lookup("concurrency"))
|
||||
convertCmd.Flags().String("database", "memory", "database used for solving (memory,boltdb)")
|
||||
viper.BindPFlag("database", convertCmd.Flags().Lookup("database"))
|
||||
|
||||
RootCmd.AddCommand(convertCmd)
|
||||
}
|
||||
|
74
cmd/create-repo.go
Normal file
74
cmd/create-repo.go
Normal file
@@ -0,0 +1,74 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
installer "github.com/mudler/luet/pkg/installer"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var createrepoCmd = &cobra.Command{
|
||||
Use: "create-repo",
|
||||
Short: "Create a luet repository from a build",
|
||||
Long: `Generate and renew repository metadata`,
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
viper.BindPFlag("packages", cmd.Flags().Lookup("packages"))
|
||||
viper.BindPFlag("tree", cmd.Flags().Lookup("tree"))
|
||||
viper.BindPFlag("output", cmd.Flags().Lookup("output"))
|
||||
viper.BindPFlag("name", cmd.Flags().Lookup("name"))
|
||||
viper.BindPFlag("uri", cmd.Flags().Lookup("uri"))
|
||||
viper.BindPFlag("type", cmd.Flags().Lookup("type"))
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
tree := viper.GetString("tree")
|
||||
dst := viper.GetString("output")
|
||||
packages := viper.GetString("packages")
|
||||
name := viper.GetString("name")
|
||||
uri := viper.GetString("uri")
|
||||
t := viper.GetString("type")
|
||||
|
||||
repo, err := installer.GenerateRepository(name, uri, t, 1, packages, tree, pkg.NewInMemoryDatabase(false))
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
err = repo.Write(dst)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
Fatal(err)
|
||||
}
|
||||
createrepoCmd.Flags().String("packages", path, "Packages folder (output from build)")
|
||||
createrepoCmd.Flags().String("tree", path, "Source luet tree")
|
||||
createrepoCmd.Flags().String("output", path, "Destination folder")
|
||||
createrepoCmd.Flags().String("name", "luet", "Repository name")
|
||||
createrepoCmd.Flags().String("uri", path, "Repository uri")
|
||||
createrepoCmd.Flags().String("type", "local", "Repository type (local)")
|
||||
|
||||
RootCmd.AddCommand(createrepoCmd)
|
||||
}
|
98
cmd/install.go
Normal file
98
cmd/install.go
Normal file
@@ -0,0 +1,98 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
|
||||
installer "github.com/mudler/luet/pkg/installer"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var installCmd = &cobra.Command{
|
||||
Use: "install <pkg1> <pkg2> ...",
|
||||
Short: "Install a package",
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
viper.BindPFlag("system-dbpath", cmd.Flags().Lookup("system-dbpath"))
|
||||
viper.BindPFlag("system-target", cmd.Flags().Lookup("system-target"))
|
||||
viper.BindPFlag("concurrency", cmd.Flags().Lookup("concurrency"))
|
||||
},
|
||||
Long: `Install packages in parallel`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
c := []*installer.LuetRepository{}
|
||||
err := viper.UnmarshalKey("system-repositories", &c)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
var toInstall []pkg.Package
|
||||
|
||||
for _, a := range args {
|
||||
decodepackage, err := regexp.Compile(`^([<>]?\~?=?)((([^\/]+)\/)?(?U)(\S+))(-(\d+(\.\d+)*[a-z]?(_(alpha|beta|pre|rc|p)\d*)*(-r\d+)?))?$`)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
packageInfo := decodepackage.FindAllStringSubmatch(a, -1)
|
||||
|
||||
category := packageInfo[0][4]
|
||||
name := packageInfo[0][5]
|
||||
version := packageInfo[0][7]
|
||||
toInstall = append(toInstall, &pkg.DefaultPackage{Name: name, Category: category, Version: version})
|
||||
|
||||
}
|
||||
|
||||
// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
|
||||
synced := installer.Repositories{}
|
||||
for _, toSync := range c {
|
||||
s, err := toSync.Sync()
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
synced = append(synced, s)
|
||||
}
|
||||
|
||||
inst := installer.NewLuetInstaller(viper.GetInt("concurrency"))
|
||||
|
||||
inst.Repositories(synced)
|
||||
|
||||
os.MkdirAll(viper.GetString("system-dbpath"), os.ModePerm)
|
||||
systemDB := pkg.NewBoltDatabase(filepath.Join(viper.GetString("system-dbpath"), "luet.db"))
|
||||
system := &installer.System{Database: systemDB, Target: viper.GetString("system-target")}
|
||||
err = inst.Install(toInstall, system)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
Fatal(err)
|
||||
}
|
||||
installCmd.Flags().String("system-dbpath", path, "System db path")
|
||||
installCmd.Flags().String("system-target", path, "System rootpath")
|
||||
installCmd.Flags().Int("concurrency", runtime.NumCPU(), "Concurrency")
|
||||
|
||||
RootCmd.AddCommand(installCmd)
|
||||
}
|
112
cmd/query.go
112
cmd/query.go
@@ -1,112 +0,0 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
tree "github.com/mudler/luet/pkg/tree"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var queryCmd = &cobra.Command{
|
||||
Use: "query install <pkg>",
|
||||
Short: "query other package manager tree into luet",
|
||||
Long: `Parses external PM and produces a luet parsable tree`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
|
||||
input := viper.GetString("input")
|
||||
|
||||
if len(args) != 4 {
|
||||
log.Fatalln("Incorrect number of arguments")
|
||||
}
|
||||
databaseType := viper.GetString("database")
|
||||
var db pkg.PackageDatabase
|
||||
|
||||
switch databaseType {
|
||||
case "memory":
|
||||
db = pkg.NewInMemoryDatabase(false)
|
||||
case "boltdb":
|
||||
tmpdir, err := ioutil.TempDir("", "package")
|
||||
if err != nil {
|
||||
Fatal(err)
|
||||
}
|
||||
db = pkg.NewBoltDatabase(tmpdir)
|
||||
}
|
||||
defer db.Clean()
|
||||
|
||||
generalRecipe := tree.NewGeneralRecipe(db)
|
||||
fmt.Println("Loading generated tree from " + input)
|
||||
|
||||
err := generalRecipe.Load(input)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
defer generalRecipe.Tree().GetPackageSet().Clean()
|
||||
|
||||
t := args[0]
|
||||
v := args[1]
|
||||
version := args[2]
|
||||
cat := args[3]
|
||||
switch t {
|
||||
case "install":
|
||||
// XXX: pack needs to be the same which is present in world.
|
||||
// Tree caches generated world when using FindPackage
|
||||
pack, err := generalRecipe.Tree().FindPackage(&pkg.DefaultPackage{Category: cat, Name: v, Version: version})
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
fmt.Println("Install query from " + input + " [" + v + "]")
|
||||
world, err := generalRecipe.Tree().World()
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
fmt.Println(">>> World")
|
||||
for _, packss := range world {
|
||||
packss.Explain()
|
||||
}
|
||||
s := solver.NewSolver([]pkg.Package{}, world, generalRecipe.Tree().GetPackageSet())
|
||||
solution, err := s.Install([]pkg.Package{pack})
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
fmt.Println(">>> Solution")
|
||||
|
||||
for _, assertion := range solution {
|
||||
assertion.Explain()
|
||||
}
|
||||
}
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
queryCmd.Flags().String("input", "", "source folder")
|
||||
viper.BindPFlag("input", queryCmd.Flags().Lookup("input"))
|
||||
queryCmd.Flags().String("database", "memory", "database used for solving (memory,boltdb)")
|
||||
viper.BindPFlag("database", queryCmd.Flags().Lookup("database"))
|
||||
|
||||
RootCmd.AddCommand(queryCmd)
|
||||
}
|
19
cmd/root.go
19
cmd/root.go
@@ -20,6 +20,7 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/marcsauter/single"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -29,7 +30,7 @@ import (
|
||||
var cfgFile string
|
||||
var Verbose bool
|
||||
|
||||
const LuetCLIVersion = "0.1"
|
||||
const LuetCLIVersion = "0.2"
|
||||
|
||||
// RootCmd represents the base command when called without any subcommands
|
||||
var RootCmd = &cobra.Command{
|
||||
@@ -42,6 +43,17 @@ var RootCmd = &cobra.Command{
|
||||
// Execute adds all child commands to the root command sets flags appropriately.
|
||||
// This is called by main.main(). It only needs to happen once to the rootCmd.
|
||||
func Execute() {
|
||||
// XXX: This is mostly from scratch images.
|
||||
if os.Getenv("LUET_NOLOCK") != "true" {
|
||||
s := single.New("luet")
|
||||
if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
|
||||
Fatal("another instance of the app is already running, exiting")
|
||||
} else if err != nil {
|
||||
// Another error occurred, might be worth handling it as well
|
||||
Fatal("failed to acquire exclusive app lock:", err.Error())
|
||||
}
|
||||
defer s.TryUnlock()
|
||||
}
|
||||
if err := RootCmd.Execute(); err != nil {
|
||||
Error(err)
|
||||
os.Exit(-1)
|
||||
@@ -56,12 +68,13 @@ func init() {
|
||||
|
||||
// initConfig reads in config file and ENV variables if set.
|
||||
func initConfig() {
|
||||
|
||||
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
|
||||
if err != nil {
|
||||
Error(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
viper.SetConfigType("yaml")
|
||||
viper.SetConfigName(".luet") // name of config file (without extension)
|
||||
if cfgFile != "" { // enable ability to specify config file via flag
|
||||
Info(">>> cfgFile: ", cfgFile)
|
||||
@@ -75,6 +88,8 @@ func initConfig() {
|
||||
viper.AddConfigPath(dir)
|
||||
viper.AddConfigPath(".")
|
||||
viper.AddConfigPath("$HOME")
|
||||
viper.AddConfigPath("/etc/luet")
|
||||
|
||||
viper.AutomaticEnv() // read in environment variables that match
|
||||
|
||||
// If a config file is found, read it in.
|
||||
|
97
cmd/search.go
Normal file
97
cmd/search.go
Normal file
@@ -0,0 +1,97 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
|
||||
installer "github.com/mudler/luet/pkg/installer"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var searchCmd = &cobra.Command{
|
||||
Use: "search <term>",
|
||||
Short: "Search packages",
|
||||
Long: `Search for installed and available packages`,
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
viper.BindPFlag("system-dbpath", cmd.Flags().Lookup("system-dbpath"))
|
||||
viper.BindPFlag("system-target", cmd.Flags().Lookup("system-target"))
|
||||
viper.BindPFlag("concurrency", cmd.Flags().Lookup("concurrency"))
|
||||
viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
c := []*installer.LuetRepository{}
|
||||
err := viper.UnmarshalKey("system-repositories", &c)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
if len(args) != 1 {
|
||||
Fatal("Wrong number of arguments (expected 1)")
|
||||
}
|
||||
installed := viper.GetBool("installed")
|
||||
|
||||
if !installed {
|
||||
synced := installer.Repositories{}
|
||||
|
||||
for _, toSync := range c {
|
||||
s, err := toSync.Sync()
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
synced = append(synced, s)
|
||||
}
|
||||
Info("--- Search results: ---")
|
||||
|
||||
matches := synced.Search(args[0])
|
||||
for _, m := range matches {
|
||||
Info(":package:", m.Package.GetCategory(), m.Package.GetName(), m.Package.GetVersion(), "repository:", m.Repo.GetName())
|
||||
}
|
||||
} else {
|
||||
os.MkdirAll(viper.GetString("system-dbpath"), os.ModePerm)
|
||||
systemDB := pkg.NewBoltDatabase(filepath.Join(viper.GetString("system-dbpath"), "luet.db"))
|
||||
system := &installer.System{Database: systemDB, Target: viper.GetString("system-target")}
|
||||
var term = regexp.MustCompile(args[0])
|
||||
|
||||
for _, k := range system.Database.GetPackages() {
|
||||
pack, err := system.Database.GetPackage(k)
|
||||
if err == nil && term.MatchString(pack.GetName()) {
|
||||
Info(":package:", pack.GetCategory(), pack.GetName(), pack.GetVersion())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
Fatal(err)
|
||||
}
|
||||
searchCmd.Flags().String("system-dbpath", path, "System db path")
|
||||
searchCmd.Flags().String("system-target", path, "System rootpath")
|
||||
searchCmd.Flags().Int("concurrency", runtime.NumCPU(), "Concurrency")
|
||||
searchCmd.Flags().Bool("installed", false, "Search between system packages")
|
||||
RootCmd.AddCommand(searchCmd)
|
||||
}
|
77
cmd/uninstall.go
Normal file
77
cmd/uninstall.go
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
|
||||
installer "github.com/mudler/luet/pkg/installer"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var uninstallCmd = &cobra.Command{
|
||||
Use: "uninstall <pkg>",
|
||||
Short: "Uninstall a package",
|
||||
Long: `Uninstall packages`,
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
viper.BindPFlag("system-dbpath", cmd.Flags().Lookup("system-dbpath"))
|
||||
viper.BindPFlag("system-target", cmd.Flags().Lookup("system-target"))
|
||||
viper.BindPFlag("concurrency", cmd.Flags().Lookup("concurrency"))
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if len(args) != 1 {
|
||||
Fatal("Wrong number of args")
|
||||
}
|
||||
|
||||
a := args[0]
|
||||
decodepackage, err := regexp.Compile(`^([<>]?\~?=?)((([^\/]+)\/)?(?U)(\S+))(-(\d+(\.\d+)*[a-z]?(_(alpha|beta|pre|rc|p)\d*)*(-r\d+)?))?$`)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
packageInfo := decodepackage.FindAllStringSubmatch(a, -1)
|
||||
|
||||
category := packageInfo[0][4]
|
||||
name := packageInfo[0][5]
|
||||
version := packageInfo[0][7]
|
||||
|
||||
inst := installer.NewLuetInstaller(viper.GetInt("concurrency"))
|
||||
os.MkdirAll(viper.GetString("system-dbpath"), os.ModePerm)
|
||||
systemDB := pkg.NewBoltDatabase(filepath.Join(viper.GetString("system-dbpath"), "luet.db"))
|
||||
system := &installer.System{Database: systemDB, Target: viper.GetString("system-target")}
|
||||
err = inst.Uninstall(&pkg.DefaultPackage{Name: name, Category: category, Version: version}, system)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
Fatal(err)
|
||||
}
|
||||
uninstallCmd.Flags().String("system-dbpath", path, "System db path")
|
||||
uninstallCmd.Flags().String("system-target", path, "System rootpath")
|
||||
uninstallCmd.Flags().Int("concurrency", runtime.NumCPU(), "Concurrency")
|
||||
RootCmd.AddCommand(uninstallCmd)
|
||||
}
|
81
cmd/upgrade.go
Normal file
81
cmd/upgrade.go
Normal file
@@ -0,0 +1,81 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
installer "github.com/mudler/luet/pkg/installer"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var upgradeCmd = &cobra.Command{
|
||||
Use: "upgrade",
|
||||
Short: "Upgrades the system",
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
viper.BindPFlag("system-dbpath", cmd.Flags().Lookup("system-dbpath"))
|
||||
viper.BindPFlag("system-target", cmd.Flags().Lookup("system-target"))
|
||||
viper.BindPFlag("concurrency", cmd.Flags().Lookup("concurrency"))
|
||||
},
|
||||
Long: `Upgrades packages in parallel`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
c := []*installer.LuetRepository{}
|
||||
err := viper.UnmarshalKey("system-repositories", &c)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
|
||||
// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
|
||||
synced := installer.Repositories{}
|
||||
for _, toSync := range c {
|
||||
s, err := toSync.Sync()
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
synced = append(synced, s)
|
||||
}
|
||||
|
||||
inst := installer.NewLuetInstaller(viper.GetInt("concurrency"))
|
||||
|
||||
inst.Repositories(synced)
|
||||
|
||||
os.MkdirAll(viper.GetString("system-dbpath"), os.ModePerm)
|
||||
systemDB := pkg.NewBoltDatabase(filepath.Join(viper.GetString("system-dbpath"), "luet.db"))
|
||||
system := &installer.System{Database: systemDB, Target: viper.GetString("system-target")}
|
||||
err = inst.Upgrade(system)
|
||||
if err != nil {
|
||||
Fatal("Error: " + err.Error())
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
path, err := os.Getwd()
|
||||
if err != nil {
|
||||
Fatal(err)
|
||||
}
|
||||
upgradeCmd.Flags().String("system-dbpath", path, "System db path")
|
||||
upgradeCmd.Flags().String("system-target", path, "System rootpath")
|
||||
upgradeCmd.Flags().Int("concurrency", runtime.NumCPU(), "Concurrency")
|
||||
|
||||
RootCmd.AddCommand(upgradeCmd)
|
||||
}
|
3
go.mod
3
go.mod
@@ -8,6 +8,7 @@ require (
|
||||
github.com/Sabayon/pkgs-checker v0.4.1
|
||||
github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154
|
||||
github.com/briandowns/spinner v1.7.0
|
||||
github.com/cavaliercoder/grab v2.0.0+incompatible
|
||||
github.com/crillab/gophersat v1.1.7
|
||||
github.com/docker/docker v0.7.3-0.20180827131323-0c5f8d2b9b23
|
||||
github.com/ghodss/yaml v1.0.0
|
||||
@@ -16,10 +17,12 @@ require (
|
||||
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
|
||||
github.com/kyokomi/emoji v2.1.0+incompatible
|
||||
github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e
|
||||
github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0
|
||||
github.com/mattn/go-isatty v0.0.10 // indirect
|
||||
github.com/mudler/docker-companion v0.4.6-0.20191110154655-b8b364100616
|
||||
github.com/onsi/ginkgo v1.10.1
|
||||
github.com/onsi/gomega v1.7.0
|
||||
github.com/otiai10/copy v1.0.2
|
||||
github.com/pelletier/go-toml v1.6.0 // indirect
|
||||
github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
|
||||
github.com/pkg/errors v0.8.1
|
||||
|
8
go.sum
8
go.sum
@@ -35,6 +35,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/briandowns/spinner v1.7.0 h1:aan1hBBOoscry2TXAkgtxkJiq7Se0+9pt+TUWaPrB4g=
|
||||
github.com/briandowns/spinner v1.7.0/go.mod h1://Zf9tMcxfRUA36V23M6YGEAv+kECGfvpnLTnb8n4XQ=
|
||||
github.com/cavaliercoder/grab v2.0.0+incompatible h1:wZHbBQx56+Yxjx2TCGDcenhh3cJn7cCLMfkEPmySTSE=
|
||||
github.com/cavaliercoder/grab v2.0.0+incompatible/go.mod h1:tTBkfNqSBfuMmMBFaO2phgyhdYhiZQ/+iXCZDzcDsMI=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/containerd/containerd v1.2.4 h1:qN8LCvw+KA5wVCOnHspD/n2K9cJ34+YOs05qBBWhHiw=
|
||||
@@ -155,6 +157,8 @@ github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e/go.mod h1:7rIyQ
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0 h1:c1oKPqtIulBHwu1rkz3dXsPt5hgDqJCPMN/RAdT8lvs=
|
||||
github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0/go.mod h1:uUA07IN7rYmbr5YlZM5nDVLyoxiqqpprFlXBrjqI24A=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
@@ -191,6 +195,10 @@ github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJ
|
||||
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
|
||||
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/otiai10/copy v1.0.2 h1:DDNipYy6RkIkjMwy+AWzgKiNTyj2RUI9yEMeETEpVyc=
|
||||
github.com/otiai10/copy v1.0.2/go.mod h1:c7RpqBkwMom4bYTSkLSym4VSJz/XtncWRAj/J4PEIMY=
|
||||
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
|
||||
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.6.0 h1:aetoXYr0Tv7xRU/V4B4IZJ2QcbtMUFoNb3ORp7TzIK4=
|
||||
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
|
||||
|
@@ -18,29 +18,94 @@ package compiler
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
|
||||
//"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type PackageArtifact struct {
|
||||
Path string
|
||||
Dependencies []Artifact
|
||||
CompileSpec CompilationSpec
|
||||
type ArtifactIndex []Artifact
|
||||
|
||||
SourceAssertion solver.PackagesAssertions
|
||||
func (i ArtifactIndex) CleanPath() ArtifactIndex {
|
||||
newIndex := ArtifactIndex{}
|
||||
for _, n := range i {
|
||||
art := n.(*PackageArtifact)
|
||||
newIndex = append(newIndex, &PackageArtifact{Path: path.Base(n.GetPath()), SourceAssertion: art.SourceAssertion, CompileSpec: art.CompileSpec, Dependencies: art.Dependencies})
|
||||
}
|
||||
return newIndex
|
||||
//Update if exists, otherwise just create
|
||||
}
|
||||
|
||||
// When compiling, we write also a fingerprint.metadata.yaml file with PackageArtifact. In this way we can have another command to create the repository
|
||||
// which will consist in just of an repository.yaml which is just the repository structure with the list of package artifact.
|
||||
// In this way a generic client can fetch the packages and, after unpacking the tree, performing queries to install packages.
|
||||
type PackageArtifact struct {
|
||||
Path string `json:"path"`
|
||||
Dependencies []*PackageArtifact `json:"dependencies"`
|
||||
CompileSpec *LuetCompilationSpec `json:"compilationspec"`
|
||||
|
||||
SourceAssertion solver.PackagesAssertions `json:"-"`
|
||||
}
|
||||
|
||||
func NewPackageArtifact(path string) Artifact {
|
||||
return &PackageArtifact{Path: path, Dependencies: []Artifact{}}
|
||||
return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}}
|
||||
}
|
||||
|
||||
func NewPackageArtifactFromYaml(data []byte) (Artifact, error) {
|
||||
p := &PackageArtifact{}
|
||||
err := yaml.Unmarshal(data, &p)
|
||||
if err != nil {
|
||||
return p, err
|
||||
}
|
||||
|
||||
return p, err
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) WriteYaml(dst string) error {
|
||||
//p := a.CompileSpec.GetPackage().GetPath()
|
||||
|
||||
//a.CompileSpec.GetPackage().SetPath("")
|
||||
// for _, ass := range a.CompileSpec.GetSourceAssertion() {
|
||||
// ass.Package.SetPath("")
|
||||
// }
|
||||
data, err := yaml.Marshal(a)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
|
||||
}
|
||||
|
||||
mangle, err := NewPackageArtifactFromYaml(data)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Generated invalid artifact")
|
||||
}
|
||||
//p := a.CompileSpec.GetPackage().GetPath()
|
||||
|
||||
mangle.GetCompileSpec().GetPackage().SetPath("")
|
||||
for _, ass := range mangle.GetCompileSpec().GetSourceAssertion() {
|
||||
ass.Package.SetPath("")
|
||||
}
|
||||
|
||||
data, err = yaml.Marshal(mangle)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "While marshalling for PackageArtifact YAML")
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(filepath.Join(dst, a.GetCompileSpec().GetPackage().GetFingerPrint()+".metadata.yaml"), data, os.ModePerm)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "While writing PackageArtifact YAML")
|
||||
}
|
||||
//a.CompileSpec.GetPackage().SetPath(p)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetSourceAssertion() solver.PackagesAssertions {
|
||||
@@ -48,7 +113,7 @@ func (a *PackageArtifact) GetSourceAssertion() solver.PackagesAssertions {
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetCompileSpec(as CompilationSpec) {
|
||||
a.CompileSpec = as
|
||||
a.CompileSpec = as.(*LuetCompilationSpec)
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetCompileSpec() CompilationSpec {
|
||||
@@ -60,11 +125,19 @@ func (a *PackageArtifact) SetSourceAssertion(as solver.PackagesAssertions) {
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetDependencies() []Artifact {
|
||||
return a.Dependencies
|
||||
ret := []Artifact{}
|
||||
for _, d := range a.Dependencies {
|
||||
ret = append(ret, d)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) SetDependencies(d []Artifact) {
|
||||
a.Dependencies = d
|
||||
ret := []*PackageArtifact{}
|
||||
for _, dd := range d {
|
||||
ret = append(ret, dd.(*PackageArtifact))
|
||||
}
|
||||
a.Dependencies = ret
|
||||
}
|
||||
|
||||
func (a *PackageArtifact) GetPath() string {
|
||||
@@ -84,18 +157,18 @@ func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
|
||||
defer wg.Done()
|
||||
|
||||
for job := range s {
|
||||
Info("#"+strconv.Itoa(i), "copying", job.Src, "to", job.Dst)
|
||||
if dir, err := helpers.IsDirectory(job.Src); err == nil && dir {
|
||||
err = helpers.CopyDir(job.Src, job.Dst)
|
||||
if err != nil {
|
||||
Fatal("Error copying dir", job, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
//Info("#"+strconv.Itoa(i), "copying", job.Src, "to", job.Dst)
|
||||
// if dir, err := helpers.IsDirectory(job.Src); err == nil && dir {
|
||||
// err = helpers.CopyDir(job.Src, job.Dst)
|
||||
// if err != nil {
|
||||
// Warning("Error copying dir", job, err)
|
||||
// }
|
||||
// continue
|
||||
// }
|
||||
|
||||
if !helpers.Exists(job.Dst) {
|
||||
if err := helpers.CopyFile(job.Src, job.Dst); err != nil {
|
||||
Fatal("Error copying", job, err)
|
||||
Warning("Error copying", job, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -38,11 +38,10 @@ var _ = Describe("Artifact", func() {
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/buildtree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(1))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -76,7 +75,9 @@ var _ = Describe("Artifact", func() {
|
||||
FROM alpine
|
||||
COPY . /luetbuild
|
||||
WORKDIR /luetbuild
|
||||
`))
|
||||
ENV PACKAGE_NAME=enman
|
||||
ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin`))
|
||||
b := NewSimpleDockerBackend()
|
||||
opts := CompilerBackendOptions{
|
||||
ImageName: "luet/base",
|
||||
@@ -94,6 +95,9 @@ WORKDIR /luetbuild
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM luet/base
|
||||
ENV PACKAGE_NAME=enman
|
||||
ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin
|
||||
RUN echo foo > /test
|
||||
RUN echo bar > /test2`))
|
||||
opts = CompilerBackendOptions{
|
||||
|
@@ -162,11 +162,13 @@ func (*SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPer
|
||||
|
||||
layers_sha := []string{}
|
||||
|
||||
if len(manifestData) != 1 {
|
||||
return errors.New("Manifest should have one entry")
|
||||
}
|
||||
for _, l := range manifestData[0].Layers {
|
||||
layers_sha = append(layers_sha, strings.Replace(l, "/layer.tar", "", -1))
|
||||
for _, data := range manifestData {
|
||||
|
||||
for _, l := range data.Layers {
|
||||
if strings.Contains(l, "layer.tar") {
|
||||
layers_sha = append(layers_sha, strings.Replace(l, "/layer.tar", "", -1))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export, err := capi.CreateExport(rootfs)
|
||||
@@ -221,8 +223,8 @@ func (*SimpleDocker) Changes(fromImage, toImage string) ([]compiler.ArtifactLaye
|
||||
}
|
||||
defer os.RemoveAll(tmpdiffs) // clean up
|
||||
|
||||
diffargs := []string{"diff", fromImage, toImage, "--type=file", "-j", "-n", "-c", tmpdiffs}
|
||||
out, err := exec.Command("container-diff", diffargs...).CombinedOutput()
|
||||
diffargs := []string{"diff", fromImage, toImage, "-v", "error", "-q", "--type=file", "-j", "-n", "-c", tmpdiffs}
|
||||
out, err := exec.Command("container-diff", diffargs...).Output()
|
||||
if err != nil {
|
||||
return []compiler.ArtifactLayer{}, errors.Wrap(err, "Failed Resolving layer diffs: "+string(out))
|
||||
}
|
||||
|
@@ -37,11 +37,10 @@ var _ = Describe("Docker backend", func() {
|
||||
|
||||
err := generalRecipe.Load("../../../tests/fixtures/buildtree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(1))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -67,7 +66,9 @@ var _ = Describe("Docker backend", func() {
|
||||
FROM alpine
|
||||
COPY . /luetbuild
|
||||
WORKDIR /luetbuild
|
||||
`))
|
||||
ENV PACKAGE_NAME=enman
|
||||
ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin`))
|
||||
b := NewSimpleDockerBackend()
|
||||
opts := CompilerBackendOptions{
|
||||
ImageName: "luet/base",
|
||||
@@ -85,6 +86,9 @@ WORKDIR /luetbuild
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM luet/base
|
||||
ENV PACKAGE_NAME=enman
|
||||
ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin
|
||||
RUN echo foo > /test
|
||||
RUN echo bar > /test2`))
|
||||
opts = CompilerBackendOptions{
|
||||
|
@@ -16,6 +16,7 @@
|
||||
package backend
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
@@ -36,15 +37,18 @@ func (*SimpleImg) BuildImage(opts compiler.CompilerBackendOptions) error {
|
||||
path := opts.SourcePath
|
||||
dockerfileName := opts.DockerFileName
|
||||
|
||||
buildarg := []string{"build", "-t", name, path, "-f ", dockerfileName}
|
||||
buildarg := []string{"build", "-f", dockerfileName, "-t", name, "."}
|
||||
Spinner(22)
|
||||
Debug("Building image "+name+" - running img with: ", buildarg)
|
||||
out, err := exec.Command("img", buildarg...).CombinedOutput()
|
||||
defer SpinnerStop()
|
||||
Debug(":tea: Building image " + name)
|
||||
cmd := exec.Command("img", buildarg...)
|
||||
cmd.Dir = path
|
||||
out, err := cmd.CombinedOutput()
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed building image: "+string(out))
|
||||
}
|
||||
SpinnerStop()
|
||||
Info(out)
|
||||
Info(":tea: Building image " + name + " done")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -52,12 +56,13 @@ func (*SimpleImg) RemoveImage(opts compiler.CompilerBackendOptions) error {
|
||||
name := opts.ImageName
|
||||
buildarg := []string{"rm", name}
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
out, err := exec.Command("img", buildarg...).CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed building image: "+string(out))
|
||||
}
|
||||
SpinnerStop()
|
||||
Info(out)
|
||||
|
||||
Info(":tea: Image " + name + " removed")
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -66,24 +71,29 @@ func (*SimpleImg) DownloadImage(opts compiler.CompilerBackendOptions) error {
|
||||
name := opts.ImageName
|
||||
buildarg := []string{"pull", name}
|
||||
|
||||
Debug("Downloading image "+name+" - running img with: ", buildarg)
|
||||
Debug(":tea: Downloading image " + name)
|
||||
cmd := exec.Command("img", buildarg...)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed building image: "+string(out))
|
||||
}
|
||||
|
||||
Info(":tea: Image " + name + " downloaded")
|
||||
|
||||
return nil
|
||||
}
|
||||
func (*SimpleImg) CopyImage(src, dst string) error {
|
||||
Spinner(22)
|
||||
defer SpinnerStop()
|
||||
|
||||
Debug("Tagging image - running img with: ", src, dst)
|
||||
Debug(":tea: Tagging image", src, dst)
|
||||
cmd := exec.Command("img", "tag", src, dst)
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed tagging image: "+string(out))
|
||||
}
|
||||
Info(":tea: Image " + dst + " tagged")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -103,18 +113,31 @@ func (s *SimpleImg) ImageDefinitionToTar(opts compiler.CompilerBackendOptions) e
|
||||
func (*SimpleImg) ExportImage(opts compiler.CompilerBackendOptions) error {
|
||||
name := opts.ImageName
|
||||
path := opts.Destination
|
||||
buildarg := []string{"save", name, "-o", path}
|
||||
Debug("Saving image "+name+" - running img with: ", buildarg)
|
||||
buildarg := []string{"save", "-o", path, name}
|
||||
Debug(":tea: Saving image " + name)
|
||||
out, err := exec.Command("img", buildarg...).CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed building image: "+string(out))
|
||||
}
|
||||
Info(":tea: Image " + name + " saved")
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: Dup in docker, refactor common code in helpers for shared parts
|
||||
func (*SimpleImg) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms bool) error {
|
||||
return NewSimpleDockerBackend().ExtractRootfs(opts, keepPerms)
|
||||
name := opts.ImageName
|
||||
path := opts.Destination
|
||||
|
||||
os.RemoveAll(path)
|
||||
buildarg := []string{"unpack", "-o", path, name}
|
||||
Debug(":tea: Extracting image " + name)
|
||||
out, err := exec.Command("img", buildarg...).CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed extracting image: "+string(out))
|
||||
}
|
||||
Info(":tea: Image " + name + " extracted")
|
||||
return nil
|
||||
//return NewSimpleDockerBackend().ExtractRootfs(opts, keepPerms)
|
||||
}
|
||||
|
||||
// TODO: Use container-diff (https://github.com/GoogleContainerTools/container-diff) for checking out layer diffs
|
||||
|
@@ -20,11 +20,12 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
@@ -35,18 +36,23 @@ const BuildFile = "build.yaml"
|
||||
|
||||
type LuetCompiler struct {
|
||||
*tree.CompilerRecipe
|
||||
Backend CompilerBackend
|
||||
Database pkg.PackageDatabase
|
||||
Backend CompilerBackend
|
||||
Database pkg.PackageDatabase
|
||||
ImageRepository string
|
||||
PullFirst, KeepImg bool
|
||||
}
|
||||
|
||||
func NewLuetCompiler(backend CompilerBackend, t pkg.Tree, db pkg.PackageDatabase) Compiler {
|
||||
func NewLuetCompiler(backend CompilerBackend, db pkg.PackageDatabase) Compiler {
|
||||
// The CompilerRecipe will gives us a tree with only build deps listed.
|
||||
return &LuetCompiler{
|
||||
Backend: backend,
|
||||
CompilerRecipe: &tree.CompilerRecipe{
|
||||
tree.Recipe{PackageTree: t},
|
||||
tree.Recipe{Database: db},
|
||||
},
|
||||
Database: db,
|
||||
Database: db,
|
||||
ImageRepository: "luet/cache",
|
||||
PullFirst: true,
|
||||
KeepImg: true,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -73,11 +79,8 @@ func (cs *LuetCompiler) CompileWithReverseDeps(concurrency int, keepPermissions
|
||||
Info(":ant: Resolving reverse dependencies")
|
||||
toCompile := NewLuetCompilationspecs()
|
||||
for _, a := range artifacts {
|
||||
w, asserterr := cs.Tree().World()
|
||||
if asserterr != nil {
|
||||
return nil, append(err, asserterr)
|
||||
}
|
||||
revdeps := a.GetCompileSpec().GetPackage().Revdeps(&w)
|
||||
|
||||
revdeps := a.GetCompileSpec().GetPackage().Revdeps(cs.Database)
|
||||
for _, r := range revdeps {
|
||||
spec, asserterr := cs.FromPackage(r)
|
||||
if err != nil {
|
||||
@@ -155,11 +158,66 @@ func (cs *LuetCompiler) CompileParallel(concurrency int, keepPermissions bool, p
|
||||
return artifacts, allErrors
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage string, concurrency int, keepPermissions bool, p CompilationSpec) (Artifact, error) {
|
||||
func (cs *LuetCompiler) stripIncludesFromRootfs(includes []string, rootfs string) error {
|
||||
var includeRegexp []*regexp.Regexp
|
||||
for _, i := range includes {
|
||||
r, e := regexp.Compile(i)
|
||||
if e != nil {
|
||||
return errors.Wrap(e, "Could not compile regex in the include of the package")
|
||||
}
|
||||
includeRegexp = append(includeRegexp, r)
|
||||
}
|
||||
|
||||
toRemove := []string{}
|
||||
|
||||
// the function that handles each file or dir
|
||||
var ff = func(currentpath string, info os.FileInfo, err error) error {
|
||||
|
||||
// if info.Name() != DefinitionFile {
|
||||
// return nil // Skip with no errors
|
||||
// }
|
||||
if currentpath == rootfs {
|
||||
return nil
|
||||
}
|
||||
|
||||
abspath := strings.ReplaceAll(currentpath, rootfs, "")
|
||||
|
||||
match := false
|
||||
|
||||
for _, i := range includeRegexp {
|
||||
if i.MatchString(abspath) {
|
||||
match = true
|
||||
}
|
||||
}
|
||||
|
||||
if !match {
|
||||
toRemove = append(toRemove, currentpath)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := filepath.Walk(rootfs, ff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, s := range toRemove {
|
||||
e := os.RemoveAll(s)
|
||||
if e != nil {
|
||||
Warning("Failed removing", s, e.Error())
|
||||
return e
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage string, concurrency int, keepPermissions, keepImg bool, p CompilationSpec) (Artifact, error) {
|
||||
pkgTag := ":package: " + p.GetPackage().GetName()
|
||||
|
||||
p.SetSeedImage(image) // In this case, we ignore the build deps as we suppose that the image has them - otherwise we recompose the tree with a solver,
|
||||
// and we build all the images first.
|
||||
keepImg := true
|
||||
keepPackageImg := true
|
||||
|
||||
err := os.MkdirAll(p.Rel("build"), os.ModePerm)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error met while creating tempdir for building")
|
||||
@@ -177,14 +235,20 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
|
||||
|
||||
}
|
||||
if buildertaggedImage == "" {
|
||||
keepImg = false
|
||||
buildertaggedImage = "luet/" + p.GetPackage().GetFingerPrint() + "-builder"
|
||||
buildertaggedImage = cs.ImageRepository + "-" + p.GetPackage().GetFingerPrint() + "-builder"
|
||||
}
|
||||
if packageImage == "" {
|
||||
keepPackageImg = false
|
||||
packageImage = "luet/" + p.GetPackage().GetFingerPrint()
|
||||
packageImage = cs.ImageRepository + "-" + p.GetPackage().GetFingerPrint()
|
||||
}
|
||||
|
||||
if cs.PullFirst {
|
||||
//Best effort pull
|
||||
cs.Backend.DownloadImage(CompilerBackendOptions{ImageName: buildertaggedImage})
|
||||
cs.Backend.DownloadImage(CompilerBackendOptions{ImageName: packageImage})
|
||||
}
|
||||
|
||||
Info(pkgTag, "Generating :whale: definition for builder image from", image)
|
||||
|
||||
// First we create the builder image
|
||||
p.WriteBuildImageDefinition(filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+"-builder.dockerfile"))
|
||||
builderOpts := CompilerBackendOptions{
|
||||
@@ -213,19 +277,19 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
|
||||
Destination: p.Rel(p.GetPackage().GetFingerPrint() + ".image.tar"),
|
||||
}
|
||||
|
||||
if !keepPackageImg {
|
||||
err = cs.Backend.ImageDefinitionToTar(runnerOpts)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not export image to tar")
|
||||
}
|
||||
} else {
|
||||
if err := cs.Backend.BuildImage(runnerOpts); err != nil {
|
||||
return nil, errors.Wrap(err, "Failed building image for "+runnerOpts.ImageName+" "+runnerOpts.DockerFileName)
|
||||
}
|
||||
if err := cs.Backend.ExportImage(runnerOpts); err != nil {
|
||||
return nil, errors.Wrap(err, "Failed exporting image")
|
||||
}
|
||||
// if !keepPackageImg {
|
||||
// err = cs.Backend.ImageDefinitionToTar(runnerOpts)
|
||||
// if err != nil {
|
||||
// return nil, errors.Wrap(err, "Could not export image to tar")
|
||||
// }
|
||||
// } else {
|
||||
if err := cs.Backend.BuildImage(runnerOpts); err != nil {
|
||||
return nil, errors.Wrap(err, "Failed building image for "+runnerOpts.ImageName+" "+runnerOpts.DockerFileName)
|
||||
}
|
||||
if err := cs.Backend.ExportImage(runnerOpts); err != nil {
|
||||
return nil, errors.Wrap(err, "Failed exporting image")
|
||||
}
|
||||
// }
|
||||
|
||||
var diffs []ArtifactLayer
|
||||
var artifact Artifact
|
||||
@@ -238,6 +302,20 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
|
||||
}
|
||||
}
|
||||
|
||||
rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not create tempdir")
|
||||
}
|
||||
defer os.RemoveAll(rootfs) // clean up
|
||||
|
||||
// TODO: Compression and such
|
||||
err = cs.Backend.ExtractRootfs(CompilerBackendOptions{
|
||||
ImageName: packageImage,
|
||||
SourcePath: runnerOpts.Destination, Destination: rootfs}, keepPermissions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not extract rootfs")
|
||||
}
|
||||
|
||||
if !keepImg {
|
||||
// We keep them around, so to not reload them from the tar (which should be the "correct way") and we automatically share the same layers
|
||||
// TODO: Handle caching and optionally do not remove things
|
||||
@@ -247,20 +325,21 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
|
||||
Warning("Could not remove image ", builderOpts.ImageName)
|
||||
// return nil, errors.Wrap(err, "Could not remove image")
|
||||
}
|
||||
}
|
||||
rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not create tempdir")
|
||||
}
|
||||
defer os.RemoveAll(rootfs) // clean up
|
||||
|
||||
// TODO: Compression and such
|
||||
err = cs.Backend.ExtractRootfs(CompilerBackendOptions{SourcePath: runnerOpts.Destination, Destination: rootfs}, keepPermissions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not extract rootfs")
|
||||
err = cs.Backend.RemoveImage(runnerOpts)
|
||||
if err != nil {
|
||||
// TODO: Have a --fatal flag which enables Warnings to exit.
|
||||
Warning("Could not remove image ", builderOpts.ImageName)
|
||||
// return nil, errors.Wrap(err, "Could not remove image")
|
||||
}
|
||||
}
|
||||
|
||||
if p.ImageUnpack() {
|
||||
|
||||
if len(p.GetIncludes()) > 0 {
|
||||
// strip from includes
|
||||
cs.stripIncludesFromRootfs(p.GetIncludes(), rootfs)
|
||||
}
|
||||
|
||||
err = helpers.Tar(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error met while creating package archive")
|
||||
@@ -269,6 +348,8 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
|
||||
artifact = NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
|
||||
artifact.SetCompileSpec(p)
|
||||
} else {
|
||||
Info(pkgTag, "Generating delta")
|
||||
|
||||
artifact, err = ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not generate deltas")
|
||||
@@ -276,18 +357,16 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
|
||||
artifact.SetCompileSpec(p)
|
||||
}
|
||||
|
||||
err = artifact.WriteYaml(p.GetOutputPath())
|
||||
if err != nil {
|
||||
return artifact, err
|
||||
}
|
||||
Info(pkgTag, " :white_check_mark: Done")
|
||||
|
||||
return artifact, nil
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) Prepare(concurrency int) error {
|
||||
|
||||
err := cs.Tree().ResolveDeps(concurrency) // FIXME: When done in parallel, this could be done on top before starting
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "While resoolving tree world deps")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (cs *LuetCompiler) packageFromImage(p CompilationSpec, tag string, keepPermissions bool) (Artifact, error) {
|
||||
func (cs *LuetCompiler) packageFromImage(p CompilationSpec, tag string, keepPermissions, keepImg bool) (Artifact, error) {
|
||||
pkgTag := ":package: " + p.GetPackage().GetName()
|
||||
|
||||
Info(pkgTag, " 🍩 Build starts 🔨 🔨 🔨 ")
|
||||
@@ -319,7 +398,9 @@ func (cs *LuetCompiler) packageFromImage(p CompilationSpec, tag string, keepPerm
|
||||
defer os.RemoveAll(rootfs) // clean up
|
||||
|
||||
// TODO: Compression and such
|
||||
err = cs.Backend.ExtractRootfs(CompilerBackendOptions{SourcePath: builderOpts.Destination, Destination: rootfs}, keepPermissions)
|
||||
err = cs.Backend.ExtractRootfs(CompilerBackendOptions{
|
||||
ImageName: p.GetImage(),
|
||||
SourcePath: builderOpts.Destination, Destination: rootfs}, keepPermissions)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not extract rootfs")
|
||||
}
|
||||
@@ -329,45 +410,49 @@ func (cs *LuetCompiler) packageFromImage(p CompilationSpec, tag string, keepPerm
|
||||
return nil, errors.Wrap(err, "Error met while creating package archive")
|
||||
}
|
||||
|
||||
if !keepImg {
|
||||
// We keep them around, so to not reload them from the tar (which should be the "correct way") and we automatically share the same layers
|
||||
// TODO: Handle caching and optionally do not remove things
|
||||
err = cs.Backend.RemoveImage(builderOpts)
|
||||
if err != nil {
|
||||
// TODO: Have a --fatal flag which enables Warnings to exit.
|
||||
Warning("Could not remove image ", builderOpts.ImageName)
|
||||
// return nil, errors.Wrap(err, "Could not remove image")
|
||||
}
|
||||
}
|
||||
|
||||
Info(pkgTag, " :white_check_mark: Done")
|
||||
artifact := NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
|
||||
artifact.SetCompileSpec(p)
|
||||
err = artifact.WriteYaml(p.GetOutputPath())
|
||||
if err != nil {
|
||||
return artifact, err
|
||||
}
|
||||
return artifact, nil
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) ComputeDepTree(p CompilationSpec) (solver.PackagesAssertions, error) {
|
||||
|
||||
// Get build deps tree (ordered)
|
||||
world, err := cs.Tree().World()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "While computing tree world")
|
||||
}
|
||||
s := solver.NewSolver([]pkg.Package{}, world, cs.Database)
|
||||
pack, err := cs.Tree().FindPackage(p.GetPackage())
|
||||
s := solver.NewSolver(pkg.NewInMemoryDatabase(false), cs.Database, pkg.NewInMemoryDatabase(false))
|
||||
|
||||
solution, err := s.Install([]pkg.Package{p.GetPackage()})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().GetName())
|
||||
}
|
||||
solution, err := s.Install([]pkg.Package{pack})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().GetName())
|
||||
}
|
||||
dependencies := solution.Order(p.GetPackage().GetFingerPrint())
|
||||
|
||||
dependencies := solution.Order(cs.Database, p.GetPackage().GetFingerPrint())
|
||||
assertions := solver.PackagesAssertions{}
|
||||
|
||||
for _, assertion := range dependencies { //highly dependent on the order
|
||||
if assertion.Value && assertion.Package.Flagged() {
|
||||
depPack, err := cs.Tree().FindPackage(assertion.Package)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().GetName())
|
||||
}
|
||||
nthsolution, err := s.Install([]pkg.Package{depPack})
|
||||
if assertion.Value {
|
||||
nthsolution, err := s.Install([]pkg.Package{assertion.Package})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().GetName())
|
||||
}
|
||||
|
||||
assertion.Hash = solver.PackageHash{
|
||||
BuildHash: nthsolution.Order(depPack.GetFingerPrint()).Drop(depPack).AssertionHash(),
|
||||
PackageHash: nthsolution.Order(depPack.GetFingerPrint()).AssertionHash(),
|
||||
BuildHash: nthsolution.Order(cs.Database, assertion.Package.GetFingerPrint()).Drop(assertion.Package).AssertionHash(),
|
||||
PackageHash: nthsolution.Order(cs.Database, assertion.Package.GetFingerPrint()).AssertionHash(),
|
||||
}
|
||||
assertions = append(assertions, assertion)
|
||||
}
|
||||
@@ -398,10 +483,10 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
|
||||
// Treat last case (easier) first. The image is provided and we just compute a plain dockerfile with the images listed as above
|
||||
if p.GetImage() != "" {
|
||||
if p.ImageUnpack() { // If it is just an entire image, create a package from it
|
||||
return cs.packageFromImage(p, "", keepPermissions)
|
||||
return cs.packageFromImage(p, "", keepPermissions, cs.KeepImg)
|
||||
}
|
||||
|
||||
return cs.compileWithImage(p.GetImage(), "", "", concurrency, keepPermissions, p)
|
||||
return cs.compileWithImage(p.GetImage(), "", "", concurrency, keepPermissions, cs.KeepImg, p)
|
||||
}
|
||||
|
||||
// - If image is not set, we read a base_image. Then we will build one image from it to kick-off our build based
|
||||
@@ -411,7 +496,6 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
|
||||
|
||||
dependencies := p.GetSourceAssertion().Drop(p.GetPackage()) // at this point we should have a flattened list of deps to build, including all of them (with all constraints propagated already)
|
||||
departifacts := []Artifact{} // TODO: Return this somehow
|
||||
deperrs := []error{}
|
||||
var lastHash string
|
||||
depsN := 0
|
||||
currentN := 0
|
||||
@@ -429,13 +513,13 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
|
||||
Info(pkgTag, " :zap: Building dependency")
|
||||
compileSpec, err := cs.FromPackage(assertion.Package)
|
||||
if err != nil {
|
||||
return nil, errors.New("Error while generating compilespec for " + assertion.Package.GetName())
|
||||
return nil, errors.Wrap(err, "Error while generating compilespec for "+assertion.Package.GetName())
|
||||
}
|
||||
compileSpec.SetOutputPath(p.GetOutputPath())
|
||||
|
||||
buildImageHash := "luet/cache:" + assertion.Hash.BuildHash
|
||||
currentPackageImageHash := "luet/cache:" + assertion.Hash.PackageHash
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Builder image name", buildImageHash)
|
||||
buildImageHash := cs.ImageRepository + ":" + assertion.Hash.BuildHash
|
||||
currentPackageImageHash := cs.ImageRepository + ":" + assertion.Hash.PackageHash
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Builder image from", buildImageHash)
|
||||
Debug(pkgTag, " :arrow_right_hook: :whale: Package image name", currentPackageImageHash)
|
||||
|
||||
lastHash = currentPackageImageHash
|
||||
@@ -445,28 +529,27 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
|
||||
if compileSpec.GetImage() == "" {
|
||||
return nil, errors.New("No image defined for package: " + assertion.Package.GetName())
|
||||
}
|
||||
Info(":whale: Sourcing package from image", compileSpec.GetImage())
|
||||
artifact, err := cs.packageFromImage(compileSpec, currentPackageImageHash, keepPermissions)
|
||||
Info(pkgTag, ":whale: Sourcing package from image", compileSpec.GetImage())
|
||||
artifact, err := cs.packageFromImage(compileSpec, currentPackageImageHash, keepPermissions, cs.KeepImg)
|
||||
if err != nil {
|
||||
deperrs = append(deperrs, err)
|
||||
break // stop at first error
|
||||
return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().GetName())
|
||||
}
|
||||
departifacts = append(departifacts, artifact)
|
||||
continue
|
||||
}
|
||||
|
||||
Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().GetFingerPrint()+" from image")
|
||||
artifact, err := cs.compileWithImage(compileSpec.GetImage(), buildImageHash, currentPackageImageHash, concurrency, keepPermissions, compileSpec)
|
||||
artifact, err := cs.compileWithImage(compileSpec.GetImage(), buildImageHash, currentPackageImageHash, concurrency, keepPermissions, cs.KeepImg, compileSpec)
|
||||
if err != nil {
|
||||
deperrs = append(deperrs, err)
|
||||
break // stop at first error
|
||||
return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().GetName())
|
||||
}
|
||||
departifacts = append(departifacts, artifact)
|
||||
Info(pkgTag, ":white_check_mark: Done")
|
||||
continue
|
||||
}
|
||||
|
||||
artifact, err := cs.compileWithImage(buildImageHash, "", currentPackageImageHash, concurrency, keepPermissions, compileSpec)
|
||||
Debug(pkgTag, " :wrench: Compiling "+compileSpec.GetPackage().GetFingerPrint()+" from tree")
|
||||
artifact, err := cs.compileWithImage(buildImageHash, "", currentPackageImageHash, concurrency, keepPermissions, cs.KeepImg, compileSpec)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed compiling "+compileSpec.GetPackage().GetName())
|
||||
// deperrs = append(deperrs, err)
|
||||
@@ -475,8 +558,9 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
|
||||
departifacts = append(departifacts, artifact)
|
||||
Info(pkgTag, ":collision: Done")
|
||||
}
|
||||
|
||||
Info(":package:", p.GetPackage().GetName(), ":cyclone: Building package target from:", lastHash)
|
||||
artifact, err := cs.compileWithImage(lastHash, "", "", concurrency, keepPermissions, p)
|
||||
artifact, err := cs.compileWithImage(lastHash, "", "", concurrency, keepPermissions, cs.KeepImg, p)
|
||||
if err != nil {
|
||||
return artifact, err
|
||||
}
|
||||
@@ -488,7 +572,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, p Compila
|
||||
|
||||
func (cs *LuetCompiler) FromPackage(p pkg.Package) (CompilationSpec, error) {
|
||||
|
||||
pack, err := cs.Tree().FindPackage(p)
|
||||
pack, err := cs.Database.FindPackageCandidate(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -35,13 +35,10 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(3))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
err = compiler.Prepare(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -80,13 +77,10 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(3))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
err = compiler.Prepare(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -120,14 +114,10 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/buildableseed")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(4))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
|
||||
err = compiler.Prepare(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -180,13 +170,10 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/layers")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(2))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
err = compiler.Prepare(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "extra", Category: "layer", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -225,13 +212,10 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/include")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(1))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
err = compiler.Prepare(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -254,6 +238,44 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and includes ony wanted files also from unpacked packages", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
tmpdir, err := ioutil.TempDir("", "package")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/includeimage")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// err = generalRecipe.Tree().ResolveDeps(3)
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(1, false, NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("var/lib/udhcpd"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test2"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("lib/firmware"))).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles a more complex tree", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
tmpdir, err := ioutil.TempDir("", "package")
|
||||
@@ -262,13 +284,10 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/layered")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(3))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
err = compiler.Prepare(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "pkgs-checker", Category: "package", Version: "9999"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -292,6 +311,8 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("usr/bin/pkgs-checker"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("base-layer-0.1.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("base-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.metadata.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("extra-layer-0.1.package.tar"))).To(BeTrue())
|
||||
})
|
||||
|
||||
@@ -303,13 +324,10 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/layered")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(3))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
err = compiler.Prepare(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "extra", Category: "layer", Version: "0.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -344,13 +362,10 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/buildableseed")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(4))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
err = compiler.Prepare(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
|
||||
@@ -399,13 +414,10 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/expansion")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(3))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
err = compiler.Prepare(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -443,13 +455,10 @@ var _ = Describe("Compiler", func() {
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/packagelayers")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(2))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
err = compiler.Prepare(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "runtime", Category: "layer", Version: "0.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -466,7 +475,7 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
Expect(len(artifacts[0].GetDependencies())).To(Equal(1))
|
||||
Expect(helpers.Untar(spec.Rel("runtime-layer-0.1.package.tar"), tmpdir, false)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(spec.Rel("bin/sh"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("bin/busybox"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("var"))).ToNot(BeTrue())
|
||||
})
|
||||
})
|
||||
|
@@ -25,7 +25,6 @@ type Compiler interface {
|
||||
CompileParallel(concurrency int, keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error)
|
||||
CompileWithReverseDeps(concurrency int, keepPermissions bool, ps CompilationSpecs) ([]Artifact, []error)
|
||||
ComputeDepTree(p CompilationSpec) (solver.PackagesAssertions, error)
|
||||
Prepare(concurrency int) error
|
||||
|
||||
FromPackage(pkg.Package) (CompilationSpec, error)
|
||||
|
||||
@@ -62,6 +61,7 @@ type Artifact interface {
|
||||
|
||||
SetCompileSpec(as CompilationSpec)
|
||||
GetCompileSpec() CompilationSpec
|
||||
WriteYaml(dst string) error
|
||||
}
|
||||
|
||||
type ArtifactNode struct {
|
||||
|
@@ -87,11 +87,12 @@ func (specs *LuetCompilationspecs) Unique() CompilationSpecs {
|
||||
}
|
||||
|
||||
type LuetCompilationSpec struct {
|
||||
Steps []string `json:"steps"` // Are run inside a container and the result layer diff is saved
|
||||
Steps []string `json:"steps"` // Are run inside a container and the result layer diff is saved
|
||||
Env []string `json:"env"`
|
||||
Prelude []string `json:"prelude"` // Are run inside the image which will be our builder
|
||||
Image string `json:"image"`
|
||||
Seed string `json:"seed"`
|
||||
Package pkg.Package `json:"-"`
|
||||
Package *pkg.DefaultPackage `json:"package"`
|
||||
SourceAssertion solver.PackagesAssertions `json:"-"`
|
||||
|
||||
OutputPath string `json:"-"` // Where the build processfiles go
|
||||
@@ -105,7 +106,7 @@ func NewLuetCompilationSpec(b []byte, p pkg.Package) (CompilationSpec, error) {
|
||||
if err != nil {
|
||||
return &spec, err
|
||||
}
|
||||
spec.Package = p
|
||||
spec.Package = p.(*pkg.DefaultPackage)
|
||||
return &spec, nil
|
||||
}
|
||||
func (a *LuetCompilationSpec) GetSourceAssertion() solver.PackagesAssertions {
|
||||
@@ -169,7 +170,15 @@ func (cs *LuetCompilationSpec) RenderBuildImage() (string, error) {
|
||||
FROM ` + cs.GetSeedImage() + `
|
||||
COPY . /luetbuild
|
||||
WORKDIR /luetbuild
|
||||
`
|
||||
ENV PACKAGE_NAME=` + cs.Package.GetName() + `
|
||||
ENV PACKAGE_VERSION=` + cs.Package.GetVersion() + `
|
||||
ENV PACKAGE_CATEGORY=` + cs.Package.GetCategory()
|
||||
|
||||
for _, s := range cs.Env {
|
||||
spec = spec + `
|
||||
ENV ` + s
|
||||
}
|
||||
|
||||
for _, s := range cs.GetPreBuildSteps() {
|
||||
spec = spec + `
|
||||
RUN ` + s
|
||||
@@ -180,7 +189,15 @@ RUN ` + s
|
||||
// TODO: docker build image first. Then a backend can be used to actually spin up a container with it and run the steps within
|
||||
func (cs *LuetCompilationSpec) RenderStepImage(image string) (string, error) {
|
||||
spec := `
|
||||
FROM ` + image
|
||||
FROM ` + image + `
|
||||
ENV PACKAGE_NAME=` + cs.Package.GetName() + `
|
||||
ENV PACKAGE_VERSION=` + cs.Package.GetVersion() + `
|
||||
ENV PACKAGE_CATEGORY=` + cs.Package.GetCategory()
|
||||
|
||||
for _, s := range cs.Env {
|
||||
spec = spec + `
|
||||
ENV ` + s
|
||||
}
|
||||
for _, s := range cs.BuildSteps() {
|
||||
spec = spec + `
|
||||
RUN ` + s
|
||||
|
@@ -58,11 +58,10 @@ var _ = Describe("Spec", func() {
|
||||
|
||||
err := generalRecipe.Load("../../tests/fixtures/buildtree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(1))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.Tree(), generalRecipe.Tree().GetPackageSet())
|
||||
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase())
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "enman", Category: "app-admin", Version: "1.4.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -76,6 +75,7 @@ var _ = Describe("Spec", func() {
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
lspec.Env = []string{"test=1"}
|
||||
err = lspec.WriteBuildImageDefinition(filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
dockerfile, err := helpers.Read(filepath.Join(tmpdir, "Dockerfile"))
|
||||
@@ -84,7 +84,10 @@ var _ = Describe("Spec", func() {
|
||||
FROM alpine
|
||||
COPY . /luetbuild
|
||||
WORKDIR /luetbuild
|
||||
`))
|
||||
ENV PACKAGE_NAME=enman
|
||||
ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin
|
||||
ENV test=1`))
|
||||
|
||||
err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "Dockerfile"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -92,6 +95,10 @@ WORKDIR /luetbuild
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(dockerfile).To(Equal(`
|
||||
FROM luet/base
|
||||
ENV PACKAGE_NAME=enman
|
||||
ENV PACKAGE_VERSION=1.4.0
|
||||
ENV PACKAGE_CATEGORY=app-admin
|
||||
ENV test=1
|
||||
RUN echo foo > /test
|
||||
RUN echo bar > /test2`))
|
||||
|
||||
|
@@ -16,11 +16,11 @@
|
||||
package helpers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
copy "github.com/otiai10/copy"
|
||||
)
|
||||
|
||||
// Exists reports whether the named file or directory exists.
|
||||
@@ -57,44 +57,7 @@ func ensureDir(fileName string) {
|
||||
// of the source file. The file mode will be copied from the source and
|
||||
// the copied data is synced/flushed to stable storage.
|
||||
func CopyFile(src, dst string) (err error) {
|
||||
in, err := os.Open(src)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer in.Close()
|
||||
|
||||
ensureDir(dst) // FIXME: Breaks permissions
|
||||
|
||||
out, err := os.Create(dst)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
if e := out.Close(); e != nil {
|
||||
err = e
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = out.Sync()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
si, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = os.Chmod(dst, si.Mode()) // FIXME: Needs owners copy as well.
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
return copy.Copy(src, dst)
|
||||
}
|
||||
|
||||
func IsDirectory(path string) (bool, error) {
|
||||
@@ -111,54 +74,5 @@ func IsDirectory(path string) (bool, error) {
|
||||
func CopyDir(src string, dst string) (err error) {
|
||||
src = filepath.Clean(src)
|
||||
dst = filepath.Clean(dst)
|
||||
|
||||
si, err := os.Stat(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !si.IsDir() {
|
||||
return fmt.Errorf("source is not a directory")
|
||||
}
|
||||
|
||||
_, err = os.Stat(dst)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
if err == nil {
|
||||
// return fmt.Errorf("destination already exists")
|
||||
}
|
||||
|
||||
err = os.MkdirAll(dst, si.Mode())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
entries, err := ioutil.ReadDir(src)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
srcPath := filepath.Join(src, entry.Name())
|
||||
dstPath := filepath.Join(dst, entry.Name())
|
||||
|
||||
if entry.IsDir() {
|
||||
err = CopyDir(srcPath, dstPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// Skip symlinks.
|
||||
if entry.Mode()&os.ModeSymlink != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
err = CopyFile(srcPath, dstPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return copy.Copy(src, dst)
|
||||
}
|
||||
|
28
pkg/installer/client/client_suite_test.go
Normal file
28
pkg/installer/client/client_suite_test.go
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package client_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestClient(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Client Suite")
|
||||
}
|
97
pkg/installer/client/http.go
Normal file
97
pkg/installer/client/http.go
Normal file
@@ -0,0 +1,97 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
"github.com/cavaliercoder/grab"
|
||||
)
|
||||
|
||||
type HttpClient struct {
|
||||
RepoData RepoData
|
||||
}
|
||||
|
||||
func NewHttpClient(r RepoData) *HttpClient {
|
||||
return &HttpClient{RepoData: r}
|
||||
}
|
||||
|
||||
func (c *HttpClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
|
||||
artifactName := path.Base(artifact.GetPath())
|
||||
Info("Downloading artifact", artifactName, "from", c.RepoData.Uri)
|
||||
|
||||
temp, err := ioutil.TempDir(os.TempDir(), "tree")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
file, err := ioutil.TempFile(temp, "HttpClient")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
u, err := url.Parse(c.RepoData.Uri)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u.Path = path.Join(u.Path, artifactName)
|
||||
|
||||
_, err = grab.Get(temp, u.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = helpers.CopyFile(filepath.Join(temp, artifactName), file.Name())
|
||||
|
||||
return compiler.NewPackageArtifact(file.Name()), nil
|
||||
}
|
||||
|
||||
func (c *HttpClient) DownloadFile(name string) (string, error) {
|
||||
temp, err := ioutil.TempDir(os.TempDir(), "tree")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
file, err := ioutil.TempFile(os.TempDir(), "HttpClient")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
//defer os.Remove(file.Name())
|
||||
u, err := url.Parse(c.RepoData.Uri)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
u.Path = path.Join(u.Path, name)
|
||||
|
||||
Info("Downloading", u.String())
|
||||
|
||||
_, err = grab.Get(temp, u.String())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = helpers.CopyFile(filepath.Join(temp, name), file.Name())
|
||||
|
||||
return file.Name(), err
|
||||
}
|
73
pkg/installer/client/http_test.go
Normal file
73
pkg/installer/client/http_test.go
Normal file
@@ -0,0 +1,73 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package client_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
. "github.com/mudler/luet/pkg/installer/client"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Exercises HttpClient against a throwaway net/http/httptest file server
// serving a temporary directory.
var _ = Describe("Http client", func() {
	Context("With repository", func() {

		It("Downloads single files", func() {
			// setup small staticfile webserver with content
			tmpdir, err := ioutil.TempDir("", "test")
			Expect(err).ToNot(HaveOccurred())
			defer os.RemoveAll(tmpdir) // clean up
			Expect(err).ToNot(HaveOccurred())
			ts := httptest.NewServer(http.FileServer(http.Dir(tmpdir)))
			defer ts.Close()
			err = ioutil.WriteFile(filepath.Join(tmpdir, "test.txt"), []byte(`test`), os.ModePerm)
			Expect(err).ToNot(HaveOccurred())

			// Download the served file and verify its content round-trips.
			c := NewHttpClient(RepoData{Uri: ts.URL})
			path, err := c.DownloadFile("test.txt")
			Expect(err).ToNot(HaveOccurred())
			Expect(helpers.Read(path)).To(Equal("test"))
			os.RemoveAll(path)
		})

		It("Downloads artifacts", func() {
			// setup small staticfile webserver with content
			tmpdir, err := ioutil.TempDir("", "test")
			Expect(err).ToNot(HaveOccurred())
			defer os.RemoveAll(tmpdir) // clean up
			Expect(err).ToNot(HaveOccurred())
			ts := httptest.NewServer(http.FileServer(http.Dir(tmpdir)))
			defer ts.Close()
			err = ioutil.WriteFile(filepath.Join(tmpdir, "test.txt"), []byte(`test`), os.ModePerm)
			Expect(err).ToNot(HaveOccurred())

			// Only the artifact's base name is used by the client, so a
			// bare relative Path is enough here.
			c := NewHttpClient(RepoData{Uri: ts.URL})
			path, err := c.DownloadArtifact(&compiler.PackageArtifact{Path: "test.txt"})
			Expect(err).ToNot(HaveOccurred())
			Expect(helpers.Read(path.GetPath())).To(Equal("test"))
			os.RemoveAll(path.GetPath())
		})

	})
})
|
20
pkg/installer/client/interface.go
Normal file
20
pkg/installer/client/interface.go
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package client
|
||||
|
||||
// RepoData holds the location of a package repository; Uri is either a
// local filesystem path or a remote URL depending on the client used.
type RepoData struct {
	Uri string
}
|
63
pkg/installer/client/local.go
Normal file
63
pkg/installer/client/local.go
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package client
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
)
|
||||
|
||||
// LocalClient "downloads" repository files and artifacts by copying them
// from a local filesystem path (RepoData.Uri).
type LocalClient struct {
	RepoData RepoData
}
|
||||
|
||||
func NewLocalClient(r RepoData) *LocalClient {
|
||||
return &LocalClient{RepoData: r}
|
||||
}
|
||||
|
||||
func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
|
||||
artifactName := path.Base(artifact.GetPath())
|
||||
Info("Downloading artifact", artifactName, "from", c.RepoData.Uri)
|
||||
file, err := ioutil.TempFile(os.TempDir(), "localclient")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//defer os.Remove(file.Name())
|
||||
|
||||
err = helpers.CopyFile(filepath.Join(c.RepoData.Uri, artifactName), file.Name())
|
||||
|
||||
return compiler.NewPackageArtifact(file.Name()), nil
|
||||
}
|
||||
func (c *LocalClient) DownloadFile(name string) (string, error) {
|
||||
Info("Downloading file", name, "from", c.RepoData.Uri)
|
||||
|
||||
file, err := ioutil.TempFile(os.TempDir(), "localclient")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
//defer os.Remove(file.Name())
|
||||
|
||||
err = helpers.CopyFile(filepath.Join(c.RepoData.Uri, name), file.Name())
|
||||
|
||||
return file.Name(), err
|
||||
}
|
66
pkg/installer/client/local_test.go
Normal file
66
pkg/installer/client/local_test.go
Normal file
@@ -0,0 +1,66 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package client_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
helpers "github.com/mudler/luet/pkg/helpers"
|
||||
|
||||
. "github.com/mudler/luet/pkg/installer/client"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// Exercises LocalClient against a temporary directory acting as a
// filesystem repository.
var _ = Describe("Local client", func() {
	Context("With repository", func() {
		It("Downloads single files", func() {
			tmpdir, err := ioutil.TempDir("", "test")
			Expect(err).ToNot(HaveOccurred())
			defer os.RemoveAll(tmpdir) // clean up

			// write the whole body at once
			err = ioutil.WriteFile(filepath.Join(tmpdir, "test.txt"), []byte(`test`), os.ModePerm)
			Expect(err).ToNot(HaveOccurred())

			// Copy the file out of the repository and verify its content.
			c := NewLocalClient(RepoData{Uri: tmpdir})
			path, err := c.DownloadFile("test.txt")
			Expect(err).ToNot(HaveOccurred())
			Expect(helpers.Read(path)).To(Equal("test"))
			os.RemoveAll(path)
		})

		It("Downloads artifacts", func() {
			tmpdir, err := ioutil.TempDir("", "test")
			Expect(err).ToNot(HaveOccurred())
			defer os.RemoveAll(tmpdir) // clean up

			// write the whole body at once
			err = ioutil.WriteFile(filepath.Join(tmpdir, "test.txt"), []byte(`test`), os.ModePerm)
			Expect(err).ToNot(HaveOccurred())

			// Only the artifact's base name is used by the client, so a
			// bare relative Path is enough here.
			c := NewLocalClient(RepoData{Uri: tmpdir})
			path, err := c.DownloadArtifact(&compiler.PackageArtifact{Path: "test.txt"})
			Expect(err).ToNot(HaveOccurred())
			Expect(helpers.Read(path.GetPath())).To(Equal("test"))
			os.RemoveAll(path.GetPath())
		})

	})
})
|
373
pkg/installer/installer.go
Normal file
373
pkg/installer/installer.go
Normal file
@@ -0,0 +1,373 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// LuetInstaller installs, upgrades and uninstalls packages from a set of
// repositories, unpacking artifacts with up to Concurrency workers.
type LuetInstaller struct {
	PackageRepositories Repositories
	Concurrency         int
}
|
||||
|
||||
// ArtifactMatch pairs a package chosen by the solver with the artifact
// that provides it and the repository the artifact comes from.
type ArtifactMatch struct {
	Package    pkg.Package
	Artifact   compiler.Artifact
	Repository Repository
}
|
||||
|
||||
// LuetFinalizer is the YAML-decoded set of shell commands a package runs
// after install (and, eventually, at uninstall).
type LuetFinalizer struct {
	Install   []string `json:"install"`
	Uninstall []string `json:"uninstall"` // TODO: Where to store?
}
|
||||
|
||||
func (f *LuetFinalizer) RunInstall() error {
|
||||
for _, c := range f.Install {
|
||||
Debug("finalizer:", "sh", "-c", c)
|
||||
cmd := exec.Command("sh", "-c", c)
|
||||
stdoutStderr, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed running command: "+string(stdoutStderr))
|
||||
}
|
||||
Info(stdoutStderr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: We don't store uninstall finalizers ?!
|
||||
func (f *LuetFinalizer) RunUnInstall() error {
|
||||
for _, c := range f.Install {
|
||||
Debug("finalizer:", "sh", "-c", c)
|
||||
cmd := exec.Command("sh", "-c", c)
|
||||
stdoutStderr, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed running command: "+string(stdoutStderr))
|
||||
}
|
||||
Info(stdoutStderr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewLuetFinalizerFromYaml(data []byte) (*LuetFinalizer, error) {
|
||||
var p LuetFinalizer
|
||||
err := yaml.Unmarshal(data, &p)
|
||||
if err != nil {
|
||||
return &p, err
|
||||
}
|
||||
return &p, err
|
||||
}
|
||||
|
||||
func NewLuetInstaller(concurrency int) Installer {
|
||||
return &LuetInstaller{Concurrency: concurrency}
|
||||
}
|
||||
|
||||
// Upgrade synchronizes all configured repositories, asks the solver for
// an upgrade plan against the system database, uninstalls the packages
// the plan drops (failures are only warnings) and installs the packages
// the plan asserts.
func (l *LuetInstaller) Upgrade(s *System) error {
	Spinner(32)
	defer SpinnerStop()
	// Sync every repository first; a single failure aborts the upgrade.
	syncedRepos := Repositories{}
	for _, r := range l.PackageRepositories {
		repo, err := r.Sync()
		if err != nil {
			return errors.Wrap(err, "Failed syncing repository: "+r.GetName())
		}
		syncedRepos = append(syncedRepos, repo)
	}

	// compute what to install and from where
	sort.Sort(syncedRepos)

	// First match packages against repositories by priority
	// matches := syncedRepos.PackageMatches(p)

	// compute a "big" world
	allRepos := pkg.NewInMemoryDatabase(false)
	syncedRepos.SyncDatabase(allRepos)
	solv := solver.NewSolver(s.Database, allRepos, pkg.NewInMemoryDatabase(false))
	uninstall, solution, err := solv.Upgrade()
	if err != nil {
		return errors.Wrap(err, "Failed solving solution for upgrade")
	}

	// Best-effort removal of obsoleted packages before installing.
	for _, u := range uninstall {
		err := l.Uninstall(u, s)
		if err != nil {
			Warning("Failed uninstall for ", u.GetFingerPrint())
		}
	}

	// Only positively asserted packages get installed.
	toInstall := []pkg.Package{}
	for _, assertion := range solution {
		if assertion.Value {
			toInstall = append(toInstall, assertion.Package)
		}
	}

	return l.Install(toInstall, s)
}
|
||||
|
||||
// Install resolves the requested packages against all synced repositories,
// downloads and unpacks the matching artifacts in parallel, records the
// installation in the system database and finally runs each package's
// install finalizer in dependency order (each at most once).
func (l *LuetInstaller) Install(p []pkg.Package, s *System) error {
	// First get metas from all repos (and decodes trees)

	Spinner(32)
	defer SpinnerStop()
	syncedRepos := Repositories{}
	for _, r := range l.PackageRepositories {
		repo, err := r.Sync()
		if err != nil {
			return errors.Wrap(err, "Failed syncing repository: "+r.GetName())
		}
		syncedRepos = append(syncedRepos, repo)
	}

	// compute what to install and from where
	sort.Sort(syncedRepos)

	// First match packages against repositories by priority
	// matches := syncedRepos.PackageMatches(p)

	// compute a "big" world
	allRepos := pkg.NewInMemoryDatabase(false)
	syncedRepos.SyncDatabase(allRepos)

	solv := solver.NewSolver(s.Database, allRepos, pkg.NewInMemoryDatabase(false))
	solution, err := solv.Install(p)
	if err != nil {
		return errors.Wrap(err, "Failed solving solution for package")
	}
	// Gathers things to install
	// Map each asserted package to the artifact (and repository) that
	// provides it, keyed by fingerprint for the finalizer phase below.
	toInstall := map[string]ArtifactMatch{}
	for _, assertion := range solution {
		if assertion.Value {
			matches := syncedRepos.PackageMatches([]pkg.Package{assertion.Package})
			if len(matches) != 1 {
				return errors.New("Failed matching solutions against repository - where are definitions coming from?!")
			}
		A:
			for _, artefact := range matches[0].Repo.GetIndex() {
				if artefact.GetCompileSpec().GetPackage() == nil {
					return errors.New("Package in compilespec empty")

				}
				if matches[0].Package.Matches(artefact.GetCompileSpec().GetPackage()) {
					// Filter out already installed
					if _, err := s.Database.FindPackage(assertion.Package); err != nil {
						toInstall[assertion.Package.GetFingerPrint()] = ArtifactMatch{Package: assertion.Package, Artifact: artefact, Repository: matches[0].Repo}
					}
					break A
				}
			}
		}
	}

	// Install packages into rootfs in parallel.
	all := make(chan ArtifactMatch)

	var wg = new(sync.WaitGroup)
	for i := 0; i < l.Concurrency; i++ {
		wg.Add(1)
		go l.installerWorker(i, wg, all, s)
	}

	// Feed the workers, then close the channel so they drain and exit.
	for _, c := range toInstall {
		all <- c
	}
	close(all)
	wg.Wait()

	// Tracks which finalizers already ran so each executes at most once.
	executedFinalizer := map[string]bool{}

	// TODO: Lower those errors as warning
	for _, w := range p {
		// Finalizers needs to run in order and in sequence.
		ordered := solution.Order(allRepos, w.GetFingerPrint())
		for _, ass := range ordered {
			if ass.Value {
				// Annotate to the system that the package was installed
				// TODO: Annotate also files that belong to the package, somewhere to uninstall
				if _, err := s.Database.FindPackage(ass.Package); err == nil {
					err := s.Database.UpdatePackage(ass.Package)
					if err != nil {
						return errors.Wrap(err, "Failed updating package")
					}
				} else {
					_, err := s.Database.CreatePackage(ass.Package)
					if err != nil {
						return errors.Wrap(err, "Failed creating package")
					}
				}
				installed, ok := toInstall[ass.Package.GetFingerPrint()]
				if !ok {
					return errors.New("Couldn't find ArtifactMatch for " + ass.Package.GetFingerPrint())
				}

				treePackage, err := installed.Repository.GetTree().GetDatabase().FindPackage(ass.Package)
				if err != nil {
					return errors.Wrap(err, "Error getting package "+ass.Package.GetFingerPrint())
				}
				// Run the package's finalizer script, if the tree ships one.
				if helpers.Exists(treePackage.Rel(tree.FinalizerFile)) {
					Info("Executing finalizer for " + ass.Package.GetName())
					finalizerRaw, err := ioutil.ReadFile(treePackage.Rel(tree.FinalizerFile))
					if err != nil {
						return errors.Wrap(err, "Error reading file "+treePackage.Rel(tree.FinalizerFile))
					}
					if _, exists := executedFinalizer[ass.Package.GetFingerPrint()]; !exists {
						finalizer, err := NewLuetFinalizerFromYaml(finalizerRaw)
						if err != nil {
							return errors.Wrap(err, "Error reading finalizer "+treePackage.Rel(tree.FinalizerFile))
						}
						err = finalizer.RunInstall()
						if err != nil {
							return errors.Wrap(err, "Error executing install finalizer "+treePackage.Rel(tree.FinalizerFile))
						}
						executedFinalizer[ass.Package.GetFingerPrint()] = true
					}
				}
			}
		}

	}

	return nil

}
|
||||
|
||||
func (l *LuetInstaller) installPackage(a ArtifactMatch, s *System) error {
|
||||
|
||||
// FIXME: Implement
|
||||
artifact, err := a.Repository.Client().DownloadArtifact(a.Artifact)
|
||||
defer os.Remove(artifact.GetPath())
|
||||
|
||||
tarFile, err := os.Open(artifact.GetPath())
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Could not open package archive")
|
||||
}
|
||||
defer tarFile.Close()
|
||||
tr := tar.NewReader(tarFile)
|
||||
|
||||
var files []string
|
||||
// untar each segment
|
||||
for {
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// determine proper file path info
|
||||
finfo := hdr.FileInfo()
|
||||
fileName := hdr.Name
|
||||
if finfo.Mode().IsDir() {
|
||||
continue
|
||||
}
|
||||
files = append(files, fileName)
|
||||
|
||||
// if a dir, create it, then go to next segment
|
||||
}
|
||||
|
||||
err = helpers.Untar(artifact.GetPath(), s.Target, true)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error met while unpacking rootfs")
|
||||
}
|
||||
|
||||
// First create client and download
|
||||
// Then unpack to system
|
||||
return s.Database.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: a.Package.GetFingerPrint(), Files: files})
|
||||
}
|
||||
|
||||
// installerWorker consumes ArtifactMatch items from c and installs each
// one into the system s. It is launched as a goroutine by Install; wg is
// marked done once the channel is drained.
func (l *LuetInstaller) installerWorker(i int, wg *sync.WaitGroup, c <-chan ArtifactMatch, s *System) error {
	defer wg.Done()

	for p := range c {
		// TODO: Keep trace of what was added from the tar, and save it into system
		err := l.installPackage(p, s)
		if err != nil {
			//TODO: Uninstall, rollback.
			// NOTE(review): Fatal presumably aborts the process, which would
			// make the wrapped return below unreachable — confirm the
			// logger's behavior before relying on this error path.
			Fatal("Failed installing package "+p.Package.GetName(), err.Error())
			return errors.Wrap(err, "Failed installing package "+p.Package.GetName())
		}
	}

	return nil
}
|
||||
|
||||
func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
|
||||
files, err := s.Database.GetPackageFiles(p)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed getting installed files")
|
||||
}
|
||||
|
||||
// Remove from target
|
||||
for _, f := range files {
|
||||
target := filepath.Join(s.Target, f)
|
||||
Info("Removing", target)
|
||||
err := os.Remove(target)
|
||||
if err != nil {
|
||||
Warning("Failed removing file (not present in the system target ?)", target)
|
||||
}
|
||||
}
|
||||
err = s.Database.RemovePackageFiles(p)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed removing package files from database")
|
||||
}
|
||||
err = s.Database.RemovePackage(p)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed removing package from database")
|
||||
}
|
||||
|
||||
Info(p.GetFingerPrint(), "Removed")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) Uninstall(p pkg.Package, s *System) error {
|
||||
// compute uninstall from all world - remove packages in parallel - run uninstall finalizer (in order) - mark the uninstallation in db
|
||||
// Get installed definition
|
||||
|
||||
solv := solver.NewSolver(s.Database, s.Database, pkg.NewInMemoryDatabase(false))
|
||||
solution, err := solv.Uninstall(p)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Uninstall failed")
|
||||
}
|
||||
for _, p := range solution {
|
||||
Info("Uninstalling", p.GetFingerPrint())
|
||||
err := l.uninstall(p, s)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Uninstall failed")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// Repositories sets the list of repositories consulted by Install and Upgrade.
func (l *LuetInstaller) Repositories(r []Repository) { l.PackageRepositories = r }
|
28
pkg/installer/installer_suite_test.go
Normal file
28
pkg/installer/installer_suite_test.go
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// TestInstaller wires the Ginkgo suite into the standard "go test" runner.
func TestInstaller(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Installer Suite")
}
|
372
pkg/installer/installer_test.go
Normal file
372
pkg/installer/installer_test.go
Normal file
@@ -0,0 +1,372 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
// . "github.com/mudler/luet/pkg/installer"
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
backend "github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/installer"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("Installer", func() {
|
||||
Context("Writes a repository definition", func() {
|
||||
It("Writes a repo and can install packages from it", func() {
|
||||
//repo:=NewLuetRepository()
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err = ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
Expect(spec.BuildSteps()).To(Equal([]string{"echo artifact5 > /test5", "echo artifact6 > /test6", "./generate.sh"}))
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2", "chmod +x generate.sh"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
artifact, err := compiler.Compile(2, false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := GenerateRepository("test", tmpdir, "local", 1, tmpdir, "../../tests/fixtures/buildable", pkg.NewInMemoryDatabase(false))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("tree.tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("tree.tar"))).To(BeTrue())
|
||||
Expect(repo.GetUri()).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("local"))
|
||||
|
||||
fakeroot, err := ioutil.TempDir("", "fakeroot")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(fakeroot) // clean up
|
||||
|
||||
inst := NewLuetInstaller(1)
|
||||
repo2, err := NewLuetRepositoryFromYaml([]byte(`
|
||||
name: "test"
|
||||
type: "local"
|
||||
uri: "`+tmpdir+`"
|
||||
`), pkg.NewInMemoryDatabase(false))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
inst.Repositories(Repositories{repo2})
|
||||
Expect(repo.GetUri()).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("local"))
|
||||
systemDB := pkg.NewInMemoryDatabase(false)
|
||||
system := &System{Database: systemDB, Target: fakeroot}
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
files, err := systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(files).To(Equal([]string{"artifact42", "test5", "test6"}))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(system.Database.GetPackages())).To(Equal(1))
|
||||
p, err := system.Database.GetPackage(system.Database.GetPackages()[0])
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(p.GetName()).To(Equal("b"))
|
||||
|
||||
err = inst.Uninstall(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
Context("Installation", func() {
|
||||
It("Installs in a system with a persistent db", func() {
|
||||
//repo:=NewLuetRepository()
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err = ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
Expect(spec.BuildSteps()).To(Equal([]string{"echo artifact5 > /test5", "echo artifact6 > /test6", "./generate.sh"}))
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2", "chmod +x generate.sh"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
artifact, err := compiler.Compile(2, false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := GenerateRepository("test", tmpdir, "local", 1, tmpdir, "../../tests/fixtures/buildable", pkg.NewInMemoryDatabase(false))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("tree.tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("tree.tar"))).To(BeTrue())
|
||||
Expect(repo.GetUri()).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("local"))
|
||||
|
||||
fakeroot, err := ioutil.TempDir("", "fakeroot")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(fakeroot) // clean up
|
||||
|
||||
inst := NewLuetInstaller(1)
|
||||
repo2, err := NewLuetRepositoryFromYaml([]byte(`
|
||||
name: "test"
|
||||
type: "local"
|
||||
uri: "`+tmpdir+`"
|
||||
`), pkg.NewInMemoryDatabase(false))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
inst.Repositories(Repositories{repo2})
|
||||
Expect(repo.GetUri()).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("local"))
|
||||
|
||||
bolt, err := ioutil.TempDir("", "db")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(bolt) // clean up
|
||||
|
||||
systemDB := pkg.NewBoltDatabase(filepath.Join(bolt, "db.db"))
|
||||
system := &System{Database: systemDB, Target: fakeroot}
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(system.Database.GetPackages())).To(Equal(1))
|
||||
p, err := system.Database.GetPackage(system.Database.GetPackages()[0])
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(p.GetName()).To(Equal("b"))
|
||||
|
||||
files, err := systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(files).To(Equal([]string{"artifact42", "test5", "test6"}))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = inst.Uninstall(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
Context("Simple upgrades", func() {
|
||||
It("Installs packages and Upgrades a system with a persistent db", func() {
|
||||
//repo:=NewLuetRepository()
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/upgrade")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
|
||||
|
||||
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
spec2, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
spec3, err := c.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err = ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
spec2.SetOutputPath(tmpdir)
|
||||
spec3.SetOutputPath(tmpdir)
|
||||
_, errs := c.CompileParallel(2, false, compiler.NewLuetCompilationspecs(spec, spec2, spec3))
|
||||
|
||||
Expect(errs).To(BeEmpty())
|
||||
|
||||
repo, err := GenerateRepository("test", tmpdir, "local", 1, tmpdir, "../../tests/fixtures/upgrade", pkg.NewInMemoryDatabase(false))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("tree.tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("tree.tar"))).To(BeTrue())
|
||||
Expect(repo.GetUri()).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("local"))
|
||||
|
||||
fakeroot, err := ioutil.TempDir("", "fakeroot")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(fakeroot) // clean up
|
||||
|
||||
inst := NewLuetInstaller(1)
|
||||
repo2, err := NewLuetRepositoryFromYaml([]byte(`
|
||||
name: "test"
|
||||
type: "local"
|
||||
uri: "`+tmpdir+`"
|
||||
`), pkg.NewInMemoryDatabase(false))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
inst.Repositories(Repositories{repo2})
|
||||
Expect(repo.GetUri()).To(Equal(tmpdir))
|
||||
Expect(repo.GetType()).To(Equal("local"))
|
||||
|
||||
bolt, err := ioutil.TempDir("", "db")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(bolt) // clean up
|
||||
|
||||
systemDB := pkg.NewBoltDatabase(filepath.Join(bolt, "db.db"))
|
||||
system := &System{Database: systemDB, Target: fakeroot}
|
||||
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
|
||||
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(system.Database.GetPackages())).To(Equal(1))
|
||||
p, err := system.Database.GetPackage(system.Database.GetPackages()[0])
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(p.GetName()).To(Equal("b"))
|
||||
|
||||
files, err := systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(files).To(Equal([]string{"artifact42", "test5", "test6"}))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = inst.Upgrade(system)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Nothing should be there anymore (files, packagedb entry)
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test5"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "test6"))).ToNot(BeTrue())
|
||||
|
||||
// New version - new files
|
||||
Expect(helpers.Exists(filepath.Join(fakeroot, "newc"))).To(BeTrue())
|
||||
_, err = system.Database.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// New package should be there
|
||||
_, err = system.Database.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.1"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
})
|
54
pkg/installer/interface.go
Normal file
54
pkg/installer/interface.go
Normal file
@@ -0,0 +1,54 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer
|
||||
|
||||
import (
|
||||
compiler "github.com/mudler/luet/pkg/compiler"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
//"github.com/mudler/luet/pkg/solver"
|
||||
)
|
||||
|
||||
// Installer drives package installation, removal and upgrades against a
// target System, resolving packages from the configured Repositories.
type Installer interface {
	Install([]pkg.Package, *System) error
	Uninstall(pkg.Package, *System) error
	Upgrade(s *System) error
	Repositories([]Repository)
}

// Client fetches repository content (artifacts and plain files) from a
// repository backend (e.g. local filesystem or HTTP).
type Client interface {
	DownloadArtifact(compiler.Artifact) (compiler.Artifact, error)
	DownloadFile(string) (string, error)
}

// Repositories is a sortable collection of Repository; see the sort.Interface
// implementation, which orders by priority.
type Repositories []Repository

// Repository describes a package source: its identifying metadata (name,
// uri, type, priority), the artifact index it serves, and the package tree
// used for dependency resolution.
type Repository interface {
	GetName() string
	GetUri() string
	SetUri(string)
	GetPriority() int
	GetIndex() compiler.ArtifactIndex
	GetTree() tree.Builder
	SetTree(tree.Builder)
	Write(path string) error
	Sync() (Repository, error)
	GetTreePath() string
	SetTreePath(string)
	GetType() string
	SetType(string)
	Client() Client
}
|
340
pkg/installer/repository.go
Normal file
340
pkg/installer/repository.go
Normal file
@@ -0,0 +1,340 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/mudler/luet/pkg/installer/client"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
tree "github.com/mudler/luet/pkg/tree"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// LuetRepository is the concrete Repository implementation that is
// serialized to repository.yaml. Tree and TreePath are rebuilt at load/sync
// time and therefore excluded from serialization.
type LuetRepository struct {
	Name     string                 `json:"name"`
	Uri      string                 `json:"uri"`
	Priority int                    `json:"priority"`
	Index    compiler.ArtifactIndex `json:"index"`
	Tree     tree.Builder           `json:"-"`
	TreePath string                 `json:"-"`
	Type     string                 `json:"type"`
}

// LuetRepositorySerialized mirrors LuetRepository with a concrete Index
// element type, so repository.yaml can be unmarshalled without knowing
// which Artifact implementation produced it.
type LuetRepositorySerialized struct {
	Name     string                      `json:"name"`
	Uri      string                      `json:"uri"`
	Priority int                         `json:"priority"`
	Index    []*compiler.PackageArtifact `json:"index"`
	Type     string                      `json:"type"`
}
|
||||
|
||||
func GenerateRepository(name, uri, t string, priority int, src, treeDir string, db pkg.PackageDatabase) (Repository, error) {
|
||||
|
||||
art, err := buildPackageIndex(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tr := tree.NewInstallerRecipe(db)
|
||||
err = tr.Load(treeDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewLuetRepository(name, uri, t, priority, art, tr), nil
|
||||
}
|
||||
|
||||
func NewLuetRepository(name, uri, t string, priority int, art []compiler.Artifact, builder tree.Builder) Repository {
|
||||
return &LuetRepository{Index: art, Type: t, Tree: builder, Name: name, Uri: uri, Priority: priority}
|
||||
}
|
||||
|
||||
func NewLuetRepositoryFromYaml(data []byte, db pkg.PackageDatabase) (Repository, error) {
|
||||
var p *LuetRepositorySerialized
|
||||
r := &LuetRepository{}
|
||||
err := yaml.Unmarshal(data, &p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.Name = p.Name
|
||||
r.Uri = p.Uri
|
||||
r.Priority = p.Priority
|
||||
r.Type = p.Type
|
||||
i := compiler.ArtifactIndex{}
|
||||
for _, ii := range p.Index {
|
||||
i = append(i, ii)
|
||||
}
|
||||
r.Index = i
|
||||
r.Tree = tree.NewInstallerRecipe(db)
|
||||
|
||||
return r, err
|
||||
}
|
||||
|
||||
func buildPackageIndex(path string) ([]compiler.Artifact, error) {
|
||||
|
||||
var art []compiler.Artifact
|
||||
var ff = func(currentpath string, info os.FileInfo, err error) error {
|
||||
|
||||
if !strings.HasSuffix(info.Name(), ".metadata.yaml") {
|
||||
return nil // Skip with no errors
|
||||
}
|
||||
|
||||
dat, err := ioutil.ReadFile(currentpath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading file "+currentpath)
|
||||
}
|
||||
|
||||
artifact, err := compiler.NewPackageArtifactFromYaml(dat)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading yaml "+currentpath)
|
||||
}
|
||||
art = append(art, artifact)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := filepath.Walk(path, ff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
}
|
||||
return art, nil
|
||||
}
|
||||
|
||||
// GetName returns the repository name.
func (r *LuetRepository) GetName() string {
	return r.Name
}

// GetTreePath returns the local path holding the synced package tree.
func (r *LuetRepository) GetTreePath() string {
	return r.TreePath
}

// SetTreePath records the local path of the synced package tree.
func (r *LuetRepository) SetTreePath(p string) {
	r.TreePath = p
}

// SetTree attaches the tree builder serving this repository's packages.
func (r *LuetRepository) SetTree(b tree.Builder) {
	r.Tree = b
}

// GetType returns the repository backend type (e.g. "local", "http").
func (r *LuetRepository) GetType() string {
	return r.Type
}

// SetType sets the repository backend type.
func (r *LuetRepository) SetType(p string) {
	r.Type = p
}

// SetUri sets the repository location.
func (r *LuetRepository) SetUri(p string) {
	r.Uri = p
}

// GetUri returns the repository location.
func (r *LuetRepository) GetUri() string {
	return r.Uri
}

// GetPriority returns the repository priority (lower values sort first,
// per Repositories.Less).
func (r *LuetRepository) GetPriority() int {
	return r.Priority
}

// GetIndex returns the artifact index.
func (r *LuetRepository) GetIndex() compiler.ArtifactIndex {
	return r.Index
}

// GetTree returns the tree builder serving this repository's packages.
func (r *LuetRepository) GetTree() tree.Builder {
	return r.Tree
}
|
||||
|
||||
func (r *LuetRepository) Write(dst string) error {
|
||||
|
||||
os.MkdirAll(dst, os.ModePerm)
|
||||
r.Index = r.Index.CleanPath()
|
||||
|
||||
data, err := yaml.Marshal(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(filepath.Join(dst, "repository.yaml"), data, os.ModePerm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
archive, err := ioutil.TempDir(os.TempDir(), "archive")
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error met while creating tempdir for archive")
|
||||
}
|
||||
defer os.RemoveAll(archive) // clean up
|
||||
err = r.GetTree().Save(archive)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error met while saving the tree")
|
||||
}
|
||||
err = helpers.Tar(archive, filepath.Join(dst, "tree.tar"))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error met while creating package archive")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Client returns a transfer client matching the repository type, or nil
// when the type is not one of the supported backends.
func (r *LuetRepository) Client() Client {
	switch r.GetType() {
	case "local":
		return client.NewLocalClient(client.RepoData{Uri: r.GetUri()})
	case "http":
		return client.NewHttpClient(client.RepoData{Uri: r.GetUri()})
	}

	return nil
}
|
||||
func (r *LuetRepository) Sync() (Repository, error) {
|
||||
c := r.Client()
|
||||
if c == nil {
|
||||
return nil, errors.New("No client could be generated from repository.")
|
||||
}
|
||||
file, err := c.DownloadFile("repository.yaml")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "While downloading repository.yaml from "+r.GetUri())
|
||||
}
|
||||
dat, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error reading file "+file)
|
||||
}
|
||||
defer os.Remove(file)
|
||||
|
||||
// TODO: make it swappable
|
||||
repo, err := NewLuetRepositoryFromYaml(dat, pkg.NewInMemoryDatabase(false))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error reading repository from file "+file)
|
||||
|
||||
}
|
||||
|
||||
archivetree, err := c.DownloadFile("tree.tar")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "While downloading repository.yaml from "+r.GetUri())
|
||||
}
|
||||
defer os.RemoveAll(archivetree) // clean up
|
||||
|
||||
treefs, err := ioutil.TempDir(os.TempDir(), "treefs")
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
}
|
||||
//defer os.RemoveAll(treefs) // clean up
|
||||
|
||||
// TODO: Following as option if archive as output?
|
||||
// archive, err := ioutil.TempDir(os.TempDir(), "archive")
|
||||
// if err != nil {
|
||||
// return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
|
||||
// }
|
||||
// defer os.RemoveAll(archive) // clean up
|
||||
|
||||
err = helpers.Untar(archivetree, treefs, false)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error met while unpacking rootfs")
|
||||
}
|
||||
|
||||
reciper := tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
err = reciper.Load(treefs)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error met while unpacking rootfs")
|
||||
}
|
||||
repo.SetTree(reciper)
|
||||
repo.SetTreePath(treefs)
|
||||
repo.SetUri(r.GetUri())
|
||||
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
// Len, Swap and Less implement sort.Interface, ordering repositories by
// ascending priority value.
func (r Repositories) Len() int      { return len(r) }
func (r Repositories) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r Repositories) Less(i, j int) bool {
	return r[i].GetPriority() < r[j].GetPriority()
}
|
||||
|
||||
func (r Repositories) World() []pkg.Package {
|
||||
cache := map[string]pkg.Package{}
|
||||
world := []pkg.Package{}
|
||||
|
||||
// Get Uniques. Walk in reverse so the definitions of most prio-repo overwrites lower ones
|
||||
// In this way, when we will walk again later the deps sorting them by most higher prio we have better chance of success.
|
||||
for i := len(r) - 1; i >= 0; i-- {
|
||||
for _, p := range r[i].GetTree().GetDatabase().World() {
|
||||
cache[p.GetFingerPrint()] = p
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range cache {
|
||||
world = append(world, v)
|
||||
}
|
||||
|
||||
return world
|
||||
}
|
||||
|
||||
func (r Repositories) SyncDatabase(d pkg.PackageDatabase) {
|
||||
cache := map[string]bool{}
|
||||
|
||||
// Get Uniques. Walk in reverse so the definitions of most prio-repo overwrites lower ones
|
||||
// In this way, when we will walk again later the deps sorting them by most higher prio we have better chance of success.
|
||||
for i := len(r) - 1; i >= 0; i-- {
|
||||
for _, p := range r[i].GetTree().GetDatabase().World() {
|
||||
if _, ok := cache[p.GetFingerPrint()]; !ok {
|
||||
cache[p.GetFingerPrint()] = true
|
||||
d.CreatePackage(p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// PackageMatch pairs a resolved package with the repository it was found in.
type PackageMatch struct {
	Repo    Repository
	Package pkg.Package
}
|
||||
|
||||
func (re Repositories) PackageMatches(p []pkg.Package) []PackageMatch {
|
||||
// TODO: Better heuristic. here we pick the first repo that contains the atom, sorted by priority but
|
||||
// we should do a permutations and get the best match, and in case there are more solutions the user should be able to pick
|
||||
sort.Sort(re)
|
||||
|
||||
var matches []PackageMatch
|
||||
PACKAGE:
|
||||
for _, pack := range p {
|
||||
for _, r := range re {
|
||||
c, err := r.GetTree().GetDatabase().FindPackage(pack)
|
||||
if err == nil {
|
||||
matches = append(matches, PackageMatch{Package: c, Repo: r})
|
||||
continue PACKAGE
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return matches
|
||||
|
||||
}
|
||||
|
||||
// Search returns every package, across all repositories (sorted by
// priority), whose name matches the regular expression s.
//
// NOTE(review): s is compiled with regexp.MustCompile, which panics on an
// invalid pattern — confirm callers validate user-supplied patterns first.
func (re Repositories) Search(s string) []PackageMatch {
	sort.Sort(re)
	var term = regexp.MustCompile(s)
	var matches []PackageMatch

	for _, r := range re {
		for _, pack := range r.GetTree().GetDatabase().World() {
			if term.MatchString(pack.GetName()) {
				matches = append(matches, PackageMatch{Package: pack, Repo: r})
			}
		}
	}

	return matches
}
|
115
pkg/installer/repository_test.go
Normal file
115
pkg/installer/repository_test.go
Normal file
@@ -0,0 +1,115 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package installer_test
|
||||
|
||||
import (
|
||||
|
||||
// . "github.com/mudler/luet/pkg/installer"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/mudler/luet/pkg/compiler"
|
||||
backend "github.com/mudler/luet/pkg/compiler/backend"
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/mudler/luet/pkg/installer"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/tree"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("Repository", func() {
|
||||
Context("Generation", func() {
|
||||
It("Generate repository metadat", func() {
|
||||
|
||||
tmpdir, err := ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/buildable")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
|
||||
|
||||
compiler := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(), generalRecipe.GetDatabase())
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
|
||||
|
||||
tmpdir, err = ioutil.TempDir("", "tree")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
Expect(spec.BuildSteps()).To(Equal([]string{"echo artifact5 > /test5", "echo artifact6 > /test6", "./generate.sh"}))
|
||||
Expect(spec.GetPreBuildSteps()).To(Equal([]string{"echo foo > /test", "echo bar > /test2", "chmod +x generate.sh"}))
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
artifact, err := compiler.Compile(2, false, spec)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
|
||||
content1, err := helpers.Read(spec.Rel("test5"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
content2, err := helpers.Read(spec.Rel("test6"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(content1).To(Equal("artifact5\n"))
|
||||
Expect(content2).To(Equal("artifact6\n"))
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.package.tar"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("b-test-1.0.metadata.yaml"))).To(BeTrue())
|
||||
|
||||
repo, err := GenerateRepository("test", tmpdir, "local", 1, tmpdir, "../../tests/fixtures/buildable", pkg.NewInMemoryDatabase(false))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(repo.GetName()).To(Equal("test"))
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("tree.tar"))).ToNot(BeTrue())
|
||||
err = repo.Write(tmpdir)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(helpers.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("tree.tar"))).To(BeTrue())
|
||||
})
|
||||
})
|
||||
Context("Matching packages", func() {
|
||||
It("Matches packages in different repositories by priority", func() {
|
||||
package1 := &pkg.DefaultPackage{Name: "Test"}
|
||||
package2 := &pkg.DefaultPackage{Name: "Test2"}
|
||||
builder1 := tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
builder2 := tree.NewInstallerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
|
||||
_, err := builder1.GetDatabase().CreatePackage(package1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = builder2.GetDatabase().CreatePackage(package2)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
repo1 := &LuetRepository{Name: "test1", Tree: builder1}
|
||||
repo2 := &LuetRepository{Name: "test2", Tree: builder2}
|
||||
repositories := Repositories{repo1, repo2}
|
||||
matches := repositories.PackageMatches([]pkg.Package{package1})
|
||||
Expect(matches).To(Equal([]PackageMatch{{Repo: repo1, Package: package1}}))
|
||||
|
||||
})
|
||||
|
||||
})
|
||||
})
|
14
pkg/installer/system.go
Normal file
14
pkg/installer/system.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package installer
|
||||
|
||||
import (
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
)
|
||||
|
||||
// System represents the machine being acted on: the installed-package
// database plus the filesystem root (Target) where package files live.
type System struct {
	Database pkg.PackageDatabase
	Target   string
}

// World returns every package currently recorded in the system database.
func (s *System) World() ([]pkg.Package, error) {
	return s.Database.World(), nil
}
|
@@ -23,6 +23,32 @@ type PackageDatabase interface {
|
||||
Get(s string) (string, error)
|
||||
Set(k, v string) error
|
||||
|
||||
Create([]byte) (string, error)
|
||||
Create(string, []byte) (string, error)
|
||||
Retrieve(ID string) ([]byte, error)
|
||||
}
|
||||
|
||||
type PackageSet interface {
|
||||
GetPackages() []string //Ids
|
||||
CreatePackage(pkg Package) (string, error)
|
||||
GetPackage(ID string) (Package, error)
|
||||
Clean() error
|
||||
FindPackage(Package) (Package, error)
|
||||
FindPackages(p Package) ([]Package, error)
|
||||
UpdatePackage(p Package) error
|
||||
GetAllPackages(packages chan Package) error
|
||||
RemovePackage(Package) error
|
||||
|
||||
GetPackageFiles(Package) ([]string, error)
|
||||
SetPackageFiles(*PackageFile) error
|
||||
RemovePackageFiles(Package) error
|
||||
|
||||
World() []Package
|
||||
|
||||
FindPackageCandidate(p Package) (Package, error)
|
||||
}
|
||||
|
||||
type PackageFile struct {
|
||||
ID int `storm:"id,increment"` // primary key with auto increment
|
||||
PackageFingerprint string
|
||||
Files []string
|
||||
}
|
||||
|
@@ -16,11 +16,13 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"os"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
version "github.com/hashicorp/go-version"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
storm "github.com/asdine/storm"
|
||||
@@ -44,20 +46,42 @@ func NewBoltDatabase(path string) PackageDatabase {
|
||||
}
|
||||
|
||||
func (db *BoltDatabase) Get(s string) (string, error) {
|
||||
return "", errors.New("Not implemented")
|
||||
bolt, err := storm.Open(db.Path, storm.BoltOptions(0600, &bbolt.Options{Timeout: 30 * time.Second}))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer bolt.Close()
|
||||
var str string
|
||||
bolt.Get("solver", s, &str)
|
||||
|
||||
return str, errors.New("Not implemented")
|
||||
}
|
||||
|
||||
func (db *BoltDatabase) Set(k, v string) error {
|
||||
return errors.New("Not implemented")
|
||||
|
||||
bolt, err := storm.Open(db.Path, storm.BoltOptions(0600, &bbolt.Options{Timeout: 30 * time.Second}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer bolt.Close()
|
||||
return bolt.Set("solver", k, v)
|
||||
}
|
||||
func (db *BoltDatabase) Create(id string, v []byte) (string, error) {
|
||||
enc := base64.StdEncoding.EncodeToString(v)
|
||||
|
||||
func (db *BoltDatabase) Create(v []byte) (string, error) {
|
||||
return "", errors.New("Not implemented")
|
||||
return id, db.Set(id, enc)
|
||||
}
|
||||
|
||||
func (db *BoltDatabase) Retrieve(ID string) ([]byte, error) {
|
||||
return []byte{}, errors.New("Not implemented")
|
||||
pa, err := db.Get(ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
enc, err := base64.StdEncoding.DecodeString(pa)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return enc, nil
|
||||
}
|
||||
|
||||
func (db *BoltDatabase) FindPackage(tofind Package) (Package, error) {
|
||||
@@ -76,18 +100,13 @@ func (db *BoltDatabase) FindPackage(tofind Package) (Package, error) {
|
||||
}
|
||||
|
||||
func (db *BoltDatabase) UpdatePackage(p Package) error {
|
||||
|
||||
bolt, err := storm.Open(db.Path, storm.BoltOptions(0600, &bbolt.Options{Timeout: 30 * time.Second}))
|
||||
// TODO: Change, but by query we cannot update by ID
|
||||
err := db.RemovePackage(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer bolt.Close()
|
||||
_, err = db.CreatePackage(p)
|
||||
|
||||
dp, ok := p.(*DefaultPackage)
|
||||
if !ok {
|
||||
return errors.New("Bolt DB support only DefaultPackage type for now")
|
||||
}
|
||||
err = bolt.Update(dp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -106,7 +125,9 @@ func (db *BoltDatabase) GetPackage(ID string) (Package, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = bolt.One("ID", iid, p)
|
||||
err = bolt.Select(q.Eq("ID", iid)).Limit(1).First(p)
|
||||
|
||||
//err = bolt.One("id", iid, p)
|
||||
return p, err
|
||||
}
|
||||
|
||||
@@ -183,3 +204,117 @@ func (db *BoltDatabase) Clean() error {
|
||||
defer db.Unlock()
|
||||
return os.RemoveAll(db.Path)
|
||||
}
|
||||
|
||||
func (db *BoltDatabase) GetPackageFiles(p Package) ([]string, error) {
|
||||
bolt, err := storm.Open(db.Path, storm.BoltOptions(0600, &bbolt.Options{Timeout: 30 * time.Second}))
|
||||
if err != nil {
|
||||
return []string{}, errors.Wrap(err, "Error opening boltdb "+db.Path)
|
||||
}
|
||||
defer bolt.Close()
|
||||
|
||||
files := bolt.From("files")
|
||||
var pf PackageFile
|
||||
err = files.One("PackageFingerprint", p.GetFingerPrint(), &pf)
|
||||
if err != nil {
|
||||
return []string{}, errors.Wrap(err, "While finding files")
|
||||
}
|
||||
return pf.Files, nil
|
||||
}
|
||||
func (db *BoltDatabase) SetPackageFiles(p *PackageFile) error {
|
||||
bolt, err := storm.Open(db.Path, storm.BoltOptions(0600, &bbolt.Options{Timeout: 30 * time.Second}))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error opening boltdb "+db.Path)
|
||||
}
|
||||
defer bolt.Close()
|
||||
|
||||
files := bolt.From("files")
|
||||
return files.Save(p)
|
||||
}
|
||||
func (db *BoltDatabase) RemovePackageFiles(p Package) error {
|
||||
bolt, err := storm.Open(db.Path, storm.BoltOptions(0600, &bbolt.Options{Timeout: 30 * time.Second}))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error opening boltdb "+db.Path)
|
||||
}
|
||||
defer bolt.Close()
|
||||
|
||||
files := bolt.From("files")
|
||||
var pf PackageFile
|
||||
err = files.One("PackageFingerprint", p.GetFingerPrint(), &pf)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "While finding files")
|
||||
}
|
||||
return files.DeleteStruct(&pf)
|
||||
}
|
||||
|
||||
func (db *BoltDatabase) RemovePackage(p Package) error {
|
||||
bolt, err := storm.Open(db.Path, storm.BoltOptions(0600, &bbolt.Options{Timeout: 30 * time.Second}))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error opening boltdb "+db.Path)
|
||||
}
|
||||
defer bolt.Close()
|
||||
var found DefaultPackage
|
||||
err = bolt.Select(q.Eq("Name", p.GetName()), q.Eq("Category", p.GetCategory()), q.Eq("Version", p.GetVersion())).Limit(1).Delete(&found)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "No package found to delete")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *BoltDatabase) World() []Package {
|
||||
|
||||
var all []Package
|
||||
// FIXME: This should all be locked in the db - for now forbid the solver to be run in threads.
|
||||
for _, k := range db.GetPackages() {
|
||||
pack, err := db.GetPackage(k)
|
||||
if err == nil {
|
||||
all = append(all, pack)
|
||||
}
|
||||
}
|
||||
return all
|
||||
}
|
||||
|
||||
func (db *BoltDatabase) FindPackageCandidate(p Package) (Package, error) {
|
||||
|
||||
required, err := db.FindPackage(p)
|
||||
if err != nil {
|
||||
|
||||
// return nil, errors.Wrap(err, "Couldn't find required package in db definition")
|
||||
packages, err := p.Expand(db)
|
||||
// Info("Expanded", packages, err)
|
||||
if err != nil || len(packages) == 0 {
|
||||
required = p
|
||||
} else {
|
||||
required = Best(packages)
|
||||
|
||||
}
|
||||
return required, nil
|
||||
//required = &DefaultPackage{Name: "test"}
|
||||
}
|
||||
|
||||
return required, err
|
||||
|
||||
}
|
||||
|
||||
// FindPackages return the list of the packages beloging to cat/name (any versions)
|
||||
// FIXME: Optimize, see inmemorydb
|
||||
func (db *BoltDatabase) FindPackages(p Package) ([]Package, error) {
|
||||
var versionsInWorld []Package
|
||||
for _, w := range db.World() {
|
||||
if w.GetName() != p.GetName() || w.GetCategory() != p.GetCategory() {
|
||||
continue
|
||||
}
|
||||
|
||||
v, err := version.NewVersion(w.GetVersion())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
constraints, err := version.NewConstraint(p.GetVersion())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if constraints.Check(v) {
|
||||
versionsInWorld = append(versionsInWorld, w)
|
||||
}
|
||||
}
|
||||
return versionsInWorld, nil
|
||||
}
|
||||
|
@@ -18,29 +18,35 @@ package pkg
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"sync"
|
||||
|
||||
version "github.com/hashicorp/go-version"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var DBInMemoryInstance = &InMemoryDatabase{
|
||||
Mutex: &sync.Mutex{},
|
||||
|
||||
Database: map[string]string{}}
|
||||
Mutex: &sync.Mutex{},
|
||||
FileDatabase: map[string][]string{},
|
||||
Database: map[string]string{},
|
||||
CacheNoVersion: map[string]map[string]interface{}{},
|
||||
}
|
||||
|
||||
type InMemoryDatabase struct {
|
||||
*sync.Mutex
|
||||
Database map[string]string
|
||||
Database map[string]string
|
||||
FileDatabase map[string][]string
|
||||
CacheNoVersion map[string]map[string]interface{}
|
||||
}
|
||||
|
||||
func NewInMemoryDatabase(singleton bool) PackageDatabase {
|
||||
// In memoryDB is a singleton
|
||||
if !singleton {
|
||||
return &InMemoryDatabase{
|
||||
Mutex: &sync.Mutex{},
|
||||
|
||||
Database: map[string]string{}}
|
||||
Mutex: &sync.Mutex{},
|
||||
FileDatabase: map[string][]string{},
|
||||
Database: map[string]string{},
|
||||
CacheNoVersion: map[string]map[string]interface{}{},
|
||||
}
|
||||
}
|
||||
return DBInMemoryInstance
|
||||
}
|
||||
@@ -63,12 +69,10 @@ func (db *InMemoryDatabase) Set(k, v string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *InMemoryDatabase) Create(v []byte) (string, error) {
|
||||
func (db *InMemoryDatabase) Create(id string, v []byte) (string, error) {
|
||||
enc := base64.StdEncoding.EncodeToString(v)
|
||||
crc32q := crc32.MakeTable(0xD5828281)
|
||||
ID := fmt.Sprintf("%08x", crc32.Checksum([]byte(enc), crc32q)) // TODO: Replace with package fingerprint?
|
||||
|
||||
return ID, db.Set(ID, enc)
|
||||
return id, db.Set(id, enc)
|
||||
}
|
||||
|
||||
func (db *InMemoryDatabase) Retrieve(ID string) ([]byte, error) {
|
||||
@@ -93,7 +97,13 @@ func (db *InMemoryDatabase) GetPackage(ID string) (Package, error) {
|
||||
|
||||
p := &DefaultPackage{}
|
||||
|
||||
if err := json.Unmarshal(enc, &p); err != nil {
|
||||
rawIn := json.RawMessage(enc)
|
||||
bytes, err := rawIn.MarshalJSON()
|
||||
if err != nil {
|
||||
return &DefaultPackage{}, err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(bytes, &p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
@@ -119,15 +129,25 @@ func (db *InMemoryDatabase) CreatePackage(p Package) (string, error) {
|
||||
return "", errors.New("InMemoryDatabase suports only DefaultPackage")
|
||||
}
|
||||
|
||||
res, err := json.Marshal(pd)
|
||||
res, err := pd.JSON()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
ID, err := db.Create(res)
|
||||
ID, err := db.Create(pd.GetFingerPrint(), res)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Create extra cache between package -> []versions
|
||||
db.Lock()
|
||||
defer db.Unlock()
|
||||
_, ok = db.CacheNoVersion[p.GetPackageName()]
|
||||
if !ok {
|
||||
db.CacheNoVersion[p.GetPackageName()] = make(map[string]interface{})
|
||||
}
|
||||
db.CacheNoVersion[p.GetPackageName()][p.GetVersion()] = nil
|
||||
|
||||
return ID, nil
|
||||
}
|
||||
|
||||
@@ -137,56 +157,55 @@ func (db *InMemoryDatabase) encodePackage(p Package) (string, string, error) {
|
||||
return "", "", errors.New("InMemoryDatabase suports only DefaultPackage")
|
||||
}
|
||||
|
||||
res, err := json.Marshal(pd)
|
||||
res, err := pd.JSON()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
enc := base64.StdEncoding.EncodeToString(res)
|
||||
crc32q := crc32.MakeTable(0xD5828281)
|
||||
ID := fmt.Sprintf("%08x", crc32.Checksum([]byte(enc), crc32q)) // TODO: Replace with package fingerprint?
|
||||
|
||||
return ID, enc, nil
|
||||
return p.GetFingerPrint(), enc, nil
|
||||
}
|
||||
|
||||
func (db *InMemoryDatabase) FindPackage(p Package) (Package, error) {
|
||||
return db.GetPackage(p.GetFingerPrint())
|
||||
}
|
||||
|
||||
// TODO: Replace this piece, when IDs are fingerprint, findpackage becames O(1)
|
||||
|
||||
for _, k := range db.GetPackages() {
|
||||
pack, err := db.GetPackage(k)
|
||||
// FindPackages return the list of the packages beloging to cat/name (any versions)
|
||||
func (db *InMemoryDatabase) FindPackages(p Package) ([]Package, error) {
|
||||
versions, ok := db.CacheNoVersion[p.GetPackageName()]
|
||||
if !ok {
|
||||
return nil, errors.New("No versions found for package")
|
||||
}
|
||||
var versionsInWorld []Package
|
||||
for ve, _ := range versions {
|
||||
v, err := version.NewVersion(ve)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pack.Matches(p) {
|
||||
return pack, nil
|
||||
constraints, err := version.NewConstraint(p.GetVersion())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if constraints.Check(v) {
|
||||
w, err := db.FindPackage(&DefaultPackage{Name: p.GetName(), Category: p.GetCategory(), Version: ve})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Cache mismatch - this shouldn't happen")
|
||||
}
|
||||
versionsInWorld = append(versionsInWorld, w)
|
||||
}
|
||||
}
|
||||
return nil, errors.New("Package not found")
|
||||
return versionsInWorld, nil
|
||||
}
|
||||
|
||||
func (db *InMemoryDatabase) UpdatePackage(p Package) error {
|
||||
var id string
|
||||
found := false
|
||||
for _, k := range db.GetPackages() {
|
||||
pack, err := db.GetPackage(k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pack.Matches(p) {
|
||||
id = k
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if found {
|
||||
|
||||
_, enc, err := db.encodePackage(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return db.Set(id, enc)
|
||||
_, enc, err := db.encodePackage(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return db.Set(p.GetFingerPrint(), enc)
|
||||
|
||||
return errors.New("Package not found")
|
||||
}
|
||||
|
||||
@@ -204,3 +223,68 @@ func (db *InMemoryDatabase) Clean() error {
|
||||
db.Database = map[string]string{}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *InMemoryDatabase) GetPackageFiles(p Package) ([]string, error) {
|
||||
|
||||
db.Lock()
|
||||
defer db.Unlock()
|
||||
|
||||
pa, ok := db.FileDatabase[p.GetFingerPrint()]
|
||||
if !ok {
|
||||
return pa, errors.New("No key found with that id")
|
||||
}
|
||||
|
||||
return pa, nil
|
||||
}
|
||||
func (db *InMemoryDatabase) SetPackageFiles(p *PackageFile) error {
|
||||
db.Lock()
|
||||
defer db.Unlock()
|
||||
db.FileDatabase[p.PackageFingerprint] = p.Files
|
||||
return nil
|
||||
}
|
||||
func (db *InMemoryDatabase) RemovePackageFiles(p Package) error {
|
||||
db.Lock()
|
||||
defer db.Unlock()
|
||||
delete(db.FileDatabase, p.GetFingerPrint())
|
||||
return nil
|
||||
}
|
||||
|
||||
func (db *InMemoryDatabase) RemovePackage(p Package) error {
|
||||
db.Lock()
|
||||
defer db.Unlock()
|
||||
|
||||
delete(db.Database, p.GetFingerPrint())
|
||||
return nil
|
||||
}
|
||||
func (db *InMemoryDatabase) World() []Package {
|
||||
var all []Package
|
||||
// FIXME: This should all be locked in the db - for now forbid the solver to be run in threads.
|
||||
for _, k := range db.GetPackages() {
|
||||
pack, err := db.GetPackage(k)
|
||||
if err == nil {
|
||||
all = append(all, pack)
|
||||
}
|
||||
}
|
||||
return all
|
||||
}
|
||||
|
||||
func (db *InMemoryDatabase) FindPackageCandidate(p Package) (Package, error) {
|
||||
|
||||
required, err := db.FindPackage(p)
|
||||
if err != nil {
|
||||
// return nil, errors.Wrap(err, "Couldn't find required package in db definition")
|
||||
packages, err := p.Expand(db)
|
||||
// Info("Expanded", packages, err)
|
||||
if err != nil || len(packages) == 0 {
|
||||
required = p
|
||||
} else {
|
||||
required = Best(packages)
|
||||
|
||||
}
|
||||
return required, nil
|
||||
//required = &DefaultPackage{Name: "test"}
|
||||
}
|
||||
|
||||
return required, err
|
||||
|
||||
}
|
||||
|
@@ -45,7 +45,7 @@ var _ = Describe("Database", func() {
|
||||
|
||||
ids := db.GetPackages()
|
||||
|
||||
Expect(ids).To(Equal([]string{"b536b2bd"}))
|
||||
Expect(ids).To(Equal([]string{"A-->=1.0"}))
|
||||
|
||||
})
|
||||
It("Find packages", func() {
|
||||
@@ -55,6 +55,27 @@ var _ = Describe("Database", func() {
|
||||
Expect(pack).To(Equal(a))
|
||||
|
||||
})
|
||||
|
||||
It("Find best package candidate", func() {
|
||||
db := NewInMemoryDatabase(false)
|
||||
a := NewPackage("A", "1.0", []*DefaultPackage{}, []*DefaultPackage{})
|
||||
a1 := NewPackage("A", "1.1", []*DefaultPackage{}, []*DefaultPackage{})
|
||||
a3 := NewPackage("A", "1.3", []*DefaultPackage{}, []*DefaultPackage{})
|
||||
_, err := db.CreatePackage(a)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = db.CreatePackage(a1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = db.CreatePackage(a3)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
s := NewPackage("A", ">=1.0", []*DefaultPackage{}, []*DefaultPackage{})
|
||||
|
||||
pack, err := db.FindPackageCandidate(s)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(pack).To(Equal(a3))
|
||||
|
||||
})
|
||||
})
|
||||
|
||||
})
|
||||
|
@@ -16,13 +16,18 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
// . "github.com/mudler/luet/pkg/logger"
|
||||
|
||||
"github.com/crillab/gophersat/bf"
|
||||
version "github.com/hashicorp/go-version"
|
||||
"github.com/jinzhu/copier"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
)
|
||||
@@ -32,24 +37,25 @@ import (
|
||||
type Package interface {
|
||||
Encode(PackageDatabase) (string, error)
|
||||
|
||||
BuildFormula(PackageDatabase) ([]bf.Formula, error)
|
||||
BuildFormula(PackageDatabase, PackageDatabase) ([]bf.Formula, error)
|
||||
IsFlagged(bool) Package
|
||||
Flagged() bool
|
||||
GetFingerPrint() string
|
||||
GetPackageName() string
|
||||
Requires([]*DefaultPackage) Package
|
||||
Conflicts([]*DefaultPackage) Package
|
||||
Revdeps(world *[]Package) []Package
|
||||
Revdeps(PackageDatabase) []Package
|
||||
|
||||
GetRequires() []*DefaultPackage
|
||||
GetConflicts() []*DefaultPackage
|
||||
Expand(*[]Package) ([]Package, error)
|
||||
Expand(PackageDatabase) ([]Package, error)
|
||||
SetCategory(string)
|
||||
|
||||
GetName() string
|
||||
GetCategory() string
|
||||
|
||||
GetVersion() string
|
||||
RequiresContains(Package) bool
|
||||
RequiresContains(PackageDatabase, Package) (bool, error)
|
||||
Matches(m Package) bool
|
||||
|
||||
AddUse(use string)
|
||||
@@ -64,34 +70,43 @@ type Package interface {
|
||||
Rel(string) string
|
||||
}
|
||||
|
||||
type PackageSet interface {
|
||||
GetPackages() []string //Ids
|
||||
CreatePackage(pkg Package) (string, error)
|
||||
GetPackage(ID string) (Package, error)
|
||||
Clean() error
|
||||
FindPackage(Package) (Package, error)
|
||||
UpdatePackage(p Package) error
|
||||
GetAllPackages(packages chan Package) error
|
||||
}
|
||||
|
||||
type Tree interface {
|
||||
GetPackageSet() PackageDatabase
|
||||
Prelude() string // A tree might have a prelude to be able to consume a tree
|
||||
SetPackageSet(s PackageDatabase)
|
||||
World() ([]Package, error)
|
||||
FindPackage(Package) (Package, error)
|
||||
ResolveDeps(int) error
|
||||
}
|
||||
|
||||
// >> Unmarshallers
|
||||
// DefaultPackageFromYaml decodes a package from yaml bytes
|
||||
func DefaultPackageFromYaml(source []byte) (DefaultPackage, error) {
|
||||
var pkg DefaultPackage
|
||||
err := yaml.Unmarshal(source, &pkg)
|
||||
func DefaultPackageFromYaml(yml []byte) (DefaultPackage, error) {
|
||||
|
||||
var unescaped DefaultPackage
|
||||
source, err := yaml.YAMLToJSON(yml)
|
||||
if err != nil {
|
||||
return pkg, err
|
||||
return DefaultPackage{}, err
|
||||
}
|
||||
return pkg, nil
|
||||
|
||||
rawIn := json.RawMessage(source)
|
||||
bytes, err := rawIn.MarshalJSON()
|
||||
if err != nil {
|
||||
return DefaultPackage{}, err
|
||||
}
|
||||
err = json.Unmarshal(bytes, &unescaped)
|
||||
if err != nil {
|
||||
return DefaultPackage{}, err
|
||||
}
|
||||
return unescaped, nil
|
||||
}
|
||||
|
||||
// Major and minor gets escaped when marshalling in JSON, making compiler fails recognizing selectors for expansion
|
||||
func (t *DefaultPackage) JSON() ([]byte, error) {
|
||||
buffer := &bytes.Buffer{}
|
||||
encoder := json.NewEncoder(buffer)
|
||||
encoder.SetEscapeHTML(false)
|
||||
err := encoder.Encode(t)
|
||||
return buffer.Bytes(), err
|
||||
}
|
||||
|
||||
// DefaultPackage represent a standard package definition
|
||||
@@ -121,7 +136,7 @@ func NewPackage(name, version string, requires []*DefaultPackage, conflicts []*D
|
||||
}
|
||||
|
||||
func (p *DefaultPackage) String() string {
|
||||
b, err := json.Marshal(p)
|
||||
b, err := p.JSON()
|
||||
if err != nil {
|
||||
return fmt.Sprintf("{ id: \"%d\", name: \"%s\" }", p.ID, p.Name)
|
||||
}
|
||||
@@ -134,6 +149,10 @@ func (p *DefaultPackage) GetFingerPrint() string {
|
||||
return fmt.Sprintf("%s-%s-%s", p.Name, p.Category, p.Version)
|
||||
}
|
||||
|
||||
func (p *DefaultPackage) GetPackageName() string {
|
||||
return fmt.Sprintf("%s-%s", p.Name, p.Category)
|
||||
}
|
||||
|
||||
// GetPath returns the path where the definition file was found
|
||||
func (p *DefaultPackage) GetPath() string {
|
||||
return p.Path
|
||||
@@ -175,7 +194,12 @@ func (p *DefaultPackage) Encode(db PackageDatabase) (string, error) {
|
||||
}
|
||||
|
||||
func (p *DefaultPackage) Yaml() ([]byte, error) {
|
||||
y, err := yaml.Marshal(p)
|
||||
j, err := p.JSON()
|
||||
if err != nil {
|
||||
|
||||
return []byte{}, err
|
||||
}
|
||||
y, err := yaml.JSONToYAML(j)
|
||||
if err != nil {
|
||||
|
||||
return []byte{}, err
|
||||
@@ -238,14 +262,14 @@ func (p *DefaultPackage) Matches(m Package) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *DefaultPackage) Expand(world *[]Package) ([]Package, error) {
|
||||
|
||||
func (p *DefaultPackage) Expand(definitiondb PackageDatabase) ([]Package, error) {
|
||||
var versionsInWorld []Package
|
||||
for _, w := range *world {
|
||||
if w.GetName() != p.GetName() || w.GetCategory() != p.GetCategory() {
|
||||
continue
|
||||
}
|
||||
|
||||
all, err := definitiondb.FindPackages(p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, w := range all {
|
||||
v, err := version.NewVersion(w.GetVersion())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -262,16 +286,16 @@ func (p *DefaultPackage) Expand(world *[]Package) ([]Package, error) {
|
||||
return versionsInWorld, nil
|
||||
}
|
||||
|
||||
func (p *DefaultPackage) Revdeps(world *[]Package) []Package {
|
||||
func (p *DefaultPackage) Revdeps(definitiondb PackageDatabase) []Package {
|
||||
var versionsInWorld []Package
|
||||
for _, w := range *world {
|
||||
for _, w := range definitiondb.World() {
|
||||
if w.Matches(p) {
|
||||
continue
|
||||
}
|
||||
for _, re := range w.GetRequires() {
|
||||
if re.Matches(p) {
|
||||
versionsInWorld = append(versionsInWorld, w)
|
||||
versionsInWorld = append(versionsInWorld, w.Revdeps(world)...)
|
||||
versionsInWorld = append(versionsInWorld, w.Revdeps(definitiondb)...)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -283,43 +307,76 @@ func DecodePackage(ID string, db PackageDatabase) (Package, error) {
|
||||
return db.GetPackage(ID)
|
||||
}
|
||||
|
||||
func NormalizeFlagged(p Package) {
|
||||
for _, r := range p.GetRequires() {
|
||||
r.IsFlagged(true)
|
||||
NormalizeFlagged(r)
|
||||
func (pack *DefaultPackage) RequiresContains(definitiondb PackageDatabase, s Package) (bool, error) {
|
||||
p, err := definitiondb.FindPackage(pack)
|
||||
if err != nil {
|
||||
p = pack //relax things
|
||||
//return false, errors.Wrap(err, "Package not found in definition db")
|
||||
}
|
||||
for _, r := range p.GetConflicts() {
|
||||
r.IsFlagged(true)
|
||||
NormalizeFlagged(r)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *DefaultPackage) RequiresContains(s Package) bool {
|
||||
for _, re := range p.GetRequires() {
|
||||
if re.Matches(s) {
|
||||
return true
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if re.RequiresContains(s) {
|
||||
return true
|
||||
packages, _ := re.Expand(definitiondb)
|
||||
for _, pa := range packages {
|
||||
if pa.Matches(s) {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
if contains, err := re.RequiresContains(definitiondb, s); err == nil && contains {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (p *DefaultPackage) BuildFormula(db PackageDatabase) ([]bf.Formula, error) {
|
||||
encodedA, err := p.IsFlagged(true).Encode(db)
|
||||
func Best(set []Package) Package {
|
||||
var versionsMap map[string]Package = make(map[string]Package)
|
||||
if len(set) == 0 {
|
||||
panic("Best needs a list with elements")
|
||||
}
|
||||
|
||||
versionsRaw := []string{}
|
||||
for _, p := range set {
|
||||
versionsRaw = append(versionsRaw, p.GetVersion())
|
||||
versionsMap[p.GetVersion()] = p
|
||||
}
|
||||
|
||||
versions := make([]*version.Version, len(versionsRaw))
|
||||
for i, raw := range versionsRaw {
|
||||
v, _ := version.NewVersion(raw)
|
||||
versions[i] = v
|
||||
}
|
||||
|
||||
// After this, the versions are properly sorted
|
||||
sort.Sort(version.Collection(versions))
|
||||
|
||||
return versionsMap[versions[len(versions)-1].Original()]
|
||||
}
|
||||
|
||||
func (pack *DefaultPackage) BuildFormula(definitiondb PackageDatabase, db PackageDatabase) ([]bf.Formula, error) {
|
||||
// TODO: Expansion needs to go here - and so we ditch Resolvedeps()
|
||||
p, err := definitiondb.FindPackage(pack)
|
||||
if err != nil {
|
||||
p = pack // Relax failures and trust the def
|
||||
}
|
||||
encodedA, err := p.Encode(db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
NormalizeFlagged(p)
|
||||
|
||||
A := bf.Var(encodedA)
|
||||
|
||||
var formulas []bf.Formula
|
||||
for _, requiredDef := range p.GetRequires() {
|
||||
required, err := definitiondb.FindPackageCandidate(requiredDef)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Couldn't find required package in db definition")
|
||||
}
|
||||
|
||||
for _, required := range p.PackageRequires {
|
||||
encodedB, err := required.Encode(db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -327,7 +384,7 @@ func (p *DefaultPackage) BuildFormula(db PackageDatabase) ([]bf.Formula, error)
|
||||
B := bf.Var(encodedB)
|
||||
formulas = append(formulas, bf.Or(bf.Not(A), B))
|
||||
|
||||
f, err := required.BuildFormula(db)
|
||||
f, err := required.BuildFormula(definitiondb, db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -335,7 +392,32 @@ func (p *DefaultPackage) BuildFormula(db PackageDatabase) ([]bf.Formula, error)
|
||||
|
||||
}
|
||||
|
||||
for _, required := range p.PackageConflicts {
|
||||
for _, requiredDef := range p.GetConflicts() {
|
||||
required, err := definitiondb.FindPackage(requiredDef)
|
||||
if err != nil {
|
||||
packages, err := requiredDef.Expand(definitiondb)
|
||||
if err != nil || len(packages) == 0 {
|
||||
required = requiredDef
|
||||
} else {
|
||||
for _, p := range packages {
|
||||
encodedB, err := p.Encode(db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
B := bf.Var(encodedB)
|
||||
formulas = append(formulas, bf.Or(bf.Not(A),
|
||||
bf.Not(B)))
|
||||
|
||||
f, err := p.BuildFormula(definitiondb, db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
formulas = append(formulas, f...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// return nil, errors.Wrap(err, "Couldn't find required package in db definition")
|
||||
encodedB, err := required.Encode(db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -344,11 +426,12 @@ func (p *DefaultPackage) BuildFormula(db PackageDatabase) ([]bf.Formula, error)
|
||||
formulas = append(formulas, bf.Or(bf.Not(A),
|
||||
bf.Not(B)))
|
||||
|
||||
f, err := required.BuildFormula(db)
|
||||
f, err := required.BuildFormula(definitiondb, db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
formulas = append(formulas, f...)
|
||||
|
||||
}
|
||||
return formulas, nil
|
||||
}
|
||||
|
@@ -17,8 +17,6 @@ package pkg_test
|
||||
|
||||
import (
|
||||
. "github.com/mudler/luet/pkg/package"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
@@ -31,14 +29,18 @@ var _ = Describe("Package", func() {
|
||||
a11 := NewPackage("A", "1.1", []*DefaultPackage{}, []*DefaultPackage{})
|
||||
a01 := NewPackage("A", "0.1", []*DefaultPackage{}, []*DefaultPackage{})
|
||||
It("Expands correctly", func() {
|
||||
lst, err := a.Expand(&[]Package{a1, a11, a01})
|
||||
definitions := NewInMemoryDatabase(false)
|
||||
for _, p := range []Package{a1, a11, a01} {
|
||||
_, err := definitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
lst, err := a.Expand(definitions)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(lst).To(ContainElement(a11))
|
||||
Expect(lst).To(ContainElement(a1))
|
||||
Expect(lst).ToNot(ContainElement(a01))
|
||||
Expect(len(lst)).To(Equal(2))
|
||||
s := solver.NewSolver([]pkg.Package{}, []pkg.Package{}, NewInMemoryDatabase(false))
|
||||
p := s.Best(lst)
|
||||
p := Best(lst)
|
||||
Expect(p).To(Equal(a11))
|
||||
})
|
||||
})
|
||||
@@ -49,7 +51,12 @@ var _ = Describe("Package", func() {
|
||||
c := NewPackage("C", "1.1", []*DefaultPackage{b}, []*DefaultPackage{})
|
||||
d := NewPackage("D", "0.1", []*DefaultPackage{}, []*DefaultPackage{})
|
||||
It("Computes correctly", func() {
|
||||
lst := a.Revdeps(&[]Package{a, b, c, d})
|
||||
definitions := NewInMemoryDatabase(false)
|
||||
for _, p := range []Package{a, b, c, d} {
|
||||
_, err := definitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
lst := a.Revdeps(definitions)
|
||||
Expect(lst).To(ContainElement(b))
|
||||
Expect(lst).To(ContainElement(c))
|
||||
Expect(len(lst)).To(Equal(2))
|
||||
@@ -64,11 +71,16 @@ var _ = Describe("Package", func() {
|
||||
e := NewPackage("E", "0.1", []*DefaultPackage{c}, []*DefaultPackage{})
|
||||
|
||||
It("Computes correctly", func() {
|
||||
lst := b.Revdeps(&[]Package{a, b, c, d, e})
|
||||
definitions := NewInMemoryDatabase(false)
|
||||
for _, p := range []Package{a, b, c, d, e} {
|
||||
_, err := definitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
lst := a.Revdeps(definitions)
|
||||
Expect(lst).To(ContainElement(c))
|
||||
Expect(lst).To(ContainElement(d))
|
||||
Expect(lst).To(ContainElement(e))
|
||||
Expect(len(lst)).To(Equal(3))
|
||||
Expect(len(lst)).To(Equal(4))
|
||||
})
|
||||
})
|
||||
|
||||
@@ -77,11 +89,17 @@ var _ = Describe("Package", func() {
|
||||
a1 := NewPackage("A", "1.0", []*DefaultPackage{a}, []*DefaultPackage{})
|
||||
a11 := NewPackage("A", "1.1", []*DefaultPackage{}, []*DefaultPackage{})
|
||||
a01 := NewPackage("A", "0.1", []*DefaultPackage{a1, a11}, []*DefaultPackage{})
|
||||
|
||||
It("returns correctly", func() {
|
||||
Expect(a01.RequiresContains(a1)).To(BeTrue())
|
||||
Expect(a01.RequiresContains(a11)).To(BeTrue())
|
||||
Expect(a01.RequiresContains(a)).To(BeTrue())
|
||||
Expect(a.RequiresContains(a11)).ToNot(BeTrue())
|
||||
definitions := NewInMemoryDatabase(false)
|
||||
for _, p := range []Package{a, a1, a11, a01} {
|
||||
_, err := definitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(a01.RequiresContains(definitions, a1)).To(BeTrue())
|
||||
Expect(a01.RequiresContains(definitions, a11)).To(BeTrue())
|
||||
Expect(a01.RequiresContains(definitions, a)).To(BeTrue())
|
||||
Expect(a.RequiresContains(definitions, a11)).ToNot(BeTrue())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -116,9 +134,15 @@ var _ = Describe("Package", func() {
|
||||
Context("BuildFormula", func() {
|
||||
It("builds empty constraints", func() {
|
||||
db := NewInMemoryDatabase(false)
|
||||
|
||||
a1 := NewPackage("A", "1.0", []*DefaultPackage{}, []*DefaultPackage{})
|
||||
f, err := a1.BuildFormula(db)
|
||||
|
||||
definitions := NewInMemoryDatabase(false)
|
||||
for _, p := range []Package{a1} {
|
||||
_, err := definitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
f, err := a1.BuildFormula(definitions, db)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(f).To(BeNil())
|
||||
})
|
||||
@@ -130,11 +154,18 @@ var _ = Describe("Package", func() {
|
||||
a1 := NewPackage("A", "1.0", []*DefaultPackage{}, []*DefaultPackage{})
|
||||
a1.Requires([]*DefaultPackage{a11})
|
||||
a1.Conflicts([]*DefaultPackage{a21})
|
||||
f, err := a1.BuildFormula(db)
|
||||
|
||||
definitions := NewInMemoryDatabase(false)
|
||||
for _, p := range []Package{a1, a21, a11} {
|
||||
_, err := definitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
f, err := a1.BuildFormula(definitions, db)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(f)).To(Equal(2))
|
||||
Expect(f[0].String()).To(Equal("or(not(c31f5842), a4910f77)"))
|
||||
Expect(f[1].String()).To(Equal("or(not(c31f5842), not(a97670be))"))
|
||||
// Expect(f[0].String()).To(Equal("or(not(c31f5842), a4910f77)"))
|
||||
// Expect(f[1].String()).To(Equal("or(not(c31f5842), not(a97670be))"))
|
||||
})
|
||||
})
|
||||
|
||||
|
@@ -22,7 +22,7 @@ import (
|
||||
"unicode"
|
||||
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/philopon/go-toposort"
|
||||
toposort "github.com/philopon/go-toposort"
|
||||
"github.com/stevenle/topsort"
|
||||
)
|
||||
|
||||
@@ -37,7 +37,7 @@ type PackageHash struct {
|
||||
// It is composed of a Package and a Value which is indicating the absence or not
|
||||
// of the associated package state.
|
||||
type PackageAssert struct {
|
||||
Package pkg.Package
|
||||
Package *pkg.DefaultPackage
|
||||
Value bool
|
||||
Hash PackageHash
|
||||
}
|
||||
@@ -51,7 +51,7 @@ func DecodeModel(model map[string]bool, db pkg.PackageDatabase) (PackagesAsserti
|
||||
return nil, err
|
||||
|
||||
}
|
||||
ass = append(ass, PackageAssert{Package: a, Value: v})
|
||||
ass = append(ass, PackageAssert{Package: a.(*pkg.DefaultPackage), Value: v})
|
||||
}
|
||||
return ass, nil
|
||||
}
|
||||
@@ -67,12 +67,12 @@ func (a *PackageAssert) String() string {
|
||||
|
||||
func (a *PackageAssert) ToString() string {
|
||||
var msg string
|
||||
if a.Package.Flagged() {
|
||||
if a.Value {
|
||||
msg = "installed"
|
||||
} else {
|
||||
msg = "not installed"
|
||||
}
|
||||
return fmt.Sprintf("%s/%s %s %s: %t", a.Package.GetCategory(), a.Package.GetName(), a.Package.GetVersion(), msg, a.Value)
|
||||
return fmt.Sprintf("%s/%s %s %s", a.Package.GetCategory(), a.Package.GetName(), a.Package.GetVersion(), msg)
|
||||
}
|
||||
|
||||
func (assertions PackagesAssertions) EnsureOrder() PackagesAssertions {
|
||||
@@ -88,7 +88,7 @@ func (assertions PackagesAssertions) EnsureOrder() PackagesAssertions {
|
||||
fingerprints = append(fingerprints, a.Package.GetFingerPrint())
|
||||
unorderedAssertions = append(unorderedAssertions, a) // Build a list of the ones that must be ordered
|
||||
|
||||
if a.Package.Flagged() && a.Value {
|
||||
if a.Value {
|
||||
unorderedAssertions = append(unorderedAssertions, a) // Build a list of the ones that must be ordered
|
||||
} else {
|
||||
orderedAssertions = append(orderedAssertions, a) // Keep last the ones which are not meant to be installed
|
||||
@@ -122,7 +122,7 @@ func (assertions PackagesAssertions) EnsureOrder() PackagesAssertions {
|
||||
return orderedAssertions
|
||||
}
|
||||
|
||||
func (assertions PackagesAssertions) Order(fingerprint string) PackagesAssertions {
|
||||
func (assertions PackagesAssertions) Order(definitiondb pkg.PackageDatabase, fingerprint string) PackagesAssertions {
|
||||
|
||||
orderedAssertions := PackagesAssertions{}
|
||||
unorderedAssertions := PackagesAssertions{}
|
||||
@@ -137,7 +137,7 @@ func (assertions PackagesAssertions) Order(fingerprint string) PackagesAssertion
|
||||
fingerprints = append(fingerprints, a.Package.GetFingerPrint())
|
||||
unorderedAssertions = append(unorderedAssertions, a) // Build a list of the ones that must be ordered
|
||||
|
||||
if a.Package.Flagged() && a.Value {
|
||||
if a.Value {
|
||||
unorderedAssertions = append(unorderedAssertions, a) // Build a list of the ones that must be ordered
|
||||
} else {
|
||||
orderedAssertions = append(orderedAssertions, a) // Keep last the ones which are not meant to be installed
|
||||
@@ -150,7 +150,13 @@ func (assertions PackagesAssertions) Order(fingerprint string) PackagesAssertion
|
||||
//graph := toposort.NewGraph(len(unorderedAssertions))
|
||||
// graph.AddNodes(fingerprints...)
|
||||
for _, a := range unorderedAssertions {
|
||||
for _, req := range a.Package.GetRequires() {
|
||||
for _, requiredDef := range a.Package.GetRequires() {
|
||||
req, err := definitiondb.FindPackageCandidate(requiredDef)
|
||||
if err != nil {
|
||||
req = requiredDef
|
||||
}
|
||||
|
||||
// Expand also here, as we need to order them (or instead the solver should give back the dep correctly?)
|
||||
graph.AddEdge(a.Package.GetFingerPrint(), req.GetFingerPrint())
|
||||
}
|
||||
}
|
||||
@@ -215,7 +221,7 @@ func (a PackagesAssertions) Less(i, j int) bool {
|
||||
func (assertions PackagesAssertions) AssertionHash() string {
|
||||
var fingerprint string
|
||||
for _, assertion := range assertions { // Note: Always order them first!
|
||||
if assertion.Value && assertion.Package.Flagged() { // Tke into account only dependencies installed (get fingerprint of subgraph)
|
||||
if assertion.Value { // Tke into account only dependencies installed (get fingerprint of subgraph)
|
||||
fingerprint += assertion.ToString() + "\n"
|
||||
}
|
||||
}
|
||||
|
@@ -26,11 +26,22 @@ import (
|
||||
)
|
||||
|
||||
var _ = Describe("Decoder", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
dbInstalled := pkg.NewInMemoryDatabase(false)
|
||||
dbDefinitions := pkg.NewInMemoryDatabase(false)
|
||||
s := NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
BeforeEach(func() {
|
||||
db = pkg.NewInMemoryDatabase(false)
|
||||
dbInstalled = pkg.NewInMemoryDatabase(false)
|
||||
dbDefinitions = pkg.NewInMemoryDatabase(false)
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
})
|
||||
|
||||
Context("Assertion ordering", func() {
|
||||
eq := 0
|
||||
for index := 0; index < 300; index++ { // Just to make sure we don't have false positives
|
||||
It("Orders them correctly #"+strconv.Itoa(index), func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
E := pkg.NewPackage("E", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -41,19 +52,27 @@ var _ = Describe("Decoder", func() {
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{D}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{C}, []pkg.Package{A, B, C, D, E, F, G}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D, E, F, G} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: H.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: G.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: H, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: G, Value: true}))
|
||||
|
||||
Expect(len(solution)).To(Equal(6))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
solution = solution.Order(A.GetFingerPrint())
|
||||
solution = solution.Order(dbDefinitions, A.GetFingerPrint())
|
||||
// Expect(len(solution)).To(Equal(6))
|
||||
Expect(solution[0].Package.GetName()).To(Equal("G"))
|
||||
Expect(solution[1].Package.GetName()).To(Equal("H"))
|
||||
@@ -73,7 +92,6 @@ var _ = Describe("Decoder", func() {
|
||||
equality := 0
|
||||
for index := 0; index < 300; index++ { // Just to make sure we don't have false positives
|
||||
It("Doesn't order them correctly otherwise #"+strconv.Itoa(index), func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
E := pkg.NewPackage("E", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -84,15 +102,23 @@ var _ = Describe("Decoder", func() {
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{D}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{C}, []pkg.Package{A, B, C, D, E, F, G}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D, E, F, G} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: H.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: G.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: H, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: G, Value: true}))
|
||||
|
||||
Expect(len(solution)).To(Equal(6))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -132,7 +158,6 @@ var _ = Describe("Decoder", func() {
|
||||
|
||||
Context("Assertion hashing", func() {
|
||||
It("Hashes them, and could be used for comparison", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
E := pkg.NewPackage("E", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -143,19 +168,27 @@ var _ = Describe("Decoder", func() {
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{D}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{C}, []pkg.Package{A, B, C, D, E, F, G}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D, E, F, G} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: H.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: G.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: H, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: G, Value: true}))
|
||||
|
||||
Expect(len(solution)).To(Equal(6))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
solution = solution.Order(A.GetFingerPrint())
|
||||
solution = solution.Order(dbDefinitions, A.GetFingerPrint())
|
||||
// Expect(len(solution)).To(Equal(6))
|
||||
Expect(solution[0].Package.GetName()).To(Equal("G"))
|
||||
Expect(solution[1].Package.GetName()).To(Equal("H"))
|
||||
@@ -165,15 +198,15 @@ var _ = Describe("Decoder", func() {
|
||||
hash := solution.AssertionHash()
|
||||
|
||||
solution, err = s.Install([]pkg.Package{B})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: H.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: G.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: H, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: G, Value: true}))
|
||||
|
||||
Expect(len(solution)).To(Equal(6))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
solution = solution.Order(B.GetFingerPrint())
|
||||
solution = solution.Order(dbDefinitions, B.GetFingerPrint())
|
||||
hash2 := solution.AssertionHash()
|
||||
|
||||
// Expect(len(solution)).To(Equal(6))
|
||||
@@ -183,23 +216,34 @@ var _ = Describe("Decoder", func() {
|
||||
Expect(solution[3].Package.GetName()).To(Equal("D"))
|
||||
Expect(solution[4].Package.GetName()).To(Equal("B"))
|
||||
Expect(solution[0].Value).ToNot(BeTrue())
|
||||
Expect(solution[0].Package.Flagged()).To(BeTrue())
|
||||
|
||||
Expect(hash).ToNot(Equal(""))
|
||||
Expect(hash2).ToNot(Equal(""))
|
||||
Expect(hash != hash2).To(BeTrue())
|
||||
db2 := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
})
|
||||
It("Hashes them, and could be used for comparison", func() {
|
||||
|
||||
X := pkg.NewPackage("X", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
Y := pkg.NewPackage("Y", "", []*pkg.DefaultPackage{X}, []*pkg.DefaultPackage{})
|
||||
Z := pkg.NewPackage("Z", "", []*pkg.DefaultPackage{X}, []*pkg.DefaultPackage{})
|
||||
s = NewSolver([]pkg.Package{}, []pkg.Package{X, Y, Z}, db2)
|
||||
solution, err = s.Install([]pkg.Package{Y})
|
||||
|
||||
for _, p := range []pkg.Package{X, Y, Z} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
solution, err := s.Install([]pkg.Package{Y})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
solution2, err := s.Install([]pkg.Package{Z})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(solution.Order(Y.GetFingerPrint()).Drop(Y).AssertionHash() == solution2.Order(Z.GetFingerPrint()).Drop(Z).AssertionHash()).To(BeTrue())
|
||||
Expect(solution.Order(dbDefinitions, Y.GetFingerPrint()).Drop(Y).AssertionHash() == solution2.Order(dbDefinitions, Z.GetFingerPrint()).Drop(Z).AssertionHash()).To(BeTrue())
|
||||
})
|
||||
|
||||
})
|
||||
|
@@ -16,77 +16,56 @@
|
||||
package solver
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
|
||||
//. "github.com/mudler/luet/pkg/logger"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/crillab/gophersat/bf"
|
||||
"github.com/hashicorp/go-version"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
)
|
||||
|
||||
// PackageSolver is an interface to a generic package solving algorithm
|
||||
type PackageSolver interface {
|
||||
SetWorld(p []pkg.Package)
|
||||
SetDefinitionDatabase(pkg.PackageDatabase)
|
||||
Install(p []pkg.Package) (PackagesAssertions, error)
|
||||
Uninstall(candidate pkg.Package) ([]pkg.Package, error)
|
||||
ConflictsWithInstalled(p pkg.Package) (bool, error)
|
||||
ConflictsWith(p pkg.Package, ls []pkg.Package) (bool, error)
|
||||
Best([]pkg.Package) pkg.Package
|
||||
World() []pkg.Package
|
||||
Upgrade() ([]pkg.Package, PackagesAssertions, error)
|
||||
}
|
||||
|
||||
// Solver is the default solver for luet
|
||||
type Solver struct {
|
||||
Database pkg.PackageDatabase
|
||||
Wanted []pkg.Package
|
||||
Installed []pkg.Package
|
||||
World []pkg.Package
|
||||
DefinitionDatabase pkg.PackageDatabase
|
||||
SolverDatabase pkg.PackageDatabase
|
||||
Wanted []pkg.Package
|
||||
InstalledDatabase pkg.PackageDatabase
|
||||
}
|
||||
|
||||
// NewSolver accepts as argument two lists of packages, the first is the initial set,
|
||||
// the second represent all the known packages.
|
||||
func NewSolver(init []pkg.Package, w []pkg.Package, db pkg.PackageDatabase) PackageSolver {
|
||||
for _, v := range init {
|
||||
pkg.NormalizeFlagged(v)
|
||||
}
|
||||
for _, v := range w {
|
||||
pkg.NormalizeFlagged(v)
|
||||
}
|
||||
return &Solver{Installed: init, World: w, Database: db}
|
||||
}
|
||||
|
||||
// TODO: []pkg.Package should have its own type with this kind of methods in (+Unique, sort, etc.)
|
||||
func (s *Solver) Best(set []pkg.Package) pkg.Package {
|
||||
var versionsMap map[string]pkg.Package = make(map[string]pkg.Package)
|
||||
if len(set) == 0 {
|
||||
panic("Best needs a list with elements")
|
||||
}
|
||||
|
||||
versionsRaw := []string{}
|
||||
for _, p := range set {
|
||||
versionsRaw = append(versionsRaw, p.GetVersion())
|
||||
versionsMap[p.GetVersion()] = p
|
||||
}
|
||||
|
||||
versions := make([]*version.Version, len(versionsRaw))
|
||||
for i, raw := range versionsRaw {
|
||||
v, _ := version.NewVersion(raw)
|
||||
versions[i] = v
|
||||
}
|
||||
|
||||
// After this, the versions are properly sorted
|
||||
sort.Sort(version.Collection(versions))
|
||||
|
||||
return versionsMap[versions[len(versions)-1].Original()]
|
||||
func NewSolver(installed pkg.PackageDatabase, definitiondb pkg.PackageDatabase, solverdb pkg.PackageDatabase) PackageSolver {
|
||||
return &Solver{InstalledDatabase: installed, DefinitionDatabase: definitiondb, SolverDatabase: solverdb}
|
||||
}
|
||||
|
||||
// SetWorld is a setter for the list of all known packages to the solver
|
||||
|
||||
func (s *Solver) SetWorld(p []pkg.Package) {
|
||||
s.World = p
|
||||
func (s *Solver) SetDefinitionDatabase(db pkg.PackageDatabase) {
|
||||
s.DefinitionDatabase = db
|
||||
}
|
||||
|
||||
func (s *Solver) World() []pkg.Package {
|
||||
return s.DefinitionDatabase.World()
|
||||
}
|
||||
|
||||
func (s *Solver) Installed() []pkg.Package {
|
||||
|
||||
return s.InstalledDatabase.World()
|
||||
}
|
||||
|
||||
func (s *Solver) noRulesWorld() bool {
|
||||
for _, p := range s.World {
|
||||
for _, p := range s.World() {
|
||||
if len(p.GetConflicts()) != 0 || len(p.GetRequires()) != 0 {
|
||||
return false
|
||||
}
|
||||
@@ -97,8 +76,8 @@ func (s *Solver) noRulesWorld() bool {
|
||||
|
||||
func (s *Solver) BuildInstalled() (bf.Formula, error) {
|
||||
var formulas []bf.Formula
|
||||
for _, p := range s.Installed {
|
||||
solvable, err := p.BuildFormula(s.Database)
|
||||
for _, p := range s.Installed() {
|
||||
solvable, err := p.BuildFormula(s.DefinitionDatabase, s.SolverDatabase)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -124,8 +103,8 @@ func (s *Solver) BuildWorld(includeInstalled bool) (bf.Formula, error) {
|
||||
formulas = append(formulas, solvable)
|
||||
}
|
||||
|
||||
for _, p := range s.World {
|
||||
solvable, err := p.BuildFormula(s.Database)
|
||||
for _, p := range s.World() {
|
||||
solvable, err := p.BuildFormula(s.DefinitionDatabase, s.SolverDatabase)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -134,15 +113,45 @@ func (s *Solver) BuildWorld(includeInstalled bool) (bf.Formula, error) {
|
||||
return bf.And(formulas...), nil
|
||||
}
|
||||
|
||||
func (s *Solver) ConflictsWith(p pkg.Package, ls []pkg.Package) (bool, error) {
|
||||
pkg.NormalizeFlagged(p)
|
||||
func (s *Solver) getList(db pkg.PackageDatabase, lsp []pkg.Package) ([]pkg.Package, error) {
|
||||
var ls []pkg.Package
|
||||
|
||||
for _, pp := range lsp {
|
||||
cp, err := db.FindPackage(pp)
|
||||
if err != nil {
|
||||
packages, err := pp.Expand(db)
|
||||
// Expand, and relax search - if not found pick the same one
|
||||
if err != nil || len(packages) == 0 {
|
||||
cp = pp
|
||||
} else {
|
||||
cp = pkg.Best(packages)
|
||||
}
|
||||
}
|
||||
ls = append(ls, cp)
|
||||
}
|
||||
return ls, nil
|
||||
}
|
||||
|
||||
func (s *Solver) ConflictsWith(pack pkg.Package, lsp []pkg.Package) (bool, error) {
|
||||
p, err := s.DefinitionDatabase.FindPackage(pack)
|
||||
if err != nil {
|
||||
p = pack //Relax search, otherwise we cannot compute solutions for packages not in definitions
|
||||
|
||||
// return false, errors.Wrap(err, "Package not found in definition db")
|
||||
}
|
||||
|
||||
ls, err := s.getList(s.DefinitionDatabase, lsp)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "Package not found in definition db")
|
||||
}
|
||||
|
||||
var formulas []bf.Formula
|
||||
|
||||
if s.noRulesWorld() {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
encodedP, err := p.IsFlagged(true).Encode(s.Database)
|
||||
encodedP, err := p.Encode(s.SolverDatabase)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -164,7 +173,7 @@ func (s *Solver) ConflictsWith(p pkg.Package, ls []pkg.Package) (bool, error) {
|
||||
// continue
|
||||
// }
|
||||
|
||||
encodedI, err := i.Encode(s.Database)
|
||||
encodedI, err := i.Encode(s.SolverDatabase)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -181,33 +190,103 @@ func (s *Solver) ConflictsWith(p pkg.Package, ls []pkg.Package) (bool, error) {
|
||||
}
|
||||
|
||||
func (s *Solver) ConflictsWithInstalled(p pkg.Package) (bool, error) {
|
||||
return s.ConflictsWith(p, s.Installed)
|
||||
return s.ConflictsWith(p, s.Installed())
|
||||
}
|
||||
|
||||
func (s *Solver) Upgrade() ([]pkg.Package, PackagesAssertions, error) {
|
||||
|
||||
// First get candidates that needs to be upgraded..
|
||||
|
||||
toUninstall := []pkg.Package{}
|
||||
toInstall := []pkg.Package{}
|
||||
|
||||
availableCache := map[string][]pkg.Package{}
|
||||
for _, p := range s.DefinitionDatabase.World() {
|
||||
// Each one, should be expanded
|
||||
availableCache[p.GetName()+p.GetCategory()] = append(availableCache[p.GetName()+p.GetCategory()], p)
|
||||
}
|
||||
|
||||
installedcopy := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
for _, p := range s.InstalledDatabase.World() {
|
||||
installedcopy.CreatePackage(p)
|
||||
packages, ok := availableCache[p.GetName()+p.GetCategory()]
|
||||
if ok && len(packages) != 0 {
|
||||
best := pkg.Best(packages)
|
||||
if best.GetVersion() != p.GetVersion() {
|
||||
toUninstall = append(toUninstall, p)
|
||||
toInstall = append(toInstall, best)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s2 := NewSolver(installedcopy, s.DefinitionDatabase, pkg.NewInMemoryDatabase(false))
|
||||
// Then try to uninstall the versions in the system, and store that tree
|
||||
for _, p := range toUninstall {
|
||||
r, err := s.Uninstall(p)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "Could not compute upgrade - couldn't uninstall selected candidate "+p.GetFingerPrint())
|
||||
}
|
||||
for _, z := range r {
|
||||
err = installedcopy.RemovePackage(z)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "Could not compute upgrade - couldn't remove copy of package targetted for removal")
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
r, e := s2.Install(toInstall)
|
||||
return toUninstall, r, e
|
||||
// To that tree, ask to install the versions that should be upgraded, and try to solve
|
||||
// Return the solution
|
||||
|
||||
}
|
||||
|
||||
// Uninstall takes a candidate package and return a list of packages that would be removed
|
||||
// in order to purge the candidate. Returns error if unsat.
|
||||
func (s *Solver) Uninstall(candidate pkg.Package) ([]pkg.Package, error) {
|
||||
func (s *Solver) Uninstall(c pkg.Package) ([]pkg.Package, error) {
|
||||
var res []pkg.Package
|
||||
candidate, err := s.InstalledDatabase.FindPackage(c)
|
||||
if err != nil {
|
||||
|
||||
// return nil, errors.Wrap(err, "Couldn't find required package in db definition")
|
||||
packages, err := c.Expand(s.InstalledDatabase)
|
||||
// Info("Expanded", packages, err)
|
||||
if err != nil || len(packages) == 0 {
|
||||
candidate = c
|
||||
} else {
|
||||
candidate = pkg.Best(packages)
|
||||
}
|
||||
//Relax search, otherwise we cannot compute solutions for packages not in definitions
|
||||
// return nil, errors.Wrap(err, "Package not found between installed")
|
||||
}
|
||||
// Build a fake "Installed" - Candidate and its requires tree
|
||||
var InstalledMinusCandidate []pkg.Package
|
||||
for _, i := range s.Installed {
|
||||
if !i.Matches(candidate) && !candidate.RequiresContains(i) {
|
||||
InstalledMinusCandidate = append(InstalledMinusCandidate, i)
|
||||
|
||||
// TODO: Can be optimized
|
||||
for _, i := range s.Installed() {
|
||||
if !i.Matches(candidate) {
|
||||
contains, err := candidate.RequiresContains(s.SolverDatabase, i)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Failed getting installed list")
|
||||
}
|
||||
if !contains {
|
||||
InstalledMinusCandidate = append(InstalledMinusCandidate, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get the requirements to install the candidate
|
||||
saved := s.Installed
|
||||
s.Installed = []pkg.Package{}
|
||||
saved := s.InstalledDatabase
|
||||
s.InstalledDatabase = pkg.NewInMemoryDatabase(false)
|
||||
asserts, err := s.Install([]pkg.Package{candidate})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.Installed = saved
|
||||
s.InstalledDatabase = saved
|
||||
|
||||
for _, a := range asserts {
|
||||
if a.Value && a.Package.Flagged() {
|
||||
if a.Value {
|
||||
|
||||
c, err := s.ConflictsWithInstalled(a.Package)
|
||||
if err != nil {
|
||||
@@ -244,19 +323,20 @@ func (s *Solver) BuildFormula() (bf.Formula, error) {
|
||||
return nil, err
|
||||
}
|
||||
for _, wanted := range s.Wanted {
|
||||
encodedW, err := wanted.Encode(s.Database)
|
||||
encodedW, err := wanted.Encode(s.SolverDatabase)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
W := bf.Var(encodedW)
|
||||
|
||||
if len(s.Installed) == 0 {
|
||||
installedWorld := s.Installed()
|
||||
//TODO:Optimize
|
||||
if len(installedWorld) == 0 {
|
||||
formulas = append(formulas, W) //bf.And(bf.True, W))
|
||||
continue
|
||||
}
|
||||
|
||||
for _, installed := range s.Installed {
|
||||
encodedI, err := installed.Encode(s.Database)
|
||||
for _, installed := range installedWorld {
|
||||
encodedI, err := installed.Encode(s.SolverDatabase)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -292,25 +372,28 @@ func (s *Solver) Solve() (PackagesAssertions, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return DecodeModel(model, s.Database)
|
||||
return DecodeModel(model, s.SolverDatabase)
|
||||
}
|
||||
|
||||
// Install given a list of packages, returns package assertions to indicate the packages that must be installed in the system in order
|
||||
// to statisfy all the constraints
|
||||
func (s *Solver) Install(coll []pkg.Package) (PackagesAssertions, error) {
|
||||
for _, v := range coll {
|
||||
v.IsFlagged(false)
|
||||
func (s *Solver) Install(c []pkg.Package) (PackagesAssertions, error) {
|
||||
|
||||
coll, err := s.getList(s.DefinitionDatabase, c)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Packages not found in definition db")
|
||||
}
|
||||
|
||||
s.Wanted = coll
|
||||
|
||||
if s.noRulesWorld() {
|
||||
var ass PackagesAssertions
|
||||
for _, p := range s.Installed {
|
||||
ass = append(ass, PackageAssert{Package: p.IsFlagged(true), Value: true})
|
||||
for _, p := range s.Installed() {
|
||||
ass = append(ass, PackageAssert{Package: p.(*pkg.DefaultPackage), Value: true})
|
||||
|
||||
}
|
||||
for _, p := range s.Wanted {
|
||||
ass = append(ass, PackageAssert{Package: p.IsFlagged(true), Value: true})
|
||||
ass = append(ass, PackageAssert{Package: p.(*pkg.DefaultPackage), Value: true})
|
||||
}
|
||||
return ass, nil
|
||||
}
|
||||
|
@@ -17,7 +17,6 @@ package solver_test
|
||||
|
||||
import (
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
|
||||
@@ -26,38 +25,66 @@ import (
|
||||
|
||||
var _ = Describe("Solver", func() {
|
||||
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
dbInstalled := pkg.NewInMemoryDatabase(false)
|
||||
dbDefinitions := pkg.NewInMemoryDatabase(false)
|
||||
s := NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
BeforeEach(func() {
|
||||
db = pkg.NewInMemoryDatabase(false)
|
||||
dbInstalled = pkg.NewInMemoryDatabase(false)
|
||||
dbDefinitions = pkg.NewInMemoryDatabase(false)
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
})
|
||||
Context("Simple set", func() {
|
||||
It("Solves correctly if the selected package has no requirements or conflicts and we have nothing installed yet", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{}, []pkg.Package{A, B, C}, db)
|
||||
for _, p := range []pkg.Package{A, B, C} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(len(solution)).To(Equal(1))
|
||||
})
|
||||
|
||||
It("Solves correctly if the selected package has no requirements or conflicts and we have installed one package", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{C}, []pkg.Package{A, B, C}, db)
|
||||
for _, p := range []pkg.Package{A, B, C} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{B})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(len(solution)).To(Equal(2))
|
||||
})
|
||||
|
||||
It("Solves correctly if the selected package to install has no requirement or conflicts, but in the world there is one with a requirement", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -65,72 +92,107 @@ var _ = Describe("Solver", func() {
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
E := pkg.NewPackage("E", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{E, C}, []pkg.Package{A, B, C, D, E}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D, E} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{E, C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: E.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B.IsFlagged(true), Value: false}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D.IsFlagged(true), Value: false}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: E, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: false}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: false}))
|
||||
|
||||
Expect(len(solution)).To(Equal(5))
|
||||
})
|
||||
|
||||
It("Solves correctly if the selected package to install has requirements", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{D}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{C}, []pkg.Package{A, B, C, D}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: true}))
|
||||
|
||||
Expect(len(solution)).To(Equal(3))
|
||||
})
|
||||
|
||||
It("Solves correctly", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{C}, []pkg.Package{A, B, C}, db)
|
||||
for _, p := range []pkg.Package{A, B, C} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(len(solution)).To(Equal(3))
|
||||
})
|
||||
It("Solves correctly more complex ones", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{D}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{C}, []pkg.Package{A, B, C, D}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(len(solution)).To(Equal(4))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Solves correctly more complex ones", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
E := pkg.NewPackage("E", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -138,37 +200,185 @@ var _ = Describe("Solver", func() {
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{D}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{}, []pkg.Package{A, B, C, D, E}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D, E} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: true}))
|
||||
Expect(len(solution)).To(Equal(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Solves deps with expansion", func() {
|
||||
|
||||
E := pkg.NewPackage("E", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B := pkg.NewPackage("B", "1.1", []*pkg.DefaultPackage{D}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "B", Version: ">1.0"}}, []*pkg.DefaultPackage{})
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D, E} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: true}))
|
||||
Expect(len(solution)).To(Equal(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
It("Solves deps with more expansion", func() {
|
||||
|
||||
C := pkg.NewPackage("c", "", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "a", Version: ">=1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
C.SetCategory("test")
|
||||
B := pkg.NewPackage("b", "1.0", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B.SetCategory("test")
|
||||
A := pkg.NewPackage("a", "1.1", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "b", Version: "1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
A.SetCategory("test")
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{C})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(len(solution)).To(Equal(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("Solves deps with more expansion", func() {
|
||||
|
||||
C := pkg.NewPackage("c", "1.5", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "a", Version: ">=1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
C.SetCategory("test")
|
||||
B := pkg.NewPackage("b", "1.0", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B.SetCategory("test")
|
||||
A := pkg.NewPackage("a", "1.1", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "b", Version: "1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
A.SetCategory("test")
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{&pkg.DefaultPackage{Name: "c", Version: ">1.0", Category: "test"}})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(len(solution)).To(Equal(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
It("Solves deps with more expansion", func() {
|
||||
|
||||
E := pkg.NewPackage("E", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "1.4", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B := pkg.NewPackage("B", "1.1", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "D", Version: ">=1.0"}}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "B", Version: ">=1.0"}}, []*pkg.DefaultPackage{})
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D, E} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: true}))
|
||||
Expect(len(solution)).To(Equal(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
It("Uninstalls simple package correctly", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{A, B, C, D}, []pkg.Package{A, B, C, D}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Uninstall(A)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(solution).To(ContainElement(A.IsFlagged(false)))
|
||||
|
||||
// Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
// Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(len(solution)).To(Equal(1))
|
||||
})
|
||||
It("Uninstalls simple package expanded correctly", func() {
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "1.2", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
s = NewSolver(dbInstalled, dbDefinitions, db)
|
||||
|
||||
solution, err := s.Uninstall(&pkg.DefaultPackage{Name: "A", Version: ">1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(solution).To(ContainElement(A.IsFlagged(false)))
|
||||
|
||||
// Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(len(solution)).To(Equal(1))
|
||||
})
|
||||
It("Find conflicts", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -176,7 +386,16 @@ var _ = Describe("Solver", func() {
|
||||
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{A}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{A, B, C, D}, []pkg.Package{A, B, C, D}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
val, err := s.ConflictsWithInstalled(A)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(val).To(BeTrue())
|
||||
@@ -184,7 +403,6 @@ var _ = Describe("Solver", func() {
|
||||
})
|
||||
|
||||
It("Find nested conflicts", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -192,14 +410,22 @@ var _ = Describe("Solver", func() {
|
||||
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{A}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{A, B, C, D}, []pkg.Package{A, B, C, D}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
val, err := s.ConflictsWithInstalled(D)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(val).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Doesn't find nested conflicts", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -207,14 +433,22 @@ var _ = Describe("Solver", func() {
|
||||
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{A}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{A, B, C, D}, []pkg.Package{A, B, C, D}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
val, err := s.ConflictsWithInstalled(C)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(val).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Doesn't find conflicts", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -222,40 +456,59 @@ var _ = Describe("Solver", func() {
|
||||
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{A, B, C, D}, []pkg.Package{A, B, C, D}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
val, err := s.ConflictsWithInstalled(C)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(val).ToNot(BeTrue())
|
||||
})
|
||||
It("Uninstalls simple packages not in world correctly", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{A, B, C, D}, []pkg.Package{B, C, D}, db)
|
||||
for _, p := range []pkg.Package{B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
solution, err := s.Uninstall(A)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(solution).To(ContainElement(A.IsFlagged(false)))
|
||||
|
||||
// Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
// Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(len(solution)).To(Equal(1))
|
||||
})
|
||||
|
||||
It("Uninstalls complex packages not in world correctly", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{A, B, C, D}, []pkg.Package{B, C, D}, db)
|
||||
for _, p := range []pkg.Package{B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
solution, err := s.Uninstall(A)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -265,15 +518,20 @@ var _ = Describe("Solver", func() {
|
||||
})
|
||||
|
||||
It("Uninstalls complex packages correctly, even if shared deps are required by system packages", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{A, B, C, D}, []pkg.Package{A, B, C, D}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
solution, err := s.Uninstall(A)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
@@ -284,14 +542,20 @@ var _ = Describe("Solver", func() {
|
||||
})
|
||||
|
||||
It("Uninstalls complex packages in world correctly", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{C}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{A, C, D}, []pkg.Package{A, B, C, D}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
solution, err := s.Uninstall(A)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
@@ -303,23 +567,30 @@ var _ = Describe("Solver", func() {
|
||||
})
|
||||
|
||||
It("Uninstalls complex package correctly", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
D := pkg.NewPackage("D", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{D}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
C.IsFlagged(true) // installed
|
||||
// C // installed
|
||||
|
||||
s := NewSolver([]pkg.Package{A, B, C, D}, []pkg.Package{A, B, C, D}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B, C, D} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
solution, err := s.Uninstall(A)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(solution).To(ContainElement(A.IsFlagged(false)))
|
||||
Expect(solution).To(ContainElement(B.IsFlagged(false)))
|
||||
Expect(solution).To(ContainElement(D.IsFlagged(false)))
|
||||
|
||||
Expect(len(solution)).To(Equal(3))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
})
|
||||
|
||||
@@ -328,14 +599,20 @@ var _ = Describe("Solver", func() {
|
||||
Context("Conflict set", func() {
|
||||
|
||||
It("is unsolvable - as we something we ask to install conflict with system stuff", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
// D := pkg.NewPackage("D", "", []pkg.Package{}, []pkg.Package{})
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{C})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{C}, []pkg.Package{A, B, C}, db)
|
||||
for _, p := range []pkg.Package{A, B, C} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(len(solution)).To(Equal(0))
|
||||
@@ -346,8 +623,6 @@ var _ = Describe("Solver", func() {
|
||||
|
||||
Context("Complex data sets", func() {
|
||||
It("Solves them correctly", func() {
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
C := pkg.NewPackage("C", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
E := pkg.NewPackage("E", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
F := pkg.NewPackage("F", "", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -357,18 +632,26 @@ var _ = Describe("Solver", func() {
|
||||
B := pkg.NewPackage("B", "", []*pkg.DefaultPackage{D}, []*pkg.DefaultPackage{})
|
||||
A := pkg.NewPackage("A", "", []*pkg.DefaultPackage{B}, []*pkg.DefaultPackage{})
|
||||
|
||||
s := NewSolver([]pkg.Package{C}, []pkg.Package{A, B, C, D, E, F, G}, db)
|
||||
for _, p := range []pkg.Package{A, B, C, D, E, F, G} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
solution, err := s.Install([]pkg.Package{A})
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: H.IsFlagged(true), Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: G.IsFlagged(true), Value: true}))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: D, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: H, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: G, Value: true}))
|
||||
|
||||
Expect(len(solution)).To(Equal(6))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
@@ -382,7 +665,12 @@ var _ = Describe("Solver", func() {
|
||||
old := pkg.NewPackage("A", "1.3.1", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
|
||||
It("Expands correctly", func() {
|
||||
lst, err := a.Expand(&[]pkg.Package{a1, a11, a01, a02, a03, old})
|
||||
definitions := pkg.NewInMemoryDatabase(false)
|
||||
for _, p := range []pkg.Package{a1, a11, a01, a02, a03, old} {
|
||||
_, err := definitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
lst, err := a.Expand(definitions)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(lst).To(ContainElement(a11))
|
||||
Expect(lst).To(ContainElement(a1))
|
||||
@@ -391,10 +679,43 @@ var _ = Describe("Solver", func() {
|
||||
Expect(lst).To(ContainElement(a03))
|
||||
Expect(lst).ToNot(ContainElement(old))
|
||||
Expect(len(lst)).To(Equal(5))
|
||||
s := solver.NewSolver([]pkg.Package{}, []pkg.Package{}, pkg.NewInMemoryDatabase(false))
|
||||
p := s.Best(lst)
|
||||
p := pkg.Best(lst)
|
||||
Expect(p).To(Equal(a03))
|
||||
})
|
||||
})
|
||||
Context("Upgrades", func() {
|
||||
|
||||
C := pkg.NewPackage("c", "1.5", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "a", Version: ">=1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
C.SetCategory("test")
|
||||
B := pkg.NewPackage("b", "1.0", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B.SetCategory("test")
|
||||
A := pkg.NewPackage("a", "1.1", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "b", Version: "1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
A.SetCategory("test")
|
||||
A1 := pkg.NewPackage("a", "1.2", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "b", Version: "1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
A1.SetCategory("test")
|
||||
|
||||
It("upgrades correctly", func() {
|
||||
for _, p := range []pkg.Package{A1, B, C} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, B} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
uninstall, solution, err := s.Upgrade()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(uninstall)).To(Equal(1))
|
||||
Expect(uninstall[0].GetName()).To(Equal("a"))
|
||||
Expect(uninstall[0].GetVersion()).To(Equal("1.1"))
|
||||
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: A1, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: false}))
|
||||
Expect(len(solution)).To(Equal(3))
|
||||
|
||||
})
|
||||
})
|
||||
})
|
||||
|
@@ -23,6 +23,8 @@ import (
|
||||
type Builder interface {
|
||||
Save(string) error // A tree might be saved to a folder structure (human editable)
|
||||
Load(string) error // A tree might be loaded from a db (e.g. bolt) and written to folder
|
||||
Tree() pkg.Tree // generates world
|
||||
WithTree(pkg.Tree)
|
||||
GetDatabase() pkg.PackageDatabase
|
||||
WithDatabase(d pkg.PackageDatabase)
|
||||
|
||||
GetSourcePath() string
|
||||
}
|
||||
|
@@ -49,32 +49,24 @@ type GentooBuilder struct {
|
||||
DBType MemoryDB
|
||||
}
|
||||
|
||||
type GentooTree struct {
|
||||
*tree.DefaultTree
|
||||
}
|
||||
|
||||
type EbuildParser interface {
|
||||
ScanEbuild(string, pkg.Tree) ([]pkg.Package, error)
|
||||
ScanEbuild(string) ([]pkg.Package, error)
|
||||
}
|
||||
|
||||
func (gt *GentooTree) Prelude() string {
|
||||
return "/usr/portage/"
|
||||
}
|
||||
|
||||
func (gb *GentooBuilder) scanEbuild(path string, t pkg.Tree) error {
|
||||
func (gb *GentooBuilder) scanEbuild(path string, db pkg.PackageDatabase) error {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
Error(r)
|
||||
}
|
||||
}()
|
||||
pkgs, err := gb.EbuildParser.ScanEbuild(path, t)
|
||||
pkgs, err := gb.EbuildParser.ScanEbuild(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, p := range pkgs {
|
||||
_, err := t.GetPackageSet().FindPackage(p)
|
||||
_, err := db.FindPackage(p)
|
||||
if err != nil {
|
||||
_, err := t.GetPackageSet().CreatePackage(p)
|
||||
_, err := db.CreatePackage(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -83,12 +75,12 @@ func (gb *GentooBuilder) scanEbuild(path string, t pkg.Tree) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gb *GentooBuilder) worker(i int, wg *sync.WaitGroup, s <-chan string, t pkg.Tree) {
|
||||
func (gb *GentooBuilder) worker(i int, wg *sync.WaitGroup, s <-chan string, db pkg.PackageDatabase) {
|
||||
defer wg.Done()
|
||||
|
||||
for path := range s {
|
||||
Info("#"+strconv.Itoa(i), "parsing", path)
|
||||
err := gb.scanEbuild(path, t)
|
||||
err := gb.scanEbuild(path, db)
|
||||
if err != nil {
|
||||
Error(path, ":", err.Error())
|
||||
}
|
||||
@@ -96,25 +88,24 @@ func (gb *GentooBuilder) worker(i int, wg *sync.WaitGroup, s <-chan string, t pk
|
||||
|
||||
}
|
||||
|
||||
func (gb *GentooBuilder) Generate(dir string) (pkg.Tree, error) {
|
||||
func (gb *GentooBuilder) Generate(dir string) (pkg.PackageDatabase, error) {
|
||||
|
||||
var toScan = make(chan string)
|
||||
Spinner(27)
|
||||
defer SpinnerStop()
|
||||
var gtree *GentooTree
|
||||
|
||||
var db pkg.PackageDatabase
|
||||
// Support for
|
||||
switch gb.DBType {
|
||||
case InMemory:
|
||||
gtree = &GentooTree{DefaultTree: &tree.DefaultTree{Packages: pkg.NewInMemoryDatabase(false)}}
|
||||
db = pkg.NewInMemoryDatabase(false)
|
||||
case BoltDB:
|
||||
tmpfile, err := ioutil.TempFile("", "boltdb")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
gtree = &GentooTree{DefaultTree: &tree.DefaultTree{Packages: pkg.NewBoltDatabase(tmpfile.Name())}}
|
||||
db = pkg.NewBoltDatabase(tmpfile.Name())
|
||||
default:
|
||||
gtree = &GentooTree{DefaultTree: &tree.DefaultTree{Packages: pkg.NewInMemoryDatabase(false)}}
|
||||
db = pkg.NewInMemoryDatabase(false)
|
||||
}
|
||||
|
||||
Debug("Concurrency", gb.Concurrency)
|
||||
@@ -122,7 +113,7 @@ func (gb *GentooBuilder) Generate(dir string) (pkg.Tree, error) {
|
||||
var wg = new(sync.WaitGroup)
|
||||
for i := 0; i < gb.Concurrency; i++ {
|
||||
wg.Add(1)
|
||||
go gb.worker(i, wg, toScan, gtree)
|
||||
go gb.worker(i, wg, toScan, db)
|
||||
}
|
||||
|
||||
// TODO: Handle cleaning after? Cleanup implemented in GetPackageSet().Clean()
|
||||
@@ -142,9 +133,8 @@ func (gb *GentooBuilder) Generate(dir string) (pkg.Tree, error) {
|
||||
close(toScan)
|
||||
wg.Wait()
|
||||
if err != nil {
|
||||
return gtree, err
|
||||
return db, err
|
||||
}
|
||||
|
||||
Info("Resolving deps")
|
||||
return gtree, gtree.ResolveDeps(gb.Concurrency)
|
||||
return db, nil
|
||||
}
|
||||
|
@@ -26,7 +26,7 @@ import (
|
||||
type FakeParser struct {
|
||||
}
|
||||
|
||||
func (f *FakeParser) ScanEbuild(path string, t pkg.Tree) ([]pkg.Package, error) {
|
||||
func (f *FakeParser) ScanEbuild(path string) ([]pkg.Package, error) {
|
||||
return []pkg.Package{&pkg.DefaultPackage{Name: path}}, nil
|
||||
}
|
||||
|
||||
@@ -38,10 +38,10 @@ var _ = Describe("GentooBuilder", func() {
|
||||
gb := NewGentooBuilder(&FakeParser{}, 20, dbType)
|
||||
tree, err := gb.Generate("../../../../tests/fixtures/overlay")
|
||||
defer func() {
|
||||
Expect(tree.GetPackageSet().Clean()).ToNot(HaveOccurred())
|
||||
Expect(tree.Clean()).ToNot(HaveOccurred())
|
||||
}()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(tree.GetPackageSet().GetPackages())).To(Equal(10))
|
||||
Expect(len(tree.GetPackages())).To(Equal(10))
|
||||
})
|
||||
}
|
||||
})
|
||||
|
@@ -247,7 +247,7 @@ func SourceFile(ctx context.Context, path string, pkg *_gentoo.GentooPackage) (m
|
||||
}
|
||||
|
||||
// ScanEbuild returns a list of packages (always one with SimpleEbuildParser) decoded from an ebuild.
|
||||
func (ep *SimpleEbuildParser) ScanEbuild(path string, tree pkg.Tree) ([]pkg.Package, error) {
|
||||
func (ep *SimpleEbuildParser) ScanEbuild(path string) ([]pkg.Package, error) {
|
||||
Debug("Starting parsing of ebuild", path)
|
||||
|
||||
pkgstr := filepath.Base(path)
|
||||
|
@@ -651,14 +651,13 @@ var _ = Describe("GentooBuilder", func() {
|
||||
tree, err := gb.Generate("../../../../tests/fixtures/overlay")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer func() {
|
||||
Expect(tree.GetPackageSet().Clean()).ToNot(HaveOccurred())
|
||||
Expect(tree.Clean()).ToNot(HaveOccurred())
|
||||
}()
|
||||
|
||||
Expect(len(tree.GetPackageSet().GetPackages())).To(Equal(10))
|
||||
Expect(len(tree.GetPackages())).To(Equal(10))
|
||||
|
||||
for _, p := range tree.World() {
|
||||
|
||||
for _, pid := range tree.GetPackageSet().GetPackages() {
|
||||
p, err := tree.GetPackageSet().GetPackage(pid)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(p.GetName()).To(ContainSubstring("pinentry"))
|
||||
Expect(p.GetVersion()).To(ContainSubstring("1."))
|
||||
}
|
||||
|
@@ -45,16 +45,12 @@ type CompilerRecipe struct {
|
||||
|
||||
func (r *CompilerRecipe) Load(path string) error {
|
||||
|
||||
if r.Tree() == nil {
|
||||
r.PackageTree = NewDefaultTree()
|
||||
}
|
||||
|
||||
r.SourcePath = path
|
||||
//tmpfile, err := ioutil.TempFile("", "luet")
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
|
||||
r.Tree().SetPackageSet(r.Database)
|
||||
//r.Tree().SetPackageSet(pkg.NewBoltDatabase(tmpfile.Name()))
|
||||
// TODO: Handle cleaning after? Cleanup implemented in GetPackageSet().Clean()
|
||||
// the function that handles each file or dir
|
||||
@@ -72,7 +68,6 @@ func (r *CompilerRecipe) Load(path string) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading yaml "+currentpath)
|
||||
}
|
||||
|
||||
// Path is set only internally when tree is loaded from disk
|
||||
pack.SetPath(filepath.Dir(currentpath))
|
||||
|
||||
@@ -91,7 +86,7 @@ func (r *CompilerRecipe) Load(path string) error {
|
||||
pack.Conflicts(packbuild.GetConflicts())
|
||||
}
|
||||
|
||||
_, err = r.Tree().GetPackageSet().CreatePackage(&pack)
|
||||
_, err = r.Database.CreatePackage(&pack)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error creating package "+pack.GetName())
|
||||
}
|
||||
@@ -106,5 +101,6 @@ func (r *CompilerRecipe) Load(path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *CompilerRecipe) Tree() pkg.Tree { return r.PackageTree }
|
||||
func (r *CompilerRecipe) WithTree(t pkg.Tree) { r.PackageTree = t }
|
||||
func (r *CompilerRecipe) GetDatabase() pkg.PackageDatabase { return r.Database }
|
||||
func (r *CompilerRecipe) WithDatabase(d pkg.PackageDatabase) { r.Database = d }
|
||||
func (r *CompilerRecipe) GetSourcePath() string { return r.SourcePath }
|
||||
|
117
pkg/tree/installer_recipe.go
Normal file
117
pkg/tree/installer_recipe.go
Normal file
@@ -0,0 +1,117 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// InstallerRecipe is a builder imeplementation.
|
||||
|
||||
// It reads a Tree and spit it in human readable form (YAML), called recipe,
|
||||
// It also loads a tree (recipe) from a YAML (to a db, e.g. BoltDB), allowing to query it
|
||||
// with the solver, using the package object.
|
||||
package tree
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/mudler/luet/pkg/helpers"
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
FinalizerFile = "finalize.yaml"
|
||||
)
|
||||
|
||||
func NewInstallerRecipe(db pkg.PackageDatabase) Builder {
|
||||
return &InstallerRecipe{Database: db}
|
||||
}
|
||||
|
||||
// InstallerRecipe is the "general" reciper for Trees
|
||||
type InstallerRecipe struct {
|
||||
SourcePath string
|
||||
Database pkg.PackageDatabase
|
||||
}
|
||||
|
||||
func (r *InstallerRecipe) Save(path string) error {
|
||||
|
||||
for _, p := range r.Database.World() {
|
||||
|
||||
dir := filepath.Join(path, p.GetCategory(), p.GetName(), p.GetVersion())
|
||||
os.MkdirAll(dir, os.ModePerm)
|
||||
data, err := p.Yaml()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(filepath.Join(dir, DefinitionFile), data, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Instead of rdeps, have a different tree for build deps.
|
||||
finalizerPath := p.Rel(FinalizerFile)
|
||||
if helpers.Exists(finalizerPath) { // copy finalizer file from the source tree
|
||||
helpers.CopyFile(finalizerPath, filepath.Join(dir, FinalizerFile))
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *InstallerRecipe) Load(path string) error {
|
||||
|
||||
// tmpfile, err := ioutil.TempFile("", "luet")
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
r.SourcePath = path
|
||||
|
||||
//r.Tree().SetPackageSet(pkg.NewBoltDatabase(tmpfile.Name()))
|
||||
// TODO: Handle cleaning after? Cleanup implemented in GetPackageSet().Clean()
|
||||
|
||||
// the function that handles each file or dir
|
||||
var ff = func(currentpath string, info os.FileInfo, err error) error {
|
||||
|
||||
if info.Name() != DefinitionFile {
|
||||
return nil // Skip with no errors
|
||||
}
|
||||
|
||||
dat, err := ioutil.ReadFile(currentpath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading file "+currentpath)
|
||||
}
|
||||
pack, err := pkg.DefaultPackageFromYaml(dat)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading yaml "+currentpath)
|
||||
}
|
||||
|
||||
// Path is set only internally when tree is loaded from disk
|
||||
pack.SetPath(filepath.Dir(currentpath))
|
||||
_, err = r.Database.CreatePackage(&pack)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error creating package "+pack.GetName())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
err := filepath.Walk(path, ff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *InstallerRecipe) GetDatabase() pkg.PackageDatabase { return r.Database }
|
||||
func (r *InstallerRecipe) WithDatabase(d pkg.PackageDatabase) { r.Database = d }
|
||||
func (r *InstallerRecipe) GetSourcePath() string { return r.SourcePath }
|
@@ -19,5 +19,5 @@ import pkg "github.com/mudler/luet/pkg/package"
|
||||
|
||||
// parses ebuilds (?) and generates data which is readable by the builder
|
||||
type Parser interface {
|
||||
Generate(string) (pkg.Tree, error) // Generate scannable luet tree (by builder)
|
||||
Generate(string) (pkg.PackageDatabase, error) // Generate scannable luet tree (by builder)
|
||||
}
|
||||
|
@@ -37,18 +37,13 @@ func NewGeneralRecipe(db pkg.PackageDatabase) Builder { return &Recipe{Database:
|
||||
|
||||
// Recipe is the "general" reciper for Trees
|
||||
type Recipe struct {
|
||||
PackageTree pkg.Tree
|
||||
Database pkg.PackageDatabase
|
||||
SourcePath string
|
||||
Database pkg.PackageDatabase
|
||||
}
|
||||
|
||||
func (r *Recipe) Save(path string) error {
|
||||
|
||||
for _, pid := range r.PackageTree.GetPackageSet().GetPackages() {
|
||||
|
||||
p, err := r.PackageTree.GetPackageSet().GetPackage(pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, p := range r.Database.World() {
|
||||
dir := filepath.Join(path, p.GetCategory(), p.GetName(), p.GetVersion())
|
||||
os.MkdirAll(dir, os.ModePerm)
|
||||
data, err := p.Yaml()
|
||||
@@ -66,15 +61,15 @@ func (r *Recipe) Save(path string) error {
|
||||
|
||||
func (r *Recipe) Load(path string) error {
|
||||
|
||||
if r.Tree() == nil {
|
||||
r.PackageTree = NewDefaultTree()
|
||||
}
|
||||
|
||||
// tmpfile, err := ioutil.TempFile("", "luet")
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
r.Tree().SetPackageSet(r.Database)
|
||||
r.SourcePath = path
|
||||
|
||||
if r.Database == nil {
|
||||
r.Database = pkg.NewInMemoryDatabase(false)
|
||||
}
|
||||
|
||||
//r.Tree().SetPackageSet(pkg.NewBoltDatabase(tmpfile.Name()))
|
||||
// TODO: Handle cleaning after? Cleanup implemented in GetPackageSet().Clean()
|
||||
@@ -97,7 +92,7 @@ func (r *Recipe) Load(path string) error {
|
||||
|
||||
// Path is set only internally when tree is loaded from disk
|
||||
pack.SetPath(filepath.Dir(currentpath))
|
||||
_, err = r.Tree().GetPackageSet().CreatePackage(&pack)
|
||||
_, err = r.Database.CreatePackage(&pack)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error creating package "+pack.GetName())
|
||||
}
|
||||
@@ -112,5 +107,6 @@ func (r *Recipe) Load(path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Recipe) Tree() pkg.Tree { return r.PackageTree }
|
||||
func (r *Recipe) WithTree(t pkg.Tree) { r.PackageTree = t }
|
||||
func (r *Recipe) GetDatabase() pkg.PackageDatabase { return r.Database }
|
||||
func (r *Recipe) WithDatabase(d pkg.PackageDatabase) { r.Database = d }
|
||||
func (r *Recipe) GetSourcePath() string { return r.SourcePath }
|
||||
|
@@ -48,13 +48,12 @@ var _ = Describe("Recipe", func() {
|
||||
tree, err := gb.Generate("../../tests/fixtures/overlay")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer func() {
|
||||
Expect(tree.GetPackageSet().Clean()).ToNot(HaveOccurred())
|
||||
Expect(tree.Clean()).ToNot(HaveOccurred())
|
||||
}()
|
||||
|
||||
Expect(len(tree.GetPackageSet().GetPackages())).To(Equal(10))
|
||||
Expect(len(tree.GetPackages())).To(Equal(10))
|
||||
|
||||
generalRecipe := NewGeneralRecipe(tree.GetPackageSet())
|
||||
generalRecipe.WithTree(tree)
|
||||
generalRecipe := NewGeneralRecipe(tree)
|
||||
err = generalRecipe.Save(tmpdir)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
@@ -70,31 +69,27 @@ var _ = Describe("Recipe", func() {
|
||||
tree, err := gb.Generate("../../tests/fixtures/overlay")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer func() {
|
||||
Expect(tree.GetPackageSet().Clean()).ToNot(HaveOccurred())
|
||||
Expect(tree.Clean()).ToNot(HaveOccurred())
|
||||
}()
|
||||
|
||||
Expect(len(tree.GetPackageSet().GetPackages())).To(Equal(10))
|
||||
Expect(len(tree.GetPackages())).To(Equal(10))
|
||||
|
||||
generalRecipe := NewGeneralRecipe(tree.GetPackageSet())
|
||||
generalRecipe.WithTree(tree)
|
||||
generalRecipe := NewGeneralRecipe(tree)
|
||||
err = generalRecipe.Save(tmpdir)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
db := pkg.NewInMemoryDatabase(false)
|
||||
generalRecipe = NewGeneralRecipe(db)
|
||||
|
||||
generalRecipe.WithTree(nil)
|
||||
Expect(generalRecipe.Tree()).To(BeNil())
|
||||
generalRecipe.WithDatabase(nil)
|
||||
Expect(generalRecipe.GetDatabase()).To(BeNil())
|
||||
|
||||
err = generalRecipe.Load(tmpdir)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(10))
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(10))
|
||||
|
||||
for _, pid := range tree.GetPackageSet().GetPackages() {
|
||||
p, err := tree.GetPackageSet().GetPackage(pid)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
for _, p := range tree.World() {
|
||||
Expect(p.GetName()).To(ContainSubstring("pinentry"))
|
||||
}
|
||||
})
|
||||
@@ -110,10 +105,10 @@ var _ = Describe("Recipe", func() {
|
||||
tree, err := gb.Generate("../../tests/fixtures/overlay")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer func() {
|
||||
Expect(tree.GetPackageSet().Clean()).ToNot(HaveOccurred())
|
||||
Expect(tree.Clean()).ToNot(HaveOccurred())
|
||||
}()
|
||||
|
||||
Expect(len(tree.GetPackageSet().GetPackages())).To(Equal(10))
|
||||
Expect(len(tree.GetPackages())).To(Equal(10))
|
||||
|
||||
pack, err := tree.FindPackage(&pkg.DefaultPackage{
|
||||
Name: "pinentry",
|
||||
@@ -121,10 +116,8 @@ var _ = Describe("Recipe", func() {
|
||||
Category: "app-crypt",
|
||||
}) // Note: the definition depends on pinentry-base without an explicit version
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
world, err := tree.World()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
s := solver.NewSolver([]pkg.Package{}, world, tree.GetPackageSet())
|
||||
s := solver.NewSolver(pkg.NewInMemoryDatabase(false), tree, tree)
|
||||
solution, err := s.Install([]pkg.Package{pack})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(solution)).To(Equal(10))
|
||||
@@ -134,9 +127,9 @@ var _ = Describe("Recipe", func() {
|
||||
allSol = allSol + "\n" + sol.ToString()
|
||||
}
|
||||
|
||||
Expect(allSol).To(ContainSubstring("app-crypt/pinentry-base 1.0.0 installed: true"))
|
||||
Expect(allSol).To(ContainSubstring("app-crypt/pinentry 1.1.0-r2 installed: false"))
|
||||
Expect(allSol).To(ContainSubstring("app-crypt/pinentry 1.0.0-r2 installed: true"))
|
||||
Expect(allSol).To(ContainSubstring("app-crypt/pinentry-base 1.0.0 installed"))
|
||||
Expect(allSol).To(ContainSubstring("app-crypt/pinentry 1.1.0-r2 not installed"))
|
||||
Expect(allSol).To(ContainSubstring("app-crypt/pinentry 1.0.0-r2 installed"))
|
||||
})
|
||||
})
|
||||
}
|
||||
|
224
pkg/tree/tree.go
224
pkg/tree/tree.go
@@ -1,224 +0,0 @@
|
||||
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
// Recipe is a builder imeplementation.
|
||||
|
||||
// It reads a Tree and spit it in human readable form (YAML), called recipe,
|
||||
// It also loads a tree (recipe) from a YAML (to a db, e.g. BoltDB), allowing to query it
|
||||
// with the solver, using the package object.
|
||||
package tree
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
"github.com/mudler/luet/pkg/solver"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
pkg "github.com/mudler/luet/pkg/package"
|
||||
)
|
||||
|
||||
func NewDefaultTree() pkg.Tree { return &DefaultTree{} }
|
||||
|
||||
type DefaultTree struct {
|
||||
sync.Mutex
|
||||
Packages pkg.PackageDatabase
|
||||
CacheWorld []pkg.Package
|
||||
}
|
||||
|
||||
func (gt *DefaultTree) GetPackageSet() pkg.PackageDatabase {
|
||||
return gt.Packages
|
||||
}
|
||||
|
||||
func (gt *DefaultTree) Prelude() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (gt *DefaultTree) SetPackageSet(s pkg.PackageDatabase) {
|
||||
gt.Packages = s
|
||||
}
|
||||
|
||||
func (gt *DefaultTree) World() ([]pkg.Package, error) {
|
||||
gt.Lock()
|
||||
defer gt.Unlock()
|
||||
if len(gt.CacheWorld) > 0 {
|
||||
return gt.CacheWorld, nil
|
||||
}
|
||||
packages := []pkg.Package{}
|
||||
for _, pid := range gt.GetPackageSet().GetPackages() {
|
||||
|
||||
p, err := gt.GetPackageSet().GetPackage(pid)
|
||||
if err != nil {
|
||||
return packages, err
|
||||
}
|
||||
packages = append(packages, p)
|
||||
}
|
||||
gt.CacheWorld = packages
|
||||
return packages, nil
|
||||
}
|
||||
|
||||
func (gt *DefaultTree) UpdateWorldPackage(p pkg.Package) {
|
||||
gt.Lock()
|
||||
defer gt.Unlock()
|
||||
//var CacheWorld []pkg.Package
|
||||
for _, pid := range gt.CacheWorld {
|
||||
if p.Matches(pid) {
|
||||
pid.Requires(p.GetRequires())
|
||||
pid.Conflicts(p.GetConflicts())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// FIXME: Dup in Packageset
|
||||
func (gt *DefaultTree) FindPackage(pack pkg.Package) (pkg.Package, error) {
|
||||
packages, err := gt.World()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, pid := range packages {
|
||||
if pack.Matches(pid) {
|
||||
return pid, nil
|
||||
}
|
||||
}
|
||||
return nil, errors.New("No package found")
|
||||
}
|
||||
|
||||
func (gb *DefaultTree) updatePackage(p pkg.Package) error {
|
||||
Debug("🔧 Calculating deps for", p.GetName())
|
||||
for i, r := range p.GetRequires() {
|
||||
foundPackage, err := gb.FindPackage(r)
|
||||
if err == nil {
|
||||
found, ok := foundPackage.(*pkg.DefaultPackage)
|
||||
if !ok {
|
||||
return errors.New("Simpleparser should deal only with DefaultPackages")
|
||||
}
|
||||
|
||||
p.GetRequires()[i] = found
|
||||
} else {
|
||||
Warning("Unmatched require for", r.GetFingerPrint())
|
||||
w, err := gb.World()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error while computing world")
|
||||
}
|
||||
packages, err := r.Expand(&w)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error while expanding to world")
|
||||
}
|
||||
if len(packages) == 0 {
|
||||
Warning("Could not expand")
|
||||
continue
|
||||
}
|
||||
|
||||
s := solver.NewSolver([]pkg.Package{}, []pkg.Package{}, gb.GetPackageSet())
|
||||
best := s.Best(packages)
|
||||
found, ok := best.(*pkg.DefaultPackage)
|
||||
if !ok {
|
||||
return errors.New("Simpleparser should deal only with DefaultPackages")
|
||||
}
|
||||
Info(":ok_hand: Automatically selected", found.GetName(), found.GetVersion())
|
||||
p.GetRequires()[i] = found
|
||||
}
|
||||
}
|
||||
|
||||
Debug("🔍 Walking conflicts for", p.GetName())
|
||||
for i, r := range p.GetConflicts() {
|
||||
foundPackage, err := gb.FindPackage(r)
|
||||
if err == nil {
|
||||
found, ok := foundPackage.(*pkg.DefaultPackage)
|
||||
if !ok {
|
||||
return errors.New("Simpleparser should deal only with DefaultPackages")
|
||||
}
|
||||
|
||||
p.GetConflicts()[i] = found
|
||||
} else {
|
||||
Warning("Unmatched conflict for", r.GetFingerPrint())
|
||||
w, err := gb.World()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error while computing world")
|
||||
}
|
||||
packages, err := r.Expand(&w)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error while expanding to world")
|
||||
}
|
||||
if len(packages) == 0 {
|
||||
Warning("Could not expand")
|
||||
continue
|
||||
}
|
||||
|
||||
s := solver.NewSolver([]pkg.Package{}, []pkg.Package{}, gb.GetPackageSet())
|
||||
best := s.Best(packages)
|
||||
found, ok := best.(*pkg.DefaultPackage)
|
||||
if !ok {
|
||||
return errors.New("Simpleparser should deal only with DefaultPackages")
|
||||
}
|
||||
p.GetConflicts()[i] = found
|
||||
}
|
||||
}
|
||||
Debug("💫 Finished processing", p.GetName())
|
||||
|
||||
if err := gb.GetPackageSet().UpdatePackage(p); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gb.UpdateWorldPackage(p)
|
||||
//Debug("Update done", p.GetName())
|
||||
// Debug("Triggering propagation", p.GetName())
|
||||
|
||||
// Debug(" "+p.GetName(), "Deps ")
|
||||
// for _, r := range p.GetRequires() {
|
||||
// if err := gb.updatePackage(r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
|
||||
// Debug("Walking conflicts for", p.GetName())
|
||||
// for _, r := range p.GetConflicts() {
|
||||
// if err := gb.updatePackage(r); err != nil {
|
||||
// return err
|
||||
// }
|
||||
// }
|
||||
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (gb *DefaultTree) depsWorker(i int, wg *sync.WaitGroup, c <-chan pkg.Package) error {
|
||||
defer wg.Done()
|
||||
|
||||
for p := range c {
|
||||
gb.updatePackage(p)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Search for deps/conflicts in db and replaces it with packages in the db
|
||||
func (t *DefaultTree) ResolveDeps(concurrency int) error {
|
||||
Spinner(32)
|
||||
defer SpinnerStop()
|
||||
all := make(chan pkg.Package)
|
||||
|
||||
var wg = new(sync.WaitGroup)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
wg.Add(1)
|
||||
go t.depsWorker(i, wg, all)
|
||||
}
|
||||
|
||||
err := t.GetPackageSet().GetAllPackages(all)
|
||||
close(all)
|
||||
wg.Wait()
|
||||
return err
|
||||
}
|
@@ -45,46 +45,38 @@ var _ = Describe("Tree", func() {
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/buildableseed")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(generalRecipe.Tree()).ToNot(BeNil()) // It should be populated back at this point
|
||||
|
||||
Expect(len(generalRecipe.Tree().GetPackageSet().GetPackages())).To(Equal(4))
|
||||
err = generalRecipe.Tree().ResolveDeps(1)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(len(generalRecipe.GetDatabase().World())).To(Equal(4))
|
||||
|
||||
D, err := generalRecipe.Tree().FindPackage(&pkg.DefaultPackage{Name: "d", Category: "test", Version: "1.0"})
|
||||
D, err := generalRecipe.GetDatabase().FindPackage(&pkg.DefaultPackage{Name: "d", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(D.GetRequires()[0].GetName()).To(Equal("c"))
|
||||
CfromD := D.GetRequires()[0]
|
||||
CfromD, err := generalRecipe.GetDatabase().FindPackage(D.GetRequires()[0])
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(CfromD.GetRequires()) != 0).To(BeTrue())
|
||||
Expect(CfromD.GetRequires()[0].GetName()).To(Equal("b"))
|
||||
|
||||
w, err := generalRecipe.Tree().World()
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
s := solver.NewSolver([]pkg.Package{}, w, db)
|
||||
pack, err := generalRecipe.Tree().FindPackage(&pkg.DefaultPackage{Name: "d", Category: "test", Version: "1.0"})
|
||||
s := solver.NewSolver(pkg.NewInMemoryDatabase(false), generalRecipe.GetDatabase(), db)
|
||||
pack, err := generalRecipe.GetDatabase().FindPackage(&pkg.DefaultPackage{Name: "d", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
solution, err := s.Install([]pkg.Package{pack})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
solution = solution.Order(pack.GetFingerPrint())
|
||||
solution = solution.Order(generalRecipe.GetDatabase(), pack.GetFingerPrint())
|
||||
|
||||
Expect(solution[0].Package.GetName()).To(Equal("a"))
|
||||
Expect(solution[0].Package.Flagged()).To(BeTrue())
|
||||
Expect(solution[0].Value).To(BeFalse())
|
||||
|
||||
Expect(solution[1].Package.GetName()).To(Equal("b"))
|
||||
Expect(solution[1].Package.Flagged()).To(BeTrue())
|
||||
Expect(solution[1].Value).To(BeTrue())
|
||||
|
||||
Expect(solution[2].Package.GetName()).To(Equal("c"))
|
||||
Expect(solution[2].Package.Flagged()).To(BeTrue())
|
||||
Expect(solution[2].Value).To(BeTrue())
|
||||
|
||||
Expect(solution[3].Package.GetName()).To(Equal("d"))
|
||||
Expect(solution[3].Package.Flagged()).To(BeTrue())
|
||||
Expect(solution[3].Value).To(BeTrue())
|
||||
Expect(len(solution)).To(Equal(4))
|
||||
|
||||
@@ -92,15 +84,12 @@ var _ = Describe("Tree", func() {
|
||||
Expect(len(newsolution)).To(Equal(3))
|
||||
|
||||
Expect(newsolution[0].Package.GetName()).To(Equal("a"))
|
||||
Expect(newsolution[0].Package.Flagged()).To(BeTrue())
|
||||
Expect(newsolution[0].Value).To(BeFalse())
|
||||
|
||||
Expect(newsolution[1].Package.GetName()).To(Equal("b"))
|
||||
Expect(newsolution[1].Package.Flagged()).To(BeTrue())
|
||||
Expect(newsolution[1].Value).To(BeTrue())
|
||||
|
||||
Expect(newsolution[2].Package.GetName()).To(Equal("c"))
|
||||
Expect(newsolution[2].Package.Flagged()).To(BeTrue())
|
||||
Expect(newsolution[2].Value).To(BeTrue())
|
||||
|
||||
}
|
||||
|
17
tests/fixtures/includeimage/build.yaml
vendored
Normal file
17
tests/fixtures/includeimage/build.yaml
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
requires:
|
||||
- category: "layer"
|
||||
name: "seed"
|
||||
version: "1.0"
|
||||
prelude:
|
||||
- echo foo > /test
|
||||
- echo bar > /test2
|
||||
steps:
|
||||
- echo artifact5 > /test5
|
||||
- echo artifact6 > /test6
|
||||
- echo artifact43 > /marvin
|
||||
unpack: true
|
||||
includes:
|
||||
- marvin
|
||||
- ^/var$
|
||||
- ^\/var\/lib$
|
||||
- ^\/var\/lib\/udhcpd$
|
3
tests/fixtures/includeimage/definition.yaml
vendored
Normal file
3
tests/fixtures/includeimage/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
category: "test"
|
||||
name: "b"
|
||||
version: "1.0"
|
2
tests/fixtures/includeimage/seed/build.yaml
vendored
Normal file
2
tests/fixtures/includeimage/seed/build.yaml
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
image: alpine
|
||||
unpack: true
|
3
tests/fixtures/includeimage/seed/definition.yaml
vendored
Normal file
3
tests/fixtures/includeimage/seed/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
category: "layer"
|
||||
name: "seed"
|
||||
version: "1.0"
|
10
tests/fixtures/upgrade/c/build.yaml
vendored
Normal file
10
tests/fixtures/upgrade/c/build.yaml
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
prelude:
|
||||
- echo foo > /test
|
||||
- echo bar > /test2
|
||||
steps:
|
||||
- echo c > /c
|
||||
- echo c > /cd
|
||||
requires:
|
||||
- category: "test"
|
||||
name: "a"
|
||||
version: ">=1.0"
|
3
tests/fixtures/upgrade/c/definition.yaml
vendored
Normal file
3
tests/fixtures/upgrade/c/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
category: "test"
|
||||
name: "c"
|
||||
version: "1.0"
|
11
tests/fixtures/upgrade/cat/a/a/build.yaml
vendored
Normal file
11
tests/fixtures/upgrade/cat/a/a/build.yaml
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
image: "alpine"
|
||||
prelude:
|
||||
- echo foo > /test
|
||||
- echo bar > /test2
|
||||
steps:
|
||||
- echo artifact3 > /test3
|
||||
- echo artifact4 > /test4
|
||||
requires:
|
||||
- category: "test"
|
||||
name: "b"
|
||||
version: "1.0"
|
8
tests/fixtures/upgrade/cat/a/a/definition.yaml
vendored
Normal file
8
tests/fixtures/upgrade/cat/a/a/definition.yaml
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
category: "test"
|
||||
name: "a"
|
||||
version: "1.1"
|
||||
requires:
|
||||
- category: "test2"
|
||||
name: "b"
|
||||
version: "1.0"
|
||||
|
9
tests/fixtures/upgrade/cat/b-1.1/build.yaml
vendored
Normal file
9
tests/fixtures/upgrade/cat/b-1.1/build.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
image: "alpine"
|
||||
prelude:
|
||||
- echo foo > /test
|
||||
- echo bar > /test2
|
||||
- chmod +x generate.sh
|
||||
steps:
|
||||
- echo artifact5 > /newc
|
||||
- echo artifact6 > /newnewc
|
||||
- ./generate.sh
|
3
tests/fixtures/upgrade/cat/b-1.1/definition.yaml
vendored
Normal file
3
tests/fixtures/upgrade/cat/b-1.1/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
category: "test"
|
||||
name: "b"
|
||||
version: "1.1"
|
1
tests/fixtures/upgrade/cat/b-1.1/generate.sh
vendored
Normal file
1
tests/fixtures/upgrade/cat/b-1.1/generate.sh
vendored
Normal file
@@ -0,0 +1 @@
|
||||
echo generated > /sonewc
|
9
tests/fixtures/upgrade/cat/b/build.yaml
vendored
Normal file
9
tests/fixtures/upgrade/cat/b/build.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
image: "alpine"
|
||||
prelude:
|
||||
- echo foo > /test
|
||||
- echo bar > /test2
|
||||
- chmod +x generate.sh
|
||||
steps:
|
||||
- echo artifact5 > /test5
|
||||
- echo artifact6 > /test6
|
||||
- ./generate.sh
|
3
tests/fixtures/upgrade/cat/b/definition.yaml
vendored
Normal file
3
tests/fixtures/upgrade/cat/b/definition.yaml
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
category: "test"
|
||||
name: "b"
|
||||
version: "1.0"
|
1
tests/fixtures/upgrade/cat/b/generate.sh
vendored
Normal file
1
tests/fixtures/upgrade/cat/b/generate.sh
vendored
Normal file
@@ -0,0 +1 @@
|
||||
echo generated > /artifact42
|
13
vendor/github.com/cavaliercoder/grab/.travis.yml
generated
vendored
Normal file
13
vendor/github.com/cavaliercoder/grab/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
language: go
|
||||
|
||||
go:
|
||||
- 1.10.x
|
||||
- 1.9.x
|
||||
- 1.8.x
|
||||
- 1.7.x
|
||||
|
||||
script: make check
|
||||
|
||||
env:
|
||||
- GOARCH=amd64
|
||||
- GOARCH=386
|
26
vendor/github.com/cavaliercoder/grab/LICENSE
generated
vendored
Normal file
26
vendor/github.com/cavaliercoder/grab/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,26 @@
|
||||
Copyright (c) 2017 Ryan Armstrong. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
|
||||
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
29
vendor/github.com/cavaliercoder/grab/Makefile
generated
vendored
Normal file
29
vendor/github.com/cavaliercoder/grab/Makefile
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
GO = go
|
||||
GOGET = $(GO) get -u
|
||||
|
||||
all: check lint
|
||||
|
||||
check:
|
||||
cd cmd/grab && $(MAKE) -B all
|
||||
$(GO) test -cover -race ./...
|
||||
|
||||
install:
|
||||
$(GO) install -v ./...
|
||||
|
||||
clean:
|
||||
$(GO) clean -x ./...
|
||||
rm -rvf ./.test*
|
||||
|
||||
lint:
|
||||
gofmt -l -e -s . || :
|
||||
go vet . || :
|
||||
golint . || :
|
||||
gocyclo -over 15 . || :
|
||||
misspell ./* || :
|
||||
|
||||
deps:
|
||||
$(GOGET) github.com/golang/lint/golint
|
||||
$(GOGET) github.com/fzipp/gocyclo
|
||||
$(GOGET) github.com/client9/misspell/cmd/misspell
|
||||
|
||||
.PHONY: all check install clean lint deps
|
127
vendor/github.com/cavaliercoder/grab/README.md
generated
vendored
Normal file
127
vendor/github.com/cavaliercoder/grab/README.md
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
# grab
|
||||
|
||||
[](https://godoc.org/github.com/cavaliercoder/grab) [](https://travis-ci.org/cavaliercoder/grab) [](https://goreportcard.com/report/github.com/cavaliercoder/grab)
|
||||
|
||||
*Downloading the internet, one goroutine at a time!*
|
||||
|
||||
$ go get github.com/cavaliercoder/grab
|
||||
|
||||
Grab is a Go package for downloading files from the internet with the following
|
||||
rad features:
|
||||
|
||||
* Monitor download progress concurrently
|
||||
* Auto-resume incomplete downloads
|
||||
* Guess filename from content header or URL path
|
||||
* Safely cancel downloads using context.Context
|
||||
* Validate downloads using checksums
|
||||
* Download batches of files concurrently
|
||||
* Apply rate limiters
|
||||
|
||||
Requires Go v1.7+
|
||||
|
||||
## Example
|
||||
|
||||
The following example downloads a PDF copy of the free eBook, "An Introduction
|
||||
to Programming in Go" into the current working directory.
|
||||
|
||||
```go
|
||||
resp, err := grab.Get(".", "http://www.golang-book.com/public/pdf/gobook.pdf")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("Download saved to", resp.Filename)
|
||||
```
|
||||
|
||||
The following, more complete example allows for more granular control and
|
||||
periodically prints the download progress until it is complete.
|
||||
|
||||
The second time you run the example, it will auto-resume the previous download
|
||||
and exit sooner.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/cavaliercoder/grab"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// create client
|
||||
client := grab.NewClient()
|
||||
req, _ := grab.NewRequest(".", "http://www.golang-book.com/public/pdf/gobook.pdf")
|
||||
|
||||
// start download
|
||||
fmt.Printf("Downloading %v...\n", req.URL())
|
||||
resp := client.Do(req)
|
||||
fmt.Printf(" %v\n", resp.HTTPResponse.Status)
|
||||
|
||||
// start UI loop
|
||||
t := time.NewTicker(500 * time.Millisecond)
|
||||
defer t.Stop()
|
||||
|
||||
Loop:
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
fmt.Printf(" transferred %v / %v bytes (%.2f%%)\n",
|
||||
resp.BytesComplete(),
|
||||
resp.Size,
|
||||
100*resp.Progress())
|
||||
|
||||
case <-resp.Done:
|
||||
// download is complete
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
|
||||
// check for errors
|
||||
if err := resp.Err(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Download failed: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Download saved to ./%v \n", resp.Filename)
|
||||
|
||||
// Output:
|
||||
// Downloading http://www.golang-book.com/public/pdf/gobook.pdf...
|
||||
// 200 OK
|
||||
// transferred 42970 / 2893557 bytes (1.49%)
|
||||
// transferred 1207474 / 2893557 bytes (41.73%)
|
||||
// transferred 2758210 / 2893557 bytes (95.32%)
|
||||
// Download saved to ./gobook.pdf
|
||||
}
|
||||
```
|
||||
|
||||
## Design trade-offs
|
||||
|
||||
The primary use case for Grab is to concurrently downloading thousands of large
|
||||
files from remote file repositories where the remote files are immutable.
|
||||
Examples include operating system package repositories or ISO libraries.
|
||||
|
||||
Grab aims to provide robust, sane defaults. These are usually determined using
|
||||
the HTTP specifications, or by mimicking the behavior of common web clients like
|
||||
cURL, wget and common web browsers.
|
||||
|
||||
Grab aims to be stateless. The only state that exists is the remote files you
|
||||
wish to download and the local copy which may be completed, partially completed
|
||||
or not yet created. The advantage to this is that the local file system is not
|
||||
cluttered unnecessarily with addition state files (like a `.crdownload` file).
|
||||
The disadvantage of this approach is that grab must make assumptions about the
|
||||
local and remote state; specifically, that they have not been modified by
|
||||
another program.
|
||||
|
||||
If the local or remote file are modified outside of grab, and you download the
|
||||
file again with resuming enabled, the local file will likely become corrupted.
|
||||
In this case, you might consider making remote files immutable, or disabling
|
||||
resume.
|
||||
|
||||
Grab aims to enable best-in-class functionality for more complex features
|
||||
through extensible interfaces, rather than reimplementation. For example,
|
||||
you can provide your own Hash algorithm to compute file checksums, or your
|
||||
own rate limiter implementation (with all the associated trade-offs) to rate
|
||||
limit downloads.
|
506
vendor/github.com/cavaliercoder/grab/client.go
generated
vendored
Normal file
506
vendor/github.com/cavaliercoder/grab/client.go
generated
vendored
Normal file
@@ -0,0 +1,506 @@
|
||||
package grab
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Client is a file download client.
|
||||
//
|
||||
// Clients are safe for concurrent use by multiple goroutines.
|
||||
type Client struct {
|
||||
// HTTPClient specifies the http.Client which will be used for communicating
|
||||
// with the remote server during the file transfer.
|
||||
HTTPClient *http.Client
|
||||
|
||||
// UserAgent specifies the User-Agent string which will be set in the
|
||||
// headers of all requests made by this client.
|
||||
//
|
||||
// The user agent string may be overridden in the headers of each request.
|
||||
UserAgent string
|
||||
|
||||
// BufferSize specifies the size in bytes of the buffer that is used for
|
||||
// transferring all requested files. Larger buffers may result in faster
|
||||
// throughput but will use more memory and result in less frequent updates
|
||||
// to the transfer progress statistics. The BufferSize of each request can
|
||||
// be overridden on each Request object. Default: 32KB.
|
||||
BufferSize int
|
||||
}
|
||||
|
||||
// NewClient returns a new file download Client, using default configuration.
|
||||
func NewClient() *Client {
|
||||
return &Client{
|
||||
UserAgent: "grab",
|
||||
HTTPClient: &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultClient is the default client and is used by all Get convenience
|
||||
// functions.
|
||||
var DefaultClient = NewClient()
|
||||
|
||||
// Do sends a file transfer request and returns a file transfer response,
|
||||
// following policy (e.g. redirects, cookies, auth) as configured on the
|
||||
// client's HTTPClient.
|
||||
//
|
||||
// Like http.Get, Do blocks while the transfer is initiated, but returns as soon
|
||||
// as the transfer has started transferring in a background goroutine, or if it
|
||||
// failed early.
|
||||
//
|
||||
// An error is returned via Response.Err if caused by client policy (such as
|
||||
// CheckRedirect), or if there was an HTTP protocol or IO error. Response.Err
|
||||
// will block the caller until the transfer is completed, successfully or
|
||||
// otherwise.
|
||||
func (c *Client) Do(req *Request) *Response {
|
||||
// cancel will be called on all code-paths via closeResponse
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
resp := &Response{
|
||||
Request: req,
|
||||
Start: time.Now(),
|
||||
Done: make(chan struct{}, 0),
|
||||
Filename: req.Filename,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
bufferSize: req.BufferSize,
|
||||
}
|
||||
if resp.bufferSize == 0 {
|
||||
// default to Client.BufferSize
|
||||
resp.bufferSize = c.BufferSize
|
||||
}
|
||||
|
||||
// Run state-machine while caller is blocked to initialize the file transfer.
|
||||
// Must never transition to the copyFile state - this happens next in another
|
||||
// goroutine.
|
||||
c.run(resp, c.statFileInfo)
|
||||
|
||||
// Run copyFile in a new goroutine. copyFile will no-op if the transfer is
|
||||
// already complete or failed.
|
||||
go c.run(resp, c.copyFile)
|
||||
return resp
|
||||
}
|
||||
|
||||
// DoChannel executes all requests sent through the given Request channel, one
|
||||
// at a time, until it is closed by another goroutine. The caller is blocked
|
||||
// until the Request channel is closed and all transfers have completed. All
|
||||
// responses are sent through the given Response channel as soon as they are
|
||||
// received from the remote servers and can be used to track the progress of
|
||||
// each download.
|
||||
//
|
||||
// Slow Response receivers will cause a worker to block and therefore delay the
|
||||
// start of the transfer for an already initiated connection - potentially
|
||||
// causing a server timeout. It is the caller's responsibility to ensure a
|
||||
// sufficient buffer size is used for the Response channel to prevent this.
|
||||
//
|
||||
// If an error occurs during any of the file transfers it will be accessible via
|
||||
// the associated Response.Err function.
|
||||
func (c *Client) DoChannel(reqch <-chan *Request, respch chan<- *Response) {
|
||||
// TODO: enable cancelling of batch jobs
|
||||
for req := range reqch {
|
||||
resp := c.Do(req)
|
||||
respch <- resp
|
||||
<-resp.Done
|
||||
}
|
||||
}
|
||||
|
||||
// DoBatch executes all the given requests using the given number of concurrent
|
||||
// workers. Control is passed back to the caller as soon as the workers are
|
||||
// initiated.
|
||||
//
|
||||
// If the requested number of workers is less than one, a worker will be created
|
||||
// for every request. I.e. all requests will be executed concurrently.
|
||||
//
|
||||
// If an error occurs during any of the file transfers it will be accessible via
|
||||
// call to the associated Response.Err.
|
||||
//
|
||||
// The returned Response channel is closed only after all of the given Requests
|
||||
// have completed, successfully or otherwise.
|
||||
func (c *Client) DoBatch(workers int, requests ...*Request) <-chan *Response {
|
||||
if workers < 1 {
|
||||
workers = len(requests)
|
||||
}
|
||||
reqch := make(chan *Request, len(requests))
|
||||
respch := make(chan *Response, len(requests))
|
||||
wg := sync.WaitGroup{}
|
||||
for i := 0; i < workers; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
c.DoChannel(reqch, respch)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
// queue requests
|
||||
go func() {
|
||||
for _, req := range requests {
|
||||
reqch <- req
|
||||
}
|
||||
close(reqch)
|
||||
wg.Wait()
|
||||
close(respch)
|
||||
}()
|
||||
return respch
|
||||
}
|
||||
|
||||
// An stateFunc is an action that mutates the state of a Response and returns
|
||||
// the next stateFunc to be called.
|
||||
type stateFunc func(*Response) stateFunc
|
||||
|
||||
// run calls the given stateFunc function and all subsequent returned stateFuncs
|
||||
// until a stateFunc returns nil or the Response.ctx is canceled. Each stateFunc
|
||||
// should mutate the state of the given Response until it has completed
|
||||
// downloading or failed.
|
||||
func (c *Client) run(resp *Response, f stateFunc) {
|
||||
for {
|
||||
select {
|
||||
case <-resp.ctx.Done():
|
||||
if resp.IsComplete() {
|
||||
return
|
||||
}
|
||||
resp.err = resp.ctx.Err()
|
||||
f = c.closeResponse
|
||||
|
||||
default:
|
||||
// keep working
|
||||
}
|
||||
if f = f(resp); f == nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// statFileInfo retrieves FileInfo for any local file matching
|
||||
// Response.Filename.
|
||||
//
|
||||
// If the file does not exist, is a directory, or its name is unknown the next
|
||||
// stateFunc is headRequest.
|
||||
//
|
||||
// If the file exists, Response.fi is set and the next stateFunc is
|
||||
// validateLocal.
|
||||
//
|
||||
// If an error occurs, the next stateFunc is closeResponse.
|
||||
func (c *Client) statFileInfo(resp *Response) stateFunc {
|
||||
if resp.Filename == "" {
|
||||
return c.headRequest
|
||||
}
|
||||
fi, err := os.Stat(resp.Filename)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return c.headRequest
|
||||
}
|
||||
resp.err = err
|
||||
return c.closeResponse
|
||||
}
|
||||
if fi.IsDir() {
|
||||
resp.Filename = ""
|
||||
return c.headRequest
|
||||
}
|
||||
resp.fi = fi
|
||||
return c.validateLocal
|
||||
}
|
||||
|
||||
// validateLocal compares a local copy of the downloaded file to the remote
|
||||
// file.
|
||||
//
|
||||
// An error is returned if the local file is larger than the remote file, or
|
||||
// Request.SkipExisting is true.
|
||||
//
|
||||
// If the existing file matches the length of the remote file, the next
|
||||
// stateFunc is checksumFile.
|
||||
//
|
||||
// If the local file is smaller than the remote file and the remote server is
|
||||
// known to support ranged requests, the next stateFunc is getRequest.
|
||||
func (c *Client) validateLocal(resp *Response) stateFunc {
|
||||
if resp.Request.SkipExisting {
|
||||
resp.err = ErrFileExists
|
||||
return c.closeResponse
|
||||
}
|
||||
|
||||
// determine expected file size
|
||||
size := resp.Request.Size
|
||||
if size == 0 && resp.HTTPResponse != nil {
|
||||
size = resp.HTTPResponse.ContentLength
|
||||
}
|
||||
if size == 0 {
|
||||
return c.headRequest
|
||||
}
|
||||
|
||||
if size == resp.fi.Size() {
|
||||
resp.DidResume = true
|
||||
resp.bytesResumed = resp.fi.Size()
|
||||
return c.checksumFile
|
||||
}
|
||||
|
||||
if resp.Request.NoResume {
|
||||
return c.getRequest
|
||||
}
|
||||
|
||||
if size < resp.fi.Size() {
|
||||
resp.err = ErrBadLength
|
||||
return c.closeResponse
|
||||
}
|
||||
|
||||
if resp.CanResume {
|
||||
resp.Request.HTTPRequest.Header.Set(
|
||||
"Range",
|
||||
fmt.Sprintf("bytes=%d-", resp.fi.Size()))
|
||||
resp.DidResume = true
|
||||
resp.bytesResumed = resp.fi.Size()
|
||||
return c.getRequest
|
||||
}
|
||||
return c.headRequest
|
||||
}
|
||||
|
||||
func (c *Client) checksumFile(resp *Response) stateFunc {
|
||||
if resp.Request.hash == nil {
|
||||
return c.closeResponse
|
||||
}
|
||||
if resp.Filename == "" {
|
||||
panic("filename not set")
|
||||
}
|
||||
req := resp.Request
|
||||
|
||||
// compare checksum
|
||||
var sum []byte
|
||||
sum, resp.err = checksum(req.Context(), resp.Filename, req.hash)
|
||||
if resp.err != nil {
|
||||
return c.closeResponse
|
||||
}
|
||||
if !bytes.Equal(sum, req.checksum) {
|
||||
resp.err = ErrBadChecksum
|
||||
if req.deleteOnError {
|
||||
if err := os.Remove(resp.Filename); err != nil {
|
||||
// err should be os.PathError and include file path
|
||||
resp.err = fmt.Errorf(
|
||||
"cannot remove downloaded file with checksum mismatch: %v",
|
||||
err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return c.closeResponse
|
||||
}
|
||||
|
||||
// doHTTPRequest sends a HTTP Request and returns the response
|
||||
func (c *Client) doHTTPRequest(req *http.Request) (*http.Response, error) {
|
||||
if c.UserAgent != "" && req.Header.Get("User-Agent") == "" {
|
||||
req.Header.Set("User-Agent", c.UserAgent)
|
||||
}
|
||||
return c.HTTPClient.Do(req)
|
||||
}
|
||||
|
||||
func (c *Client) headRequest(resp *Response) stateFunc {
|
||||
if resp.optionsKnown {
|
||||
return c.getRequest
|
||||
}
|
||||
resp.optionsKnown = true
|
||||
|
||||
if resp.Request.NoResume {
|
||||
return c.getRequest
|
||||
}
|
||||
|
||||
if resp.Filename != "" && resp.fi == nil {
|
||||
// destination path is already known and does not exist
|
||||
return c.getRequest
|
||||
}
|
||||
|
||||
hreq := new(http.Request)
|
||||
*hreq = *resp.Request.HTTPRequest
|
||||
hreq.Method = "HEAD"
|
||||
|
||||
resp.HTTPResponse, resp.err = c.doHTTPRequest(hreq)
|
||||
if resp.err != nil {
|
||||
return c.closeResponse
|
||||
}
|
||||
resp.HTTPResponse.Body.Close()
|
||||
|
||||
if resp.HTTPResponse.StatusCode != http.StatusOK {
|
||||
return c.getRequest
|
||||
}
|
||||
|
||||
return c.readResponse
|
||||
}
|
||||
|
||||
func (c *Client) getRequest(resp *Response) stateFunc {
|
||||
resp.HTTPResponse, resp.err = c.doHTTPRequest(resp.Request.HTTPRequest)
|
||||
if resp.err != nil {
|
||||
return c.closeResponse
|
||||
}
|
||||
|
||||
// check status code
|
||||
if !resp.Request.IgnoreBadStatusCodes {
|
||||
if resp.HTTPResponse.StatusCode < 200 || resp.HTTPResponse.StatusCode > 299 {
|
||||
resp.err = StatusCodeError(resp.HTTPResponse.StatusCode)
|
||||
return c.closeResponse
|
||||
}
|
||||
}
|
||||
|
||||
return c.readResponse
|
||||
}
|
||||
|
||||
func (c *Client) readResponse(resp *Response) stateFunc {
|
||||
if resp.HTTPResponse == nil {
|
||||
panic("Response.HTTPResponse is not ready")
|
||||
}
|
||||
|
||||
// check expected size
|
||||
resp.Size = resp.bytesResumed + resp.HTTPResponse.ContentLength
|
||||
if resp.HTTPResponse.ContentLength > 0 && resp.Request.Size > 0 {
|
||||
if resp.Request.Size != resp.Size {
|
||||
resp.err = ErrBadLength
|
||||
return c.closeResponse
|
||||
}
|
||||
}
|
||||
|
||||
// check filename
|
||||
if resp.Filename == "" {
|
||||
filename, err := guessFilename(resp.HTTPResponse)
|
||||
if err != nil {
|
||||
resp.err = err
|
||||
return c.closeResponse
|
||||
}
|
||||
// Request.Filename will be empty or a directory
|
||||
resp.Filename = filepath.Join(resp.Request.Filename, filename)
|
||||
}
|
||||
|
||||
if resp.requestMethod() == "HEAD" {
|
||||
if resp.HTTPResponse.Header.Get("Accept-Ranges") == "bytes" {
|
||||
resp.CanResume = true
|
||||
}
|
||||
return c.statFileInfo
|
||||
}
|
||||
return c.openWriter
|
||||
}
|
||||
|
||||
// openWriter opens the destination file for writing and seeks to the location
|
||||
// from whence the file transfer will resume.
|
||||
//
|
||||
// Requires that Response.Filename and resp.DidResume are already be set.
|
||||
func (c *Client) openWriter(resp *Response) stateFunc {
|
||||
if !resp.Request.NoCreateDirectories {
|
||||
resp.err = mkdirp(resp.Filename)
|
||||
if resp.err != nil {
|
||||
return c.closeResponse
|
||||
}
|
||||
}
|
||||
|
||||
// compute write flags
|
||||
flag := os.O_CREATE | os.O_WRONLY
|
||||
if resp.fi != nil {
|
||||
if resp.DidResume {
|
||||
flag = os.O_APPEND | os.O_WRONLY
|
||||
} else {
|
||||
flag = os.O_TRUNC | os.O_WRONLY
|
||||
}
|
||||
}
|
||||
|
||||
// open file
|
||||
f, err := os.OpenFile(resp.Filename, flag, 0644)
|
||||
if err != nil {
|
||||
resp.err = err
|
||||
return c.closeResponse
|
||||
}
|
||||
resp.writer = f
|
||||
|
||||
// seek to start or end
|
||||
whence := os.SEEK_SET
|
||||
if resp.bytesResumed > 0 {
|
||||
whence = os.SEEK_END
|
||||
}
|
||||
_, resp.err = f.Seek(0, whence)
|
||||
if resp.err != nil {
|
||||
return c.closeResponse
|
||||
}
|
||||
|
||||
// init transfer
|
||||
if resp.bufferSize < 1 {
|
||||
resp.bufferSize = 32 * 1024
|
||||
}
|
||||
b := make([]byte, resp.bufferSize)
|
||||
resp.transfer = newTransfer(
|
||||
resp.Request.Context(),
|
||||
resp.Request.RateLimiter,
|
||||
resp.writer,
|
||||
resp.HTTPResponse.Body,
|
||||
b)
|
||||
|
||||
// next step is copyFile, but this will be called later in another goroutine
|
||||
return nil
|
||||
}
|
||||
|
||||
// copy transfers content for a HTTP connection established via Client.do()
|
||||
func (c *Client) copyFile(resp *Response) stateFunc {
|
||||
if resp.IsComplete() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// run BeforeCopy hook
|
||||
if f := resp.Request.BeforeCopy; f != nil {
|
||||
resp.err = f(resp)
|
||||
if resp.err != nil {
|
||||
return c.closeResponse
|
||||
}
|
||||
}
|
||||
|
||||
if resp.transfer == nil {
|
||||
panic("developer error: Response.transfer is not initialized")
|
||||
}
|
||||
go resp.watchBps()
|
||||
_, resp.err = resp.transfer.copy()
|
||||
if resp.err != nil {
|
||||
return c.closeResponse
|
||||
}
|
||||
closeWriter(resp)
|
||||
|
||||
// set timestamp
|
||||
if !resp.Request.IgnoreRemoteTime {
|
||||
resp.err = setLastModified(resp.HTTPResponse, resp.Filename)
|
||||
if resp.err != nil {
|
||||
return c.closeResponse
|
||||
}
|
||||
}
|
||||
|
||||
// run AfterCopy hook
|
||||
if f := resp.Request.AfterCopy; f != nil {
|
||||
resp.err = f(resp)
|
||||
if resp.err != nil {
|
||||
return c.closeResponse
|
||||
}
|
||||
}
|
||||
|
||||
return c.checksumFile
|
||||
}
|
||||
|
||||
func closeWriter(resp *Response) {
|
||||
if resp.writer != nil {
|
||||
resp.writer.Close()
|
||||
resp.writer = nil
|
||||
}
|
||||
}
|
||||
|
||||
// close finalizes the Response
|
||||
func (c *Client) closeResponse(resp *Response) stateFunc {
|
||||
if resp.IsComplete() {
|
||||
panic("response already closed")
|
||||
}
|
||||
|
||||
resp.fi = nil
|
||||
closeWriter(resp)
|
||||
resp.closeResponseBody()
|
||||
|
||||
resp.End = time.Now()
|
||||
close(resp.Done)
|
||||
if resp.cancel != nil {
|
||||
resp.cancel()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
63
vendor/github.com/cavaliercoder/grab/doc.go
generated
vendored
Normal file
63
vendor/github.com/cavaliercoder/grab/doc.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
/*
|
||||
Package grab provides a HTTP download manager implementation.
|
||||
|
||||
Get is the most simple way to download a file:
|
||||
|
||||
resp, err := grab.Get("/tmp", "http://example.com/example.zip")
|
||||
// ...
|
||||
|
||||
Get will download the given URL and save it to the given destination directory.
|
||||
The destination filename will be determined automatically by grab using
|
||||
Content-Disposition headers returned by the remote server, or by inspecting the
|
||||
requested URL path.
|
||||
|
||||
An empty destination string or "." means the transfer will be stored in the
|
||||
current working directory.
|
||||
|
||||
If a destination file already exists, grab will assume it is a complete or
|
||||
partially complete download of the requested file. If the remote server supports
|
||||
resuming interrupted downloads, grab will resume downloading from the end of the
|
||||
partial file. If the server does not support resumed downloads, the file will be
|
||||
retransferred in its entirety. If the file is already complete, grab will return
|
||||
successfully.
|
||||
|
||||
For control over the HTTP client, destination path, auto-resume, checksum
|
||||
validation and other settings, create a Client:
|
||||
|
||||
client := grab.NewClient()
|
||||
client.HTTPClient.Transport.DisableCompression = true
|
||||
|
||||
req, err := grab.NewRequest("/tmp", "http://example.com/example.zip")
|
||||
// ...
|
||||
req.NoResume = true
|
||||
req.HTTPRequest.Header.Set("Authorization", "Basic YWxhZGRpbjpvcGVuc2VzYW1l")
|
||||
|
||||
resp := client.Do(req)
|
||||
// ...
|
||||
|
||||
You can monitor the progress of downloads while they are transferring:
|
||||
|
||||
client := grab.NewClient()
|
||||
req, err := grab.NewRequest("", "http://example.com/example.zip")
|
||||
// ...
|
||||
resp := client.Do(req)
|
||||
|
||||
t := time.NewTicker(time.Second)
|
||||
defer t.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-t.C:
|
||||
fmt.Printf("%.02f%% complete\n", resp.Progress())
|
||||
|
||||
case <-resp.Done:
|
||||
if err := resp.Err(); err != nil {
|
||||
// ...
|
||||
}
|
||||
|
||||
// ...
|
||||
return
|
||||
}
|
||||
}
|
||||
*/
|
||||
package grab
|
42
vendor/github.com/cavaliercoder/grab/error.go
generated
vendored
Normal file
42
vendor/github.com/cavaliercoder/grab/error.go
generated
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
package grab
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrBadLength indicates that the server response or an existing file does
|
||||
// not match the expected content length.
|
||||
ErrBadLength = errors.New("bad content length")
|
||||
|
||||
// ErrBadChecksum indicates that a downloaded file failed to pass checksum
|
||||
// validation.
|
||||
ErrBadChecksum = errors.New("checksum mismatch")
|
||||
|
||||
// ErrNoFilename indicates that a reasonable filename could not be
|
||||
// automatically determined using the URL or response headers from a server.
|
||||
ErrNoFilename = errors.New("no filename could be determined")
|
||||
|
||||
// ErrNoTimestamp indicates that a timestamp could not be automatically
|
||||
// determined using the response headers from the remote server.
|
||||
ErrNoTimestamp = errors.New("no timestamp could be determined for the remote file")
|
||||
|
||||
// ErrFileExists indicates that the destination path already exists.
|
||||
ErrFileExists = errors.New("file exists")
|
||||
)
|
||||
|
||||
// StatusCodeError indicates that the server response had a status code that
|
||||
// was not in the 200-299 range (after following any redirects).
|
||||
type StatusCodeError int
|
||||
|
||||
func (err StatusCodeError) Error() string {
|
||||
return fmt.Sprintf("server returned %d %s", err, http.StatusText(int(err)))
|
||||
}
|
||||
|
||||
// IsStatusCodeError returns true if the given error is of type StatusCodeError.
|
||||
func IsStatusCodeError(err error) bool {
|
||||
_, ok := err.(StatusCodeError)
|
||||
return ok
|
||||
}
|
64
vendor/github.com/cavaliercoder/grab/grab.go
generated
vendored
Normal file
64
vendor/github.com/cavaliercoder/grab/grab.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
package grab
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Get sends a HTTP request and downloads the content of the requested URL to
|
||||
// the given destination file path. The caller is blocked until the download is
|
||||
// completed, successfully or otherwise.
|
||||
//
|
||||
// An error is returned if caused by client policy (such as CheckRedirect), or
|
||||
// if there was an HTTP protocol or IO error.
|
||||
//
|
||||
// For non-blocking calls or control over HTTP client headers, redirect policy,
|
||||
// and other settings, create a Client instead.
|
||||
func Get(dst, urlStr string) (*Response, error) {
|
||||
req, err := NewRequest(dst, urlStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp := DefaultClient.Do(req)
|
||||
return resp, resp.Err()
|
||||
}
|
||||
|
||||
// GetBatch sends multiple HTTP requests and downloads the content of the
|
||||
// requested URLs to the given destination directory using the given number of
|
||||
// concurrent worker goroutines.
|
||||
//
|
||||
// The Response for each requested URL is sent through the returned Response
|
||||
// channel, as soon as a worker receives a response from the remote server. The
|
||||
// Response can then be used to track the progress of the download while it is
|
||||
// in progress.
|
||||
//
|
||||
// The returned Response channel will be closed by Grab, only once all downloads
|
||||
// have completed or failed.
|
||||
//
|
||||
// If an error occurs during any download, it will be available via call to the
|
||||
// associated Response.Err.
|
||||
//
|
||||
// For control over HTTP client headers, redirect policy, and other settings,
|
||||
// create a Client instead.
|
||||
func GetBatch(workers int, dst string, urlStrs ...string) (<-chan *Response, error) {
|
||||
fi, err := os.Stat(dst)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !fi.IsDir() {
|
||||
return nil, fmt.Errorf("destination is not a directory")
|
||||
}
|
||||
|
||||
reqs := make([]*Request, len(urlStrs))
|
||||
for i := 0; i < len(urlStrs); i++ {
|
||||
req, err := NewRequest(dst, urlStrs[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reqs[i] = req
|
||||
}
|
||||
|
||||
ch := DefaultClient.DoBatch(workers, reqs...)
|
||||
return ch, nil
|
||||
}
|
12
vendor/github.com/cavaliercoder/grab/rate_limiter.go
generated
vendored
Normal file
12
vendor/github.com/cavaliercoder/grab/rate_limiter.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
package grab

import "context"

// RateLimiter is an interface that must be satisfied by any third-party rate
// limiters that may be used to limit download transfer speeds.
//
// WaitN should block until n bytes may be transferred, or return an error if
// the given context is canceled.
//
// A recommended token bucket implementation can be found at
// https://godoc.org/golang.org/x/time/rate#Limiter.
type RateLimiter interface {
	WaitN(ctx context.Context, n int) (err error)
}
|
172
vendor/github.com/cavaliercoder/grab/request.go
generated
vendored
Normal file
172
vendor/github.com/cavaliercoder/grab/request.go
generated
vendored
Normal file
@@ -0,0 +1,172 @@
|
||||
package grab

import (
	"context"
	"hash"
	"net/http"
	"net/url"
)

// A Hook is a user provided callback function that can be called by grab at
// various stages of a request's lifecycle. If a hook returns an error, the
// associated request is canceled and the same error is returned on the
// Response object.
//
// Hook functions are called synchronously and should never block unnecessarily.
// Response methods that block until a download is complete, such as
// Response.Err, Response.Cancel or Response.Wait will deadlock. To cancel a
// download from a callback, simply return a non-nil error.
type Hook func(*Response) error

// A Request represents an HTTP file transfer request to be sent by a Client.
type Request struct {
	// Label is an arbitrary string which may be used to label a Request with a
	// user friendly name.
	Label string

	// Tag is an arbitrary interface which may be used to relate a Request to
	// other data.
	Tag interface{}

	// HTTPRequest specifies the http.Request to be sent to the remote server to
	// initiate a file transfer. It includes request configuration such as URL,
	// protocol version, HTTP method, request headers and authentication.
	HTTPRequest *http.Request

	// Filename specifies the path where the file transfer will be stored in
	// local storage. If Filename is empty or a directory, the true Filename will
	// be resolved using Content-Disposition headers or the request URL.
	//
	// An empty string means the transfer will be stored in the current working
	// directory.
	Filename string

	// SkipExisting specifies that ErrFileExists should be returned if the
	// destination path already exists. The existing file will not be checked for
	// completeness.
	SkipExisting bool

	// NoResume specifies that a partially completed download will be restarted
	// without attempting to resume any existing file. If the download is already
	// completed in full, it will not be restarted.
	NoResume bool

	// NoCreateDirectories specifies that any missing directories in the given
	// Filename path should not be created automatically, if they do not already
	// exist.
	NoCreateDirectories bool

	// IgnoreBadStatusCodes specifies that grab should accept any status code in
	// the response from the remote server. Otherwise, grab expects the response
	// status code to be within the 2XX range (after following redirects).
	IgnoreBadStatusCodes bool

	// IgnoreRemoteTime specifies that grab should not attempt to set the
	// timestamp of the local file to match the remote file.
	IgnoreRemoteTime bool

	// Size specifies the expected size of the file transfer if known. If the
	// server response size does not match, the transfer is cancelled and
	// ErrBadLength returned.
	Size int64

	// BufferSize specifies the size in bytes of the buffer that is used for
	// transferring the requested file. Larger buffers may result in faster
	// throughput but will use more memory and result in less frequent updates
	// to the transfer progress statistics. If a RateLimiter is configured,
	// BufferSize should be much lower than the rate limit. Default: 32KB.
	BufferSize int

	// RateLimiter allows the transfer rate of a download to be limited. The given
	// Request.BufferSize determines how frequently the RateLimiter will be
	// polled.
	RateLimiter RateLimiter

	// BeforeCopy is a user provided callback that is called immediately before
	// a request starts downloading. If BeforeCopy returns an error, the request
	// is cancelled and the same error is returned on the Response object.
	BeforeCopy Hook

	// AfterCopy is a user provided callback that is called immediately after a
	// request has finished downloading, before checksum validation and closure.
	// This hook is only called if the transfer was successful. If AfterCopy
	// returns an error, the request is canceled and the same error is returned on
	// the Response object.
	AfterCopy Hook

	// hash, checksum and deleteOnError - set via SetChecksum.
	hash          hash.Hash
	checksum      []byte
	deleteOnError bool

	// Context for cancellation and timeout - set via WithContext
	ctx context.Context
}
|
||||
|
||||
// NewRequest returns a new file transfer Request suitable for use with
|
||||
// Client.Do.
|
||||
func NewRequest(dst, urlStr string) (*Request, error) {
|
||||
if dst == "" {
|
||||
dst = "."
|
||||
}
|
||||
req, err := http.NewRequest("GET", urlStr, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Request{
|
||||
HTTPRequest: req,
|
||||
Filename: dst,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Context returns the request's context. To change the context, use
|
||||
// WithContext.
|
||||
//
|
||||
// The returned context is always non-nil; it defaults to the background
|
||||
// context.
|
||||
//
|
||||
// The context controls cancelation.
|
||||
func (r *Request) Context() context.Context {
|
||||
if r.ctx != nil {
|
||||
return r.ctx
|
||||
}
|
||||
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
// WithContext returns a shallow copy of r with its context changed
|
||||
// to ctx. The provided ctx must be non-nil.
|
||||
func (r *Request) WithContext(ctx context.Context) *Request {
|
||||
if ctx == nil {
|
||||
panic("nil context")
|
||||
}
|
||||
r2 := new(Request)
|
||||
*r2 = *r
|
||||
r2.ctx = ctx
|
||||
r2.HTTPRequest = r2.HTTPRequest.WithContext(ctx)
|
||||
return r2
|
||||
}
|
||||
|
||||
// URL returns the URL to be downloaded, as held by the underlying
// http.Request.
func (r *Request) URL() *url.URL {
	return r.HTTPRequest.URL
}
|
||||
|
||||
// SetChecksum sets the desired hashing algorithm and checksum value to validate
// a downloaded file. Once the download is complete, the given hashing algorithm
// will be used to compute the actual checksum of the downloaded file. If the
// checksums do not match, an error will be returned by the associated
// Response.Err method.
//
// If deleteOnError is true, the downloaded file will be deleted automatically
// if it fails checksum validation.
//
// To prevent corruption of the computed checksum, the given hash must not be
// used by any other request or goroutines.
//
// To disable checksum validation, call SetChecksum with a nil hash.
func (r *Request) SetChecksum(h hash.Hash, sum []byte, deleteOnError bool) {
	r.hash = h
	r.checksum = sum
	r.deleteOnError = deleteOnError
}
|
224
vendor/github.com/cavaliercoder/grab/response.go
generated
vendored
Normal file
224
vendor/github.com/cavaliercoder/grab/response.go
generated
vendored
Normal file
@@ -0,0 +1,224 @@
|
||||
package grab

import (
	"context"
	"io"
	"net/http"
	"os"
	"sync"
	"time"
)

// Response represents the response to a completed or in-progress download
// request.
//
// A response may be returned as soon a HTTP response is received from a remote
// server, but before the body content has started transferring.
//
// All Response method calls are thread-safe.
type Response struct {
	// The Request that was submitted to obtain this Response.
	Request *Request

	// HTTPResponse represents the HTTP response received from an HTTP request.
	//
	// The response Body should not be used as it will be consumed and closed by
	// grab.
	HTTPResponse *http.Response

	// Filename specifies the path where the file transfer is stored in local
	// storage.
	Filename string

	// Size specifies the total expected size of the file transfer.
	Size int64

	// Start specifies the time at which the file transfer started.
	Start time.Time

	// End specifies the time at which the file transfer completed.
	//
	// This will return zero until the transfer has completed.
	End time.Time

	// CanResume specifies that the remote server advertised that it can resume
	// previous downloads, as the 'Accept-Ranges: bytes' header is set.
	CanResume bool

	// DidResume specifies that the file transfer resumed a previously incomplete
	// transfer.
	DidResume bool

	// Done is closed once the transfer is finalized, either successfully or with
	// errors. Errors are available via Response.Err
	Done chan struct{}

	// ctx is a Context that controls cancelation of an inprogress transfer
	ctx context.Context

	// cancel is a cancel func that can be used to cancel the context of this
	// Response.
	cancel context.CancelFunc

	// fi is the FileInfo for the destination file if it already existed before
	// transfer started.
	fi os.FileInfo

	// optionsKnown indicates that a HEAD request has been completed and the
	// capabilities of the remote server are known.
	optionsKnown bool

	// writer is the file handle used to write the downloaded file to local
	// storage
	writer io.WriteCloser

	// bytesResumed specifies the number of bytes which were already
	// transferred before this transfer began.
	bytesResumed int64

	// transfer is responsible for copying data from the remote server to a local
	// file, tracking progress and allowing for cancelation.
	transfer *transfer

	// bytesPerSecond specifies the number of bytes that have been transferred in
	// the last 1-second window; guarded by bytesPerSecondMu.
	bytesPerSecond   float64
	bytesPerSecondMu sync.Mutex

	// bufferSize specifies the size in bytes of the transfer buffer.
	bufferSize int

	// err contains any error that may have occurred during the file transfer.
	// This should not be read until IsComplete returns true.
	err error
}
|
||||
|
||||
// IsComplete returns true if the download has completed. If an error occurred
// during the download, it can be returned via Err.
//
// The check is non-blocking: the select with a default case simply polls
// whether the Done channel has been closed.
func (c *Response) IsComplete() bool {
	select {
	case <-c.Done:
		return true
	default:
		return false
	}
}
|
||||
|
||||
// Cancel cancels the file transfer by canceling the underlying Context for
// this Response. Cancel blocks until the transfer is closed and returns any
// error - typically context.Canceled.
func (c *Response) Cancel() error {
	c.cancel()
	return c.Err()
}
|
||||
|
||||
// Wait blocks until the download is completed, successfully or otherwise.
func (c *Response) Wait() {
	<-c.Done
}
|
||||
|
||||
// Err blocks the calling goroutine until the underlying file transfer is
// completed and returns any error that may have occurred. If the download is
// already completed, Err returns immediately.
func (c *Response) Err() error {
	<-c.Done
	return c.err
}
|
||||
|
||||
// BytesComplete returns the total number of bytes which have been copied to
// the destination, including any bytes that were resumed from a previous
// download.
func (c *Response) BytesComplete() int64 {
	return c.bytesResumed + c.transfer.N()
}
|
||||
|
||||
// BytesPerSecond returns the number of bytes transferred in the last second. If
// the download is already complete, the average bytes/sec for the life of the
// download is returned.
func (c *Response) BytesPerSecond() float64 {
	if c.IsComplete() {
		// NOTE(review): if Duration() is zero this divides by zero and yields
		// +Inf/NaN — confirm whether callers tolerate that.
		return float64(c.transfer.N()) / c.Duration().Seconds()
	}
	c.bytesPerSecondMu.Lock()
	defer c.bytesPerSecondMu.Unlock()
	return c.bytesPerSecond
}
|
||||
|
||||
// Progress returns the ratio of total bytes that have been downloaded. Multiply
// the returned value by 100 to return the percentage completed.
//
// Returns 0 when the expected size is unknown (Size == 0), which also avoids
// division by zero.
func (c *Response) Progress() float64 {
	if c.Size == 0 {
		return 0
	}
	return float64(c.BytesComplete()) / float64(c.Size)
}
|
||||
|
||||
// Duration returns the duration of a file transfer. If the transfer is in
|
||||
// process, the duration will be between now and the start of the transfer. If
|
||||
// the transfer is complete, the duration will be between the start and end of
|
||||
// the completed transfer process.
|
||||
func (c *Response) Duration() time.Duration {
|
||||
if c.IsComplete() {
|
||||
return c.End.Sub(c.Start)
|
||||
}
|
||||
|
||||
return time.Now().Sub(c.Start)
|
||||
}
|
||||
|
||||
// ETA returns the estimated time at which the download will complete, given
// the current BytesPerSecond. If the transfer has already completed, the actual
// end time will be returned.
func (c *Response) ETA() time.Time {
	if c.IsComplete() {
		return c.End
	}
	bt := c.BytesComplete()
	bps := c.BytesPerSecond()
	if bps == 0 {
		// No throughput measured yet; the ETA is unknown (zero time).
		return time.Time{}
	}
	// Remaining bytes divided by current throughput; the Duration conversion
	// truncates any fractional second.
	secs := float64(c.Size-bt) / bps
	return time.Now().Add(time.Duration(secs) * time.Second)
}
|
||||
|
||||
// watchBps watches the progress of a transfer and maintains statistics.
// It samples the transfer's byte counter once per second and stores the
// per-second delta in bytesPerSecond (under bytesPerSecondMu) until the
// transfer's Done channel is closed.
func (c *Response) watchBps() {
	var prev int64
	then := c.Start

	t := time.NewTicker(time.Second)
	defer t.Stop()

	for {
		select {
		case <-c.Done:
			return

		case now := <-t.C:
			// Use the actual elapsed time between ticks rather than assuming
			// exactly one second.
			d := now.Sub(then)
			then = now

			cur := c.transfer.N()
			bs := cur - prev
			prev = cur

			c.bytesPerSecondMu.Lock()
			c.bytesPerSecond = float64(bs) / d.Seconds()
			c.bytesPerSecondMu.Unlock()
		}
	}
}
|
||||
|
||||
// requestMethod returns the HTTP method of the request that produced this
// response, or the empty string if the response chain is nil at any level.
func (c *Response) requestMethod() string {
	if c == nil || c.HTTPResponse == nil || c.HTTPResponse.Request == nil {
		return ""
	}
	return c.HTTPResponse.Request.Method
}
|
||||
|
||||
// closeResponseBody closes the HTTP response body if one is present; it is a
// no-op (returning nil) when there is no response or body to close.
func (c *Response) closeResponseBody() error {
	if c.HTTPResponse == nil || c.HTTPResponse.Body == nil {
		return nil
	}
	return c.HTTPResponse.Body.Close()
}
|
111
vendor/github.com/cavaliercoder/grab/states.wsd
generated
vendored
Normal file
111
vendor/github.com/cavaliercoder/grab/states.wsd
generated
vendored
Normal file
@@ -0,0 +1,111 @@
|
||||
@startuml
|
||||
title Grab transfer state
|
||||
|
||||
legend
|
||||
| # | Meaning |
|
||||
| D | Destination path known |
|
||||
| S | File size known |
|
||||
| O | Server options known (Accept-Ranges) |
|
||||
| R | Resume supported (Accept-Ranges) |
|
||||
| Z | Local file empty or missing |
|
||||
| P | Local file partially complete |
|
||||
endlegend
|
||||
|
||||
[*] --> Empty
|
||||
[*] --> D
|
||||
[*] --> S
|
||||
[*] --> DS
|
||||
|
||||
Empty : Filename: ""
|
||||
Empty : Size: 0
|
||||
Empty --> O : HEAD: Method not allowed
|
||||
Empty --> DSO : HEAD: Range not supported
|
||||
Empty --> DSOR : HEAD: Range supported
|
||||
|
||||
DS : Filename: "foo.bar"
|
||||
DS : Size: > 0
|
||||
DS --> DSZ : checkExisting(): File missing
|
||||
DS --> DSP : checkExisting(): File partial
|
||||
DS --> [*] : checkExisting(): File complete
|
||||
DS --> ERROR
|
||||
|
||||
S : Filename: ""
|
||||
S : Size: > 0
|
||||
S --> SO : HEAD: Method not allowed
|
||||
S --> DSO : HEAD: Range not supported
|
||||
S --> DSOR : HEAD: Range supported
|
||||
|
||||
D : Filename: "foo.bar"
|
||||
D : Size: 0
|
||||
D --> DO : HEAD: Method not allowed
|
||||
D --> DSO : HEAD: Range not supported
|
||||
D --> DSOR : HEAD: Range supported
|
||||
|
||||
|
||||
O : Filename: ""
|
||||
O : Size: 0
|
||||
O : CanResume: false
|
||||
O --> DSO : GET 200
|
||||
O --> ERROR
|
||||
|
||||
SO : Filename: ""
|
||||
SO : Size: > 0
|
||||
SO : CanResume: false
|
||||
SO --> DSO : GET: 200
|
||||
SO --> ERROR
|
||||
|
||||
DO : Filename: "foo.bar"
|
||||
DO : Size: 0
|
||||
DO : CanResume: false
|
||||
DO --> DSO : GET 200
|
||||
DO --> ERROR
|
||||
|
||||
DSZ : Filename: "foo.bar"
|
||||
DSZ : Size: > 0
|
||||
DSZ : File: empty
|
||||
DSZ --> DSORZ : HEAD: Range supported
|
||||
DSZ --> DSOZ : HEAD 405 or Range unsupported
|
||||
|
||||
DSP : Filename: "foo.bar"
|
||||
DSP : Size: > 0
|
||||
DSP : File: partial
|
||||
DSP --> DSORP : HEAD: Range supported
|
||||
DSP --> DSOZ : HEAD: 405 or Range unsupported
|
||||
|
||||
DSO : Filename: "foo.bar"
|
||||
DSO : Size: > 0
|
||||
DSO : CanResume: false
|
||||
DSO --> DSOZ : checkExisting(): File partial|missing
|
||||
DSO --> [*] : checkExisting(): File complete
|
||||
|
||||
DSOR : Filename: "foo.bar"
|
||||
DSOR : Size: > 0
|
||||
DSOR : CanResume: true
|
||||
DSOR --> DSORP : CheckLocal: File partial
|
||||
DSOR --> DSORZ : CheckLocal: File missing
|
||||
|
||||
DSORP : Filename: "foo.bar"
|
||||
DSORP : Size: > 0
|
||||
DSORP : CanResume: true
|
||||
DSORP : File: partial
|
||||
DSORP --> Transferring
|
||||
|
||||
DSORZ : Filename: "foo.bar"
|
||||
DSORZ : Size: > 0
|
||||
DSORZ : CanResume: true
|
||||
DSORZ : File: empty
|
||||
DSORZ --> Transferring
|
||||
|
||||
DSOZ : Filename: "foo.bar"
|
||||
DSOZ : Size: > 0
|
||||
DSOZ : CanResume: false
|
||||
DSOZ : File: empty
|
||||
DSOZ --> Transferring
|
||||
|
||||
Transferring --> [*]
|
||||
Transferring --> ERROR
|
||||
|
||||
ERROR : Something went wrong
|
||||
ERROR --> [*]
|
||||
|
||||
@enduml
|
81
vendor/github.com/cavaliercoder/grab/transfer.go
generated
vendored
Normal file
81
vendor/github.com/cavaliercoder/grab/transfer.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
|
||||
package grab

import (
	"context"
	"io"
	"sync/atomic"
)

// transfer copies data from a reader to a writer while tracking progress
// atomically and honouring context cancelation and an optional rate limit.
type transfer struct {
	n   int64 // must be 64bit aligned on 386 (accessed via sync/atomic)
	ctx context.Context
	lim RateLimiter
	w   io.Writer
	r   io.Reader
	b   []byte
}

// newTransfer returns a transfer copying src to dst through buf (allocated
// lazily by copy if nil), canceled via ctx and throttled by lim (may be nil).
func newTransfer(ctx context.Context, lim RateLimiter, dst io.Writer, src io.Reader, buf []byte) *transfer {
	return &transfer{
		ctx: ctx,
		lim: lim,
		w:   dst,
		r:   src,
		b:   buf,
	}
}
|
||||
|
||||
// copy behaves similarly to io.CopyBuffer except that it checks for cancelation
// of the given context.Context and reports progress in a thread-safe manner.
func (c *transfer) copy() (written int64, err error) {
	// Lazily allocate a default 32KB buffer, matching io.Copy's default.
	if c.b == nil {
		c.b = make([]byte, 32*1024)
	}
	for {
		// Non-blocking cancelation check once per iteration.
		select {
		case <-c.ctx.Done():
			err = c.ctx.Err()
			return
		default:
			// keep working
		}
		// Wait for permission to transfer up to one full buffer of bytes.
		if c.lim != nil {
			err = c.lim.WaitN(c.ctx, len(c.b))
			if err != nil {
				return
			}
		}
		nr, er := c.r.Read(c.b)
		if nr > 0 {
			nw, ew := c.w.Write(c.b[0:nr])
			if nw > 0 {
				written += int64(nw)
				// Publish progress atomically so N() can read it concurrently.
				atomic.StoreInt64(&c.n, written)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er != nil {
			// io.EOF marks normal completion and is not reported as an error.
			if er != io.EOF {
				err = er
			}
			break
		}
	}
	return written, err
}
|
||||
|
||||
// N returns the number of bytes transferred so far. It is safe to call
// concurrently with copy, and returns 0 on a nil receiver.
func (c *transfer) N() (n int64) {
	if c == nil {
		return 0
	}
	n = atomic.LoadInt64(&c.n)
	return
}
|
89
vendor/github.com/cavaliercoder/grab/util.go
generated
vendored
Normal file
89
vendor/github.com/cavaliercoder/grab/util.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
package grab

import (
	"context"
	"fmt"
	"hash"
	"mime"
	"net/http"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"
)

// setLastModified sets the last modified timestamp of a local file according to
// the Last-Modified header returned by a remote server.
//
// A missing or unparseable header is deliberately treated as a no-op (best
// effort): only a failure in os.Chtimes itself is reported.
func setLastModified(resp *http.Response, filename string) error {
	// https://tools.ietf.org/html/rfc7232#section-2.2
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Last-Modified
	header := resp.Header.Get("Last-Modified")
	if header == "" {
		return nil
	}
	lastmod, err := time.Parse(http.TimeFormat, header)
	if err != nil {
		return nil
	}
	return os.Chtimes(filename, lastmod, lastmod)
}
|
||||
|
||||
// mkdirp creates all missing parent directories for the destination file path.
|
||||
func mkdirp(path string) error {
|
||||
dir := filepath.Dir(path)
|
||||
if fi, err := os.Stat(dir); err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return fmt.Errorf("error checking destination directory: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll(dir, 0755); err != nil {
|
||||
return fmt.Errorf("error creating destination directory: %v", err)
|
||||
}
|
||||
} else if !fi.IsDir() {
|
||||
panic("destination path is not directory")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// guessFilename returns a filename for the given http.Response. If none can be
|
||||
// determined ErrNoFilename is returned.
|
||||
func guessFilename(resp *http.Response) (string, error) {
|
||||
filename := resp.Request.URL.Path
|
||||
if cd := resp.Header.Get("Content-Disposition"); cd != "" {
|
||||
if _, params, err := mime.ParseMediaType(cd); err == nil {
|
||||
filename = params["filename"]
|
||||
}
|
||||
}
|
||||
|
||||
// sanitize
|
||||
if filename == "" || strings.HasSuffix(filename, "/") || strings.Contains(filename, "\x00") {
|
||||
return "", ErrNoFilename
|
||||
}
|
||||
|
||||
filename = filepath.Base(path.Clean("/" + filename))
|
||||
if filename == "" || filename == "." || filename == "/" {
|
||||
return "", ErrNoFilename
|
||||
}
|
||||
|
||||
return filename, nil
|
||||
}
|
||||
|
||||
// checksum returns a hash of the given file, using the given hash algorithm.
|
||||
func checksum(ctx context.Context, filename string, h hash.Hash) (b []byte, err error) {
|
||||
var f *os.File
|
||||
f, err = os.Open(filename)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer func() {
|
||||
err = f.Close()
|
||||
}()
|
||||
|
||||
t := newTransfer(ctx, nil, h, f, nil)
|
||||
if _, err = t.copy(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
b = h.Sum(nil)
|
||||
return
|
||||
}
|
21
vendor/github.com/marcsauter/single/LICENSE
generated
vendored
Normal file
21
vendor/github.com/marcsauter/single/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2018 Marc Sauter
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
29
vendor/github.com/marcsauter/single/README.md
generated
vendored
Normal file
29
vendor/github.com/marcsauter/single/README.md
generated
vendored
Normal file
@@ -0,0 +1,29 @@
|
||||
# single
|
||||
|
||||
`single` provides a mechanism to ensure that only one instance of a program is running.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/marcsauter/single"
|
||||
)
|
||||
|
||||
func main() {
|
||||
s := single.New("your-app-name")
|
||||
if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
|
||||
log.Fatal("another instance of the app is already running, exiting")
|
||||
} else if err != nil {
|
||||
// Another error occurred, might be worth handling it as well
|
||||
log.Fatalf("failed to acquire exclusive app lock: %v", err)
|
||||
}
|
||||
defer s.TryUnlock()
|
||||
|
||||
log.Println("working")
|
||||
time.Sleep(60 * time.Second)
|
||||
log.Println("finished")
|
||||
}
|
||||
|
||||
The package currently supports `linux`, `solaris`, `darwin`, the BSD variants and `windows`.
|
41
vendor/github.com/marcsauter/single/single.go
generated
vendored
Normal file
41
vendor/github.com/marcsauter/single/single.go
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
// Package single provides a mechanism to ensure that only one instance of a
// program is running.
package single

import (
	"errors"
	"log"
	"os"
)

var (
	// ErrAlreadyRunning -- the instance is already running
	ErrAlreadyRunning = errors.New("the program is already running")
	// Lockfile -- the lock file to check
	Lockfile string
)

// Single represents the name and the open file descriptor
type Single struct {
	name string
	file *os.File
}

// New creates a Single instance
func New(name string) *Single {
	return &Single{name: name}
}

// Lock tries to obtain an exclude lock on a lockfile and exits the program if an error occurs
func (s *Single) Lock() {
	err := s.CheckLock()
	if err == nil {
		return
	}
	log.Fatal(err)
}

// Unlock releases the lock, closes and removes the lockfile. All errors will be reported directly.
func (s *Single) Unlock() {
	err := s.TryUnlock()
	if err != nil {
		log.Print(err)
	}
}
|
16
vendor/github.com/marcsauter/single/single_bsd.go
generated
vendored
Normal file
16
vendor/github.com/marcsauter/single/single_bsd.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
// +build freebsd openbsd netbsd dragonfly

package single

import (
	"fmt"
	"path/filepath"
)

// Filename returns an absolute filename, appropriate for the operating system:
// the Lockfile override when set, otherwise /var/run/<name>.lock.
func (s *Single) Filename() string {
	if Lockfile != "" {
		return Lockfile
	}
	return filepath.Join("/var/run", fmt.Sprintf("%s.lock", s.name))
}
|
15
vendor/github.com/marcsauter/single/single_darwin.go
generated
vendored
Normal file
15
vendor/github.com/marcsauter/single/single_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
package single

import (
	"fmt"
	"os"
	"path/filepath"
)

// Filename returns an absolute filename, appropriate for the operating system:
// the Lockfile override when set, otherwise <tmpdir>/<name>.lock.
func (s *Single) Filename() string {
	if Lockfile != "" {
		return Lockfile
	}
	return filepath.Join(os.TempDir(), fmt.Sprintf("%s.lock", s.name))
}
|
14
vendor/github.com/marcsauter/single/single_linux.go
generated
vendored
Normal file
14
vendor/github.com/marcsauter/single/single_linux.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
package single

import (
	"fmt"
	"path/filepath"
)

// Filename returns an absolute filename, appropriate for the operating system:
// the Lockfile override when set, otherwise /var/lock/<name>.lock.
func (s *Single) Filename() string {
	if Lockfile != "" {
		return Lockfile
	}
	return filepath.Join("/var/lock", fmt.Sprintf("%s.lock", s.name))
}
|
14
vendor/github.com/marcsauter/single/single_solaris.go
generated
vendored
Normal file
14
vendor/github.com/marcsauter/single/single_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
package single

import (
	"fmt"
	"path/filepath"
)

// Filename returns an absolute filename, appropriate for the operating system:
// the Lockfile override when set, otherwise /var/run/<name>.lock.
func (s *Single) Filename() string {
	if Lockfile != "" {
		return Lockfile
	}
	return filepath.Join("/var/run", fmt.Sprintf("%s.lock", s.name))
}
|
50
vendor/github.com/marcsauter/single/single_unix.go
generated
vendored
Normal file
50
vendor/github.com/marcsauter/single/single_unix.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
// +build linux solaris darwin freebsd openbsd netbsd dragonfly

package single

import (
	"fmt"
	"os"
	"syscall"
)

// CheckLock tries to obtain an exclusive write lock (fcntl F_WRLCK) on the
// lockfile and returns ErrAlreadyRunning if the lock cannot be acquired, or
// the underlying error if the lockfile cannot be opened.
//
// NOTE(review): any FcntlFlock failure is mapped to ErrAlreadyRunning, and on
// failure the opened file descriptor is left open — acceptable if the caller
// exits, but worth confirming.
func (s *Single) CheckLock() error {

	// open/create lock file
	f, err := os.OpenFile(s.Filename(), os.O_RDWR|os.O_CREATE, 0600)
	if err != nil {
		return err
	}
	s.file = f
	// set the lock type to F_WRLCK, therefor the file has to be opened writable
	flock := syscall.Flock_t{
		Type: syscall.F_WRLCK,
		Pid:  int32(os.Getpid()),
	}
	// try to obtain an exclusive lock - FcntlFlock seems to be the portable *ix way
	if err := syscall.FcntlFlock(s.file.Fd(), syscall.F_SETLK, &flock); err != nil {
		return ErrAlreadyRunning
	}

	return nil
}
|
||||
|
||||
// TryUnlock unlocks (fcntl F_UNLCK), closes and removes the lockfile,
// returning a wrapped error for whichever step fails first.
func (s *Single) TryUnlock() error {
	// set the lock type to F_UNLCK
	flock := syscall.Flock_t{
		Type: syscall.F_UNLCK,
		Pid:  int32(os.Getpid()),
	}
	if err := syscall.FcntlFlock(s.file.Fd(), syscall.F_SETLK, &flock); err != nil {
		return fmt.Errorf("failed to unlock the lock file: %v", err)
	}
	if err := s.file.Close(); err != nil {
		return fmt.Errorf("failed to close the lock file: %v", err)
	}
	if err := os.Remove(s.Filename()); err != nil {
		return fmt.Errorf("failed to remove the lock file: %v", err)
	}
	return nil
}
|
44
vendor/github.com/marcsauter/single/single_windows.go
generated
vendored
Normal file
44
vendor/github.com/marcsauter/single/single_windows.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
|
||||
// +build windows
|
||||
|
||||
package single
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Filename returns an absolute filename, appropriate for the operating system
|
||||
func (s *Single) Filename() string {
|
||||
if len(Lockfile) > 0 {
|
||||
return Lockfile
|
||||
}
|
||||
return filepath.Join(os.TempDir(), fmt.Sprintf("%s.lock", s.name))
|
||||
}
|
||||
|
||||
// CheckLock tries to obtain an exclude lock on a lockfile and returns an error if one occurs
|
||||
func (s *Single) CheckLock() error {
|
||||
|
||||
if err := os.Remove(s.Filename()); err != nil && !os.IsNotExist(err) {
|
||||
return ErrAlreadyRunning
|
||||
}
|
||||
|
||||
file, err := os.OpenFile(s.Filename(), os.O_EXCL|os.O_CREATE, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.file = file
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TryUnlock closes and removes the lockfile
|
||||
func (s *Single) TryUnlock() error {
|
||||
if err := s.file.Close(); err != nil {
|
||||
return fmt.Errorf("failed to close the lock file: %v", err)
|
||||
}
|
||||
if err := os.Remove(s.Filename()); err != nil {
|
||||
return fmt.Errorf("failed to remove the lock file: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
3
vendor/github.com/otiai10/copy/.gitignore
generated
vendored
Normal file
3
vendor/github.com/otiai10/copy/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
testdata.copy
|
||||
coverage.txt
|
||||
vendor
|
11
vendor/github.com/otiai10/copy/.travis.yml
generated
vendored
Normal file
11
vendor/github.com/otiai10/copy/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,11 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.9
|
||||
- tip
|
||||
before_script:
|
||||
- go get -t ./...
|
||||
script:
|
||||
- go test ./... -v
|
||||
- go test -race -coverprofile=coverage.txt -covermode=atomic
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
21
vendor/github.com/otiai10/copy/LICENSE
generated
vendored
Normal file
21
vendor/github.com/otiai10/copy/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2018 otiai10
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user