Mirror of https://github.com/mudler/luet.git, synced 2025-09-02 15:54:39 +00:00
Compare commits
34 commits (SHA1): 088adf6f3a, cead09fb9f, 9a1787ddaf, b1316b50b4, d92ee9e1d9, e7b58eec41, 6a1b64acea, df14fe60fc, 459eb01a59, e6c597c7d3, e70cdbaaf7, eea9dad2c6, 513f441bb3, ebe7466fdc, 76328176c1, 46ed6423ad, d5df40512b, d219a2e0fb, 4048138dcb, e5f44eee09, 6819a28f07, 24eb6eaef5, 58c4866289, c72565e019, 0f59c207b0, 68bc8d4d27, b24d335538, dcc5aae3cd, 99bf9e291d, 51417ecb5d, 130eb8de1a, f1604c3b6f, 5b5735266a, 984366d3a5
22  .travis.yml
@@ -1,15 +1,31 @@
|
||||
dist: bionic
|
||||
language: go
|
||||
services:
|
||||
- docker
|
||||
go:
|
||||
- "1.14"
|
||||
|
||||
env:
|
||||
- "GO15VENDOREXPERIMENT=1"
|
||||
global:
|
||||
- "GO15VENDOREXPERIMENT=1"
|
||||
jobs:
|
||||
- "DOCKER_BUILDKIT=0"
|
||||
- "DOCKER_BUILDKIT=1"
|
||||
before_install:
|
||||
- sudo rm -rf /var/lib/apt/lists/*
|
||||
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) edge"
|
||||
- sudo apt-get update
|
||||
- echo '{"experimental":true}' | sudo tee /etc/docker/daemon.json
|
||||
- export DOCKER_CLI_EXPERIMENTAL=enabled
|
||||
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
|
||||
- mkdir -vp ~/.docker/cli-plugins/
|
||||
- curl --silent -L "https://github.com/docker/buildx/releases/download/v0.3.0/buildx-v0.3.0.linux-amd64" > ~/.docker/cli-plugins/docker-buildx
|
||||
- chmod a+x ~/.docker/cli-plugins/docker-buildx
|
||||
- docker buildx version
|
||||
- sudo -E env "PATH=$PATH" apt-get install -y libcap2-bin
|
||||
- sudo -E env "PATH=$PATH" make deps
|
||||
script:
|
||||
- sudo -E env "PATH=$PATH" make multiarch-build test-integration test-coverage
|
||||
|
||||
#after_success:
|
||||
# - |
|
||||
# if [ -n "$TRAVIS_TAG" ] && [ "$TRAVIS_PULL_REQUEST" == "false" ]; then
|
||||
|
@@ -38,7 +38,7 @@ var Verbose bool
|
||||
var LockedCommands = []string{"install", "uninstall", "upgrade"}
|
||||
|
||||
const (
|
||||
LuetCLIVersion = "0.8.12"
|
||||
LuetCLIVersion = "0.9"
|
||||
LuetEnvPrefix = "LUET"
|
||||
)
|
||||
|
||||
|
@@ -75,6 +75,9 @@ var uninstallCmd = &cobra.Command{
|
||||
}
|
||||
Debug("Solver", LuetCfg.GetSolverOptions().CompactString())
|
||||
|
||||
// Load config protect configs
|
||||
installer.LoadConfigProtectConfs(LuetCfg)
|
||||
|
||||
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
|
||||
Concurrency: LuetCfg.GetGeneral().Concurrency,
|
||||
SolverOptions: *LuetCfg.GetSolverOptions(),
|
||||
|
@@ -78,6 +78,9 @@ var upgradeCmd = &cobra.Command{
|
||||
|
||||
Debug("Solver", LuetCfg.GetSolverOptions().String())
|
||||
|
||||
// Load config protect configs
|
||||
installer.LoadConfigProtectConfs(LuetCfg)
|
||||
|
||||
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
|
||||
Concurrency: LuetCfg.GetGeneral().Concurrency,
|
||||
SolverOptions: *LuetCfg.GetSolverOptions(),
|
||||
|
37  contrib/config/get_luet_root.sh (executable file)
@@ -0,0 +1,37 @@
#!/bin/bash
set -ex
export LUET_NOLOCK=true

LUET_VERSION=0.8.6
LUET_ROOTFS=${LUET_ROOTFS:-/}
LUET_DATABASE_PATH=${LUET_DATABASE_PATH:-/var/luet/db}
LUET_DATABASE_ENGINE=${LUET_DATABASE_ENGINE:-boltdb}
LUET_CONFIG_PROTECT=${LUET_CONFIG_PROTECT:-1}

wget -q https://github.com/mudler/luet/releases/download/0.8.6/luet-0.8.6-linux-amd64 -O luet
chmod +x luet

mkdir -p /etc/luet/repos.conf.d || true
mkdir -p $LUET_DATABASE_PATH || true
mkdir -p /var/tmp/luet || true

if [ "${LUET_CONFIG_PROTECT}" = "1" ] ; then
  mkdir -p /etc/luet/config.protect.d || true
  wget -q https://raw.githubusercontent.com/mudler/luet/master/contrib/config/config.protect.d/01_etc.yml.example -O /etc/luet/config.protect.d/01_etc.yml
fi
wget -q https://raw.githubusercontent.com/mocaccinoOS/repository-index/master/packages/mocaccino-repository-index/mocaccino-repository-index.yml -O /etc/luet/repos.conf.d/mocaccino-repository-index.yml

cat > /etc/luet/luet.yaml <<EOF
general:
  debug: false
system:
  rootfs: ${LUET_ROOTFS}
  database_path: "${LUET_DATABASE_PATH}"
  database_engine: "${LUET_DATABASE_ENGINE}"
  tmpdir_base: "/var/tmp/luet"
EOF

./luet install repository/luet repository/mocaccino-repository-index
./luet install system/luet system/luet-extensions

rm -rf luet
@@ -69,6 +69,7 @@
|
||||
# Default $TMPDIR/tmpluet
|
||||
# tmpdir_base: "/tmp/tmpluet"
|
||||
#
|
||||
#
|
||||
# ---------------------------------------------
|
||||
# Repositories configurations directories.
|
||||
# ---------------------------------------------
|
||||
@@ -93,6 +94,11 @@
|
||||
# annotation.
|
||||
# config_protect_skip: false
|
||||
#
|
||||
# The paths used to load repositories and config
# protect files are based on the host rootfs.
# If set to false, the rootfs path is used as prefix.
|
||||
# config_from_host: true
|
||||
#
|
||||
# System repositories
|
||||
# ---------------------------------------------
|
||||
# In alternative to define repositories files
|
||||
|
@@ -330,35 +330,25 @@ func tarModifierWrapperFunc(dst, path string, header *tar.Header, content io.Rea
|
||||
|
||||
func (a *PackageArtifact) GetProtectFiles() []string {
|
||||
ans := []string{}
|
||||
annotationDir := ""
|
||||
|
||||
if !LuetCfg.ConfigProtectSkip &&
|
||||
LuetCfg.GetConfigProtectConfFiles() != nil &&
|
||||
len(LuetCfg.GetConfigProtectConfFiles()) > 0 {
|
||||
if !LuetCfg.ConfigProtectSkip {
|
||||
|
||||
for _, file := range a.Files {
|
||||
for _, conf := range LuetCfg.GetConfigProtectConfFiles() {
|
||||
for _, dir := range conf.Directories {
|
||||
// Note file is without / at begin.
|
||||
if strings.HasPrefix("/"+file, filepath.Clean(dir)) {
|
||||
// docker archive modifier works with path without / at begin.
|
||||
ans = append(ans, file)
|
||||
goto nextFile
|
||||
}
|
||||
}
|
||||
// a.CompileSpec could be nil when artifact.Unpack is used for tree tarball
|
||||
if a.CompileSpec != nil &&
|
||||
a.CompileSpec.GetPackage().HasAnnotation(string(pkg.ConfigProtectAnnnotation)) {
|
||||
dir, ok := a.CompileSpec.GetPackage().GetAnnotations()[string(pkg.ConfigProtectAnnnotation)]
|
||||
if ok {
|
||||
annotationDir = dir
|
||||
}
|
||||
|
||||
if a.CompileSpec.GetPackage().HasAnnotation(string(pkg.ConfigProtectAnnnotation)) {
|
||||
dir, ok := a.CompileSpec.GetPackage().GetAnnotations()[string(pkg.ConfigProtectAnnnotation)]
|
||||
if ok {
|
||||
if strings.HasPrefix("/"+file, filepath.Clean(dir)) {
|
||||
ans = append(ans, file)
|
||||
goto nextFile
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
nextFile:
|
||||
}
|
||||
// TODO: check if skip this if we have a.CompileSpec nil
|
||||
|
||||
cp := NewConfigProtect(annotationDir)
|
||||
cp.Map(a.Files)
|
||||
|
||||
// NOTE: for unpack we need files path without initial /
|
||||
ans = cp.GetProtectFiles(false)
|
||||
}
|
||||
|
||||
return ans
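
The hunk above interleaves removed and added lines, so here is the new body of GetProtectFiles reassembled from the lines visible in it; since the rendering does not mark which lines are additions, treat this as a sketch of the resulting version rather than the verbatim file:

func (a *PackageArtifact) GetProtectFiles() []string {
    ans := []string{}
    annotationDir := ""

    if !LuetCfg.ConfigProtectSkip {
        // a.CompileSpec could be nil when artifact.Unpack is used for the tree tarball
        if a.CompileSpec != nil &&
            a.CompileSpec.GetPackage().HasAnnotation(string(pkg.ConfigProtectAnnnotation)) {
            if dir, ok := a.CompileSpec.GetPackage().GetAnnotations()[string(pkg.ConfigProtectAnnnotation)]; ok {
                annotationDir = dir
            }
        }

        cp := NewConfigProtect(annotationDir)
        cp.Map(a.Files)

        // NOTE: for unpack we need the file paths without the leading slash
        ans = cp.GetProtectFiles(false)
    }

    return ans
}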
|
||||
@@ -526,7 +516,7 @@ func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
|
||||
}
|
||||
|
||||
// ExtractArtifactFromDelta extracts deltas from ArtifactLayer from an image in tar format
|
||||
func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, t CompressionImplementation) (Artifact, error) {
|
||||
func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, excludes []string, t CompressionImplementation) (Artifact, error) {
|
||||
|
||||
archive, err := LuetCfg.GetSystem().TempDir("archive")
|
||||
if err != nil {
|
||||
@@ -556,7 +546,8 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
|
||||
}
|
||||
|
||||
// Handle includes in spec. If specified they filter what gets in the package
|
||||
if len(includes) > 0 {
|
||||
|
||||
if len(includes) > 0 && len(excludes) == 0 {
|
||||
var includeRegexp []*regexp.Regexp
|
||||
for _, i := range includes {
|
||||
r, e := regexp.Compile(i)
|
||||
@@ -584,6 +575,81 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
|
||||
Debug("File ", a.Name, " deleted")
|
||||
}
|
||||
}
|
||||
|
||||
} else if len(includes) == 0 && len(excludes) != 0 {
|
||||
var excludeRegexp []*regexp.Regexp
|
||||
for _, i := range excludes {
|
||||
r, e := regexp.Compile(i)
|
||||
if e != nil {
|
||||
Warning("Failed compiling regex:", e)
|
||||
continue
|
||||
}
|
||||
excludeRegexp = append(excludeRegexp, r)
|
||||
}
|
||||
for _, l := range layers {
|
||||
// Consider d.Additions (and d.Changes? - warn at least) only
|
||||
ADD:
|
||||
for _, a := range l.Diffs.Additions {
|
||||
for _, i := range excludeRegexp {
|
||||
if i.MatchString(a.Name) {
|
||||
continue ADD
|
||||
}
|
||||
}
|
||||
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
|
||||
}
|
||||
for _, a := range l.Diffs.Changes {
|
||||
Debug("File ", a.Name, " changed")
|
||||
}
|
||||
for _, a := range l.Diffs.Deletions {
|
||||
Debug("File ", a.Name, " deleted")
|
||||
}
|
||||
}
|
||||
|
||||
} else if len(includes) != 0 && len(excludes) != 0 {
|
||||
|
||||
var includeRegexp []*regexp.Regexp
|
||||
for _, i := range includes {
|
||||
r, e := regexp.Compile(i)
|
||||
if e != nil {
|
||||
Warning("Failed compiling regex:", e)
|
||||
continue
|
||||
}
|
||||
includeRegexp = append(includeRegexp, r)
|
||||
}
|
||||
var excludeRegexp []*regexp.Regexp
|
||||
for _, i := range excludes {
|
||||
r, e := regexp.Compile(i)
|
||||
if e != nil {
|
||||
Warning("Failed compiling regex:", e)
|
||||
continue
|
||||
}
|
||||
excludeRegexp = append(excludeRegexp, r)
|
||||
}
|
||||
|
||||
for _, l := range layers {
|
||||
// Consider d.Additions (and d.Changes? - warn at least) only
|
||||
EXCLUDES:
|
||||
for _, a := range l.Diffs.Additions {
|
||||
for _, i := range includeRegexp {
|
||||
if i.MatchString(a.Name) {
|
||||
for _, e := range excludeRegexp {
|
||||
if e.MatchString(a.Name) {
|
||||
continue EXCLUDES
|
||||
}
|
||||
}
|
||||
toCopy <- CopyJob{Src: filepath.Join(src, a.Name), Dst: filepath.Join(archive, a.Name), Artifact: a.Name}
|
||||
continue EXCLUDES
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, a := range l.Diffs.Changes {
|
||||
Debug("File ", a.Name, " changed")
|
||||
}
|
||||
for _, a := range l.Diffs.Deletions {
|
||||
Debug("File ", a.Name, " deleted")
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
// Otherwise just grab all
|
||||
for _, l := range layers {
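
The three new branches above (includes only, excludes only, both) apply the same per-file rule to each layer addition; condensed into a single predicate it reads as follows (a sketch, not the hunk's literal code, and the helper name is illustrative):

// keep reports whether an added file should be copied into the package archive,
// given already-compiled include/exclude patterns (empty slices mean "no filter").
func keep(name string, includes, excludes []*regexp.Regexp) bool {
    if len(includes) > 0 {
        matched := false
        for _, r := range includes {
            if r.MatchString(name) {
                matched = true
                break
            }
        }
        if !matched {
            return false // includes act as a whitelist
        }
    }
    for _, r := range excludes {
        if r.MatchString(name) {
            return false // excludes always win over includes
        }
    }
    return true
}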
|
||||
|
@@ -112,21 +112,25 @@ RUN echo bar > /test2`))
|
||||
diffs, err := b.Changes(filepath.Join(tmpdir2, "output1.tar"), filepath.Join(tmpdir, "output2.tar"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
artifacts := []ArtifactNode{}
|
||||
if os.Getenv("DOCKER_BUILDKIT") == "1" {
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/etc/resolv.conf", Size: 0})
|
||||
}
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/test", Size: 4})
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/test2", Size: 4})
|
||||
|
||||
Expect(diffs).To(Equal(
|
||||
[]ArtifactLayer{{
|
||||
FromImage: filepath.Join(tmpdir2, "output1.tar"),
|
||||
ToImage: filepath.Join(tmpdir, "output2.tar"),
|
||||
Diffs: ArtifactDiffs{
|
||||
Additions: []ArtifactNode{
|
||||
{Name: "/test", Size: 4},
|
||||
{Name: "/test2", Size: 4},
|
||||
},
|
||||
Additions: artifacts,
|
||||
},
|
||||
}}))
|
||||
err = b.ExtractRootfs(CompilerBackendOptions{SourcePath: filepath.Join(tmpdir, "output2.tar"), Destination: rootfs}, false)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
artifact, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, None)
|
||||
artifact, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, []string{}, None)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "package.tar"))).To(BeTrue())
|
||||
err = helpers.Untar(artifact.GetPath(), unpacked, false)
|
||||
|
@@ -101,15 +101,19 @@ RUN echo bar > /test2`))
|
||||
Expect(b.ImageDefinitionToTar(opts)).ToNot(HaveOccurred())
|
||||
Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
|
||||
|
||||
artifacts := []ArtifactNode{}
|
||||
if os.Getenv("DOCKER_BUILDKIT") == "1" {
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/etc/resolv.conf", Size: 0})
|
||||
}
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/test", Size: 4})
|
||||
artifacts = append(artifacts, ArtifactNode{Name: "/test2", Size: 4})
|
||||
|
||||
Expect(b.Changes(filepath.Join(tmpdir2, "output1.tar"), filepath.Join(tmpdir, "output2.tar"))).To(Equal(
|
||||
[]ArtifactLayer{{
|
||||
FromImage: filepath.Join(tmpdir2, "output1.tar"),
|
||||
ToImage: filepath.Join(tmpdir, "output2.tar"),
|
||||
Diffs: ArtifactDiffs{
|
||||
Additions: []ArtifactNode{
|
||||
{Name: "/test", Size: 4},
|
||||
{Name: "/test2", Size: 4},
|
||||
},
|
||||
Additions: artifacts,
|
||||
},
|
||||
}}))
|
||||
|
||||
|
@@ -181,7 +181,7 @@ func (cs *LuetCompiler) CompileParallel(keepPermissions bool, ps CompilationSpec
|
||||
return artifacts, allErrors
|
||||
}
|
||||
|
||||
func (cs *LuetCompiler) stripIncludesFromRootfs(includes []string, rootfs string) error {
|
||||
func (cs *LuetCompiler) stripFromRootfs(includes []string, rootfs string, include bool) error {
|
||||
var includeRegexp []*regexp.Regexp
|
||||
for _, i := range includes {
|
||||
r, e := regexp.Compile(i)
|
||||
@@ -213,7 +213,7 @@ func (cs *LuetCompiler) stripIncludesFromRootfs(includes []string, rootfs string
|
||||
}
|
||||
}
|
||||
|
||||
if !match {
|
||||
if include && !match || !include && match {
|
||||
toRemove = append(toRemove, currentpath)
|
||||
}
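
The widened condition folds the old include-only behaviour and the new exclude behaviour into a single rootfs walk; spelled out as a tiny helper (the name is illustrative, the expression is the one used above):

// stripped reports whether a walked path should be removed from the rootfs.
// include == true  -> patterns are a whitelist: strip what does not match.
// include == false -> patterns are a blacklist: strip what does match.
func stripped(include, match bool) bool {
    return (include && !match) || (!include && match)
}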
|
||||
|
||||
@@ -421,7 +421,11 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
|
||||
|
||||
if len(p.GetIncludes()) > 0 {
|
||||
// strip from includes
|
||||
cs.stripIncludesFromRootfs(p.GetIncludes(), rootfs)
|
||||
cs.stripFromRootfs(p.GetIncludes(), rootfs, true)
|
||||
}
|
||||
if len(p.GetExcludes()) > 0 {
|
||||
// strip from excludes
|
||||
cs.stripFromRootfs(p.GetExcludes(), rootfs, false)
|
||||
}
|
||||
artifact = NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
|
||||
artifact.SetCompressionType(cs.CompressionType)
|
||||
@@ -438,7 +442,7 @@ func (cs *LuetCompiler) compileWithImage(image, buildertaggedImage, packageImage
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not generate changes from layers")
|
||||
}
|
||||
artifact, err = ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes(), cs.CompressionType)
|
||||
artifact, err = ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes(), p.GetExcludes(), cs.CompressionType)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Could not generate deltas")
|
||||
}
|
||||
|
@@ -265,6 +265,146 @@ var _ = Describe("Compiler", func() {
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and excludes files", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
tmpdir, err := ioutil.TempDir("", "package")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/excludes")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// err = generalRecipe.Tree().ResolveDeps(3)
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles includes and excludes files", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
tmpdir, err := ioutil.TempDir("", "package")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/excludesincludes")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// err = generalRecipe.Tree().ResolveDeps(3)
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("marvot"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).ToNot(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and excludes only wanted files also from unpacked packages", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
tmpdir, err := ioutil.TempDir("", "package")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/excludeimage")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// err = generalRecipe.Tree().ResolveDeps(3)
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles includes and excludes only wanted files also from unpacked packages", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
tmpdir, err := ioutil.TempDir("", "package")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(tmpdir) // clean up
|
||||
|
||||
err = generalRecipe.Load("../../tests/fixtures/excludeincludeimage")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(2))
|
||||
|
||||
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(), generalRecipe.GetDatabase(), NewDefaultCompilerOptions(), solver.Options{Type: solver.SingleCoreSimple})
|
||||
|
||||
spec, err := compiler.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// err = generalRecipe.Tree().ResolveDeps(3)
|
||||
// Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
spec.SetOutputPath(tmpdir)
|
||||
compiler.SetConcurrency(1)
|
||||
artifacts, errs := compiler.CompileParallel(false, NewLuetCompilationspecs(spec))
|
||||
Expect(errs).To(BeNil())
|
||||
Expect(len(artifacts)).To(Equal(1))
|
||||
|
||||
for _, artifact := range artifacts {
|
||||
Expect(helpers.Exists(artifact.GetPath())).To(BeTrue())
|
||||
Expect(helpers.Untar(artifact.GetPath(), tmpdir, false)).ToNot(HaveOccurred())
|
||||
}
|
||||
Expect(helpers.Exists(spec.Rel("marvin"))).ToNot(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test5"))).To(BeTrue())
|
||||
Expect(helpers.Exists(spec.Rel("test6"))).To(BeTrue())
|
||||
})
|
||||
|
||||
It("Compiles and includes only wanted files also from unpacked packages", func() {
|
||||
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
|
||||
tmpdir, err := ioutil.TempDir("", "package")
|
||||
|
@@ -147,6 +147,7 @@ type ArtifactLayersSummary struct {
|
||||
type CompilationSpec interface {
|
||||
ImageUnpack() bool // tells if the definition is just an image
|
||||
GetIncludes() []string
|
||||
GetExcludes() []string
|
||||
|
||||
RenderBuildImage() (string, error)
|
||||
WriteBuildImageDefinition(string) error
|
||||
|
@@ -102,6 +102,7 @@ type LuetCompilationSpec struct {
|
||||
OutputPath string `json:"-"` // Where the build process files go
|
||||
Unpack bool `json:"unpack"`
|
||||
Includes []string `json:"includes"`
|
||||
Excludes []string `json:"excludes"`
|
||||
}
|
||||
|
||||
func NewLuetCompilationSpec(b []byte, p pkg.Package) (CompilationSpec, error) {
|
||||
@@ -148,6 +149,10 @@ func (cs *LuetCompilationSpec) GetIncludes() []string {
|
||||
return cs.Includes
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) GetExcludes() []string {
|
||||
return cs.Excludes
|
||||
}
|
||||
|
||||
func (cs *LuetCompilationSpec) GetRetrieve() []string {
|
||||
return cs.Retrieve
|
||||
}
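
With the new Excludes field and its getter, a compilation spec can carry both filters at once; the fixtures added later in this changeset populate them from build.yaml. A minimal Go-side sketch (values borrowed from the excludeincludeimage fixture):

spec := &LuetCompilationSpec{
    Unpack:   true,
    Includes: []string{"test.*", "mar.*"},
    Excludes: []string{"marvin"},
}
// A file ends up in the artifact only if it matches an include pattern
// and matches no exclude pattern.
_ = spec.GetExcludes() // []string{"marvin"}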
|
||||
|
@@ -97,7 +97,7 @@ type LuetSystemConfig struct {
|
||||
TmpDirBase string `yaml:"tmpdir_base" mapstructure:"tmpdir_base"`
|
||||
}
|
||||
|
||||
func (sc LuetSystemConfig) GetRepoDatabaseDirPath(name string) string {
|
||||
func (sc *LuetSystemConfig) GetRepoDatabaseDirPath(name string) string {
|
||||
dbpath := filepath.Join(sc.Rootfs, sc.DatabasePath)
|
||||
dbpath = filepath.Join(dbpath, "repos/"+name)
|
||||
err := os.MkdirAll(dbpath, os.ModePerm)
|
||||
@@ -107,7 +107,7 @@ func (sc LuetSystemConfig) GetRepoDatabaseDirPath(name string) string {
|
||||
return dbpath
|
||||
}
|
||||
|
||||
func (sc LuetSystemConfig) GetSystemRepoDatabaseDirPath() string {
|
||||
func (sc *LuetSystemConfig) GetSystemRepoDatabaseDirPath() string {
|
||||
dbpath := filepath.Join(sc.Rootfs,
|
||||
sc.DatabasePath)
|
||||
err := os.MkdirAll(dbpath, os.ModePerm)
|
||||
@@ -117,7 +117,7 @@ func (sc LuetSystemConfig) GetSystemRepoDatabaseDirPath() string {
|
||||
return dbpath
|
||||
}
|
||||
|
||||
func (sc LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) {
|
||||
func (sc *LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) {
|
||||
var cachepath string
|
||||
if sc.PkgsCachePath != "" {
|
||||
cachepath = sc.PkgsCachePath
|
||||
@@ -135,6 +135,10 @@ func (sc LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) {
|
||||
return
|
||||
}
|
||||
|
||||
func (sc *LuetSystemConfig) GetRootFsAbs() (string, error) {
|
||||
return filepath.Abs(sc.Rootfs)
|
||||
}
|
||||
|
||||
type LuetRepository struct {
|
||||
Name string `json:"name" yaml:"name" mapstructure:"name"`
|
||||
Description string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description"`
|
||||
@@ -204,6 +208,7 @@ type LuetConfig struct {
|
||||
RepositoriesConfDir []string `mapstructure:"repos_confdir"`
|
||||
ConfigProtectConfDir []string `mapstructure:"config_protect_confdir"`
|
||||
ConfigProtectSkip bool `mapstructure:"config_protect_skip"`
|
||||
ConfigFromHost bool `mapstructure:"config_from_host"`
|
||||
CacheRepositories []LuetRepository `mapstructure:"repetitors"`
|
||||
SystemRepositories []LuetRepository `mapstructure:"repositories"`
|
||||
|
||||
@@ -251,6 +256,8 @@ func GenDefault(viper *v.Viper) {
|
||||
viper.SetDefault("repos_confdir", []string{"/etc/luet/repos.conf.d"})
|
||||
viper.SetDefault("config_protect_confdir", []string{"/etc/luet/config.protect.d"})
|
||||
viper.SetDefault("config_protect_skip", false)
|
||||
// TODO: Set default to false when we are ready for migration.
|
||||
viper.SetDefault("config_from_host", true)
|
||||
viper.SetDefault("cache_repositories", []string{})
|
||||
viper.SetDefault("system_repositories", []string{})
|
||||
|
||||
|
@@ -18,6 +18,8 @@ package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type ConfigProtectConfFile struct {
|
||||
@@ -39,3 +41,79 @@ func (c *ConfigProtectConfFile) String() string {
|
||||
return fmt.Sprintf("[%s] filename: %s, dirs: %s", c.Name, c.Filename,
|
||||
c.Directories)
|
||||
}
|
||||
|
||||
type ConfigProtect struct {
|
||||
AnnotationDir string
|
||||
MapProtected map[string]bool
|
||||
}
|
||||
|
||||
func NewConfigProtect(annotationDir string) *ConfigProtect {
|
||||
if len(annotationDir) > 0 && annotationDir[0:1] != "/" {
|
||||
annotationDir = "/" + annotationDir
|
||||
}
|
||||
return &ConfigProtect{
|
||||
AnnotationDir: annotationDir,
|
||||
MapProtected: make(map[string]bool, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ConfigProtect) AddAnnotationDir(d string) {
|
||||
c.AnnotationDir = d
|
||||
}
|
||||
|
||||
func (c *ConfigProtect) GetAnnotationDir() string {
|
||||
return c.AnnotationDir
|
||||
}
|
||||
|
||||
func (c *ConfigProtect) Map(files []string) {
|
||||
if LuetCfg.ConfigProtectSkip {
|
||||
return
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
|
||||
if file[0:1] != "/" {
|
||||
file = "/" + file
|
||||
}
|
||||
|
||||
if len(LuetCfg.GetConfigProtectConfFiles()) > 0 {
|
||||
for _, conf := range LuetCfg.GetConfigProtectConfFiles() {
|
||||
for _, dir := range conf.Directories {
|
||||
// Note file is without / at begin (on unpack)
|
||||
if strings.HasPrefix(file, filepath.Clean(dir)) {
|
||||
// docker archive modifier works with path without / at begin.
|
||||
c.MapProtected[file] = true
|
||||
goto nextFile
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if c.AnnotationDir != "" && strings.HasPrefix(file, filepath.Clean(c.AnnotationDir)) {
|
||||
c.MapProtected[file] = true
|
||||
}
|
||||
nextFile:
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (c *ConfigProtect) Protected(file string) bool {
|
||||
if file[0:1] != "/" {
|
||||
file = "/" + file
|
||||
}
|
||||
_, ans := c.MapProtected[file]
|
||||
return ans
|
||||
}
|
||||
|
||||
func (c *ConfigProtect) GetProtectFiles(withSlash bool) []string {
|
||||
ans := []string{}
|
||||
|
||||
for key, _ := range c.MapProtected {
|
||||
if withSlash {
|
||||
ans = append(ans, key)
|
||||
} else {
|
||||
ans = append(ans, key[1:])
|
||||
}
|
||||
}
|
||||
return ans
|
||||
}
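
In short, ConfigProtect is used in two steps: Map classifies a file list once (against config.protect.d entries and the optional annotation directory), then Protected and GetProtectFiles answer queries about that classification. A minimal usage sketch, mirroring the unit tests that follow:

cp := config.NewConfigProtect("/etc") // or "" to rely only on config.protect.d entries
cp.Map([]string{"etc/foo/my.conf", "usr/bin/foo"})

cp.Protected("etc/foo/my.conf") // true, with or without the leading slash
cp.Protected("/usr/bin/foo")    // false
cp.GetProtectFiles(false)       // []string{"etc/foo/my.conf"}, paths without leading slash, for unpack
cp.GetProtectFiles(true)        // []string{"/etc/foo/my.conf"}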
|
||||
|
118  pkg/config/config_protect_test.go (normal file)
@@ -0,0 +1,118 @@
|
||||
// Copyright © 2019-2020 Ettore Di Giacinto <mudler@gentoo.org>
|
||||
// Daniele Rondina <geaaru@sabayonlinux.org>
|
||||
//
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License along
|
||||
// with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
package config_test
|
||||
|
||||
import (
|
||||
config "github.com/mudler/luet/pkg/config"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("Config", func() {
|
||||
|
||||
Context("Test config protect", func() {
|
||||
|
||||
It("Protect1", func() {
|
||||
|
||||
files := []string{
|
||||
"etc/foo/my.conf",
|
||||
"usr/bin/foo",
|
||||
"usr/share/doc/foo.md",
|
||||
}
|
||||
|
||||
cp := config.NewConfigProtect("/etc")
|
||||
cp.Map(files)
|
||||
|
||||
Expect(cp.Protected("etc/foo/my.conf")).To(BeTrue())
|
||||
Expect(cp.Protected("/etc/foo/my.conf")).To(BeTrue())
|
||||
Expect(cp.Protected("usr/bin/foo")).To(BeFalse())
|
||||
Expect(cp.Protected("/usr/bin/foo")).To(BeFalse())
|
||||
Expect(cp.Protected("/usr/share/doc/foo.md")).To(BeFalse())
|
||||
|
||||
Expect(cp.GetProtectFiles(false)).To(Equal(
|
||||
[]string{
|
||||
"etc/foo/my.conf",
|
||||
},
|
||||
))
|
||||
|
||||
Expect(cp.GetProtectFiles(true)).To(Equal(
|
||||
[]string{
|
||||
"/etc/foo/my.conf",
|
||||
},
|
||||
))
|
||||
})
|
||||
|
||||
It("Protect2", func() {
|
||||
|
||||
files := []string{
|
||||
"etc/foo/my.conf",
|
||||
"usr/bin/foo",
|
||||
"usr/share/doc/foo.md",
|
||||
}
|
||||
|
||||
cp := config.NewConfigProtect("")
|
||||
cp.Map(files)
|
||||
|
||||
Expect(cp.Protected("etc/foo/my.conf")).To(BeFalse())
|
||||
Expect(cp.Protected("/etc/foo/my.conf")).To(BeFalse())
|
||||
Expect(cp.Protected("usr/bin/foo")).To(BeFalse())
|
||||
Expect(cp.Protected("/usr/bin/foo")).To(BeFalse())
|
||||
Expect(cp.Protected("/usr/share/doc/foo.md")).To(BeFalse())
|
||||
|
||||
Expect(cp.GetProtectFiles(false)).To(Equal(
|
||||
[]string{},
|
||||
))
|
||||
|
||||
Expect(cp.GetProtectFiles(true)).To(Equal(
|
||||
[]string{},
|
||||
))
|
||||
})
|
||||
|
||||
It("Protect3: Annotation dir without initial slash", func() {
|
||||
|
||||
files := []string{
|
||||
"etc/foo/my.conf",
|
||||
"usr/bin/foo",
|
||||
"usr/share/doc/foo.md",
|
||||
}
|
||||
|
||||
cp := config.NewConfigProtect("etc")
|
||||
cp.Map(files)
|
||||
|
||||
Expect(cp.Protected("etc/foo/my.conf")).To(BeTrue())
|
||||
Expect(cp.Protected("/etc/foo/my.conf")).To(BeTrue())
|
||||
Expect(cp.Protected("usr/bin/foo")).To(BeFalse())
|
||||
Expect(cp.Protected("/usr/bin/foo")).To(BeFalse())
|
||||
Expect(cp.Protected("/usr/share/doc/foo.md")).To(BeFalse())
|
||||
|
||||
Expect(cp.GetProtectFiles(false)).To(Equal(
|
||||
[]string{
|
||||
"etc/foo/my.conf",
|
||||
},
|
||||
))
|
||||
|
||||
Expect(cp.GetProtectFiles(true)).To(Equal(
|
||||
[]string{
|
||||
"/etc/foo/my.conf",
|
||||
},
|
||||
))
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
})
|
@@ -24,6 +24,37 @@ import (
|
||||
copy "github.com/otiai10/copy"
|
||||
)
|
||||
|
||||
func OrderFiles(target string, files []string) ([]string, []string) {
|
||||
|
||||
var newFiles []string
|
||||
var notPresent []string
|
||||
|
||||
for _, f := range files {
|
||||
target := filepath.Join(target, f)
|
||||
fi, err := os.Lstat(target)
|
||||
if err != nil {
|
||||
notPresent = append(notPresent, f)
|
||||
continue
|
||||
}
|
||||
if m := fi.Mode(); !m.IsDir() {
|
||||
newFiles = append(newFiles, f)
|
||||
}
|
||||
}
|
||||
|
||||
for _, f := range files {
|
||||
target := filepath.Join(target, f)
|
||||
fi, err := os.Lstat(target)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if m := fi.Mode(); m.IsDir() {
|
||||
newFiles = append(newFiles, f)
|
||||
}
|
||||
}
|
||||
|
||||
return newFiles, notPresent
|
||||
}
|
||||
|
||||
func ListDir(dir string) ([]string, error) {
|
||||
content := []string{}
|
||||
|
||||
|
@@ -16,6 +16,10 @@
|
||||
package helpers_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
. "github.com/mudler/luet/pkg/helpers"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
@@ -28,4 +32,33 @@ var _ = Describe("Helpers", func() {
|
||||
Expect(Exists("../../tests/fixtures/buildtree/app-admin/enman/1.4.0/build.yaml.not.exists")).To(BeFalse())
|
||||
})
|
||||
})
|
||||
|
||||
Context("Orders dir and files correctly", func() {
|
||||
It("puts files first and folders at end", func() {
|
||||
testDir, err := ioutil.TempDir(os.TempDir(), "test")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer os.RemoveAll(testDir)
|
||||
|
||||
err = ioutil.WriteFile(filepath.Join(testDir, "foo"), []byte("test\n"), 0644)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = ioutil.WriteFile(filepath.Join(testDir, "baz"), []byte("test\n"), 0644)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = os.MkdirAll(filepath.Join(testDir, "bar"), 0755)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = ioutil.WriteFile(filepath.Join(testDir, "bar", "foo"), []byte("test\n"), 0644)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = os.MkdirAll(filepath.Join(testDir, "baz2"), 0755)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = ioutil.WriteFile(filepath.Join(testDir, "baz2", "foo"), []byte("test\n"), 0644)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
ordered, notExisting := OrderFiles(testDir, []string{"bar", "baz", "bar/foo", "baz2", "foo", "baz2/foo", "notexisting"})
|
||||
|
||||
Expect(ordered).To(Equal([]string{"baz", "bar/foo", "foo", "baz2/foo", "bar", "baz2"}))
|
||||
Expect(notExisting).To(Equal([]string{"notexisting"}))
|
||||
})
|
||||
})
|
||||
})
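
OrderFiles above exists for the uninstall path shown later in this changeset: plain files come first and directories last, so a directory is only removed once its contents are gone, and entries missing from the target are handed back separately for best-effort cleanup. The intended consumption, sketched from the uninstall hunk:

toRemove, notPresent := helpers.OrderFiles(s.Target, files) // plain files first, directories last
for _, f := range toRemove {
    os.Remove(filepath.Join(s.Target, f)) // directories are reached last, once already emptied
}
for _, f := range notPresent {
    os.Remove(filepath.Join(s.Target, f)) // best-effort: these were not found on disk earlier
}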
|
||||
|
@@ -38,15 +38,26 @@ func NewLocalClient(r RepoData) *LocalClient {
|
||||
func (c *LocalClient) DownloadArtifact(artifact compiler.Artifact) (compiler.Artifact, error) {
|
||||
var err error
|
||||
|
||||
rootfs := ""
|
||||
artifactName := path.Base(artifact.GetPath())
|
||||
cacheFile := filepath.Join(config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath(), artifactName)
|
||||
|
||||
if !config.LuetCfg.ConfigFromHost {
|
||||
rootfs, err = config.LuetCfg.GetSystem().GetRootFsAbs()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Check if file is already in cache
|
||||
if helpers.Exists(cacheFile) {
|
||||
Info("Use artifact", artifactName, "from cache.")
|
||||
} else {
|
||||
ok := false
|
||||
for _, uri := range c.RepoData.Urls {
|
||||
|
||||
uri = filepath.Join(rootfs, uri)
|
||||
|
||||
Info("Downloading artifact", artifactName, "from", uri)
|
||||
|
||||
//defer os.Remove(file.Name())
|
||||
@@ -72,8 +83,20 @@ func (c *LocalClient) DownloadFile(name string) (string, error) {
|
||||
var err error
|
||||
var file *os.File = nil
|
||||
|
||||
rootfs := ""
|
||||
|
||||
if !config.LuetCfg.ConfigFromHost {
|
||||
rootfs, err = config.LuetCfg.GetSystem().GetRootFsAbs()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
ok := false
|
||||
for _, uri := range c.RepoData.Urls {
|
||||
|
||||
uri = filepath.Join(rootfs, uri)
|
||||
|
||||
Info("Downloading file", name, "from", uri)
|
||||
file, err = config.LuetCfg.GetSystem().TempFile("localclient")
|
||||
if err != nil {
|
||||
|
@@ -19,6 +19,7 @@ package installer
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
@@ -29,8 +30,21 @@ import (
|
||||
|
||||
func LoadConfigProtectConfs(c *LuetConfig) error {
|
||||
var regexConfs = regexp.MustCompile(`.yml$`)
|
||||
var err error
|
||||
|
||||
rootfs := ""
|
||||
|
||||
// Respect the rootfs param on read repositories
|
||||
if !c.ConfigFromHost {
|
||||
rootfs, err = c.GetSystem().GetRootFsAbs()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, cdir := range c.ConfigProtectConfDir {
|
||||
cdir = filepath.Join(rootfs, cdir)
|
||||
|
||||
Debug("Parsing Config Protect Directory", cdir, "...")
|
||||
|
||||
files, err := ioutil.ReadDir(cdir)
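
The same config_from_host pattern is applied here and in LoadRepositories further down: with config_from_host set to true (the default) configuration directories are read from the host as-is, otherwise they are prefixed with the absolute rootfs path. Isolated as a helper (a sketch; resolveConfDir is not part of the changeset):

func resolveConfDir(c *LuetConfig, dir string) (string, error) {
    if c.ConfigFromHost {
        return dir, nil // read configuration straight from the host
    }
    rootfs, err := c.GetSystem().GetRootFsAbs()
    if err != nil {
        return "", err
    }
    return filepath.Join(rootfs, dir), nil // resolve relative to the target rootfs
}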
|
||||
|
@@ -610,16 +610,39 @@ func (l *LuetInstaller) installerWorker(i int, wg *sync.WaitGroup, c <-chan Arti
|
||||
}
|
||||
|
||||
func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
|
||||
var cp *config.ConfigProtect
|
||||
annotationDir := ""
|
||||
|
||||
files, err := s.Database.GetPackageFiles(p)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed getting installed files")
|
||||
}
|
||||
|
||||
// Remove from target
|
||||
for _, f := range files {
|
||||
target := filepath.Join(s.Target, f)
|
||||
Debug("Removing", target)
|
||||
if !config.LuetCfg.ConfigProtectSkip {
|
||||
|
||||
if p.HasAnnotation(string(pkg.ConfigProtectAnnnotation)) {
|
||||
dir, ok := p.GetAnnotations()[string(pkg.ConfigProtectAnnnotation)]
|
||||
if ok {
|
||||
annotationDir = dir
|
||||
}
|
||||
}
|
||||
|
||||
cp = config.NewConfigProtect(annotationDir)
|
||||
cp.Map(files)
|
||||
}
|
||||
|
||||
toRemove, notPresent := helpers.OrderFiles(s.Target, files)
|
||||
|
||||
// Remove from target
|
||||
for _, f := range toRemove {
|
||||
target := filepath.Join(s.Target, f)
|
||||
|
||||
if !config.LuetCfg.ConfigProtectSkip && cp.Protected(f) {
|
||||
Debug("Preserving protected file:", f)
|
||||
continue
|
||||
}
|
||||
|
||||
Debug("Removing", target)
|
||||
if l.Options.PreserveSystemEssentialData &&
|
||||
strings.HasPrefix(f, config.LuetCfg.GetSystem().GetSystemPkgsCacheDirPath()) ||
|
||||
strings.HasPrefix(f, config.LuetCfg.GetSystem().GetSystemRepoDatabaseDirPath()) {
|
||||
@@ -627,26 +650,41 @@ func (l *LuetInstaller) uninstall(p pkg.Package, s *System) error {
|
||||
continue
|
||||
}
|
||||
|
||||
fi, err := os.Stat(target)
|
||||
fi, err := os.Lstat(target)
|
||||
if err != nil {
|
||||
Warning("Failed removing file (not present in the system target ?)", target, err.Error())
|
||||
Warning("File not found (it was before?) ", err.Error())
|
||||
continue
|
||||
}
|
||||
switch mode := fi.Mode(); {
|
||||
case mode.IsDir():
|
||||
files, err := ioutil.ReadDir(target)
|
||||
if err != nil {
|
||||
Warning("Failed reading target", target, err.Error())
|
||||
Warning("Failed reading folder", target, err.Error())
|
||||
}
|
||||
if len(files) != 0 {
|
||||
Debug("Preserving not-empty folder", target)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if err = os.Remove(target); err != nil {
|
||||
Warning("Failed removing file (not present in the system target ?)", target, err.Error())
|
||||
Warning("Failed removing file (maybe not present in the system target anymore ?)", target, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
for _, f := range notPresent {
|
||||
target := filepath.Join(s.Target, f)
|
||||
|
||||
if !config.LuetCfg.ConfigProtectSkip && cp.Protected(f) {
|
||||
Debug("Preserving protected file:", f)
|
||||
continue
|
||||
}
|
||||
|
||||
if err = os.Remove(target); err != nil {
|
||||
Debug("Failed removing file (not present in the system target)", target, err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
err = s.Database.RemovePackageFiles(p)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Failed removing package files from database")
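
The switch from os.Stat to os.Lstat above presumably targets symlinks owned by a package: Stat follows the link (and fails outright on a dangling one), while Lstat describes the link itself, so it is treated like a plain file and removed. An illustrative check (the path is hypothetical):

fi, err := os.Lstat("/some/dangling-symlink") // hypothetical path
if err == nil && fi.Mode()&os.ModeSymlink != 0 {
    // Lstat succeeds even if the link target is gone; Stat would have returned an error here.
    _ = os.Remove("/some/dangling-symlink") // removes the link itself, never its target
}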
|
||||
|
@@ -374,6 +374,11 @@ func (db *BoltDatabase) FindPackages(p Package) (Packages, error) {
|
||||
|
||||
// FindPackageVersions returns the list of the packages belonging to cat/name
|
||||
func (db *BoltDatabase) FindPackageVersions(p Package) (Packages, error) {
|
||||
// Provides: Treat as the replaced package here
|
||||
if provided, err := db.getProvide(p); err == nil {
|
||||
p = provided
|
||||
}
|
||||
|
||||
var versionsInWorld []Package
|
||||
for _, w := range db.World() {
|
||||
if w.GetName() != p.GetName() || w.GetCategory() != p.GetCategory() {
|
||||
|
@@ -225,6 +225,11 @@ func (db *InMemoryDatabase) FindPackage(p Package) (Package, error) {
|
||||
|
||||
// FindPackageVersions returns the list of the packages belonging to cat/name
|
||||
func (db *InMemoryDatabase) FindPackageVersions(p Package) (Packages, error) {
|
||||
// Provides: Treat as the replaced package here
|
||||
if provided, err := db.getProvide(p); err == nil {
|
||||
p = provided
|
||||
}
|
||||
|
||||
versions, ok := db.CacheNoVersion[p.GetPackageName()]
|
||||
if !ok {
|
||||
return nil, errors.New("No versions found for package")
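
Both database backends now consult the provides table before looking up versions: if the queried package is provided by another package, the provider's versions are returned instead, which is what lets the solver upgrade an installed package to its replacement (see the "upgrades correctly with provides" test later in this diff). A rough sketch of the effect (illustrative; it assumes provides are registered when the provider is created in the database):

db := pkg.NewInMemoryDatabase(false)
b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
b.SetProvides([]*pkg.DefaultPackage{{Name: "a", Version: ">=0", Category: "test"}})
db.CreatePackage(b)

// Asking for versions of test/a should now surface the provider test/b-1.0
// rather than failing with "No versions found for package".
versions, _ := db.FindPackageVersions(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"})
_ = versions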
|
||||
|
@@ -19,6 +19,7 @@ package repository
|
||||
import (
|
||||
"io/ioutil"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/ghodss/yaml"
|
||||
@@ -29,8 +30,21 @@ import (
|
||||
|
||||
func LoadRepositories(c *LuetConfig) error {
|
||||
var regexRepo = regexp.MustCompile(`.yml$|.yaml$`)
|
||||
var err error
|
||||
rootfs := ""
|
||||
|
||||
// Respect the rootfs param on read repositories
|
||||
if !c.ConfigFromHost {
|
||||
rootfs, err = c.GetSystem().GetRootFsAbs()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, rdir := range c.RepositoriesConfDir {
|
||||
|
||||
rdir = filepath.Join(rootfs, rdir)
|
||||
|
||||
Debug("Parsing Repository Directory", rdir, "...")
|
||||
|
||||
files, err := ioutil.ReadDir(rdir)
|
||||
|
@@ -556,10 +556,11 @@ func (s *Parallel) Upgrade(checkconflicts, full bool) (pkg.Packages, PackagesAss
|
||||
|
||||
toUninstall := pkg.Packages{}
|
||||
toInstall := pkg.Packages{}
|
||||
availableCache := map[string]pkg.Packages{}
|
||||
|
||||
// we do this in memory so that provides are taken into account
|
||||
universe := pkg.NewInMemoryDatabase(false)
|
||||
for _, p := range s.DefinitionDatabase.World() {
|
||||
// Each one, should be expanded
|
||||
availableCache[p.GetName()+p.GetCategory()] = append(availableCache[p.GetName()+p.GetCategory()], p)
|
||||
universe.CreatePackage(p)
|
||||
}
|
||||
|
||||
installedcopy := pkg.NewInMemoryDatabase(false)
|
||||
@@ -575,10 +576,10 @@ func (s *Parallel) Upgrade(checkconflicts, full bool) (pkg.Packages, PackagesAss
|
||||
defer wg.Done()
|
||||
for p := range c {
|
||||
installedcopy.CreatePackage(p)
|
||||
packages, ok := availableCache[p.GetName()+p.GetCategory()]
|
||||
if ok && len(packages) != 0 {
|
||||
packages, err := universe.FindPackageVersions(p)
|
||||
if err == nil && len(packages) != 0 {
|
||||
best := packages.Best(nil)
|
||||
if best.GetVersion() != p.GetVersion() {
|
||||
if !best.Matches(p) {
|
||||
results <- []pkg.Package{p, best}
|
||||
}
|
||||
}
|
||||
|
@@ -489,20 +489,20 @@ func (s *Solver) Upgrade(checkconflicts, full bool) (pkg.Packages, PackagesAsser
|
||||
toUninstall := pkg.Packages{}
|
||||
toInstall := pkg.Packages{}
|
||||
|
||||
availableCache := map[string]pkg.Packages{}
|
||||
// we do this in memory so that provides are taken into account
|
||||
universe := pkg.NewInMemoryDatabase(false)
|
||||
for _, p := range s.DefinitionDatabase.World() {
|
||||
// Each one, should be expanded
|
||||
availableCache[p.GetName()+p.GetCategory()] = append(availableCache[p.GetName()+p.GetCategory()], p)
|
||||
universe.CreatePackage(p)
|
||||
}
|
||||
|
||||
installedcopy := pkg.NewInMemoryDatabase(false)
|
||||
|
||||
for _, p := range s.InstalledDatabase.World() {
|
||||
installedcopy.CreatePackage(p)
|
||||
packages, ok := availableCache[p.GetName()+p.GetCategory()]
|
||||
if ok && len(packages) != 0 {
|
||||
packages, err := universe.FindPackageVersions(p)
|
||||
if err == nil && len(packages) != 0 {
|
||||
best := packages.Best(nil)
|
||||
if best.GetVersion() != p.GetVersion() {
|
||||
if !best.Matches(p) {
|
||||
toUninstall = append(toUninstall, p)
|
||||
toInstall = append(toInstall, best)
|
||||
}
|
||||
|
@@ -1209,7 +1209,6 @@ var _ = Describe("Solver", func() {
|
||||
})
|
||||
})
|
||||
Context("Upgrades", func() {
|
||||
|
||||
C := pkg.NewPackage("c", "1.5", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "a", Version: ">=1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
C.SetCategory("test")
|
||||
B := pkg.NewPackage("b", "1.0", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
@@ -1219,6 +1218,17 @@ var _ = Describe("Solver", func() {
|
||||
A1 := pkg.NewPackage("a", "1.2", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "b", Version: "1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
A1.SetCategory("test")
|
||||
|
||||
BeforeEach(func() {
|
||||
C = pkg.NewPackage("c", "1.5", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "a", Version: ">=1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
C.SetCategory("test")
|
||||
B = pkg.NewPackage("b", "1.0", []*pkg.DefaultPackage{}, []*pkg.DefaultPackage{})
|
||||
B.SetCategory("test")
|
||||
A = pkg.NewPackage("a", "1.1", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "b", Version: "1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
A.SetCategory("test")
|
||||
A1 = pkg.NewPackage("a", "1.2", []*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "b", Version: "1.0", Category: "test"}}, []*pkg.DefaultPackage{})
|
||||
A1.SetCategory("test")
|
||||
})
|
||||
|
||||
It("upgrades correctly", func() {
|
||||
for _, p := range []pkg.Package{A1, B, C} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
@@ -1240,7 +1250,31 @@ var _ = Describe("Solver", func() {
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: C, Value: false}))
|
||||
Expect(len(solution)).To(Equal(3))
|
||||
})
|
||||
|
||||
It("upgrades correctly with provides", func() {
|
||||
B.SetProvides([]*pkg.DefaultPackage{&pkg.DefaultPackage{Name: "a", Version: ">=0", Category: "test"}, &pkg.DefaultPackage{Name: "c", Version: ">=0", Category: "test"}})
|
||||
|
||||
for _, p := range []pkg.Package{A1, B} {
|
||||
_, err := dbDefinitions.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
for _, p := range []pkg.Package{A, C} {
|
||||
_, err := dbInstalled.CreatePackage(p)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
uninstall, solution, err := s.Upgrade(true, true)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(len(uninstall)).To(Equal(2))
|
||||
Expect(uninstall[1].GetName()).To(Equal("c"))
|
||||
Expect(uninstall[1].GetVersion()).To(Equal("1.5"))
|
||||
Expect(uninstall[0].GetName()).To(Equal("a"))
|
||||
Expect(uninstall[0].GetVersion()).To(Equal("1.1"))
|
||||
|
||||
Expect(solution).To(ContainElement(PackageAssert{Package: B, Value: true}))
|
||||
Expect(len(solution)).To(Equal(1))
|
||||
})
|
||||
|
||||
It("UpgradeUniverse upgrades correctly", func() {
|
||||
|
14  tests/fixtures/excludeimage/build.yaml (vendored, normal file)
@@ -0,0 +1,14 @@
requires:
- category: "layer"
  name: "seed"
  version: "1.0"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- echo artifact43 > /marvin
unpack: true
excludes:
- marvin
3  tests/fixtures/excludeimage/definition.yaml (vendored, normal file)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"
2  tests/fixtures/excludeimage/seed/build.yaml (vendored, normal file)
@@ -0,0 +1,2 @@
image: alpine
unpack: true
3  tests/fixtures/excludeimage/seed/definition.yaml (vendored, normal file)
@@ -0,0 +1,3 @@
category: "layer"
name: "seed"
version: "1.0"
17  tests/fixtures/excludeincludeimage/build.yaml (vendored, normal file)
@@ -0,0 +1,17 @@
requires:
- category: "layer"
  name: "seed"
  version: "1.0"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- echo artifact43 > /marvin
unpack: true
excludes:
- marvin
includes:
- test.*
- mar.*
3  tests/fixtures/excludeincludeimage/definition.yaml (vendored, normal file)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"
2  tests/fixtures/excludeincludeimage/seed/build.yaml (vendored, normal file)
@@ -0,0 +1,2 @@
image: alpine
unpack: true
3  tests/fixtures/excludeincludeimage/seed/definition.yaml (vendored, normal file)
@@ -0,0 +1,3 @@
category: "layer"
name: "seed"
version: "1.0"
11  tests/fixtures/excludes/build.yaml (vendored, normal file)
@@ -0,0 +1,11 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- echo artifact43 > /marvin
- echo "foo" > /marvot
excludes:
- marvot
3  tests/fixtures/excludes/definition.yaml (vendored, normal file)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"
14  tests/fixtures/excludesincludes/build.yaml (vendored, normal file)
@@ -0,0 +1,14 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- echo artifact43 > /marvin
- echo "foo" > /marvot
excludes:
- marvot
includes:
- /test5
- mar.*
3  tests/fixtures/excludesincludes/definition.yaml (vendored, normal file)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"
@@ -43,6 +43,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -48,6 +48,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -51,6 +51,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -28,6 +28,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -43,6 +43,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -43,6 +43,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -71,6 +71,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -70,6 +70,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -42,6 +42,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -42,6 +42,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -50,6 +50,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -42,6 +42,7 @@ system:
|
||||
rootfs: /
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -44,6 +44,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -71,6 +71,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -11,18 +11,19 @@ oneTimeTearDown() {
|
||||
}
|
||||
|
||||
testBuild() {
|
||||
mkdir $tmpdir/testbuild
|
||||
luet build --tree "$ROOT_DIR/tests/fixtures/config_protect" --destination $tmpdir/testbuild --compression gzip test/a
|
||||
mkdir $tmpdir/testrootfs/testbuild -p
|
||||
luet build --tree "$ROOT_DIR/tests/fixtures/config_protect" \
|
||||
--destination $tmpdir/testrootfs/testbuild --compression gzip test/a
|
||||
buildst=$?
|
||||
assertEquals 'builds successfully' "$buildst" "0"
|
||||
assertTrue 'create package' "[ -e '$tmpdir/testbuild/a-test-1.0.package.tar.gz' ]"
|
||||
assertTrue 'create package' "[ -e '$tmpdir/testrootfs/testbuild/a-test-1.0.package.tar.gz' ]"
|
||||
}
|
||||
|
||||
testRepo() {
|
||||
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
|
||||
luet create-repo --tree "$ROOT_DIR/tests/fixtures/config_protect" \
|
||||
--output $tmpdir/testbuild \
|
||||
--packages $tmpdir/testbuild \
|
||||
--output $tmpdir/testrootfs/testbuild \
|
||||
--packages $tmpdir/testrootfs/testbuild \
|
||||
--name "test" \
|
||||
--descr "Test Repo" \
|
||||
--urls $tmpdir/testrootfs \
|
||||
@@ -30,15 +31,14 @@ testRepo() {
|
||||
|
||||
createst=$?
|
||||
assertEquals 'create repo successfully' "$createst" "0"
|
||||
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
|
||||
assertTrue 'create repository' "[ -e '$tmpdir/testrootfs/testbuild/repository.yaml' ]"
|
||||
}
|
||||
|
||||
testConfig() {
|
||||
mkdir $tmpdir/testrootfs
|
||||
|
||||
mkdir $tmpdir/config.protect.d
|
||||
mkdir $tmpdir/testrootfs/etc/luet/config.protect.d -p
|
||||
|
||||
cat <<EOF > $tmpdir/config.protect.d/conf1.yml
|
||||
cat <<EOF > $tmpdir/testrootfs/etc/luet/config.protect.d/conf1.yml
|
||||
name: "protect1"
|
||||
dirs:
|
||||
- /etc/
|
||||
@@ -52,13 +52,14 @@ system:
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_protect_confdir:
|
||||
- $tmpdir/config.protect.d
|
||||
- /etc/luet/config.protect.d
|
||||
config_from_host: false
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
enable: true
|
||||
urls:
|
||||
- "$tmpdir/testbuild"
|
||||
- "/testbuild"
|
||||
EOF
|
||||
luet config --config $tmpdir/luet.yaml
|
||||
res=$?
|
||||
@@ -91,6 +92,7 @@ testUnInstall() {
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
# TODO: we need remove it or not??
|
||||
assertTrue 'config protect created' "[ -e '$tmpdir/testrootfs/etc/a/._cfg0001_conf' ]"
|
||||
assertTrue 'config protect maintains the protected files' "[ -e '$tmpdir/testrootfs/etc/a/conf' ]"
|
||||
}
|
||||
|
||||
|
||||
|
@@ -54,6 +54,7 @@ system:
|
||||
database_engine: "boltdb"
|
||||
config_protect_confdir:
|
||||
- $tmpdir/config.protect.d
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
@@ -91,6 +92,7 @@ testUnInstall() {
|
||||
assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
|
||||
# TODO: we need remove it or not??
|
||||
assertTrue 'config protect created' "[ -e '$tmpdir/testrootfs/opt/etc/._cfg0001_conf' ]"
|
||||
assertTrue 'config protect maintains the protected files' "[ -e '$tmpdir/testrootfs/opt/etc/conf' ]"
|
||||
}
|
||||
|
||||
|
||||
|
@@ -61,6 +61,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -42,6 +42,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -42,6 +42,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|
@@ -52,6 +52,7 @@ system:
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_protect_skip: true
|
||||
config_from_host: true
|
||||
config_protect_confdir:
|
||||
- $tmpdir/config.protect.d
|
||||
repositories:
|
||||
|
@@ -43,6 +43,7 @@ system:
|
||||
rootfs: $tmpdir/testrootfs
|
||||
database_path: "/"
|
||||
database_engine: "boltdb"
|
||||
config_from_host: true
|
||||
repositories:
|
||||
- name: "main"
|
||||
type: "disk"
|
||||
|