Mirror of https://github.com/mudler/luet.git (synced 2025-09-03 08:14:46 +00:00)
Compare commits
17 Commits
9ca5d24856
9a34296be0
ebd18ae22c
7f10a19be5
6bf7368993
338f310d67
3fd1bdbfc8
59d78c3f5c
86c256a062
876e3659fb
3c0dd2b71d
e9b4d66a3e
5047316b70
02edc10c58
d479ada402
7b800c9a20
18e6e085d5
@@ -40,7 +40,7 @@ var Verbose bool
 var LockedCommands = []string{"install", "uninstall", "upgrade"}

 const (
-    LuetCLIVersion = "0.9.10"
+    LuetCLIVersion = "0.9.12"
     LuetEnvPrefix = "LUET"
 )

@@ -123,7 +123,7 @@ func init() {
     upgradeCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
     upgradeCmd.Flags().Bool("force", false, "Force upgrade by ignoring errors")
     upgradeCmd.Flags().Bool("nodeps", false, "Don't consider package dependencies (harmful! overrides checkconflicts and full!)")
-    upgradeCmd.Flags().Bool("full", true, "Attempts to remove as much packages as possible which aren't required (slow)")
+    upgradeCmd.Flags().Bool("full", false, "Attempts to remove as much packages as possible which aren't required (slow)")
     upgradeCmd.Flags().Bool("universe", false, "Use ONLY the SAT solver to compute upgrades (experimental)")
     upgradeCmd.Flags().Bool("clean", false, "Try to drop removed packages (experimental, only when --universe is enabled)")
     upgradeCmd.Flags().Bool("sync", false, "Upgrade packages with new revisions (experimental)")

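The hunk above only flips the default of the --full flag on the upgrade command from true to false, so the slow "remove everything that is no longer required" pass now has to be requested explicitly. A minimal cobra sketch of defining and reading such a boolean flag; this is a stand-alone example, not luet's actual upgradeCmd wiring:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "upgrade",
		Run: func(cmd *cobra.Command, args []string) {
			// Read the flag value at run time; false unless --full was passed.
			full, _ := cmd.Flags().GetBool("full")
			fmt.Println("full uninstall enabled:", full)
		},
	}
	// Defaulting to false means the expensive full-uninstall pass is opt-in.
	cmd.Flags().Bool("full", false, "Attempts to remove as many packages as possible which aren't required (slow)")
	cmd.Execute()
}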
@@ -16,6 +16,7 @@
 package compiler

 import (
+    "archive/tar"
     "fmt"
     "io/ioutil"
     "os"

@@ -345,6 +346,10 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
     return builderOpts, runnerOpts, errors.Wrap(err, "Could not generate image definition")
 }

+if len(p.GetPreBuildSteps()) == 0 {
+    buildertaggedImage = image
+}
+
 // Then we write the step image, which uses the builder one
 if err := p.WriteStepImageDefinition(buildertaggedImage, filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+".dockerfile")); err != nil {
     return builderOpts, runnerOpts, errors.Wrap(err, "Could not generate image definition")

@@ -392,11 +397,15 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
     }
     return nil
 }

+if len(p.GetPreBuildSteps()) != 0 {
     Info(pkgTag, ":whale: Generating 'builder' image from", image, "as", buildertaggedImage, "with prelude steps")
     if err := buildAndPush(builderOpts); err != nil {
         return builderOpts, runnerOpts, errors.Wrap(err, "Could not push image: "+image+" "+builderOpts.DockerFileName)
+    }
 }

+// Even if we might not have any steps to build, we do that so we can tag the image used in this moment and use that to cache it in a registry, or in the system.
+// acting as a docker tag.
 Info(pkgTag, ":whale: Generating 'package' image from", buildertaggedImage, "as", packageImage, "with build steps")
 if err := buildAndPush(runnerOpts); err != nil {
     return builderOpts, runnerOpts, errors.Wrap(err, "Could not push image: "+image+" "+builderOpts.DockerFileName)

@@ -420,6 +429,23 @@ func (cs *LuetCompiler) genArtifact(p CompilationSpec, builderOpts, runnerOpts C
     unpack = true
 }

+if len(p.BuildSteps()) == 0 && len(p.GetPreBuildSteps()) == 0 && !unpack {
+    fakePackage := p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar")
+    // We can't generate delta in this case. It implies the package is a virtual, and nothing has to be done really
+
+    file, err := os.Create(fakePackage)
+    if err != nil {
+        return nil, errors.Wrap(err, "Failed creating virtual package")
+    }
+    defer file.Close()
+    tw := tar.NewWriter(file)
+    defer tw.Close()
+
+    artifact := NewPackageArtifact(fakePackage)
+    artifact.SetCompressionType(cs.CompressionType)
+    return artifact, nil
+}
+
 // prepare folder content of the image with the package compiled inside
 if err := cs.Backend.ExportImage(runnerOpts); err != nil {
     return nil, errors.Wrap(err, "Failed exporting image")

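The new branch in genArtifact short-circuits packages that have no build steps, no prelude steps and nothing to unpack: it emits an empty tarball and returns it as the artifact, treating the package as virtual. A self-contained sketch of writing such an empty archive with archive/tar; makeEmptyPackage and the file name are illustrative, not luet APIs:

package main

import (
	"archive/tar"
	"fmt"
	"os"
)

// makeEmptyPackage writes a valid but empty tar archive at path, which can
// stand in for a "virtual" package that ships no files of its own.
func makeEmptyPackage(path string) error {
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	defer file.Close()

	tw := tar.NewWriter(file)
	// Closing the writer with no entries still emits the tar end-of-archive marker.
	return tw.Close()
}

func main() {
	if err := makeEmptyPackage("a-virtual-1.0.package.tar"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("wrote empty package archive")
}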
@@ -247,6 +247,19 @@ ENV PACKAGE_NAME=` + cs.Package.GetName() + `
 ENV PACKAGE_VERSION=` + cs.Package.GetVersion() + `
 ENV PACKAGE_CATEGORY=` + cs.Package.GetCategory()

+    if len(cs.Retrieve) > 0 {
+        for _, s := range cs.Retrieve {
+            //var file string
+            // if helpers.IsValidUrl(s) {
+            //     file = s
+            // } else {
+            //     file = cs.Rel(s)
+            // }
+            spec = spec + `
+ADD ` + s + ` /luetbuild/`
+        }
+    }
+
     for _, s := range cs.Env {
         spec = spec + `
 ENV ` + s

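The Retrieve handling added above simply appends one ADD line per retrieve entry to the Dockerfile string being generated, which is what the test hunk below expects to see. A small stand-alone sketch of that string building; the FROM/WORKDIR preamble and the retrieve values are made up for the example:

package main

import "fmt"

func main() {
	retrieve := []string{"test", "http://www.google.com"}

	spec := `FROM alpine
WORKDIR /luetbuild`
	// Each retrieve entry becomes an ADD into the build workdir,
	// mirroring what the compile-spec writer does in the hunk above.
	for _, s := range retrieve {
		spec = spec + `
ADD ` + s + ` /luetbuild/`
	}
	fmt.Println(spec)
}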
@@ -173,6 +173,8 @@ WORKDIR /luetbuild
 ENV PACKAGE_NAME=a
 ENV PACKAGE_VERSION=1.0
 ENV PACKAGE_CATEGORY=test
+ADD test /luetbuild/
+ADD http://www.google.com /luetbuild/
 ENV test=1
 RUN echo foo > /test
 RUN echo bar > /test2`))

@@ -19,6 +19,8 @@ import (
     "io/ioutil"
     "os"
     "path/filepath"
+    "sort"
+    "strings"
     "time"

     copy "github.com/otiai10/copy"

@@ -41,6 +43,8 @@ func OrderFiles(target string, files []string) ([]string, []string) {
         }
     }

+    dirs := []string{}
+
     for _, f := range files {
         target := filepath.Join(target, f)
         fi, err := os.Lstat(target)

@@ -48,11 +52,16 @@ func OrderFiles(target string, files []string) ([]string, []string) {
             continue
         }
         if m := fi.Mode(); m.IsDir() {
-            newFiles = append(newFiles, f)
+            dirs = append(dirs, f)
         }
     }

-    return newFiles, notPresent
+    // Compare how many sub paths there are, and push at the end the ones that have less subpaths
+    sort.Slice(dirs, func(i, j int) bool {
+        return len(strings.Split(dirs[i], string(os.PathSeparator))) > len(strings.Split(dirs[j], string(os.PathSeparator)))
+    })
+
+    return append(newFiles, dirs...), notPresent
 }

 func ListDir(dir string) ([]string, error) {

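OrderFiles now collects directories separately and sorts them by the number of path components, deepest first, so nested directories are returned before their parents (regular files keep their position; directories are appended at the end). A self-contained sketch of that ordering; orderDirs is a hypothetical helper, not the luet function:

package main

import (
	"fmt"
	"os"
	"sort"
	"strings"
)

// orderDirs sorts directory paths so that paths with more components come
// first, i.e. children are listed before their parents.
func orderDirs(dirs []string) []string {
	sort.Slice(dirs, func(i, j int) bool {
		return len(strings.Split(dirs[i], string(os.PathSeparator))) >
			len(strings.Split(dirs[j], string(os.PathSeparator)))
	})
	return dirs
}

func main() {
	// Deeper paths sort first; paths of equal depth keep no particular
	// relative order, since sort.Slice is not a stable sort.
	fmt.Println(orderDirs([]string{"foo", "foo/bar", "foo/baz", "foo/baz/fa", "bar"}))
}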
@@ -60,5 +60,27 @@ var _ = Describe("Helpers", func() {
         Expect(ordered).To(Equal([]string{"baz", "bar/foo", "foo", "baz2/foo", "bar", "baz2"}))
         Expect(notExisting).To(Equal([]string{"notexisting"}))
     })
+
+    It("orders correctly when there are folders with folders", func() {
+        testDir, err := ioutil.TempDir(os.TempDir(), "test")
+        Expect(err).ToNot(HaveOccurred())
+        defer os.RemoveAll(testDir)
+
+        err = os.MkdirAll(filepath.Join(testDir, "bar"), os.ModePerm)
+        Expect(err).ToNot(HaveOccurred())
+        err = os.MkdirAll(filepath.Join(testDir, "foo"), os.ModePerm)
+        Expect(err).ToNot(HaveOccurred())
+
+        err = os.MkdirAll(filepath.Join(testDir, "foo", "bar"), os.ModePerm)
+        Expect(err).ToNot(HaveOccurred())
+
+        err = os.MkdirAll(filepath.Join(testDir, "foo", "baz"), os.ModePerm)
+        Expect(err).ToNot(HaveOccurred())
+        err = os.MkdirAll(filepath.Join(testDir, "foo", "baz", "fa"), os.ModePerm)
+        Expect(err).ToNot(HaveOccurred())
+
+        ordered, _ := OrderFiles(testDir, []string{"foo", "foo/bar", "bar", "foo/baz/fa", "foo/baz"})
+        Expect(ordered).To(Equal([]string{"foo/baz/fa", "foo/bar", "foo/baz", "foo", "bar"}))
+    })
 })
 })

@@ -84,7 +84,7 @@ func (l *LuetInstaller) computeUpgrade(syncedRepos Repositories, s *System) (pkg
             return uninstall, toInstall, errors.Wrap(err, "Failed solving solution for upgrade")
         }
     } else {
-        uninstall, solution, err = solv.Upgrade(!l.Options.FullUninstall, l.Options.NoDeps)
+        uninstall, solution, err = solv.Upgrade(l.Options.FullUninstall, true)
         if err != nil {
             return uninstall, toInstall, errors.Wrap(err, "Failed solving solution for upgrade")
         }

@@ -241,10 +241,6 @@ func (l *LuetInstaller) swap(syncedRepos Repositories, toRemove pkg.Packages, to
     syncedRepos.SyncDatabase(allRepos)
     toInstall = syncedRepos.ResolveSelectors(toInstall)

-    if err := l.download(syncedRepos, toInstall); err != nil {
-        return errors.Wrap(err, "Pre-downloading packages")
-    }
-
     // We don't want any conflict with the installed to raise during the upgrade.
     // In this way we both force uninstalls and we avoid to check with conflicts
     // against the current system state which is pending to deletion

@@ -253,8 +249,43 @@ func (l *LuetInstaller) swap(syncedRepos Repositories, toRemove pkg.Packages, to
     // now the solver enforces the constraints and explictly denies two packages
     // of the same version installed.
     forced := l.Options.Force
+    nodeps := l.Options.NoDeps
     l.Options.Force = true
+    l.Options.NoDeps = true

+    // First check what would have been done
+    installedtmp := pkg.NewInMemoryDatabase(false)
+
+    for _, i := range s.Database.World() {
+        _, err := installedtmp.CreatePackage(i)
+        if err != nil {
+            return errors.Wrap(err, "Failed create temporary in-memory db")
+        }
+    }
+    systemAfterChanges := &System{Database: installedtmp}
+
+    for _, u := range toRemove {
+        packs, err := l.computeUninstall(u, systemAfterChanges)
+        if err != nil && !l.Options.Force {
+            Error("Failed computing uninstall for ", u.HumanReadableString())
+            return errors.Wrap(err, "computing uninstall "+u.HumanReadableString())
+        }
+        for _, p := range packs {
+            err = systemAfterChanges.Database.RemovePackage(p)
+            if err != nil {
+                return errors.Wrap(err, "Failed removing package from database")
+            }
+        }
+    }
+
+    match, packages, assertions, allRepos, err := l.computeInstall(syncedRepos, toInstall, systemAfterChanges)
+    if err != nil {
+        return errors.Wrap(err, "computing installation")
+    }
+
+    if err := l.download(syncedRepos, match); err != nil {
+        return errors.Wrap(err, "Pre-downloading packages")
+    }

     for _, u := range toRemove {
         err := l.Uninstall(u, s)

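The block added to swap above is essentially a dry run: the installed-package database is copied into a temporary in-memory database, the pending removals are simulated on that copy, and the install (plus the pre-download) is computed against the simulated state before the real system is touched. A toy sketch of the same idea, with a plain map standing in for luet's package database; all names here are illustrative:

package main

import "fmt"

// fakeDB is a stand-in for an installed-packages database.
type fakeDB map[string]bool

// clone copies the database so changes can be simulated without side effects.
func (d fakeDB) clone() fakeDB {
	c := fakeDB{}
	for k := range d {
		c[k] = true
	}
	return c
}

func main() {
	installed := fakeDB{"a": true, "b": true, "c": true}
	toRemove := []string{"b"}

	// Apply the removals to a copy first, so the real state stays untouched
	// while deciding what the post-upgrade system would look like.
	after := installed.clone()
	for _, p := range toRemove {
		delete(after, p)
	}

	fmt.Println("real state:", installed)
	fmt.Println("simulated state used to plan the install:", after)
}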
@@ -263,13 +294,9 @@ func (l *LuetInstaller) swap(syncedRepos Repositories, toRemove pkg.Packages, to
             return errors.Wrap(err, "uninstalling "+u.HumanReadableString())
         }
     }

     l.Options.Force = forced
+    l.Options.NoDeps = nodeps
-    match, packages, assertions, allRepos, err := l.computeInstall(syncedRepos, toInstall, s)
-    if err != nil {
-        return errors.Wrap(err, "computing installation")
-    }
-
     return l.install(syncedRepos, match, packages, assertions, allRepos, s)
 }

@@ -325,32 +352,8 @@ func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
     return l.install(syncedRepos, match, packages, assertions, allRepos, s)
 }

-func (l *LuetInstaller) download(syncedRepos Repositories, cp pkg.Packages) error {
+func (l *LuetInstaller) download(syncedRepos Repositories, toDownload map[string]ArtifactMatch) error {
-    toDownload := map[string]ArtifactMatch{}
-
-    // FIXME: This can be optimized. We don't need to re-match this to the repository
-    // But we could just do it once
-
-    // Gathers things to download
-    for _, currentPack := range cp {
-        matches := syncedRepos.PackageMatches(pkg.Packages{currentPack})
-        if len(matches) == 0 {
-            return errors.New("Failed matching solutions against repository for " + currentPack.HumanReadableString() + " where are definitions coming from?!")
-        }
-    A:
-        for _, artefact := range matches[0].Repo.GetIndex() {
-            if artefact.GetCompileSpec().GetPackage() == nil {
-                return errors.New("Package in compilespec empty")
-
-            }
-            if matches[0].Package.Matches(artefact.GetCompileSpec().GetPackage()) {
-
-                toDownload[currentPack.GetFingerPrint()] = ArtifactMatch{Package: currentPack, Artifact: artefact, Repository: matches[0].Repo}
-
-                break A
-            }
-        }
-    }
     // Download packages into cache in parallel.
     all := make(chan ArtifactMatch)

@@ -508,25 +511,13 @@ func (l *LuetInstaller) computeInstall(syncedRepos Repositories, cp pkg.Packages

 func (l *LuetInstaller) install(syncedRepos Repositories, toInstall map[string]ArtifactMatch, p pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase, s *System) error {
     // Install packages into rootfs in parallel.
+    if err := l.download(syncedRepos, toInstall); err != nil {
+        return errors.Wrap(err, "Downloading packages")
+    }
+
     all := make(chan ArtifactMatch)

-    var wg = new(sync.WaitGroup)
+    wg := new(sync.WaitGroup)

-    // Download first
-    for i := 0; i < l.Options.Concurrency; i++ {
-        wg.Add(1)
-        go l.downloadWorker(i, wg, all)
-    }
-
-    for _, c := range toInstall {
-        all <- c
-    }
-    close(all)
-    wg.Wait()
-
-    all = make(chan ArtifactMatch)
-
-    wg = new(sync.WaitGroup)
-
     // Do the real install
     for i := 0; i < l.Options.Concurrency; i++ {

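With this change install() pre-downloads everything through l.download(syncedRepos, toInstall) and then runs a single worker pool for the actual installation, instead of spinning up one pool for downloads and a second one for installs. The channel-plus-WaitGroup pattern it relies on, in generic form; worker and the task list are placeholders:

package main

import (
	"fmt"
	"sync"
)

// worker drains the tasks channel until it is closed.
func worker(id int, wg *sync.WaitGroup, tasks <-chan string) {
	defer wg.Done()
	for t := range tasks {
		fmt.Printf("worker %d handling %s\n", id, t)
	}
}

func main() {
	concurrency := 3
	tasks := make(chan string)
	wg := new(sync.WaitGroup)

	// Start a fixed pool of workers.
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go worker(i, wg, tasks)
	}

	// Feed the work, then close the channel so the workers exit their loops.
	for _, t := range []string{"pkg-a", "pkg-b", "pkg-c", "pkg-d"} {
		tasks <- t
	}
	close(tasks)
	wg.Wait()
}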
@@ -1,6 +1,6 @@

 steps:
-- tar xvf a-test-1.0.package.* -C ./
+- ls -liah /a
 - mv a /b
 requires:
 - name: "a"

|
@@ -12,7 +12,7 @@ oneTimeTearDown() {
|
|||||||
|
|
||||||
testBuild() {
|
testBuild() {
|
||||||
mkdir $tmpdir/testbuild
|
mkdir $tmpdir/testbuild
|
||||||
luet build --tree "$ROOT_DIR/tests/fixtures/finalizers" --destination $tmpdir/testbuild --compression gzip --all > /dev/null
|
luet build --tree "$ROOT_DIR/tests/fixtures/finalizers" --destination $tmpdir/testbuild --compression gzip --all
|
||||||
buildst=$?
|
buildst=$?
|
||||||
assertEquals 'builds successfully' "$buildst" "0"
|
assertEquals 'builds successfully' "$buildst" "0"
|
||||||
assertTrue 'create package' "[ -e '$tmpdir/testbuild/alpine-seed-1.0.package.tar.gz' ]"
|
assertTrue 'create package' "[ -e '$tmpdir/testbuild/alpine-seed-1.0.package.tar.gz' ]"
|
||||||