Mirror of https://github.com/mudler/luet.git
synced 2025-09-05 09:10:43 +00:00
Compare commits
13 Commits
SHA1:
4eab1eb738
685bbf46a6
d89225f37d
55d34a3b40
85b5c96bdd
6f5f400765
be87861657
76e5d37895
8aca246f51
be7b56bae3
eae2382764
76076c8f51
7d11df3225
.github/workflows/release.yml (vendored): 16 changes
@@ -16,7 +16,19 @@ jobs:
       - name: Login to quay
         run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
       - name: Install deps
-        run: sudo apt-get install -y upx
+        run: |
+          sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
+          sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
+          sudo chmod a+x "/usr/bin/img"
+      - name: Build test
+        run: sudo -E env "PATH=$PATH" make multiarch-build-small
+      - name: Login to quay with img
+        run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
+      - name: Tests with Img backend
+        run: |
+          sudo -E env "PATH=$PATH" \
+            env "LUET_BACKEND=img" \
+            make test-integration
       - name: Tests
         run: |
           sudo -E \
@@ -24,7 +36,7 @@ jobs:
             env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
             env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
             env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
-            make deps multiarch-build-small test-integration test-coverage
+            make test-integration test-coverage
       - name: Build
         run: sudo -E env "PATH=$PATH" make multiarch-build-small && sudo chmod -R 777 release/
       - name: Release
.github/workflows/test.yml (vendored): 11 changes
@@ -18,6 +18,13 @@ jobs:
       - name: setup-docker
         uses: docker-practice/actions-setup-docker@0.0.1
       - name: Install deps
-        run: sudo apt-get install -y upx
+        run: |
+          sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
+          sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
+          sudo chmod a+x "/usr/bin/img"
+      - name: Build
+        run: sudo -E env "PATH=$PATH" make multiarch-build-small
+      - name: Tests with Img backend
+        run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
       - name: Tests
-        run: sudo -E env "PATH=$PATH" make deps multiarch-build-small test-integration test-coverage
+        run: sudo -E env "PATH=$PATH" make test-integration test-coverage
@@ -79,8 +79,6 @@ Build packages specifying multiple definition trees:
 viper.BindPFlag("wait", cmd.Flags().Lookup("wait"))
 viper.BindPFlag("keep-images", cmd.Flags().Lookup("keep-images"))
 
-LuetCfg.Viper.BindPFlag("keep-exported-images", cmd.Flags().Lookup("keep-exported-images"))
-
 LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
 LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
 LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
@@ -105,7 +103,6 @@ Build packages specifying multiple definition trees:
 keepImages := viper.GetBool("keep-images")
 nodeps := viper.GetBool("nodeps")
 onlydeps := viper.GetBool("onlydeps")
-keepExportedImages := viper.GetBool("keep-exported-images")
 onlyTarget, _ := cmd.Flags().GetBool("only-target-package")
 full, _ := cmd.Flags().GetBool("full")
 concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
@@ -169,7 +166,6 @@ Build packages specifying multiple definition trees:
 opts.OnlyDeps = onlydeps
 opts.NoDeps = nodeps
 opts.Wait = wait
-opts.KeepImageExport = keepExportedImages
 opts.PackageTargetOnly = onlyTarget
 opts.BuildValuesFile = values
 var solverOpts solver.Options
@@ -315,7 +311,6 @@ func init() {
 buildCmd.Flags().Bool("keep-images", true, "Keep built docker images in the host")
 buildCmd.Flags().Bool("nodeps", false, "Build only the target packages, skipping deps (it works only if you already built the deps locally, or by using --pull) ")
 buildCmd.Flags().Bool("onlydeps", false, "Build only package dependencies")
-buildCmd.Flags().Bool("keep-exported-images", false, "Keep exported images used during building")
 buildCmd.Flags().Bool("only-target-package", false, "Build packages of only the required target. Otherwise builds all the necessary ones not present in the destination")
 buildCmd.Flags().String("solver-type", "", "Solver strategy")
 buildCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
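The four removals above retire the keep-exported-images option end to end: the cobra flag definition, the Viper binding, the GetBool read, and the assignment into the build options. For reference, this is the same flag wiring pattern the remaining options still use; a minimal sketch with the removed flag name, assuming stock cobra/viper and not the actual luet command code:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var buildCmd = &cobra.Command{
	Use: "build",
	PreRun: func(cmd *cobra.Command, args []string) {
		// step 2: bind the CLI flag to a viper key
		viper.BindPFlag("keep-exported-images", cmd.Flags().Lookup("keep-exported-images"))
	},
	Run: func(cmd *cobra.Command, args []string) {
		// step 3: read the effective value (flag, environment or config file)
		keepExported := viper.GetBool("keep-exported-images")
		fmt.Println("keep-exported-images:", keepExported)
	},
}

func init() {
	// step 1: declare the flag; this is the wiring the diff deletes
	buildCmd.Flags().Bool("keep-exported-images", false, "Keep exported images used during building")
}

func main() {
	buildCmd.Execute()
}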
@@ -40,7 +40,7 @@ var Verbose bool
 var LockedCommands = []string{"install", "uninstall", "upgrade"}
 
 const (
-LuetCLIVersion = "0.10.1"
+LuetCLIVersion = "0.10.2"
 LuetEnvPrefix = "LUET"
 )
@@ -373,7 +373,7 @@ func (a *PackageArtifact) Compress(src string, concurrency int) error {
 }
 // Create gzip writer.
 w := gzip.NewWriter(dst)
-w.SetConcurrency(concurrency, 10)
+w.SetConcurrency(1<<20, concurrency)
 defer w.Close()
 defer dst.Close()
 _, err = io.Copy(w, bufferedReader)
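The one-line change above is an argument-order fix: the writer here appears to be klauspost/pgzip aliased as gzip, and pgzip's signature is SetConcurrency(blockSize, blocks int). The old call therefore used the concurrency value as the block size and hard-coded ten worker blocks; the new call uses 1 MiB blocks and `concurrency` parallel workers. A minimal sketch of the corrected usage, under the assumption that pgzip is the underlying writer:

package example

import (
	"io"
	"os"

	gzip "github.com/klauspost/pgzip" // assumption: the same alias the luet code appears to use
)

// compress gzips src into dst using parallel compression.
func compress(src, dst string, concurrency int) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	w := gzip.NewWriter(out)
	// SetConcurrency(blockSize, blocks): 1 MiB blocks, `concurrency` workers.
	if err := w.SetConcurrency(1<<20, concurrency); err != nil {
		return err
	}
	defer w.Close()

	_, err = io.Copy(w, in)
	return err
}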
@@ -704,6 +704,19 @@ func worker(i int, wg *sync.WaitGroup, s <-chan CopyJob) {
 }
 }
 
+func compileRegexes(regexes []string) []*regexp.Regexp {
+var result []*regexp.Regexp
+for _, i := range regexes {
+r, e := regexp.Compile(i)
+if e != nil {
+Warning("Failed compiling regex:", e)
+continue
+}
+result = append(result, r)
+}
+return result
+}
+
 // ExtractArtifactFromDelta extracts deltas from ArtifactLayer from an image in tar format
 func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurrency int, keepPerms bool, includes []string, excludes []string, t CompressionImplementation) (Artifact, error) {
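compileRegexes, added above, centralizes the compile-and-warn loop that the three include/exclude branches below previously duplicated: invalid patterns are skipped with a warning rather than aborting the extraction. A small illustrative sketch of how such a compiled set is typically applied to file paths; the matching helper is an assumption for illustration, not the verbatim luet code:

package example

import "regexp"

// compileRegexes mirrors the helper added in the diff: invalid patterns are
// skipped instead of failing the whole extraction (the real code logs a warning).
func compileRegexes(regexes []string) []*regexp.Regexp {
	var result []*regexp.Regexp
	for _, i := range regexes {
		r, e := regexp.Compile(i)
		if e != nil {
			continue
		}
		result = append(result, r)
	}
	return result
}

// matchesAny is a hypothetical helper showing how the compiled set can be
// used to filter file paths coming from layer diffs.
func matchesAny(res []*regexp.Regexp, path string) bool {
	for _, r := range res {
		if r.MatchString(path) {
			return true
		}
	}
	return false
}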
@@ -737,15 +750,7 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
 // Handle includes in spec. If specified they filter what gets in the package
 
 if len(includes) > 0 && len(excludes) == 0 {
-var includeRegexp []*regexp.Regexp
-for _, i := range includes {
-r, e := regexp.Compile(i)
-if e != nil {
-Warning("Failed compiling regex:", e)
-continue
-}
-includeRegexp = append(includeRegexp, r)
-}
+includeRegexp := compileRegexes(includes)
 for _, l := range layers {
 // Consider d.Additions (and d.Changes? - warn at least) only
 ADDS:
@@ -766,15 +771,7 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
 }
 
 } else if len(includes) == 0 && len(excludes) != 0 {
-var excludeRegexp []*regexp.Regexp
-for _, i := range excludes {
-r, e := regexp.Compile(i)
-if e != nil {
-Warning("Failed compiling regex:", e)
-continue
-}
-excludeRegexp = append(excludeRegexp, r)
-}
+excludeRegexp := compileRegexes(excludes)
 for _, l := range layers {
 // Consider d.Additions (and d.Changes? - warn at least) only
 ADD:
@@ -795,25 +792,8 @@ func ExtractArtifactFromDelta(src, dst string, layers []ArtifactLayer, concurren
 }
 
 } else if len(includes) != 0 && len(excludes) != 0 {
-var includeRegexp []*regexp.Regexp
-for _, i := range includes {
-r, e := regexp.Compile(i)
-if e != nil {
-Warning("Failed compiling regex:", e)
-continue
-}
-includeRegexp = append(includeRegexp, r)
-}
-
-var excludeRegexp []*regexp.Regexp
-for _, i := range excludes {
-r, e := regexp.Compile(i)
-if e != nil {
-Warning("Failed compiling regex:", e)
-continue
-}
-excludeRegexp = append(excludeRegexp, r)
-}
+includeRegexp := compileRegexes(includes)
+excludeRegexp := compileRegexes(excludes)
 
 for _, l := range layers {
 // Consider d.Additions (and d.Changes? - warn at least) only
@@ -87,7 +87,8 @@ ENV PACKAGE_CATEGORY=app-admin`))
 DockerFileName: "Dockerfile",
 Destination: filepath.Join(tmpdir2, "output1.tar"),
 }
-Expect(b.ImageDefinitionToTar(opts)).ToNot(HaveOccurred())
+Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
+Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
 Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
 Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
 
@@ -110,7 +111,8 @@ RUN echo bar > /test2`))
 DockerFileName: "LuetDockerfile",
 Destination: filepath.Join(tmpdir, "output2.tar"),
 }
-Expect(b.ImageDefinitionToTar(opts2)).ToNot(HaveOccurred())
+Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
+Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
 Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
 diffs, err := b.Changes(opts, opts2)
 Expect(err).ToNot(HaveOccurred())
@@ -127,13 +129,13 @@ RUN echo bar > /test2`))
 
 Expect(diffs).To(Equal(
 []ArtifactLayer{{
-FromImage: filepath.Join(tmpdir2, "output1.tar"),
-ToImage: filepath.Join(tmpdir, "output2.tar"),
+FromImage: "luet/base",
+ToImage: "test",
 Diffs: ArtifactDiffs{
 Additions: artifacts,
 },
 }}))
-err = b.ExtractRootfs(CompilerBackendOptions{SourcePath: filepath.Join(tmpdir, "output2.tar"), Destination: rootfs}, false)
+err = b.ExtractRootfs(CompilerBackendOptions{ImageName: "test", Destination: rootfs}, false)
 Expect(err).ToNot(HaveOccurred())
 
 artifact, err := ExtractArtifactFromDelta(rootfs, filepath.Join(tmpdir, "package.tar"), diffs, 2, false, []string{}, []string{}, None)
@@ -21,6 +21,8 @@ import (
 "path/filepath"
 "strings"
 
+. "github.com/mudler/luet/pkg/logger"
+
 "github.com/mudler/luet/pkg/compiler"
 "github.com/mudler/luet/pkg/config"
 "github.com/pkg/errors"
@@ -54,10 +56,7 @@ import (
 // ]
 func GenerateChanges(b compiler.CompilerBackend, fromImage, toImage compiler.CompilerBackendOptions) ([]compiler.ArtifactLayer, error) {
 
-srcImage := fromImage.Destination
-dstImage := toImage.Destination
-
-res := compiler.ArtifactLayer{FromImage: srcImage, ToImage: dstImage}
+res := compiler.ArtifactLayer{FromImage: fromImage.ImageName, ToImage: toImage.ImageName}
 
 tmpdiffs, err := config.LuetCfg.GetSystem().TempDir("extraction")
 if err != nil {
@@ -77,62 +76,24 @@ func GenerateChanges(b compiler.CompilerBackend, fromImage, toImage compiler.Com
 }
 defer os.RemoveAll(dstRootFS) // clean up
 
-// Handle both files (.tar) or images. If parameters are beginning with / , don't export the images
-if !strings.HasPrefix(srcImage, "/") {
-srcImageTar, err := ioutil.TempFile(tmpdiffs, "srctar")
-if err != nil {
-return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
-}
-
-defer os.Remove(srcImageTar.Name()) // clean up
-srcImageExport := compiler.CompilerBackendOptions{
-ImageName: srcImage,
-Destination: srcImageTar.Name(),
-}
-err = b.ExportImage(srcImageExport)
-if err != nil {
-return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while exporting src image "+srcImage)
-}
-srcImage = srcImageTar.Name()
-}
-
 srcImageExtract := compiler.CompilerBackendOptions{
-SourcePath: srcImage,
 ImageName: fromImage.ImageName,
 Destination: srcRootFS,
 }
+Debug("Extracting source image", fromImage.ImageName)
 err = b.ExtractRootfs(srcImageExtract, false) // No need to keep permissions as we just collect file diffs
 if err != nil {
-return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking src image "+srcImage)
+return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking src image "+fromImage.ImageName)
 }
 
-// Handle both files (.tar) or images. If parameters are beginning with / , don't export the images
-if !strings.HasPrefix(dstImage, "/") {
-dstImageTar, err := ioutil.TempFile(tmpdiffs, "dsttar")
-if err != nil {
-return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while creating tempdir for rootfs")
-}
-
-defer os.Remove(dstImageTar.Name()) // clean up
-dstImageExport := compiler.CompilerBackendOptions{
-ImageName: dstImage,
-Destination: dstImageTar.Name(),
-}
-err = b.ExportImage(dstImageExport)
-if err != nil {
-return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while exporting dst image "+dstImage)
-}
-dstImage = dstImageTar.Name()
-}
-
 dstImageExtract := compiler.CompilerBackendOptions{
-SourcePath: dstImage,
 ImageName: toImage.ImageName,
 Destination: dstRootFS,
 }
+Debug("Extracting destination image", toImage.ImageName)
 err = b.ExtractRootfs(dstImageExtract, false)
 if err != nil {
-return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking dst image "+dstImage)
+return []compiler.ArtifactLayer{}, errors.Wrap(err, "Error met while unpacking dst image "+toImage.ImageName)
 }
 
 // Get Additions/Changes. dst -> src
@@ -33,8 +33,7 @@ var _ = Describe("Docker image diffs", func() {
 Context("Generate diffs from docker images", func() {
 It("Detect no changes", func() {
 opts := compiler.CompilerBackendOptions{
 ImageName: "alpine:latest",
-Destination: "alpine:latest",
 }
 err := b.DownloadImage(opts)
 Expect(err).ToNot(HaveOccurred())
@@ -58,11 +57,9 @@ var _ = Describe("Docker image diffs", func() {
 Expect(err).ToNot(HaveOccurred())
 
 layers, err := GenerateChanges(b, compiler.CompilerBackendOptions{
 ImageName: "quay.io/mocaccino/micro",
-Destination: "quay.io/mocaccino/micro",
 }, compiler.CompilerBackendOptions{
 ImageName: "quay.io/mocaccino/extra",
-Destination: "quay.io/mocaccino/extra",
 })
 Expect(err).ToNot(HaveOccurred())
 Expect(len(layers)).To(Equal(1))
@@ -53,7 +53,7 @@ func (*SimpleDocker) BuildImage(opts compiler.CompilerBackendOptions) error {
 }
 buildarg := []string{"build", "-f", dockerfileName, "-t", name, context}
 
-Debug(":whale2: Building image " + name)
+Info(":whale2: Building image " + name)
 cmd := exec.Command("docker", buildarg...)
 cmd.Dir = path
 out, err := cmd.CombinedOutput()
@@ -175,7 +175,7 @@ func (*SimpleDocker) ExportImage(opts compiler.CompilerBackendOptions) error {
 return errors.Wrap(err, "Failed exporting image: "+string(out))
 }
 
-Info(":whale: Exported image:", name)
+Debug(":whale: Exported image:", name)
 return nil
 }
@@ -184,9 +184,22 @@ type ManifestEntry struct {
 }
 
 func (b *SimpleDocker) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms bool) error {
-src := opts.SourcePath
+name := opts.ImageName
 dst := opts.Destination
 
+tempexport, err := ioutil.TempDir(dst, "tmprootfs")
+if err != nil {
+return errors.Wrap(err, "Error met while creating tempdir for rootfs")
+}
+defer os.RemoveAll(tempexport) // clean up
+
+imageExport := filepath.Join(tempexport, "image.tar")
+if err := b.ExportImage(compiler.CompilerBackendOptions{ImageName: name, Destination: imageExport}); err != nil {
+return errors.Wrap(err, "failed while extracting rootfs for "+name)
+}
+
+src := imageExport
+
 if src == "" && opts.ImageName != "" {
 tempUnpack, err := ioutil.TempDir(dst, "tempUnpack")
 if err != nil {
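With this change the docker backend no longer needs a pre-exported tar handed in through SourcePath: ExtractRootfs exports the image to a temporary image.tar itself and then unpacks it, so callers only supply the image name and a destination directory. A caller-side sketch of the new call shape, mirroring the test updates elsewhere in this compare; the NewSimpleDockerBackend constructor name is an assumption:

package example

import (
	"io/ioutil"
	"os"

	"github.com/mudler/luet/pkg/compiler"
	"github.com/mudler/luet/pkg/compiler/backend"
)

// extractExample shows the new call shape: only ImageName and Destination are
// needed; the backend takes care of exporting and unpacking the image.
func extractExample() error {
	rootfs, err := ioutil.TempDir("", "rootfs")
	if err != nil {
		return err
	}
	defer os.RemoveAll(rootfs)

	b := backend.NewSimpleDockerBackend() // assumed constructor name
	return b.ExtractRootfs(compiler.CompilerBackendOptions{
		ImageName:   "alpine:latest",
		Destination: rootfs,
	}, false)
}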
@@ -77,9 +77,10 @@ ENV PACKAGE_CATEGORY=app-admin`))
 DockerFileName: "Dockerfile",
 Destination: filepath.Join(tmpdir2, "output1.tar"),
 }
-Expect(b.ImageDefinitionToTar(opts)).ToNot(HaveOccurred())
-Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
 Expect(b.BuildImage(opts)).ToNot(HaveOccurred())
+Expect(b.ExportImage(opts)).ToNot(HaveOccurred())
+Expect(helpers.Exists(filepath.Join(tmpdir2, "output1.tar"))).To(BeTrue())
 
 err = lspec.WriteStepImageDefinition(lspec.Image, filepath.Join(tmpdir, "LuetDockerfile"))
 Expect(err).ToNot(HaveOccurred())
@@ -100,7 +101,9 @@ RUN echo bar > /test2`))
 DockerFileName: "LuetDockerfile",
 Destination: filepath.Join(tmpdir, "output2.tar"),
 }
-Expect(b.ImageDefinitionToTar(opts2)).ToNot(HaveOccurred())
+Expect(b.BuildImage(opts2)).ToNot(HaveOccurred())
+Expect(b.ExportImage(opts2)).ToNot(HaveOccurred())
 Expect(helpers.Exists(filepath.Join(tmpdir, "output2.tar"))).To(BeTrue())
 
 artifacts := []ArtifactNode{{
@@ -115,13 +118,23 @@ RUN echo bar > /test2`))
 
 Expect(b.Changes(opts, opts2)).To(Equal(
 []ArtifactLayer{{
-FromImage: filepath.Join(tmpdir2, "output1.tar"),
-ToImage: filepath.Join(tmpdir, "output2.tar"),
+FromImage: "luet/base",
+ToImage: "test",
 Diffs: ArtifactDiffs{
 Additions: artifacts,
 },
 }}))
 
+opts2 = CompilerBackendOptions{
+ImageName: "test",
+SourcePath: tmpdir,
+DockerFileName: "LuetDockerfile",
+Destination: filepath.Join(tmpdir, "output3.tar"),
+}
+
+Expect(b.ImageDefinitionToTar(opts2)).ToNot(HaveOccurred())
+Expect(helpers.Exists(filepath.Join(tmpdir, "output3.tar"))).To(BeTrue())
+Expect(b.ImageExists(opts2.ImageName)).To(BeFalse())
 })
 
 It("Detects available images", func() {
@@ -46,7 +46,7 @@ func (*SimpleImg) BuildImage(opts compiler.CompilerBackendOptions) error {
 buildarg := []string{"build", "-f", dockerfileName, "-t", name, context}
 Spinner(22)
 defer SpinnerStop()
-Debug(":tea: Building image " + name)
+Info(":tea: Building image " + name)
 cmd := exec.Command("img", buildarg...)
 cmd.Dir = path
 out, err := cmd.CombinedOutput()
@@ -147,10 +147,16 @@ func (*SimpleImg) ExportImage(opts compiler.CompilerBackendOptions) error {
 }
 
 // ExtractRootfs extracts the docker image content inside the destination
-func (*SimpleImg) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms bool) error {
+func (s *SimpleImg) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms bool) error {
 name := opts.ImageName
 path := opts.Destination
 
+if !s.ImageExists(name) {
+if err := s.DownloadImage(opts); err != nil {
+return errors.Wrap(err, "failed pulling image "+name+" during extraction")
+}
+}
+
 os.RemoveAll(path)
 buildarg := []string{"unpack", "-o", path, name}
 Debug(":tea: Extracting image " + name)
@@ -158,7 +164,7 @@ func (*SimpleImg) ExtractRootfs(opts compiler.CompilerBackendOptions, keepPerms
 if err != nil {
 return errors.Wrap(err, "Failed extracting image: "+string(out))
 }
-Info(":tea: Image " + name + " extracted")
+Debug(":tea: Image " + name + " extracted")
 return nil
 }
@@ -236,7 +236,20 @@ func (cs *LuetCompiler) stripFromRootfs(includes []string, rootfs string, includ
 return nil
 }
 
-func (cs *LuetCompiler) unpackFs(rootfs string, concurrency int, p CompilationSpec) (Artifact, error) {
+func (cs *LuetCompiler) unpackFs(concurrency int, keepPermissions bool, p CompilationSpec, runnerOpts CompilerBackendOptions) (Artifact, error) {
+
+rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
+if err != nil {
+return nil, errors.Wrap(err, "Could not create tempdir")
+}
+defer os.RemoveAll(rootfs) // clean up
+
+err = cs.Backend.ExtractRootfs(CompilerBackendOptions{
+ImageName: runnerOpts.ImageName, Destination: rootfs}, keepPermissions)
+if err != nil {
+return nil, errors.Wrap(err, "Could not extract rootfs")
+}
+
 if p.GetPackageDir() != "" {
 Info(":tophat: Packing from output dir", p.GetPackageDir())
 rootfs = filepath.Join(rootfs, p.GetPackageDir())
@@ -247,7 +260,7 @@ func (cs *LuetCompiler) unpackFs(rootfs string, concurrency int, p CompilationSp
 cs.stripFromRootfs(p.GetIncludes(), rootfs, true)
 }
 if len(p.GetExcludes()) > 0 {
-// strip from includes
+// strip from excludes
 cs.stripFromRootfs(p.GetExcludes(), rootfs, false)
 }
 artifact := NewPackageArtifact(p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar"))
@@ -261,27 +274,33 @@ func (cs *LuetCompiler) unpackFs(rootfs string, concurrency int, p CompilationSp
 return artifact, nil
 }
 
-func (cs *LuetCompiler) unpackDelta(rootfs string, concurrency int, keepPermissions bool, p CompilationSpec, builderOpts, runnerOpts CompilerBackendOptions) (Artifact, error) {
+func (cs *LuetCompiler) unpackDelta(concurrency int, keepPermissions bool, p CompilationSpec, builderOpts, runnerOpts CompilerBackendOptions) (Artifact, error) {
+
+rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
+if err != nil {
+return nil, errors.Wrap(err, "Could not create tempdir")
+}
+defer os.RemoveAll(rootfs) // clean up
 
 pkgTag := ":package: " + p.GetPackage().HumanReadableString()
 if cs.Options.PullFirst && !cs.Backend.ImageExists(builderOpts.ImageName) && cs.Backend.ImageAvailable(builderOpts.ImageName) {
 err := cs.Backend.DownloadImage(builderOpts)
 if err != nil {
 return nil, errors.Wrap(err, "Could not pull image")
 }
-} else if !cs.Backend.ImageExists(builderOpts.ImageName) {
-return nil, errors.New("No image found for " + builderOpts.ImageName)
-}
-if err := cs.Backend.ExportImage(builderOpts); err != nil {
-return nil, errors.Wrap(err, "Could not export image"+builderOpts.ImageName)
-}
-if !cs.Options.KeepImageExport {
-defer os.Remove(builderOpts.Destination)
 }
 
 Info(pkgTag, ":hammer: Generating delta")
 diffs, err := cs.Backend.Changes(builderOpts, runnerOpts)
 if err != nil {
 return nil, errors.Wrap(err, "Could not generate changes from layers")
 }
 
+Debug("Extracting image to grab files from delta")
+if err := cs.Backend.ExtractRootfs(CompilerBackendOptions{
+ImageName: runnerOpts.ImageName, Destination: rootfs}, keepPermissions); err != nil {
+return nil, errors.Wrap(err, "Could not extract rootfs")
+}
 artifact, err := ExtractArtifactFromDelta(rootfs, p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"), diffs, concurrency, keepPermissions, p.GetIncludes(), p.GetExcludes(), cs.CompressionType)
 if err != nil {
 return nil, errors.Wrap(err, "Could not generate deltas")
@@ -456,38 +475,15 @@ func (cs *LuetCompiler) genArtifact(p CompilationSpec, builderOpts, runnerOpts C
 return artifact, nil
 }
 
-// prepare folder content of the image with the package compiled inside
-if err := cs.Backend.ExportImage(runnerOpts); err != nil {
-return nil, errors.Wrap(err, "Failed exporting image")
-}
-
-if !cs.Options.KeepImageExport {
-defer os.Remove(runnerOpts.Destination)
-}
-
-rootfs, err = ioutil.TempDir(p.GetOutputPath(), "rootfs")
-if err != nil {
-return nil, errors.Wrap(err, "Could not create tempdir")
-}
-defer os.RemoveAll(rootfs) // clean up
-
-// TODO: Compression and such
-err = cs.Backend.ExtractRootfs(CompilerBackendOptions{
-ImageName: runnerOpts.ImageName,
-SourcePath: runnerOpts.Destination, Destination: rootfs}, keepPermissions)
-if err != nil {
-return nil, errors.Wrap(err, "Could not extract rootfs")
-}
-
 if p.UnpackedPackage() {
 // Take content of container as a base for our package files
-artifact, err = cs.unpackFs(rootfs, concurrency, p)
+artifact, err = cs.unpackFs(concurrency, keepPermissions, p, runnerOpts)
 if err != nil {
 return nil, errors.Wrap(err, "Error met while extracting image")
 }
 } else {
 // Generate delta between the two images
-artifact, err = cs.unpackDelta(rootfs, concurrency, keepPermissions, p, builderOpts, runnerOpts)
+artifact, err = cs.unpackDelta(concurrency, keepPermissions, p, builderOpts, runnerOpts)
 if err != nil {
 return nil, errors.Wrap(err, "Error met while generating delta")
 }
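Seen together with the backend changes, the compiler refactor above removes the export-then-extract detour (and with it the KeepImageExport knob): unpackFs and unpackDelta now create their own temporary rootfs and ask the backend to extract the runner image by name. A rough sketch of that flow, assuming it sits in the compiler package so the CompilationSpec and CompilerBackend types from the diff are in scope; illustrative only, not the verbatim luet code:

package compiler

import (
	"io/ioutil"
	"os"

	"github.com/pkg/errors"
)

// unpackViaImage sketches the shape of the new unpack path using names from the diff.
func unpackViaImage(b CompilerBackend, p CompilationSpec, runnerOpts CompilerBackendOptions, keepPermissions bool) (string, error) {
	// 1. temp rootfs lives under the spec's output path and is always cleaned up by the caller
	rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
	if err != nil {
		return "", errors.Wrap(err, "Could not create tempdir")
	}
	// 2. the backend resolves the image by name: docker exports a temp tar,
	//    img pulls the image if missing, then unpacks into rootfs
	if err := b.ExtractRootfs(CompilerBackendOptions{
		ImageName:   runnerOpts.ImageName,
		Destination: rootfs,
	}, keepPermissions); err != nil {
		os.RemoveAll(rootfs)
		return "", errors.Wrap(err, "Could not extract rootfs")
	}
	// 3. the caller either packages rootfs directly (unpackFs) or diffs it
	//    against the builder image (unpackDelta); no ExportImage step remains
	return rootfs, nil
}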
@@ -50,7 +50,6 @@ type CompilerOptions struct {
 PullFirst, KeepImg, Push bool
 Concurrency int
 CompressionType CompressionImplementation
-KeepImageExport bool
 
 Wait bool
 OnlyDeps bool
@@ -21,9 +21,11 @@ import (
 "path/filepath"
 "sort"
 "strings"
+"syscall"
 "time"
 
 copy "github.com/otiai10/copy"
+"github.com/pkg/errors"
 )
 
 func OrderFiles(target string, files []string) ([]string, []string) {
@@ -134,7 +136,30 @@ func EnsureDir(fileName string) error {
 // of the source file. The file mode will be copied from the source and
 // the copied data is synced/flushed to stable storage.
 func CopyFile(src, dst string) (err error) {
-return copy.Copy(src, dst, copy.Options{OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
+// Workaround for https://github.com/otiai10/copy/issues/47
+fi, err := os.Lstat(src)
+if err != nil {
+return errors.Wrap(err, "error reading file info")
+}
+
+fm := fi.Mode()
+switch {
+case fm&os.ModeNamedPipe != 0:
+EnsureDir(dst)
+if err := syscall.Mkfifo(dst, uint32(fi.Mode())); err != nil {
+return errors.Wrap(err, "failed creating pipe")
+}
+if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+if err := os.Chown(dst, int(stat.Uid), int(stat.Gid)); err != nil {
+return errors.Wrap(err, "failed chowning file")
+}
+}
+return nil
+}
+
+return copy.Copy(src, dst, copy.Options{
+Sync: true,
+OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
 }
 
 func IsDirectory(path string) (bool, error) {
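The CopyFile change works around otiai10/copy issue 47: named pipes are now detected via os.ModeNamedPipe and recreated with syscall.Mkfifo (preserving ownership where possible) instead of being handed to the generic copier, and regular copies additionally get Sync: true. A small standalone sketch of the same FIFO detection on a Unix target; the helper name and paths are hypothetical:

package main

import (
	"fmt"
	"os"
	"syscall"
)

// copyFifo recreates src at dst when src is a named pipe. It mirrors the
// workaround in the diff in simplified, illustrative form.
func copyFifo(src, dst string) (bool, error) {
	fi, err := os.Lstat(src)
	if err != nil {
		return false, err
	}
	if fi.Mode()&os.ModeNamedPipe == 0 {
		return false, nil // not a FIFO; fall back to a regular copy
	}
	if err := syscall.Mkfifo(dst, uint32(fi.Mode().Perm())); err != nil {
		return false, err
	}
	return true, nil
}

func main() {
	handled, err := copyFifo("/tmp/in.fifo", "/tmp/out.fifo") // hypothetical paths
	fmt.Println(handled, err)
}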
@@ -151,5 +176,7 @@ func IsDirectory(path string) (bool, error) {
 func CopyDir(src string, dst string) (err error) {
 src = filepath.Clean(src)
 dst = filepath.Clean(dst)
-return copy.Copy(src, dst, copy.Options{OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
+return copy.Copy(src, dst, copy.Options{
+Sync: true,
+OnSymlink: func(string) copy.SymlinkAction { return copy.Shallow }})
 }
@@ -12,6 +12,7 @@ oneTimeTearDown() {
 
 testBuild() {
 mkdir $tmpdir/testbuild
+[ "$LUET_BACKEND" == "img" ] && startSkipping
 luet build --tree "$ROOT_DIR/tests/fixtures/retrieve-integration" --destination $tmpdir/testbuild --compression gzip test/b
 buildst=$?
 assertEquals 'builds successfully' "$buildst" "0"
@@ -20,6 +21,7 @@ testBuild() {
 }
 
 testRepo() {
+[ "$LUET_BACKEND" == "img" ] && startSkipping
 assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
 luet create-repo --tree "$ROOT_DIR/tests/fixtures/retrieve-integration" \
 --output $tmpdir/testbuild \
@@ -59,6 +61,7 @@ EOF
 
 
 testInstall() {
+[ "$LUET_BACKEND" == "img" ] && startSkipping
 luet install -y --config $tmpdir/luet.yaml test/b
 #luet install -y --config $tmpdir/luet.yaml test/c-1.0 > /dev/null
 installst=$?
@@ -71,6 +74,7 @@ testInstall() {
 
 
 testUnInstall() {
+[ "$LUET_BACKEND" == "img" ] && startSkipping
 luet uninstall -y --full --config $tmpdir/luet.yaml test/b
 installst=$?
 assertEquals 'uninstall test successfully' "$installst" "0"
@@ -80,6 +84,7 @@ testUnInstall() {
 
 
 testCleanup() {
+[ "$LUET_BACKEND" == "img" ] && startSkipping
 luet cleanup --config $tmpdir/luet.yaml
 installst=$?
 assertEquals 'install test successfully' "$installst" "0"