⚙️ Add ability to build from Dockerfiles directly

This commit is contained in:
Ettore Di Giacinto 2022-04-26 19:24:31 +02:00 committed by mudler
parent 4e2a2adfc1
commit e70a543f42
457 changed files with 33148 additions and 4999 deletions

View File

@ -25,11 +25,8 @@ import (
"github.com/mudler/luet/pkg/api/core/types" "github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/api/core/types/artifact" "github.com/mudler/luet/pkg/api/core/types/artifact"
"github.com/mudler/luet/pkg/compiler" "github.com/mudler/luet/pkg/compiler"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/mudler/luet/pkg/installer" "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options"
pkg "github.com/mudler/luet/pkg/database" pkg "github.com/mudler/luet/pkg/database"
fileHelpers "github.com/mudler/luet/pkg/helpers/file" fileHelpers "github.com/mudler/luet/pkg/helpers/file"
tree "github.com/mudler/luet/pkg/tree" tree "github.com/mudler/luet/pkg/tree"
@ -120,8 +117,9 @@ Build packages specifying multiple definition trees:
out, _ := cmd.Flags().GetString("output") out, _ := cmd.Flags().GetString("output")
pretend, _ := cmd.Flags().GetBool("pretend") pretend, _ := cmd.Flags().GetBool("pretend")
fromRepo, _ := cmd.Flags().GetBool("from-repositories") fromRepo, _ := cmd.Flags().GetBool("from-repositories")
fromDockerfiles, _ := cmd.Flags().GetBool("dockerfiles")
compilerSpecs := compilerspec.NewLuetCompilationspecs() compilerSpecs := types.NewLuetCompilationspecs()
var db types.PackageDatabase var db types.PackageDatabase
var results Results var results Results
@ -136,8 +134,16 @@ Build packages specifying multiple definition trees:
runtimeDB := pkg.NewInMemoryDatabase(false) runtimeDB := pkg.NewInMemoryDatabase(false)
defer runtimeDB.Clean() defer runtimeDB.Clean()
installerRecipe := tree.NewInstallerRecipe(runtimeDB) installerRecipeParsers := tree.DefaultInstallerParsers
generalRecipe := tree.NewCompilerRecipe(db) generalRecipeParsers := tree.DefaultCompilerParsers
if fromDockerfiles {
installerRecipeParsers = append(installerRecipeParsers, tree.RuntimeDockerfileParser)
generalRecipeParsers = append(generalRecipeParsers, tree.BuildDockerfileParser)
}
installerRecipe := tree.NewInstallerRecipe(runtimeDB, installerRecipeParsers...)
generalRecipe := tree.NewCompilerRecipe(db, generalRecipeParsers...)
for _, src := range treePaths { for _, src := range treePaths {
util.DefaultContext.Info("Loading tree", src) util.DefaultContext.Info("Loading tree", src)
@ -172,40 +178,40 @@ Build packages specifying multiple definition trees:
util.DefaultContext.Debug("Solver", opts.CompactString()) util.DefaultContext.Debug("Solver", opts.CompactString())
compileropts := []options.Option{options.NoDeps(nodeps), compileropts := []types.CompilerOption{compiler.NoDeps(nodeps),
options.WithBackendType(backendType), compiler.WithBackendType(backendType),
options.PushImages(push), compiler.PushImages(push),
options.WithBuildValues(values), compiler.WithBuildValues(values),
options.WithPullRepositories(pullRepo), compiler.WithPullRepositories(pullRepo),
options.WithPushRepository(imageRepository), compiler.WithPushRepository(imageRepository),
options.Rebuild(rebuild), compiler.Rebuild(rebuild),
options.WithTemplateFolder(templateFolders), compiler.WithTemplateFolder(templateFolders),
options.WithSolverOptions(opts), compiler.WithSolverOptions(opts),
options.Wait(wait), compiler.Wait(wait),
options.WithRuntimeDatabase(installerRecipe.GetDatabase()), compiler.WithRuntimeDatabase(installerRecipe.GetDatabase()),
options.OnlyTarget(onlyTarget), compiler.OnlyTarget(onlyTarget),
options.PullFirst(pull), compiler.PullFirst(pull),
options.KeepImg(keepImages), compiler.KeepImg(keepImages),
options.OnlyDeps(onlydeps), compiler.OnlyDeps(onlydeps),
options.WithContext(util.DefaultContext), compiler.WithContext(util.DefaultContext),
options.BackendArgs(backendArgs), compiler.BackendArgs(backendArgs),
options.Concurrency(concurrency), compiler.Concurrency(concurrency),
options.WithCompressionType(compression.Implementation(compressionType))} compiler.WithCompressionType(types.CompressionImplementation(compressionType))}
if pushFinalImages { if pushFinalImages {
compileropts = append(compileropts, options.EnablePushFinalImages) compileropts = append(compileropts, compiler.EnablePushFinalImages)
if pushFinalImagesForce { if pushFinalImagesForce {
compileropts = append(compileropts, options.ForcePushFinalImages) compileropts = append(compileropts, compiler.ForcePushFinalImages)
} }
if pushFinalImagesRepository != "" { if pushFinalImagesRepository != "" {
compileropts = append(compileropts, options.WithFinalRepository(pushFinalImagesRepository)) compileropts = append(compileropts, compiler.WithFinalRepository(pushFinalImagesRepository))
} else if imageRepository != "" { } else if imageRepository != "" {
compileropts = append(compileropts, options.WithFinalRepository(imageRepository)) compileropts = append(compileropts, compiler.WithFinalRepository(imageRepository))
} }
} }
if generateImages { if generateImages {
compileropts = append(compileropts, options.EnableGenerateFinalImages) compileropts = append(compileropts, compiler.EnableGenerateFinalImages)
} }
luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(), compileropts...) luetCompiler := compiler.NewLuetCompiler(compilerBackend, generalRecipe.GetDatabase(), compileropts...)
@ -255,7 +261,7 @@ Build packages specifying multiple definition trees:
artifact, errs = luetCompiler.CompileWithReverseDeps(privileged, compilerSpecs) artifact, errs = luetCompiler.CompileWithReverseDeps(privileged, compilerSpecs)
} else if pretend { } else if pretend {
var toCalculate []*compilerspec.LuetCompilationSpec var toCalculate []*types.LuetCompilationSpec
if full { if full {
var err error var err error
toCalculate, err = luetCompiler.ComputeMinimumCompilableSet(compilerSpecs.All()...) toCalculate, err = luetCompiler.ComputeMinimumCompilableSet(compilerSpecs.All()...)
@ -337,7 +343,7 @@ func init() {
buildCmd.Flags().Bool("push-final-images", false, "Push final images while building") buildCmd.Flags().Bool("push-final-images", false, "Push final images while building")
buildCmd.Flags().Bool("push-final-images-force", false, "Override existing images") buildCmd.Flags().Bool("push-final-images-force", false, "Override existing images")
buildCmd.Flags().String("push-final-images-repository", "", "Repository where to push final images to") buildCmd.Flags().String("push-final-images-repository", "", "Repository where to push final images to")
buildCmd.Flags().Bool("dockerfiles", false, "Source packages also from dockerfiles")
buildCmd.Flags().Bool("full", false, "Build all packages (optimized)") buildCmd.Flags().Bool("full", false, "Build all packages (optimized)")
buildCmd.Flags().StringSlice("values", []string{}, "Build values file to interpolate with each package") buildCmd.Flags().StringSlice("values", []string{}, "Build values file to interpolate with each package")
buildCmd.Flags().StringSliceP("backend-args", "a", []string{}, "Backend args") buildCmd.Flags().StringSliceP("backend-args", "a", []string{}, "Backend args")

View File

@ -20,9 +20,10 @@ import (
helpers "github.com/mudler/luet/cmd/helpers" helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util" "github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler" "github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/types/compression"
installer "github.com/mudler/luet/pkg/installer" installer "github.com/mudler/luet/pkg/installer"
"github.com/mudler/luet/pkg/tree"
// . "github.com/mudler/luet/pkg/logger" // . "github.com/mudler/luet/pkg/logger"
pkg "github.com/mudler/luet/pkg/database" pkg "github.com/mudler/luet/pkg/database"
@ -93,6 +94,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
source_repo := viper.GetString("repo") source_repo := viper.GetString("repo")
backendType := viper.GetString("backend") backendType := viper.GetString("backend")
fromRepo, _ := cmd.Flags().GetBool("from-repositories") fromRepo, _ := cmd.Flags().GetBool("from-repositories")
dockerFiles, _ := cmd.Flags().GetBool("dockerfiles")
treeFile := installer.NewDefaultTreeRepositoryFile() treeFile := installer.NewDefaultTreeRepositoryFile()
metaFile := installer.NewDefaultMetaRepositoryFile() metaFile := installer.NewDefaultMetaRepositoryFile()
@ -114,6 +116,11 @@ Create a repository from the metadata description defined in the luet.yaml confi
installer.WithContext(util.DefaultContext), installer.WithContext(util.DefaultContext),
} }
if dockerFiles {
opts = append(opts, installer.WithCompilerParser(append(tree.DefaultCompilerParsers, tree.BuildDockerfileParser)...))
opts = append(opts, installer.WithRuntimeParser(append(tree.DefaultInstallerParsers, tree.RuntimeDockerfileParser)...))
}
if source_repo != "" { if source_repo != "" {
// Search for system repository // Search for system repository
lrepo, err := util.DefaultContext.Config.GetSystemRepository(source_repo) lrepo, err := util.DefaultContext.Config.GetSystemRepository(source_repo)
@ -150,7 +157,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
helpers.CheckErr(err) helpers.CheckErr(err)
if treetype != "" { if treetype != "" {
treeFile.SetCompressionType(compression.Implementation(treetype)) treeFile.SetCompressionType(types.CompressionImplementation(treetype))
} }
if treeName != "" { if treeName != "" {
@ -158,7 +165,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
} }
if metatype != "" { if metatype != "" {
metaFile.SetCompressionType(compression.Implementation(metatype)) metaFile.SetCompressionType(types.CompressionImplementation(metatype))
} }
if metaName != "" { if metaName != "" {
@ -188,6 +195,7 @@ func init() {
createrepoCmd.Flags().Bool("reset-revision", false, "Reset repository revision.") createrepoCmd.Flags().Bool("reset-revision", false, "Reset repository revision.")
createrepoCmd.Flags().String("repo", "", "Use repository defined in configuration.") createrepoCmd.Flags().String("repo", "", "Use repository defined in configuration.")
createrepoCmd.Flags().String("backend", "docker", "backend used (docker,img)") createrepoCmd.Flags().String("backend", "docker", "backend used (docker,img)")
createrepoCmd.Flags().Bool("dockerfiles", false, "Read dockerfiles in tree as packages.")
createrepoCmd.Flags().Bool("force-push", false, "Force overwrite of docker images if already present online") createrepoCmd.Flags().Bool("force-push", false, "Force overwrite of docker images if already present online")
createrepoCmd.Flags().Bool("push-images", false, "Enable/Disable docker image push for docker repositories") createrepoCmd.Flags().Bool("push-images", false, "Enable/Disable docker image push for docker repositories")

View File

@ -104,7 +104,7 @@ func ParsePackageStr(p string) (*types.Package, error) {
}, nil }, nil
} }
ver := ">=0" ver := ""
cat := "" cat := ""
name := "" name := ""
@ -116,6 +116,10 @@ func ParsePackageStr(p string) (*types.Package, error) {
cat, name = packageData(p) cat, name = packageData(p)
} }
if (cat != "") && ver == "" {
ver = ">=0"
}
return &types.Package{ return &types.Package{
Name: name, Name: name,
Category: cat, Category: cat,

View File

@ -29,7 +29,7 @@ var _ = Describe("CLI Helpers", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(pack.GetName()).To(Equal("foo")) Expect(pack.GetName()).To(Equal("foo"))
Expect(pack.GetCategory()).To(Equal("")) Expect(pack.GetCategory()).To(Equal(""))
Expect(pack.GetVersion()).To(Equal(">=0")) Expect(pack.GetVersion()).To(Equal(""))
}) })
It("accept unversioned packages with category", func() { It("accept unversioned packages with category", func() {
pack, err := ParsePackageStr("cat/foo") pack, err := ParsePackageStr("cat/foo")

View File

@ -21,9 +21,8 @@ import (
helpers "github.com/mudler/luet/cmd/helpers" helpers "github.com/mudler/luet/cmd/helpers"
"github.com/mudler/luet/cmd/util" "github.com/mudler/luet/cmd/util"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/api/core/types/artifact" "github.com/mudler/luet/pkg/api/core/types/artifact"
"github.com/mudler/luet/pkg/compiler/types/compression"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/viper" "github.com/spf13/viper"
@ -65,9 +64,9 @@ Afterwards, you can use the content generated and associate it with a tree and a
util.DefaultContext.Fatal("Invalid package string ", packageName, ": ", err.Error()) util.DefaultContext.Fatal("Invalid package string ", packageName, ": ", err.Error())
} }
spec := &compilerspec.LuetCompilationSpec{Package: p} spec := &types.LuetCompilationSpec{Package: p}
a := artifact.NewPackageArtifact(filepath.Join(dst, p.GetFingerPrint()+".package.tar")) a := artifact.NewPackageArtifact(filepath.Join(dst, p.GetFingerPrint()+".package.tar"))
a.CompressionType = compression.Implementation(compressionType) a.CompressionType = types.CompressionImplementation(compressionType)
err = a.Compress(sourcePath, concurrency) err = a.Compress(sourcePath, concurrency)
if err != nil { if err != nil {
util.DefaultContext.Fatal("failed compressing ", packageName, ": ", err.Error()) util.DefaultContext.Fatal("failed compressing ", packageName, ": ", err.Error())

View File

@ -26,7 +26,6 @@ import (
"github.com/mudler/luet/pkg/api/core/types" "github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler" "github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/compiler/backend" "github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/options"
"github.com/mudler/luet/pkg/installer" "github.com/mudler/luet/pkg/installer"
pkg "github.com/mudler/luet/pkg/database" pkg "github.com/mudler/luet/pkg/database"
@ -81,12 +80,12 @@ func NewTreeImageCommand() *cobra.Command {
luetCompiler := compiler.NewLuetCompiler( luetCompiler := compiler.NewLuetCompiler(
compilerBackend, compilerBackend,
reciper.GetDatabase(), reciper.GetDatabase(),
options.WithBuildValues(values), compiler.WithBuildValues(values),
options.WithContext(util.DefaultContext), compiler.WithContext(util.DefaultContext),
options.WithPushRepository(imageRepository), compiler.WithPushRepository(imageRepository),
options.WithPullRepositories(pullRepo), compiler.WithPullRepositories(pullRepo),
options.WithTemplateFolder(util.TemplateFolders(util.DefaultContext, installer.BuildTreeResult{}, treePath)), compiler.WithTemplateFolder(util.TemplateFolders(util.DefaultContext, installer.BuildTreeResult{}, treePath)),
options.WithSolverOptions(opts), compiler.WithSolverOptions(opts),
) )
a := args[0] a := args[0]

View File

@ -22,7 +22,7 @@ linkTitle = "luetdocs"
{{% blocks/lead color="primary" %}} {{% blocks/lead color="primary" %}}
Luet uses Container technologies ( Docker, img ) to build packages. Luet uses Container runtimes to build packages in a reproducible manner.
It provides an abstraction over the Dockerfile format introducing relation and versioning of images. It provides an abstraction over the Dockerfile format introducing relation and versioning of images.
{{% /blocks/lead %}} {{% /blocks/lead %}}
@ -42,8 +42,8 @@ New users are always welcome, and have fun!
{{% /blocks/feature %}} {{% /blocks/feature %}}
{{% blocks/feature icon="fa-terminal" title="Container-based" %}} {{% blocks/feature icon="fa-terminal" title="Container-based, reproducible builds" %}}
Use container abstraction to define your package repositories Use container abstraction to define package repositories. Intermediate build images are pushed along to guarantee reproducible builds.
{{% /blocks/feature %}} {{% /blocks/feature %}}
{{< /blocks/section >}} {{< /blocks/section >}}

View File

@ -15,7 +15,7 @@ Luet is written entirely in Go and comes as a single static binary. This has a f
- Package manager has no dependencies on the packages that it installs. There is no chance of breaking the package manager by installing a conflicting package, or uninstalling one. - Package manager has no dependencies on the packages that it installs. There is no chance of breaking the package manager by installing a conflicting package, or uninstalling one.
- Portable - it can run on any architecture - Portable - it can run on any architecture
Luet brings the containers ecosystem to standard software package management and delivery. It is fully built around the container concept, and leverages the huge catalog already present in the wild. It lets you use Docker images from [Docker Hub](https://hub.docker.com/), or from private registries to build packages, and helps you to redistribute them. Luet brings the containers ecosystem to standard software package management and delivery. It is fully built around the container concept, and leverages the huge catalog already present in the wild. It lets you use container images from [Docker Hub](https://hub.docker.com/), or from private registries to build packages, and helps you to redistribute them.
Systems that are using luet as a package manager can consume Luet repositories with only luet itself. No dependency is required by the Package manager, giving you the full control on what you install or not in the system. It can be used to generate *Linux from Scratch* distributions, also to build Docker images, or to simply build standalone packages that you might want to redistribute. Systems that are using luet as a package manager can consume Luet repositories with only luet itself. No dependency is required by the Package manager, giving you the full control on what you install or not in the system. It can be used to generate *Linux from Scratch* distributions, also to build Docker images, or to simply build standalone packages that you might want to redistribute.

View File

@ -32,12 +32,18 @@ Luet provides an abstraction layer on top of the container image layer to make t
To resolve the dependency tree Luet uses a SAT solver and no database. It is responsible for calculating the dependencies of a package and to prevent conflicts. The Luet core is still young, but it has a comprehensive test suite that we use to validate any future changes. To resolve the dependency tree Luet uses a SAT solver and no database. It is responsible for calculating the dependencies of a package and to prevent conflicts. The Luet core is still young, but it has a comprehensive test suite that we use to validate any future changes.
Building a package with Luet requires only a [definition](/docs/docs/concepts/packages/specfile). This definition can be self-contained and be only composed of one [specfile](/docs/docs/concepts/packages/specfile), or a group of them, forming a Luet tree. For more complex use-cases, see [collections](/docs/docs/concepts/packages/collections). Building a package with Luet requires only a [definition](/docs/docs/concepts/packages/specfile). This definition can be self-contained and be only composed of one [specfile](/docs/docs/concepts/packages/specfile), or a group of them, forming a Luet tree. For more complex use-cases, see [collections](/docs/docs/concepts/packages/collections). Luet also supports building packages from standard `Dockerfile` directly.
Run `luet build --help` to get more help for each parameter. Run `luet build --help` to get more help for each parameter.
Build accepts a list of packages to build, which syntax is in the `category/name-version` notation. See also [specfile documentation page](/docs/docs/concepts/packages/specfile/#refering-to-packages-from-the-cli) to see how to express packages from the CLI. Build accepts a list of packages to build, which syntax is in the `category/name-version` notation. See also [specfile documentation page](/docs/docs/concepts/packages/specfile/#refering-to-packages-from-the-cli) to see how to express packages from the CLI.
## Reproducible builds
Pinning a container build is not easy - there are always so many moving pieces, and sometimes just setting `FROM` to an image tag might not be enough.
While building a package, Luet generates intermediate images that are stored and can optionally be pushed to a registry. Those images can be re-used by Luet when building the same tree again, to guarantee highly reproducible builds.
## Environmental variables ## Environmental variables
Luet passes its environment variables to the engine which is called during build, so for example the environment variables `DOCKER_HOST` or `DOCKER_BUILDKIT` can be set. Luet passes its environment variables to the engine which is called during build, so for example the environment variables `DOCKER_HOST` or `DOCKER_BUILDKIT` can be set.
@ -100,6 +106,25 @@ $> luet build --all
Luet "trees" are just a group of specfiles, in the above example, our tree was the current directory. You can also specify a directory with the `--tree` option. Luet doesn't enforce any tree layout, so they can be nested at any level. The only rule of thumb is that a `build.yaml` file needs to have either a `definition.yaml` or a `collection.yaml` file next to it. Luet "trees" are just a group of specfiles, in the above example, our tree was the current directory. You can also specify a directory with the `--tree` option. Luet doesn't enforce any tree layout, so they can be nested at any level. The only rule of thumb is that a `build.yaml` file needs to have either a `definition.yaml` or a `collection.yaml` file next to it.
## Dockerfile example
Luet can seamlessly build packages also from Dockerfiles. Consider the following example, which will generate a `curl` package from an `alpine` image:
```bash
$> # put yourself in some workdir
$~/workdir> mkdir curl
$~/workdir> cat <<EOF > curl/Dockerfile
FROM alpine
RUN apk add curl
EOF
$~/workdir> luet build --all
```
## Nesting dependencies ## Nesting dependencies
In the example above we have created a package from a `delta`. Luet by default creates packages by analyzing the differences between the generated containers, and extracts the differences as archive, the resulting files then are compressed and can be consumed later on by `luet install`. In the example above we have created a package from a `delta`. Luet by default creates packages by analyzing the differences between the generated containers, and extracts the differences as archive, the resulting files then are compressed and can be consumed later on by `luet install`.

23
go.mod
View File

@ -7,10 +7,11 @@ require (
github.com/Masterminds/sprig/v3 v3.2.1 github.com/Masterminds/sprig/v3 v3.2.1
github.com/Sabayon/pkgs-checker v0.8.4 github.com/Sabayon/pkgs-checker v0.8.4
github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154 github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154
github.com/asottile/dockerfile v3.1.0+incompatible
github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
github.com/containerd/containerd v1.5.10 github.com/containerd/containerd v1.6.3-0.20220401172941-5ff8fce1fcc6
github.com/crillab/gophersat v1.3.2-0.20210701121804-72b19f5b6b38 github.com/crillab/gophersat v1.3.2-0.20210701121804-72b19f5b6b38
github.com/docker/cli v20.10.10+incompatible github.com/docker/cli v20.10.13+incompatible
github.com/docker/distribution v2.8.0+incompatible github.com/docker/distribution v2.8.0+incompatible
github.com/docker/docker v20.10.10+incompatible github.com/docker/docker v20.10.10+incompatible
github.com/docker/go-units v0.4.0 github.com/docker/go-units v0.4.0
@ -18,37 +19,35 @@ require (
github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/ghodss/yaml v1.0.0 github.com/ghodss/yaml v1.0.0
github.com/go-sql-driver/mysql v1.6.0 // indirect github.com/go-sql-driver/mysql v1.6.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/go-containerregistry v0.7.0 github.com/google/go-containerregistry v0.7.0
github.com/google/renameio v1.0.0 github.com/google/renameio v1.0.0
github.com/google/uuid v1.3.0 // indirect
github.com/gookit/color v1.5.0 github.com/gookit/color v1.5.0
github.com/hashicorp/go-multierror v1.0.0 github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/go-version v1.3.0 github.com/hashicorp/go-version v1.3.0
github.com/huandu/xstrings v1.3.2 // indirect github.com/huandu/xstrings v1.3.2 // indirect
github.com/imdario/mergo v0.3.12 github.com/imdario/mergo v0.3.12
github.com/ipfs/go-log/v2 v2.4.0 github.com/ipfs/go-log/v2 v2.4.0
github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3 github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
github.com/klauspost/compress v1.13.6 github.com/klauspost/compress v1.15.1
github.com/klauspost/pgzip v1.2.5 github.com/klauspost/pgzip v1.2.5
github.com/knqyf263/go-deb-version v0.0.0-20190517075300-09fca494f03d github.com/knqyf263/go-deb-version v0.0.0-20190517075300-09fca494f03d
github.com/kyokomi/emoji v2.1.0+incompatible github.com/kyokomi/emoji v2.1.0+incompatible
github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0 github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0
github.com/mattn/go-isatty v0.0.14 github.com/mattn/go-isatty v0.0.14
github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.1 github.com/mitchellh/hashstructure/v2 v2.0.2
github.com/mitchellh/mapstructure v1.4.2 // indirect github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/moby/buildkit v0.10.1 // indirect
github.com/moby/moby v20.10.9+incompatible github.com/moby/moby v20.10.9+incompatible
github.com/moby/sys/mount v0.2.0 // indirect
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290 github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290
github.com/onsi/ginkgo/v2 v2.0.0 github.com/onsi/ginkgo/v2 v2.0.0
github.com/onsi/gomega v1.17.0 github.com/onsi/gomega v1.17.0
github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2 github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578 github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578
github.com/pelletier/go-toml v1.9.4 // indirect github.com/pelletier/go-toml v1.9.4
github.com/peterbourgon/diskv v2.0.1+incompatible github.com/peterbourgon/diskv v2.0.1+incompatible
github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
@ -58,13 +57,11 @@ require (
github.com/spf13/cobra v1.2.1 github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.8.1 github.com/spf13/viper v1.8.1
github.com/theupdateframework/notary v0.7.0 github.com/theupdateframework/notary v0.7.0
go.etcd.io/bbolt v1.3.5 go.etcd.io/bbolt v1.3.6
go.uber.org/multierr v1.6.0 go.uber.org/multierr v1.6.0
go.uber.org/zap v1.17.0 go.uber.org/zap v1.17.0
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 // indirect
golang.org/x/mod v0.4.2 golang.org/x/mod v0.4.2
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
gopkg.in/ini.v1 v1.63.2 // indirect gopkg.in/ini.v1 v1.63.2 // indirect
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b

856
go.sum

File diff suppressed because it is too large Load Diff

View File

@ -43,8 +43,6 @@ import (
"github.com/mudler/luet/pkg/api/core/image" "github.com/mudler/luet/pkg/api/core/image"
"github.com/mudler/luet/pkg/api/core/types" "github.com/mudler/luet/pkg/api/core/types"
backend "github.com/mudler/luet/pkg/compiler/backend" backend "github.com/mudler/luet/pkg/compiler/backend"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/mudler/luet/pkg/helpers" "github.com/mudler/luet/pkg/helpers"
fileHelper "github.com/mudler/luet/pkg/helpers/file" fileHelper "github.com/mudler/luet/pkg/helpers/file"
@ -58,17 +56,17 @@ import (
type PackageArtifact struct { type PackageArtifact struct {
Path string `json:"path"` Path string `json:"path"`
Dependencies []*PackageArtifact `json:"dependencies"` Dependencies []*PackageArtifact `json:"dependencies"`
CompileSpec *compilerspec.LuetCompilationSpec `json:"compilationspec"` CompileSpec *types.LuetCompilationSpec `json:"compilationspec"`
Checksums Checksums `json:"checksums"` Checksums Checksums `json:"checksums"`
SourceAssertion types.PackagesAssertions `json:"-"` SourceAssertion types.PackagesAssertions `json:"-"`
CompressionType compression.Implementation `json:"compressiontype"` CompressionType types.CompressionImplementation `json:"compressiontype"`
Files []string `json:"files"` Files []string `json:"files"`
PackageCacheImage string `json:"package_cacheimage"` PackageCacheImage string `json:"package_cacheimage"`
Runtime *types.Package `json:"runtime,omitempty"` Runtime *types.Package `json:"runtime,omitempty"`
} }
func ImageToArtifact(ctx types.Context, img v1.Image, t compression.Implementation, output string, filter func(h *tar.Header) (bool, error)) (*PackageArtifact, error) { func ImageToArtifact(ctx types.Context, img v1.Image, t types.CompressionImplementation, output string, filter func(h *tar.Header) (bool, error)) (*PackageArtifact, error) {
_, tmpdiffs, err := image.Extract(ctx, img, filter) _, tmpdiffs, err := image.Extract(ctx, img, filter)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs") return nil, errors.Wrap(err, "Error met while creating tempdir for rootfs")
@ -90,17 +88,12 @@ func (p *PackageArtifact) ShallowCopy() *PackageArtifact {
} }
func NewPackageArtifact(path string) *PackageArtifact { func NewPackageArtifact(path string) *PackageArtifact {
return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: compression.None} return &PackageArtifact{Path: path, Dependencies: []*PackageArtifact{}, Checksums: Checksums{}, CompressionType: types.None}
} }
func NewPackageArtifactFromYaml(data []byte) (*PackageArtifact, error) { func NewPackageArtifactFromYaml(data []byte) (*PackageArtifact, error) {
p := &PackageArtifact{Checksums: Checksums{}} p := &PackageArtifact{Checksums: Checksums{}}
err := yaml.Unmarshal(data, p) return p, yaml.Unmarshal(data, p)
if err != nil {
return p, err
}
return p, err
} }
func (a *PackageArtifact) Hash() error { func (a *PackageArtifact) Hash() error {
@ -147,7 +140,6 @@ func (a *PackageArtifact) WriteYAML(dst string) error {
if err != nil { if err != nil {
return errors.Wrap(err, "Generated invalid artifact") return errors.Wrap(err, "Generated invalid artifact")
} }
//p := a.CompileSpec.GetPackage().GetPath() //p := a.CompileSpec.GetPackage().GetPath()
mangle.CompileSpec.GetPackage().SetPath("") mangle.CompileSpec.GetPackage().SetPath("")
@ -233,7 +225,7 @@ func (a *PackageArtifact) GenerateFinalImage(ctx types.Context, imageName string
func (a *PackageArtifact) Compress(src string, concurrency int) error { func (a *PackageArtifact) Compress(src string, concurrency int) error {
switch a.CompressionType { switch a.CompressionType {
case compression.Zstandard: case types.Zstandard:
err := helpers.Tar(src, a.Path) err := helpers.Tar(src, a.Path)
if err != nil { if err != nil {
return err return err
@ -271,7 +263,7 @@ func (a *PackageArtifact) Compress(src string, concurrency int) error {
a.Path = zstdFile a.Path = zstdFile
return nil return nil
case compression.GZip: case types.GZip:
err := helpers.Tar(src, a.Path) err := helpers.Tar(src, a.Path)
if err != nil { if err != nil {
return err return err
@ -315,10 +307,10 @@ func (a *PackageArtifact) Compress(src string, concurrency int) error {
func (a *PackageArtifact) getCompressedName() string { func (a *PackageArtifact) getCompressedName() string {
switch a.CompressionType { switch a.CompressionType {
case compression.Zstandard: case types.Zstandard:
return a.Path + ".zst" return a.Path + ".zst"
case compression.GZip: case types.GZip:
return a.Path + ".gz" return a.Path + ".gz"
} }
return a.Path return a.Path
@ -327,7 +319,7 @@ func (a *PackageArtifact) getCompressedName() string {
// GetUncompressedName returns the artifact path without the extension suffix // GetUncompressedName returns the artifact path without the extension suffix
func (a *PackageArtifact) GetUncompressedName() string { func (a *PackageArtifact) GetUncompressedName() string {
switch a.CompressionType { switch a.CompressionType {
case compression.Zstandard, compression.GZip: case types.Zstandard, types.GZip:
return strings.TrimSuffix(a.Path, filepath.Ext(a.Path)) return strings.TrimSuffix(a.Path, filepath.Ext(a.Path))
} }
return a.Path return a.Path

View File

@ -21,14 +21,12 @@ import (
"path/filepath" "path/filepath"
"github.com/mudler/luet/pkg/api/core/types" "github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/api/core/context" "github.com/mudler/luet/pkg/api/core/context"
"github.com/mudler/luet/pkg/api/core/image" "github.com/mudler/luet/pkg/api/core/image"
. "github.com/mudler/luet/pkg/api/core/types/artifact" . "github.com/mudler/luet/pkg/api/core/types/artifact"
backend "github.com/mudler/luet/pkg/compiler/backend" backend "github.com/mudler/luet/pkg/compiler/backend"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
. "github.com/mudler/luet/pkg/compiler" . "github.com/mudler/luet/pkg/compiler"
pkg "github.com/mudler/luet/pkg/database" pkg "github.com/mudler/luet/pkg/database"
@ -50,7 +48,7 @@ var _ = Describe("Artifact", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1)) Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
cc := NewLuetCompiler(nil, generalRecipe.GetDatabase(), options.WithContext(context.NewContext())) cc := NewLuetCompiler(nil, generalRecipe.GetDatabase(), compiler.WithContext(context.NewContext()))
lspec, err := cc.FromPackage(&types.Package{Name: "enman", Category: "app-admin", Version: "1.4.0"}) lspec, err := cc.FromPackage(&types.Package{Name: "enman", Category: "app-admin", Version: "1.4.0"})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -142,7 +140,7 @@ RUN echo bar > /test2`))
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar")) a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &types.Package{Name: "foo", Version: "1.0"}} a.CompileSpec = &types.LuetCompilationSpec{Package: &types.Package{Name: "foo", Version: "1.0"}}
err = a.Compress(tmpdir, 1) err = a.Compress(tmpdir, 1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -190,7 +188,7 @@ RUN echo bar > /test2`))
defer os.RemoveAll(tmpWork) // clean up defer os.RemoveAll(tmpWork) // clean up
a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar")) a := NewPackageArtifact(filepath.Join(tmpWork, "fake.tar"))
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &types.Package{Name: "foo", Version: "1.0"}} a.CompileSpec = &types.LuetCompilationSpec{Package: &types.Package{Name: "foo", Version: "1.0"}}
err = a.Compress(tmpdir, 1) err = a.Compress(tmpdir, 1)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -219,15 +217,15 @@ RUN echo bar > /test2`))
It("Retrieves uncompressed name", func() { It("Retrieves uncompressed name", func() {
a := NewPackageArtifact("foo.tar.gz") a := NewPackageArtifact("foo.tar.gz")
a.CompressionType = (compression.GZip) a.CompressionType = (types.GZip)
Expect(a.GetUncompressedName()).To(Equal("foo.tar")) Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
a = NewPackageArtifact("foo.tar.zst") a = NewPackageArtifact("foo.tar.zst")
a.CompressionType = compression.Zstandard a.CompressionType = types.Zstandard
Expect(a.GetUncompressedName()).To(Equal("foo.tar")) Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
a = NewPackageArtifact("foo.tar") a = NewPackageArtifact("foo.tar")
a.CompressionType = compression.None a.CompressionType = types.None
Expect(a.GetUncompressedName()).To(Equal("foo.tar")) Expect(a.GetUncompressedName()).To(Equal("foo.tar"))
}) })
}) })

View File

@ -24,7 +24,6 @@ import (
"github.com/mudler/luet/pkg/api/core/context" "github.com/mudler/luet/pkg/api/core/context"
. "github.com/mudler/luet/pkg/api/core/types/artifact" . "github.com/mudler/luet/pkg/api/core/types/artifact"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
fileHelper "github.com/mudler/luet/pkg/helpers/file" fileHelper "github.com/mudler/luet/pkg/helpers/file"
. "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2"
@ -77,13 +76,13 @@ var _ = Describe("Cache", func() {
_, err = cache.Get(a) _, err = cache.Get(a)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
a.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &types.Package{Name: "foo", Category: "bar"}} a.CompileSpec = &types.LuetCompilationSpec{Package: &types.Package{Name: "foo", Category: "bar"}}
_, _, err = cache.Put(a) _, _, err = cache.Put(a)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
c := NewPackageArtifact(filepath.Join(tmpdir, "foo.tar.gz")) c := NewPackageArtifact(filepath.Join(tmpdir, "foo.tar.gz"))
c.Hash() c.Hash()
c.CompileSpec = &compilerspec.LuetCompilationSpec{Package: &types.Package{Name: "foo", Category: "bar"}} c.CompileSpec = &types.LuetCompilationSpec{Package: &types.Package{Name: "foo", Category: "bar"}}
_, err = cache.Get(c) _, err = cache.Get(c)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
}) })

View File

@ -13,7 +13,7 @@
// You should have received a copy of the GNU General Public License along // You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>. // with this program; if not, see <http://www.gnu.org/licenses/>.
package compilerspec package types
import ( import (
"fmt" "fmt"
@ -21,8 +21,6 @@ import (
"path/filepath" "path/filepath"
"github.com/mitchellh/hashstructure/v2" "github.com/mitchellh/hashstructure/v2"
"github.com/mudler/luet/pkg/api/core/types"
options "github.com/mudler/luet/pkg/compiler/types/options"
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
"github.com/otiai10/copy" "github.com/otiai10/copy"
@ -88,21 +86,29 @@ func (specs *LuetCompilationspecs) Unique() *LuetCompilationspecs {
} }
type CopyField struct { type CopyField struct {
Package *types.Package `json:"package"` Package *Package `json:"package"`
Image string `json:"image"` Image string `json:"image"`
Source string `json:"source"` Source string `json:"source"`
Destination string `json:"destination"` Destination string `json:"destination"`
} }
type CompressionImplementation string
const (
None CompressionImplementation = "none" // e.g. tar for standard packages
GZip CompressionImplementation = "gzip"
Zstandard CompressionImplementation = "zstd"
)
type LuetCompilationSpec struct { type LuetCompilationSpec struct {
Steps []string `json:"steps"` // Are run inside a container and the result layer diff is saved Steps []string `json:"steps"` // Are run inside a container and the result layer diff is saved
Env []string `json:"env"` Env []string `json:"env"`
Prelude []string `json:"prelude"` // Are run inside the image which will be our builder Prelude []string `json:"prelude"` // Are run inside the image which will be our builder
Image string `json:"image"` Image string `json:"image"`
Seed string `json:"seed"` Seed string `json:"seed"`
Package *types.Package `json:"package"` Package *Package `json:"package"`
SourceAssertion types.PackagesAssertions `json:"-"` SourceAssertion PackagesAssertions `json:"-"`
PackageDir string `json:"package_dir" yaml:"package_dir"` PackageDir string `json:"package_dir" yaml:"package_dir"`
Retrieve []string `json:"retrieve"` Retrieve []string `json:"retrieve"`
@ -111,7 +117,7 @@ type LuetCompilationSpec struct {
Includes []string `json:"includes"` Includes []string `json:"includes"`
Excludes []string `json:"excludes"` Excludes []string `json:"excludes"`
BuildOptions *options.Compiler `json:"build_options"` BuildOptions *CompilerOptions `json:"build_options"`
Copy []CopyField `json:"copy"` Copy []CopyField `json:"copy"`
@ -131,8 +137,60 @@ type Signature struct {
Includes []string Includes []string
Excludes []string Excludes []string
Copy []CopyField Copy []CopyField
Requires types.Packages Requires Packages
RequiresFinalImages bool RequiresFinalImages bool
Dockerfile string
}
type CompilerOptions struct {
PushImageRepository string
PullImageRepository []string
PullFirst, KeepImg, Push bool
Concurrency int
CompressionType CompressionImplementation
Wait bool
OnlyDeps bool
NoDeps bool
SolverOptions LuetSolverOptions
BuildValuesFile []string
BuildValues []map[string]interface{}
PackageTargetOnly bool
Rebuild bool
BackendArgs []string
BackendType string
// TemplatesFolder. should default to tree/templates
TemplatesFolder []string
// Tells wether to push final container images after building
PushFinalImages bool
PushFinalImagesForce bool
GenerateFinalImages bool
// Image repository to push to
PushFinalImagesRepository string
RuntimeDatabase PackageDatabase
Context Context
}
type CompilerOption func(cfg *CompilerOptions) error
func (cfg *CompilerOptions) Apply(opts ...CompilerOption) error {
for _, opt := range opts {
if opt == nil {
continue
}
if err := opt(cfg); err != nil {
return err
}
}
return nil
} }
func (cs *LuetCompilationSpec) signature() Signature { func (cs *LuetCompilationSpec) signature() Signature {
@ -149,13 +207,14 @@ func (cs *LuetCompilationSpec) signature() Signature {
Excludes: cs.Excludes, Excludes: cs.Excludes,
Copy: cs.Copy, Copy: cs.Copy,
Requires: cs.Package.GetRequires(), Requires: cs.Package.GetRequires(),
Dockerfile: cs.Package.OriginDockerfile,
RequiresFinalImages: cs.RequiresFinalImages, RequiresFinalImages: cs.RequiresFinalImages,
} }
} }
func NewLuetCompilationSpec(b []byte, p *types.Package) (*LuetCompilationSpec, error) { func NewLuetCompilationSpec(b []byte, p *Package) (*LuetCompilationSpec, error) {
var spec LuetCompilationSpec var spec LuetCompilationSpec
var packageDefinition types.Package var packageDefinition Package
err := yaml.Unmarshal(b, &spec) err := yaml.Unmarshal(b, &spec)
if err != nil { if err != nil {
return &spec, err return &spec, err
@ -182,18 +241,18 @@ func NewLuetCompilationSpec(b []byte, p *types.Package) (*LuetCompilationSpec, e
} }
return &spec, nil return &spec, nil
} }
func (cs *LuetCompilationSpec) GetSourceAssertion() types.PackagesAssertions { func (cs *LuetCompilationSpec) GetSourceAssertion() PackagesAssertions {
return cs.SourceAssertion return cs.SourceAssertion
} }
func (cs *LuetCompilationSpec) SetBuildOptions(b options.Compiler) { func (cs *LuetCompilationSpec) SetBuildOptions(b CompilerOptions) {
cs.BuildOptions = &b cs.BuildOptions = &b
} }
func (cs *LuetCompilationSpec) SetSourceAssertion(as types.PackagesAssertions) { func (cs *LuetCompilationSpec) SetSourceAssertion(as PackagesAssertions) {
cs.SourceAssertion = as cs.SourceAssertion = as
} }
func (cs *LuetCompilationSpec) GetPackage() *types.Package { func (cs *LuetCompilationSpec) GetPackage() *Package {
return cs.Package return cs.Package
} }
@ -264,7 +323,7 @@ func (cs *LuetCompilationSpec) SetSeedImage(s string) {
} }
func (cs *LuetCompilationSpec) EmptyPackage() bool { func (cs *LuetCompilationSpec) EmptyPackage() bool {
return len(cs.BuildSteps()) == 0 && !cs.UnpackedPackage() return len(cs.BuildSteps()) == 0 && !cs.UnpackedPackage() && (cs.Package != nil && cs.Package.OriginDockerfile == "" || cs.Package == nil)
} }
func (cs *LuetCompilationSpec) UnpackedPackage() bool { func (cs *LuetCompilationSpec) UnpackedPackage() bool {
@ -369,17 +428,27 @@ func (cs *LuetCompilationSpec) RenderStepImage(image string) (string, error) {
} }
func (cs *LuetCompilationSpec) WriteBuildImageDefinition(path string) error { func (cs *LuetCompilationSpec) WriteBuildImageDefinition(path string) error {
data, err := cs.RenderBuildImage() data, err := cs.RenderBuildImage()
if err != nil { if err != nil {
return err return err
} }
return ioutil.WriteFile(path, []byte(data), 0644) return ioutil.WriteFile(path, []byte(data), 0644)
} }
func (cs *LuetCompilationSpec) WriteStepImageDefinition(fromimage, path string) error { func (cs *LuetCompilationSpec) WriteStepImageDefinition(fromimage, path string) error {
data, err := cs.RenderStepImage(fromimage) var data string
if err != nil { var err error
return err if cs.Package.OriginDockerfile != "" {
// pre-rendered
data = cs.Package.OriginDockerfile
} else {
data, err = cs.RenderStepImage(fromimage)
if err != nil {
return err
}
} }
return ioutil.WriteFile(path, []byte(data), 0644) return ioutil.WriteFile(path, []byte(data), 0644)
} }

View File

@ -245,6 +245,8 @@ type Package struct {
Labels map[string]string `json:"labels,omitempty"` // Affects YAML field names too. Labels map[string]string `json:"labels,omitempty"` // Affects YAML field names too.
TreeDir string `json:"treedir,omitempty"` TreeDir string `json:"treedir,omitempty"`
OriginDockerfile string `json:"dockerfile,omitempty"`
} }
// State represent the package state // State represent the package state
@ -267,6 +269,17 @@ func (p *Package) SetTreeDir(s string) {
func (p *Package) GetTreeDir() string { func (p *Package) GetTreeDir() string {
return p.TreeDir return p.TreeDir
} }
func (p *Package) SetOriginalDockerfile(s string) error {
dat, err := ioutil.ReadFile(s)
if err != nil {
return errors.Wrap(err, "Error reading file "+s)
}
p.OriginDockerfile = string(dat)
return nil
}
func (p *Package) String() string { func (p *Package) String() string {
b, err := p.JSON() b, err := p.JSON()
if err != nil { if err != nil {
@ -712,6 +725,10 @@ func (p *Package) GetRuntimePackage() (*Package, error) {
break break
} }
} }
} else if p.OriginDockerfile != "" {
// XXX: There are no runtime metadata at the moment available except package name in this case
// This needs to be adapted and aligned up with the tree parser
return &Package{Name: p.Name}, nil
} else { } else {
definitionFile := filepath.Join(p.Path, PackageDefinitionFile) definitionFile := filepath.Join(p.Path, PackageDefinitionFile)
dat, err := ioutil.ReadFile(definitionFile) dat, err := ioutil.ReadFile(definitionFile)

View File

@ -13,7 +13,7 @@
// You should have received a copy of the GNU General Public License along // You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>. // with this program; if not, see <http://www.gnu.org/licenses/>.
package compilerspec_test package types_test
import ( import (
"io/ioutil" "io/ioutil"
@ -21,69 +21,68 @@ import (
"path/filepath" "path/filepath"
"github.com/mudler/luet/pkg/api/core/types" "github.com/mudler/luet/pkg/api/core/types"
. "github.com/mudler/luet/pkg/api/core/types"
options "github.com/mudler/luet/pkg/compiler/types/options"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
fileHelper "github.com/mudler/luet/pkg/helpers/file" fileHelper "github.com/mudler/luet/pkg/helpers/file"
. "github.com/mudler/luet/pkg/compiler" . "github.com/mudler/luet/pkg/compiler"
pkg "github.com/mudler/luet/pkg/database" pkg "github.com/mudler/luet/pkg/database"
"github.com/mudler/luet/pkg/tree" "github.com/mudler/luet/pkg/tree"
. "github.com/onsi/ginkgo/v2" ginkgo "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
var _ = Describe("Spec", func() { var _ = ginkgo.Describe("Spec", func() {
Context("Luet specs", func() { ginkgo.Context("Luet specs", func() {
It("Allows normal operations", func() { ginkgo.It("Allows normal operations", func() {
testSpec := &compilerspec.LuetCompilationSpec{Package: &types.Package{Name: "foo", Category: "a", Version: "0"}} testSpec := &LuetCompilationSpec{Package: &Package{Name: "foo", Category: "a", Version: "0"}}
testSpec2 := &compilerspec.LuetCompilationSpec{Package: &types.Package{Name: "bar", Category: "a", Version: "0"}} testSpec2 := &LuetCompilationSpec{Package: &Package{Name: "bar", Category: "a", Version: "0"}}
testSpec3 := &compilerspec.LuetCompilationSpec{Package: &types.Package{Name: "baz", Category: "a", Version: "0"}} testSpec3 := &LuetCompilationSpec{Package: &Package{Name: "baz", Category: "a", Version: "0"}}
testSpec4 := &compilerspec.LuetCompilationSpec{Package: &types.Package{Name: "foo", Category: "a", Version: "0"}} testSpec4 := &LuetCompilationSpec{Package: &Package{Name: "foo", Category: "a", Version: "0"}}
specs := compilerspec.NewLuetCompilationspecs(testSpec, testSpec2) specs := NewLuetCompilationspecs(testSpec, testSpec2)
Expect(specs.Len()).To(Equal(2)) Expect(specs.Len()).To(Equal(2))
Expect(specs.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2})) Expect(specs.All()).To(Equal([]*LuetCompilationSpec{testSpec, testSpec2}))
specs.Add(testSpec3) specs.Add(testSpec3)
Expect(specs.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2, testSpec3})) Expect(specs.All()).To(Equal([]*LuetCompilationSpec{testSpec, testSpec2, testSpec3}))
specs.Add(testSpec4) specs.Add(testSpec4)
Expect(specs.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2, testSpec3, testSpec4})) Expect(specs.All()).To(Equal([]*LuetCompilationSpec{testSpec, testSpec2, testSpec3, testSpec4}))
newSpec := specs.Unique() newSpec := specs.Unique()
Expect(newSpec.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec, testSpec2, testSpec3})) Expect(newSpec.All()).To(Equal([]*LuetCompilationSpec{testSpec, testSpec2, testSpec3}))
newSpec2 := specs.Remove(compilerspec.NewLuetCompilationspecs(testSpec, testSpec2)) newSpec2 := specs.Remove(NewLuetCompilationspecs(testSpec, testSpec2))
Expect(newSpec2.All()).To(Equal([]*compilerspec.LuetCompilationSpec{testSpec3})) Expect(newSpec2.All()).To(Equal([]*LuetCompilationSpec{testSpec3}))
}) })
Context("virtuals", func() { ginkgo.Context("virtuals", func() {
When("is empty", func() { ginkgo.When("is empty", func() {
It("is virtual", func() { ginkgo.It("is virtual", func() {
spec := &compilerspec.LuetCompilationSpec{} spec := &LuetCompilationSpec{}
Expect(spec.IsVirtual()).To(BeTrue()) Expect(spec.IsVirtual()).To(BeTrue())
}) })
}) })
When("has defined steps", func() { ginkgo.When("has defined steps", func() {
It("is not a virtual", func() { ginkgo.It("is not a virtual", func() {
spec := &compilerspec.LuetCompilationSpec{Steps: []string{"foo"}} spec := &LuetCompilationSpec{Steps: []string{"foo"}}
Expect(spec.IsVirtual()).To(BeFalse()) Expect(spec.IsVirtual()).To(BeFalse())
}) })
}) })
When("has defined image", func() { ginkgo.When("has defined image", func() {
It("is not a virtual", func() { ginkgo.It("is not a virtual", func() {
spec := &compilerspec.LuetCompilationSpec{Image: "foo"} spec := &LuetCompilationSpec{Image: "foo"}
Expect(spec.IsVirtual()).To(BeFalse()) Expect(spec.IsVirtual()).To(BeFalse())
}) })
}) })
}) })
}) })
Context("Image hashing", func() { ginkgo.Context("Image hashing", func() {
It("is stable", func() { ginkgo.It("is stable", func() {
spec1 := &compilerspec.LuetCompilationSpec{ spec1 := &LuetCompilationSpec{
Image: "foo", Image: "foo",
BuildOptions: &options.Compiler{BuildValues: []map[string]interface{}{{"foo": "bar", "baz": true}}}, BuildOptions: &types.CompilerOptions{BuildValues: []map[string]interface{}{{"foo": "bar", "baz": true}}},
Package: &types.Package{ Package: &Package{
Name: "foo", Name: "foo",
Category: "Bar", Category: "Bar",
Labels: map[string]string{ Labels: map[string]string{
@ -92,10 +91,10 @@ var _ = Describe("Spec", func() {
}, },
}, },
} }
spec2 := &compilerspec.LuetCompilationSpec{ spec2 := &LuetCompilationSpec{
Image: "foo", Image: "foo",
BuildOptions: &options.Compiler{BuildValues: []map[string]interface{}{{"foo": "bar", "baz": true}}}, BuildOptions: &types.CompilerOptions{BuildValues: []map[string]interface{}{{"foo": "bar", "baz": true}}},
Package: &types.Package{ Package: &Package{
Name: "foo", Name: "foo",
Category: "Bar", Category: "Bar",
Labels: map[string]string{ Labels: map[string]string{
@ -104,10 +103,10 @@ var _ = Describe("Spec", func() {
}, },
}, },
} }
spec3 := &compilerspec.LuetCompilationSpec{ spec3 := &LuetCompilationSpec{
Image: "foo", Image: "foo",
Steps: []string{"foo"}, Steps: []string{"foo"},
Package: &types.Package{ Package: &Package{
Name: "foo", Name: "foo",
Category: "Bar", Category: "Bar",
Labels: map[string]string{ Labels: map[string]string{
@ -133,8 +132,8 @@ var _ = Describe("Spec", func() {
}) })
}) })
Context("Simple package build definition", func() { ginkgo.Context("Simple package build definition", func() {
It("Loads it correctly", func() { ginkgo.It("Loads it correctly", func() {
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false)) generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../../../tests/fixtures/buildtree") err := generalRecipe.Load("../../../../tests/fixtures/buildtree")
@ -143,7 +142,7 @@ var _ = Describe("Spec", func() {
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1)) Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase()) compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase())
lspec, err := compiler.FromPackage(&types.Package{Name: "enman", Category: "app-admin", Version: "1.4.0"}) lspec, err := compiler.FromPackage(&Package{Name: "enman", Category: "app-admin", Version: "1.4.0"})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"})) Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))
@ -186,7 +185,7 @@ RUN echo bar > /test2`))
}) })
It("Renders retrieve and env fields", func() { ginkgo.It("Renders retrieve and env fields", func() {
generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false)) generalRecipe := tree.NewGeneralRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../../../tests/fixtures/retrieve") err := generalRecipe.Load("../../../../tests/fixtures/retrieve")
@ -195,7 +194,7 @@ RUN echo bar > /test2`))
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1)) Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(1))
compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase()) compiler := NewLuetCompiler(nil, generalRecipe.GetDatabase())
lspec, err := compiler.FromPackage(&types.Package{Name: "a", Category: "test", Version: "1.0"}) lspec, err := compiler.FromPackage(&Package{Name: "a", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"})) Expect(lspec.Steps).To(Equal([]string{"echo foo > /test", "echo bar > /test2"}))

View File

@ -16,6 +16,7 @@
package compiler package compiler
import ( import (
"bytes"
"crypto/md5" "crypto/md5"
"fmt" "fmt"
"io" "io"
@ -29,6 +30,8 @@ import (
"sync" "sync"
"time" "time"
dockerfile "github.com/asottile/dockerfile"
"github.com/imdario/mergo"
bus "github.com/mudler/luet/pkg/api/core/bus" bus "github.com/mudler/luet/pkg/api/core/bus"
"github.com/mudler/luet/pkg/api/core/context" "github.com/mudler/luet/pkg/api/core/context"
"github.com/mudler/luet/pkg/api/core/image" "github.com/mudler/luet/pkg/api/core/image"
@ -36,14 +39,10 @@ import (
"github.com/mudler/luet/pkg/api/core/types" "github.com/mudler/luet/pkg/api/core/types"
artifact "github.com/mudler/luet/pkg/api/core/types/artifact" artifact "github.com/mudler/luet/pkg/api/core/types/artifact"
"github.com/mudler/luet/pkg/compiler/backend" "github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/options"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
pkg "github.com/mudler/luet/pkg/database" pkg "github.com/mudler/luet/pkg/database"
"github.com/mudler/luet/pkg/helpers" "github.com/mudler/luet/pkg/helpers"
fileHelper "github.com/mudler/luet/pkg/helpers/file" fileHelper "github.com/mudler/luet/pkg/helpers/file"
"github.com/mudler/luet/pkg/solver" "github.com/mudler/luet/pkg/solver"
"github.com/imdario/mergo"
"github.com/pkg/errors" "github.com/pkg/errors"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
) )
@ -68,17 +67,17 @@ type LuetCompiler struct {
//*tree.CompilerRecipe //*tree.CompilerRecipe
Backend CompilerBackend Backend CompilerBackend
Database types.PackageDatabase Database types.PackageDatabase
Options options.Compiler Options types.CompilerOptions
} }
func NewCompiler(p ...options.Option) *LuetCompiler { func NewCompiler(p ...types.CompilerOption) *LuetCompiler {
c := options.NewDefaultCompiler() c := newDefaultCompiler()
c.Apply(p...) c.Apply(p...)
return &LuetCompiler{Options: *c} return &LuetCompiler{Options: *c}
} }
func NewLuetCompiler(backend CompilerBackend, db types.PackageDatabase, compilerOpts ...options.Option) *LuetCompiler { func NewLuetCompiler(backend CompilerBackend, db types.PackageDatabase, compilerOpts ...types.CompilerOption) *LuetCompiler {
// The CompilerRecipe will gives us a tree with only build deps listed. // The CompilerRecipe will gives us a tree with only build deps listed.
c := NewCompiler(compilerOpts...) c := NewCompiler(compilerOpts...)
@ -92,7 +91,7 @@ func NewLuetCompiler(backend CompilerBackend, db types.PackageDatabase, compiler
return c return c
} }
func (cs *LuetCompiler) compilerWorker(i int, wg *sync.WaitGroup, cspecs chan *compilerspec.LuetCompilationSpec, a *[]*artifact.PackageArtifact, m *sync.Mutex, concurrency int, keepPermissions bool, errors chan error) { func (cs *LuetCompiler) compilerWorker(i int, wg *sync.WaitGroup, cspecs chan *types.LuetCompilationSpec, a *[]*artifact.PackageArtifact, m *sync.Mutex, concurrency int, keepPermissions bool, errors chan error) {
defer wg.Done() defer wg.Done()
for s := range cspecs { for s := range cspecs {
@ -108,14 +107,14 @@ func (cs *LuetCompiler) compilerWorker(i int, wg *sync.WaitGroup, cspecs chan *c
} }
// CompileWithReverseDeps compiles the supplied compilationspecs and their reverse dependencies // CompileWithReverseDeps compiles the supplied compilationspecs and their reverse dependencies
func (cs *LuetCompiler) CompileWithReverseDeps(keepPermissions bool, ps *compilerspec.LuetCompilationspecs) ([]*artifact.PackageArtifact, []error) { func (cs *LuetCompiler) CompileWithReverseDeps(keepPermissions bool, ps *types.LuetCompilationspecs) ([]*artifact.PackageArtifact, []error) {
artifacts, err := cs.CompileParallel(keepPermissions, ps) artifacts, err := cs.CompileParallel(keepPermissions, ps)
if len(err) != 0 { if len(err) != 0 {
return artifacts, err return artifacts, err
} }
cs.Options.Context.Info(":ant: Resolving reverse dependencies") cs.Options.Context.Info(":ant: Resolving reverse dependencies")
toCompile := compilerspec.NewLuetCompilationspecs() toCompile := types.NewLuetCompilationspecs()
for _, a := range artifacts { for _, a := range artifacts {
revdeps := a.CompileSpec.GetPackage().Revdeps(cs.Database) revdeps := a.CompileSpec.GetPackage().Revdeps(cs.Database)
@ -141,8 +140,8 @@ func (cs *LuetCompiler) CompileWithReverseDeps(keepPermissions bool, ps *compile
// CompileParallel compiles the supplied compilationspecs in parallel // CompileParallel compiles the supplied compilationspecs in parallel
// to note, no specific heuristic is implemented, and the specs are run in parallel as they are. // to note, no specific heuristic is implemented, and the specs are run in parallel as they are.
func (cs *LuetCompiler) CompileParallel(keepPermissions bool, ps *compilerspec.LuetCompilationspecs) ([]*artifact.PackageArtifact, []error) { func (cs *LuetCompiler) CompileParallel(keepPermissions bool, ps *types.LuetCompilationspecs) ([]*artifact.PackageArtifact, []error) {
all := make(chan *compilerspec.LuetCompilationSpec) all := make(chan *types.LuetCompilationSpec)
artifacts := []*artifact.PackageArtifact{} artifacts := []*artifact.PackageArtifact{}
mutex := &sync.Mutex{} mutex := &sync.Mutex{}
errors := make(chan error, ps.Len()) errors := make(chan error, ps.Len())
@ -227,7 +226,7 @@ func (cs *LuetCompiler) stripFromRootfs(includes []string, rootfs string, includ
return nil return nil
} }
func (cs *LuetCompiler) unpackFs(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec, runnerOpts backend.Options) (*artifact.PackageArtifact, error) { func (cs *LuetCompiler) unpackFs(concurrency int, keepPermissions bool, p *types.LuetCompilationSpec, runnerOpts backend.Options) (*artifact.PackageArtifact, error) {
if !cs.Backend.ImageExists(runnerOpts.ImageName) { if !cs.Backend.ImageExists(runnerOpts.ImageName) {
if err := cs.Backend.DownloadImage(runnerOpts); err != nil { if err := cs.Backend.DownloadImage(runnerOpts); err != nil {
@ -274,7 +273,7 @@ func (cs *LuetCompiler) unpackFs(concurrency int, keepPermissions bool, p *compi
return a, nil return a, nil
} }
func (cs *LuetCompiler) unpackDelta(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec, builderOpts, runnerOpts backend.Options) (*artifact.PackageArtifact, error) { func (cs *LuetCompiler) unpackDelta(concurrency int, keepPermissions bool, p *types.LuetCompilationSpec, builderOpts, runnerOpts backend.Options) (*artifact.PackageArtifact, error) {
rootfs, err := cs.Options.Context.TempDir("rootfs") rootfs, err := cs.Options.Context.TempDir("rootfs")
if err != nil { if err != nil {
@ -339,7 +338,7 @@ func (cs *LuetCompiler) unpackDelta(concurrency int, keepPermissions bool, p *co
func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImage string, func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImage string,
concurrency int, keepPermissions bool, concurrency int, keepPermissions bool,
p *compilerspec.LuetCompilationSpec) (backend.Options, backend.Options, error) { p *types.LuetCompilationSpec) (backend.Options, backend.Options, error) {
var runnerOpts, builderOpts backend.Options var runnerOpts, builderOpts backend.Options
@ -449,7 +448,7 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
return builderOpts, runnerOpts, nil return builderOpts, runnerOpts, nil
} }
func (cs *LuetCompiler) genArtifact(p *compilerspec.LuetCompilationSpec, builderOpts, runnerOpts backend.Options, concurrency int, keepPermissions bool) (*artifact.PackageArtifact, error) { func (cs *LuetCompiler) genArtifact(p *types.LuetCompilationSpec, builderOpts, runnerOpts backend.Options, concurrency int, keepPermissions bool) (*artifact.PackageArtifact, error) {
// generate *artifact.PackageArtifact // generate *artifact.PackageArtifact
var a *artifact.PackageArtifact var a *artifact.PackageArtifact
@ -526,7 +525,7 @@ func (cs *LuetCompiler) genArtifact(p *compilerspec.LuetCompilationSpec, builder
} }
// finalizeImages finalizes images and generates final artifacts (push them as well if necessary). // finalizeImages finalizes images and generates final artifacts (push them as well if necessary).
func (cs *LuetCompiler) finalizeImages(a *artifact.PackageArtifact, p *compilerspec.LuetCompilationSpec, keepPermissions bool) error { func (cs *LuetCompiler) finalizeImages(a *artifact.PackageArtifact, p *types.LuetCompilationSpec, keepPermissions bool) error {
// TODO: This is a small readaptation of repository_docker.go pushImageFromArtifact(). // TODO: This is a small readaptation of repository_docker.go pushImageFromArtifact().
// Maybe can be moved to a common place. // Maybe can be moved to a common place.
@ -626,7 +625,7 @@ func oneOfImagesAvailable(images []string, b CompilerBackend) (bool, string) {
return false, "" return false, ""
} }
func (cs *LuetCompiler) findImageHash(imageHash string, p *compilerspec.LuetCompilationSpec) string { func (cs *LuetCompiler) findImageHash(imageHash string, p *types.LuetCompilationSpec) string {
var resolvedImage string var resolvedImage string
cs.Options.Context.Debug("Resolving image hash for", p.Package.HumanReadableString(), "hash", imageHash, "Pull repositories", p.BuildOptions.PullImageRepository) cs.Options.Context.Debug("Resolving image hash for", p.Package.HumanReadableString(), "hash", imageHash, "Pull repositories", p.BuildOptions.PullImageRepository)
toChecklist := append([]string{fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, imageHash)}, toChecklist := append([]string{fmt.Sprintf("%s:%s", cs.Options.PushImageRepository, imageHash)},
@ -646,7 +645,7 @@ func (cs *LuetCompiler) findImageHash(imageHash string, p *compilerspec.LuetComp
return resolvedImage return resolvedImage
} }
func (cs *LuetCompiler) resolveExistingImageHash(imageHash string, p *compilerspec.LuetCompilationSpec) string { func (cs *LuetCompiler) resolveExistingImageHash(imageHash string, p *types.LuetCompilationSpec) string {
resolvedImage := cs.findImageHash(imageHash, p) resolvedImage := cs.findImageHash(imageHash, p)
if resolvedImage == "" { if resolvedImage == "" {
@ -655,7 +654,7 @@ func (cs *LuetCompiler) resolveExistingImageHash(imageHash string, p *compilersp
return resolvedImage return resolvedImage
} }
func LoadArtifactFromYaml(spec *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) { func LoadArtifactFromYaml(spec *types.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
metaFile := spec.GetPackage().GetMetadataFilePath() metaFile := spec.GetPackage().GetMetadataFilePath()
dat, err := ioutil.ReadFile(spec.Rel(metaFile)) dat, err := ioutil.ReadFile(spec.Rel(metaFile))
if err != nil { if err != nil {
@ -670,7 +669,7 @@ func LoadArtifactFromYaml(spec *compilerspec.LuetCompilationSpec) (*artifact.Pac
return art, nil return art, nil
} }
func (cs *LuetCompiler) getImageArtifact(hash string, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) { func (cs *LuetCompiler) getImageArtifact(hash string, p *types.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
// we check if there is an available image with the given hash and // we check if there is an available image with the given hash and
// we return a full artifact if can be loaded locally. // we return a full artifact if can be loaded locally.
cs.Options.Context.Debug("Get image artifact for", p.Package.HumanReadableString(), "hash", hash, "Pull repositories", p.BuildOptions.PullImageRepository) cs.Options.Context.Debug("Get image artifact for", p.Package.HumanReadableString(), "hash", hash, "Pull repositories", p.BuildOptions.PullImageRepository)
@ -700,7 +699,7 @@ func (cs *LuetCompiler) getImageArtifact(hash string, p *compilerspec.LuetCompil
func (cs *LuetCompiler) compileWithImage(image, builderHash string, packageTagHash string, func (cs *LuetCompiler) compileWithImage(image, builderHash string, packageTagHash string,
concurrency int, concurrency int,
keepPermissions, keepImg bool, keepPermissions, keepImg bool,
p *compilerspec.LuetCompilationSpec, generateArtifact bool) (*artifact.PackageArtifact, error) { p *types.LuetCompilationSpec, generateArtifact bool) (*artifact.PackageArtifact, error) {
// If it is a virtual, check if we have to generate an empty artifact or not. // If it is a virtual, check if we have to generate an empty artifact or not.
if generateArtifact && p.IsVirtual() { if generateArtifact && p.IsVirtual() {
@ -781,8 +780,8 @@ func (cs *LuetCompiler) compileWithImage(image, builderHash string, packageTagHa
// FromDatabase returns all the available compilation specs from a database. If the minimum flag is returned // FromDatabase returns all the available compilation specs from a database. If the minimum flag is returned
// it will be computed a minimal subset that will guarantees that all packages are built ( if not targeting a single package explictly ) // it will be computed a minimal subset that will guarantees that all packages are built ( if not targeting a single package explictly )
func (cs *LuetCompiler) FromDatabase(db types.PackageDatabase, minimum bool, dst string) ([]*compilerspec.LuetCompilationSpec, error) { func (cs *LuetCompiler) FromDatabase(db types.PackageDatabase, minimum bool, dst string) ([]*types.LuetCompilationSpec, error) {
compilerSpecs := compilerspec.NewLuetCompilationspecs() compilerSpecs := types.NewLuetCompilationspecs()
w := db.World() w := db.World()
@ -805,7 +804,7 @@ func (cs *LuetCompiler) FromDatabase(db types.PackageDatabase, minimum bool, dst
} }
} }
func (cs *LuetCompiler) ComputeDepTree(p *compilerspec.LuetCompilationSpec, db types.PackageDatabase) (types.PackagesAssertions, error) { func (cs *LuetCompiler) ComputeDepTree(p *types.LuetCompilationSpec, db types.PackageDatabase) (types.PackagesAssertions, error) {
s := solver.NewResolver(cs.Options.SolverOptions.SolverOptions, pkg.NewInMemoryDatabase(false), db, pkg.NewInMemoryDatabase(false), solver.NewSolverFromOptions(cs.Options.SolverOptions)) s := solver.NewResolver(cs.Options.SolverOptions.SolverOptions, pkg.NewInMemoryDatabase(false), db, pkg.NewInMemoryDatabase(false), solver.NewSolverFromOptions(cs.Options.SolverOptions))
solution, err := s.Install(types.Packages{p.GetPackage()}) solution, err := s.Install(types.Packages{p.GetPackage()})
@ -826,7 +825,7 @@ func (cs *LuetCompiler) ComputeDepTree(p *compilerspec.LuetCompilationSpec, db t
// for _, l := range bt.AllLevels() { // for _, l := range bt.AllLevels() {
// fmt.Println(strings.Join(bt.AllInLevel(l), " ")) // fmt.Println(strings.Join(bt.AllInLevel(l), " "))
// } // }
func (cs *LuetCompiler) BuildTree(compilerSpecs compilerspec.LuetCompilationspecs) (*BuildTree, error) { func (cs *LuetCompiler) BuildTree(compilerSpecs types.LuetCompilationspecs) (*BuildTree, error) {
compilationTree := map[string]map[string]interface{}{} compilationTree := map[string]map[string]interface{}{}
bt := &BuildTree{} bt := &BuildTree{}
@ -866,11 +865,11 @@ func (cs *LuetCompiler) BuildTree(compilerSpecs compilerspec.LuetCompilationspec
} }
// ComputeMinimumCompilableSet strips specs that are eventually compiled by leafs // ComputeMinimumCompilableSet strips specs that are eventually compiled by leafs
func (cs *LuetCompiler) ComputeMinimumCompilableSet(p ...*compilerspec.LuetCompilationSpec) ([]*compilerspec.LuetCompilationSpec, error) { func (cs *LuetCompiler) ComputeMinimumCompilableSet(p ...*types.LuetCompilationSpec) ([]*types.LuetCompilationSpec, error) {
// Generate a set with all the deps of the provided specs // Generate a set with all the deps of the provided specs
// we will use that set to remove the deps from the list of provided compilation specs // we will use that set to remove the deps from the list of provided compilation specs
allDependencies := types.PackagesAssertions{} // Get all packages that will be in deps allDependencies := types.PackagesAssertions{} // Get all packages that will be in deps
result := []*compilerspec.LuetCompilationSpec{} result := []*types.LuetCompilationSpec{}
for _, spec := range p { for _, spec := range p {
sol, err := cs.ComputeDepTree(spec, cs.Database) sol, err := cs.ComputeDepTree(spec, cs.Database)
if err != nil { if err != nil {
@ -889,7 +888,7 @@ func (cs *LuetCompiler) ComputeMinimumCompilableSet(p ...*compilerspec.LuetCompi
// Compile is a non-parallel version of CompileParallel. It builds the compilation specs and generates // Compile is a non-parallel version of CompileParallel. It builds the compilation specs and generates
// an artifact // an artifact
func (cs *LuetCompiler) Compile(keepPermissions bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) { func (cs *LuetCompiler) Compile(keepPermissions bool, p *types.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
return cs.compile(cs.Options.Concurrency, keepPermissions, nil, nil, p) return cs.compile(cs.Options.Concurrency, keepPermissions, nil, nil, p)
} }
@ -901,7 +900,7 @@ func genImageList(refs []string, hash string) []string {
return res return res
} }
func (cs *LuetCompiler) inheritSpecBuildOptions(p *compilerspec.LuetCompilationSpec) { func (cs *LuetCompiler) inheritSpecBuildOptions(p *types.LuetCompilationSpec) {
cs.Options.Context.Debug(p.GetPackage().HumanReadableString(), "Build options before inherit", p.BuildOptions) cs.Options.Context.Debug(p.GetPackage().HumanReadableString(), "Build options before inherit", p.BuildOptions)
// Append push repositories from buildpsec buildoptions as pull if found. // Append push repositories from buildpsec buildoptions as pull if found.
@ -940,7 +939,7 @@ func (cs *LuetCompiler) getSpecHash(pkgs types.Packages, salt string) (string, e
return fmt.Sprintf("%x", h.Sum(nil)), nil return fmt.Sprintf("%x", h.Sum(nil)), nil
} }
func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) error { func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool, p *types.LuetCompilationSpec) error {
if !p.RequiresFinalImages { if !p.RequiresFinalImages {
return nil return nil
} }
@ -1102,8 +1101,8 @@ func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool
return nil return nil
} }
func (cs *LuetCompiler) resolveMultiStageImages(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) error { func (cs *LuetCompiler) resolveMultiStageImages(concurrency int, keepPermissions bool, p *types.LuetCompilationSpec) error {
resolvedCopyFields := []compilerspec.CopyField{} resolvedCopyFields := []types.CopyField{}
copyTag := ">:droplet: copy<" copyTag := ">:droplet: copy<"
if len(p.Copy) != 0 { if len(p.Copy) != 0 {
@ -1131,7 +1130,7 @@ func (cs *LuetCompiler) resolveMultiStageImages(concurrency int, keepPermissions
return errors.Wrap(err, "failed building multi-stage image") return errors.Wrap(err, "failed building multi-stage image")
} }
resolvedCopyFields = append(resolvedCopyFields, compilerspec.CopyField{ resolvedCopyFields = append(resolvedCopyFields, types.CopyField{
Image: cs.resolveExistingImageHash(artifact.PackageCacheImage, spec), Image: cs.resolveExistingImageHash(artifact.PackageCacheImage, spec),
Source: c.Source, Source: c.Source,
Destination: c.Destination, Destination: c.Destination,
@ -1175,7 +1174,7 @@ func CompilerFinalImages(cs *LuetCompiler) (*LuetCompiler, error) {
return copy, nil return copy, nil
} }
func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateFinalArtifact *bool, generateDependenciesFinalArtifact *bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) { func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateFinalArtifact *bool, generateDependenciesFinalArtifact *bool, p *types.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
cs.Options.Context.Info(":package: Compiling", p.GetPackage().HumanReadableString(), ".... :coffee:") cs.Options.Context.Info(":package: Compiling", p.GetPackage().HumanReadableString(), ".... :coffee:")
//Before multistage : join - same as multistage, but keep artifacts, join them, create a new one and generate a final image. //Before multistage : join - same as multistage, but keep artifacts, join them, create a new one and generate a final image.
@ -1212,7 +1211,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
targetAssertion := packageHashTree.Target targetAssertion := packageHashTree.Target
bus.Manager.Publish(bus.EventPackagePreBuild, struct { bus.Manager.Publish(bus.EventPackagePreBuild, struct {
CompileSpec *compilerspec.LuetCompilationSpec CompileSpec *types.LuetCompilationSpec
Assert types.PackageAssert Assert types.PackageAssert
PackageHashTree *PackageImageHashTree PackageHashTree *PackageImageHashTree
}{ }{
@ -1282,7 +1281,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
compileSpec.SetOutputPath(p.GetOutputPath()) compileSpec.SetOutputPath(p.GetOutputPath())
bus.Manager.Publish(bus.EventPackagePreBuild, struct { bus.Manager.Publish(bus.EventPackagePreBuild, struct {
CompileSpec *compilerspec.LuetCompilationSpec CompileSpec *types.LuetCompilationSpec
Assert types.PackageAssert Assert types.PackageAssert
}{ }{
CompileSpec: compileSpec, CompileSpec: compileSpec,
@ -1337,7 +1336,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
cs.Options.Context.Success(pkgTag, ":white_check_mark: Done") cs.Options.Context.Success(pkgTag, ":white_check_mark: Done")
bus.Manager.Publish(bus.EventPackagePostBuild, struct { bus.Manager.Publish(bus.EventPackagePostBuild, struct {
CompileSpec *compilerspec.LuetCompilationSpec CompileSpec *types.LuetCompilationSpec
Artifact *artifact.PackageArtifact Artifact *artifact.PackageArtifact
}{ }{
CompileSpec: compileSpec, CompileSpec: compileSpec,
@ -1364,7 +1363,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
a.SourceAssertion = p.GetSourceAssertion() a.SourceAssertion = p.GetSourceAssertion()
a.PackageCacheImage = targetAssertion.Hash.PackageHash a.PackageCacheImage = targetAssertion.Hash.PackageHash
bus.Manager.Publish(bus.EventPackagePostBuild, struct { bus.Manager.Publish(bus.EventPackagePostBuild, struct {
CompileSpec *compilerspec.LuetCompilationSpec CompileSpec *types.LuetCompilationSpec
Artifact *artifact.PackageArtifact Artifact *artifact.PackageArtifact
}{ }{
CompileSpec: p, CompileSpec: p,
@ -1463,14 +1462,14 @@ func (cs *LuetCompiler) templatePackage(vals []map[string]interface{}, pack *typ
} }
// FromPackage returns a compilation spec from a package definition // FromPackage returns a compilation spec from a package definition
func (cs *LuetCompiler) FromPackage(p *types.Package) (*compilerspec.LuetCompilationSpec, error) { func (cs *LuetCompiler) FromPackage(p *types.Package) (*types.LuetCompilationSpec, error) {
// This would be nice to move it out from the compiler, but it is strictly tight to it given the build options
pack, err := cs.Database.FindPackageCandidate(p) pack, err := cs.Database.FindPackageCandidate(p)
if err != nil { if err != nil {
return nil, err return nil, err
} }
opts := options.Compiler{} opts := types.CompilerOptions{}
artifactMetadataFile := filepath.Join(pack.GetTreeDir(), "..", pack.GetMetadataFilePath()) artifactMetadataFile := filepath.Join(pack.GetTreeDir(), "..", pack.GetMetadataFilePath())
cs.Options.Context.Debug("Checking if metadata file is present", artifactMetadataFile) cs.Options.Context.Debug("Checking if metadata file is present", artifactMetadataFile)
@ -1498,6 +1497,31 @@ func (cs *LuetCompiler) FromPackage(p *types.Package) (*compilerspec.LuetCompila
cs.Options.Context.Debug("metadata file not present, skipping", artifactMetadataFile) cs.Options.Context.Debug("metadata file not present, skipping", artifactMetadataFile)
} }
// If the input is a dockerfile, just consume it and parse any image source from it
if pack.OriginDockerfile != "" {
img := ""
// TODO: Carry this info and parse Dockerfile from somewhere else?
cmds, err := dockerfile.ParseReader(bytes.NewBufferString(pack.OriginDockerfile))
if err != nil {
return nil, errors.Wrap(err, "could not decode Dockerfile")
}
for _, c := range cmds {
if c.Cmd == "FROM" &&
len(c.Value) > 0 && !strings.Contains(strings.ToLower(fmt.Sprint(c.Value)), "as") {
img = c.Value[0]
}
}
compilationSpec := &types.LuetCompilationSpec{
Image: img,
Package: pack,
BuildOptions: &types.CompilerOptions{},
}
cs.inheritSpecBuildOptions(compilationSpec)
return compilationSpec, nil
}
// Update processed build values // Update processed build values
dst, err := template.UnMarshalValues(cs.Options.BuildValuesFile) dst, err := template.UnMarshalValues(cs.Options.BuildValuesFile)
if err != nil { if err != nil {
@ -1510,7 +1534,7 @@ func (cs *LuetCompiler) FromPackage(p *types.Package) (*compilerspec.LuetCompila
return nil, errors.Wrap(err, "while rendering package template") return nil, errors.Wrap(err, "while rendering package template")
} }
newSpec, err := compilerspec.NewLuetCompilationSpec(bytes, pack) newSpec, err := types.NewLuetCompilationSpec(bytes, pack)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -32,9 +32,7 @@ import (
"github.com/mudler/luet/pkg/api/core/types/artifact" "github.com/mudler/luet/pkg/api/core/types/artifact"
. "github.com/mudler/luet/pkg/compiler" . "github.com/mudler/luet/pkg/compiler"
sd "github.com/mudler/luet/pkg/compiler/backend" sd "github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options" "github.com/mudler/luet/pkg/compiler/types/options"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
pkg "github.com/mudler/luet/pkg/database" pkg "github.com/mudler/luet/pkg/database"
fileHelper "github.com/mudler/luet/pkg/helpers/file" fileHelper "github.com/mudler/luet/pkg/helpers/file"
"github.com/mudler/luet/pkg/tree" "github.com/mudler/luet/pkg/tree"
@ -174,7 +172,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
spec2.SetOutputPath(tmpdir) spec2.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec2))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
for _, artifact := range artifacts { for _, artifact := range artifacts {
Expect(fileHelper.Exists(artifact.Path)).To(BeTrue()) Expect(fileHelper.Exists(artifact.Path)).To(BeTrue())
@ -235,7 +233,7 @@ var _ = Describe("Compiler", func() {
spec2.SetOutputPath(tmpdir) spec2.SetOutputPath(tmpdir)
spec3.SetOutputPath(tmpdir) spec3.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec2, spec3))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(3)) Expect(len(artifacts)).To(Equal(3))
@ -281,11 +279,11 @@ var _ = Describe("Compiler", func() {
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
spec2.SetOutputPath(tmpdir) spec2.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
artifacts2, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec2)) artifacts2, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec2))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts2)).To(Equal(1)) Expect(len(artifacts2)).To(Equal(1))
@ -324,7 +322,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
for _, artifact := range artifacts { for _, artifact := range artifacts {
@ -357,7 +355,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
@ -392,7 +390,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
@ -426,7 +424,7 @@ var _ = Describe("Compiler", func() {
// Expect(err).ToNot(HaveOccurred()) // Expect(err).ToNot(HaveOccurred())
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
@ -459,7 +457,7 @@ var _ = Describe("Compiler", func() {
// Expect(err).ToNot(HaveOccurred()) // Expect(err).ToNot(HaveOccurred())
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
@ -492,7 +490,7 @@ var _ = Describe("Compiler", func() {
// Expect(err).ToNot(HaveOccurred()) // Expect(err).ToNot(HaveOccurred())
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
@ -530,7 +528,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
@ -570,7 +568,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
Expect(len(artifacts[0].Dependencies)).To(Equal(1)) Expect(len(artifacts[0].Dependencies)).To(Equal(1))
@ -613,7 +611,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
Expect(len(artifacts[0].Dependencies)).To(Equal(1)) Expect(len(artifacts[0].Dependencies)).To(Equal(1))
@ -654,7 +652,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileWithReverseDeps(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileWithReverseDeps(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(2)) Expect(len(artifacts)).To(Equal(2))
@ -683,7 +681,7 @@ var _ = Describe("Compiler", func() {
spec, err := compiler.FromPackage(&types.Package{Name: "vhba", Category: "sys-fs-5.4.2", Version: "20190410"}) spec, err := compiler.FromPackage(&types.Package{Name: "vhba", Category: "sys-fs-5.4.2", Version: "20190410"})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
bt, err := compiler.BuildTree(compilerspec.LuetCompilationspecs{*spec}) bt, err := compiler.BuildTree(types.LuetCompilationspecs{*spec})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(bt.AllLevels()).To(Equal([]int{0, 1, 2, 3, 4, 5})) Expect(bt.AllLevels()).To(Equal([]int{0, 1, 2, 3, 4, 5}))
@ -716,7 +714,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
Expect(len(artifacts[0].Dependencies)).To(Equal(6)) Expect(len(artifacts[0].Dependencies)).To(Equal(6))
@ -751,7 +749,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileWithReverseDeps(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileWithReverseDeps(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(4)) Expect(len(artifacts)).To(Equal(4))
@ -810,7 +808,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
for _, a := range artifacts { for _, a := range artifacts {
Expect(fileHelper.Exists(a.Path)).To(BeTrue()) Expect(fileHelper.Exists(a.Path)).To(BeTrue())
@ -852,7 +850,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
Expect(len(artifacts[0].Dependencies)).To(Equal(1)) Expect(len(artifacts[0].Dependencies)).To(Equal(1))
@ -889,7 +887,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec2))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(2)) Expect(len(artifacts)).To(Equal(2))
//Expect(len(artifacts[0].Dependencies)).To(Equal(1)) //Expect(len(artifacts[0].Dependencies)).To(Equal(1))
@ -961,7 +959,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
spec2.SetOutputPath(tmpdir2) spec2.SetOutputPath(tmpdir2)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec2))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(2)) Expect(len(artifacts)).To(Equal(2))
Expect(len(artifacts[0].Dependencies)).To(Equal(0)) Expect(len(artifacts[0].Dependencies)).To(Equal(0))
@ -990,7 +988,7 @@ var _ = Describe("Compiler", func() {
spec, err := compiler.FromPackage(&types.Package{Name: "runtime", Category: "layer", Version: "0.1"}) spec, err := compiler.FromPackage(&types.Package{Name: "runtime", Category: "layer", Version: "0.1"})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
compiler.Options.CompressionType = compression.GZip compiler.Options.CompressionType = types.GZip
Expect(spec.GetPackage().GetPath()).ToNot(Equal("")) Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
tmpdir, err := ioutil.TempDir("", "tree") tmpdir, err := ioutil.TempDir("", "tree")
@ -999,7 +997,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
Expect(len(artifacts[0].Dependencies)).To(Equal(1)) Expect(len(artifacts[0].Dependencies)).To(Equal(1))
@ -1044,7 +1042,7 @@ var _ = Describe("Compiler", func() {
spec, err := compiler.FromPackage(&types.Package{Name: "runtime", Category: "layer", Version: "0.1"}) spec, err := compiler.FromPackage(&types.Package{Name: "runtime", Category: "layer", Version: "0.1"})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
compiler.Options.CompressionType = compression.GZip compiler.Options.CompressionType = types.GZip
Expect(spec.GetPackage().GetPath()).ToNot(Equal("")) Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
tmpdir, err := ioutil.TempDir("", "tree") tmpdir, err := ioutil.TempDir("", "tree")
@ -1053,7 +1051,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
Expect(len(artifacts[0].Dependencies)).To(Equal(1)) Expect(len(artifacts[0].Dependencies)).To(Equal(1))
@ -1080,7 +1078,7 @@ var _ = Describe("Compiler", func() {
spec, err := compiler.FromPackage(&types.Package{Name: "runtime", Category: "layer", Version: "0.1"}) spec, err := compiler.FromPackage(&types.Package{Name: "runtime", Category: "layer", Version: "0.1"})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
compiler.Options.CompressionType = compression.GZip compiler.Options.CompressionType = types.GZip
Expect(spec.GetPackage().GetPath()).ToNot(Equal("")) Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
tmpdir, err := ioutil.TempDir("", "tree") tmpdir, err := ioutil.TempDir("", "tree")
@ -1089,7 +1087,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
Expect(len(artifacts[0].Dependencies)).To(Equal(1)) Expect(len(artifacts[0].Dependencies)).To(Equal(1))
@ -1143,7 +1141,7 @@ var _ = Describe("Compiler", func() {
b := sd.NewSimpleDockerBackend(ctx) b := sd.NewSimpleDockerBackend(ctx)
joinImage := "luet/cache:08738767caa9a7397fad70ae53db85fa" //resulting join image joinImage := "luet/cache:c4224fd8279e077727573703b6db70d4" //resulting join image
allImages := []string{ allImages := []string{
joinImage, joinImage,
"test/test:c-test-1.2"} "test/test:c-test-1.2"}
@ -1165,7 +1163,7 @@ var _ = Describe("Compiler", func() {
spec, err := compiler.FromPackage(&types.Package{Name: "x", Category: "test", Version: "0.1"}) spec, err := compiler.FromPackage(&types.Package{Name: "x", Category: "test", Version: "0.1"})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
compiler.Options.CompressionType = compression.GZip compiler.Options.CompressionType = types.GZip
Expect(spec.GetPackage().GetPath()).ToNot(Equal("")) Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
tmpdir, err := ioutil.TempDir("", "tree") tmpdir, err := ioutil.TempDir("", "tree")
@ -1174,7 +1172,7 @@ var _ = Describe("Compiler", func() {
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
artifacts, errs := compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs := compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))
@ -1183,12 +1181,12 @@ var _ = Describe("Compiler", func() {
ContainSubstring("Generating final image for"), ContainSubstring("Generating final image for"),
ContainSubstring("Adding dependency"), ContainSubstring("Adding dependency"),
ContainSubstring("Final image not found for test/c-1.2"), ContainSubstring("Final image not found for test/c-1.2"),
)) ), log)
Expect(log).ToNot(And( Expect(log).ToNot(And(
ContainSubstring("No runtime db present, first level join only"), ContainSubstring("No runtime db present, first level join only"),
ContainSubstring("Final image already found test/test:c-test-1.2"), ContainSubstring("Final image already found test/test:c-test-1.2"),
)) ), log)
os.WriteFile(logPath, []byte{}, os.ModePerm) // cleanup logs os.WriteFile(logPath, []byte{}, os.ModePerm) // cleanup logs
// Remove the join hash so we force using final images // Remove the join hash so we force using final images
@ -1197,7 +1195,7 @@ var _ = Describe("Compiler", func() {
//compile again //compile again
By("Recompiling") By("Recompiling")
artifacts, errs = compiler.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) artifacts, errs = compiler.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeNil()) Expect(errs).To(BeNil())
Expect(len(artifacts)).To(Equal(1)) Expect(len(artifacts)).To(Equal(1))

View File

@ -19,7 +19,6 @@ import (
"fmt" "fmt"
"github.com/mudler/luet/pkg/api/core/types" "github.com/mudler/luet/pkg/api/core/types"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -75,7 +74,7 @@ func (ht *PackageImageHashTree) String() string {
// Query takes a compiler and a compilation spec and returns a PackageImageHashTree tied to it. // Query takes a compiler and a compilation spec and returns a PackageImageHashTree tied to it.
// PackageImageHashTree contains all the informations to resolve the spec build images in order to // PackageImageHashTree contains all the informations to resolve the spec build images in order to
// reproducibly re-build images from packages // reproducibly re-build images from packages
func (ht *ImageHashTree) Query(cs *LuetCompiler, p *compilerspec.LuetCompilationSpec) (*PackageImageHashTree, error) { func (ht *ImageHashTree) Query(cs *LuetCompiler, p *types.LuetCompilationSpec) (*PackageImageHashTree, error) {
assertions, err := ht.resolve(cs, p) assertions, err := ht.resolve(cs, p)
if err != nil { if err != nil {
return nil, err return nil, err
@ -110,7 +109,7 @@ func (ht *ImageHashTree) Query(cs *LuetCompiler, p *compilerspec.LuetCompilation
}, nil }, nil
} }
func (ht *ImageHashTree) genBuilderImageTag(p *compilerspec.LuetCompilationSpec, packageImage string) string { func (ht *ImageHashTree) genBuilderImageTag(p *types.LuetCompilationSpec, packageImage string) string {
// Use packageImage as salt into the fp being used // Use packageImage as salt into the fp being used
// so the hash is unique also in cases where // so the hash is unique also in cases where
// some package deps does have completely different // some package deps does have completely different
@ -120,7 +119,7 @@ func (ht *ImageHashTree) genBuilderImageTag(p *compilerspec.LuetCompilationSpec,
// resolve computes the dependency tree of a compilation spec and returns solver assertions // resolve computes the dependency tree of a compilation spec and returns solver assertions
// in order to be able to compile the spec. // in order to be able to compile the spec.
func (ht *ImageHashTree) resolve(cs *LuetCompiler, p *compilerspec.LuetCompilationSpec) (types.PackagesAssertions, error) { func (ht *ImageHashTree) resolve(cs *LuetCompiler, p *types.LuetCompilationSpec) (types.PackagesAssertions, error) {
dependencies, err := cs.ComputeDepTree(p, cs.Database) dependencies, err := cs.ComputeDepTree(p, cs.Database)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().HumanReadableString()) return nil, errors.Wrap(err, "While computing a solution for "+p.GetPackage().HumanReadableString())

View File

@ -21,7 +21,6 @@ import (
"github.com/mudler/luet/pkg/api/core/context" "github.com/mudler/luet/pkg/api/core/context"
. "github.com/mudler/luet/pkg/compiler" . "github.com/mudler/luet/pkg/compiler"
sd "github.com/mudler/luet/pkg/compiler/backend" sd "github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/options"
pkg "github.com/mudler/luet/pkg/database" pkg "github.com/mudler/luet/pkg/database"
"github.com/mudler/luet/pkg/tree" "github.com/mudler/luet/pkg/tree"
. "github.com/onsi/ginkgo/v2" . "github.com/onsi/ginkgo/v2"
@ -31,14 +30,14 @@ import (
var _ = Describe("ImageHashTree", func() { var _ = Describe("ImageHashTree", func() {
ctx := context.NewContext() ctx := context.NewContext()
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false)) generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2)) compiler := NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), Concurrency(2))
hashtree := NewHashTree(generalRecipe.GetDatabase()) hashtree := NewHashTree(generalRecipe.GetDatabase())
Context("Simple package definition", func() { Context("Simple package definition", func() {
BeforeEach(func() { BeforeEach(func() {
generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false)) generalRecipe = tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
err := generalRecipe.Load("../../tests/fixtures/buildable") err := generalRecipe.Load("../../tests/fixtures/buildable")
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2)) compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), Concurrency(2))
hashtree = NewHashTree(generalRecipe.GetDatabase()) hashtree = NewHashTree(generalRecipe.GetDatabase())
}) })
@ -50,13 +49,13 @@ var _ = Describe("ImageHashTree", func() {
packageHash, err := hashtree.Query(compiler, spec) packageHash, err := hashtree.Query(compiler, spec)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(packageHash.Target.Hash.BuildHash).To(Equal("895697a8bb51b219b78ed081fa1b778801e81505bb03f56acafcf3c476620fc1")) Expect(packageHash.Target.Hash.BuildHash).To(Equal("bf767dba10e4aa9c25e09f1f61ed9944b8e4736f72b2a1f9ac0125f68a714580"), packageHash.Target.Hash.BuildHash)
Expect(packageHash.Target.Hash.PackageHash).To(Equal("2a6c3dc0dd7af2902fd8823a24402d89b2030cfbea6e63fe81afb34af8b1a005")) Expect(packageHash.Target.Hash.PackageHash).To(Equal("6ce76e1a85f02841db083e59d4f9d3e4ab16154f925c1d81014c4938a6b1b1f9"), packageHash.Target.Hash.PackageHash)
Expect(packageHash.BuilderImageHash).To(Equal("builder-3a28d240f505d69123735a567beaf80e")) Expect(packageHash.BuilderImageHash).To(Equal("builder-4ba2735d6368f56627776f8fb8ce6a16"), packageHash.BuilderImageHash)
}) })
}) })
expectedPackageHash := "4154ad4e5dfa2aea41292b3c49eeb04ef327456ecb6312f12d7b94d18ac8cb64" expectedPackageHash := "562b4295b87d561af237997e1320560ee9495a02f69c3c77391b783d2e01ced2"
Context("complex package definition", func() { Context("complex package definition", func() {
BeforeEach(func() { BeforeEach(func() {
@ -64,7 +63,7 @@ var _ = Describe("ImageHashTree", func() {
err := generalRecipe.Load("../../tests/fixtures/upgrade_old_repo_revision") err := generalRecipe.Load("../../tests/fixtures/upgrade_old_repo_revision")
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2)) compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), Concurrency(2))
hashtree = NewHashTree(generalRecipe.GetDatabase()) hashtree = NewHashTree(generalRecipe.GetDatabase())
}) })
@ -75,19 +74,19 @@ var _ = Describe("ImageHashTree", func() {
packageHash, err := hashtree.Query(compiler, spec) packageHash, err := hashtree.Query(compiler, spec)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
expectedHash := "b4b61939260263582da1dfa5289182a0a7570ef8658f3b01b1997fe5d8a95e49" expectedHash := "c5b87e16b2ecafc67e671d8e2c38adf4c4a6eed2a80180229d5892d52e81779b"
Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(expectedPackageHash)) Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(expectedPackageHash), packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash)
Expect(packageHash.SourceHash).To(Equal(expectedPackageHash)) Expect(packageHash.SourceHash).To(Equal(expectedPackageHash), packageHash.SourceHash)
Expect(packageHash.BuilderImageHash).To(Equal("builder-381bd2ad9abe1ac6c3c26cba8f8cca0b")) Expect(packageHash.BuilderImageHash).To(Equal("builder-d934bd6bbf716f5d598d764532bc585c"), packageHash.BuilderImageHash)
//Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281")) //Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
Expect(packageHash.Target.Hash.PackageHash).To(Equal("3a372fcee17b2c7912eabb04b50f7d5a83e75402da0c96c102f7c2e836ebaa10")) Expect(packageHash.Target.Hash.PackageHash).To(Equal("78cace3ee661d14cb2b6236df3dcdc789e36c26a1701ba3e0213e355540a1174"), packageHash.Target.Hash.PackageHash)
a := &types.Package{Name: "a", Category: "test", Version: "1.1"} a := &types.Package{Name: "a", Category: "test", Version: "1.1"}
hash, err := packageHash.DependencyBuildImage(a) hash, err := packageHash.DependencyBuildImage(a)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(hash).To(Equal(expectedHash)) Expect(hash).To(Equal(expectedHash), hash)
assertionA := packageHash.Dependencies.Search(a.GetFingerPrint()) assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())
Expect(assertionA.Hash.PackageHash).To(Equal(expectedPackageHash)) Expect(assertionA.Hash.PackageHash).To(Equal(expectedPackageHash))
@ -98,7 +97,7 @@ var _ = Describe("ImageHashTree", func() {
hashB, err := packageHash.DependencyBuildImage(b) hashB, err := packageHash.DependencyBuildImage(b)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(hashB).To(Equal("fc6fdd4bd62d51fc06c2c22e8bc56543727a2340220972594e28c623ea3a9c6c")) Expect(hashB).To(Equal("9ece11c782e862e366ab4b42fdaaea9d89abe41ff4d9ed1bd24c81f6041bc9da"), hashB)
}) })
}) })
@ -109,7 +108,7 @@ var _ = Describe("ImageHashTree", func() {
//Definition of A here is slightly changed in the steps build.yaml file (1 character only) //Definition of A here is slightly changed in the steps build.yaml file (1 character only)
err := generalRecipe.Load("../../tests/fixtures/upgrade_old_repo_revision_content_changed") err := generalRecipe.Load("../../tests/fixtures/upgrade_old_repo_revision_content_changed")
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2)) compiler = NewLuetCompiler(sd.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), Concurrency(2))
hashtree = NewHashTree(generalRecipe.GetDatabase()) hashtree = NewHashTree(generalRecipe.GetDatabase())
}) })
@ -119,36 +118,35 @@ var _ = Describe("ImageHashTree", func() {
packageHash, err := hashtree.Query(compiler, spec) packageHash, err := hashtree.Query(compiler, spec)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).ToNot(Equal(expectedPackageHash)) Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).ToNot(Equal(expectedPackageHash), packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash)
sourceHash := "5534399abed19a3c93b0e638811a5ba6d07e68f6782e2b40aaf2b09c408a3154" sourceHash := "726635a86f03483c432e33d80ba85443cf30453960826bd813d816786f712bcf"
Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(sourceHash)) Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(sourceHash), packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash)
Expect(packageHash.SourceHash).To(Equal(sourceHash)) Expect(packageHash.SourceHash).To(Equal(sourceHash), packageHash.SourceHash)
Expect(packageHash.SourceHash).ToNot(Equal(expectedPackageHash), packageHash.SourceHash)
Expect(packageHash.SourceHash).ToNot(Equal(expectedPackageHash)) Expect(packageHash.BuilderImageHash).To(Equal("builder-d326b367b72ae030a545e8713d45c9aa"), packageHash.BuilderImageHash)
Expect(packageHash.BuilderImageHash).To(Equal("builder-2a3905cf55bdcd1e4cea6b128cbf5b3a"))
//Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281")) //Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
Expect(packageHash.Target.Hash.PackageHash).To(Equal("4a13154de2e802fbd250236294562fad8c9f2c51ab8a3fc359323dd1ed064907")) Expect(packageHash.Target.Hash.PackageHash).To(Equal("e99b996d2ae378e901668b2f56b184af694fe1f1bc92544a2813d6102738098d"), packageHash.Target.Hash.PackageHash)
a := &types.Package{Name: "a", Category: "test", Version: "1.1"} a := &types.Package{Name: "a", Category: "test", Version: "1.1"}
hash, err := packageHash.DependencyBuildImage(a) hash, err := packageHash.DependencyBuildImage(a)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(hash).To(Equal("b4b61939260263582da1dfa5289182a0a7570ef8658f3b01b1997fe5d8a95e49")) Expect(hash).To(Equal("c5b87e16b2ecafc67e671d8e2c38adf4c4a6eed2a80180229d5892d52e81779b"), hash)
assertionA := packageHash.Dependencies.Search(a.GetFingerPrint()) assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())
Expect(assertionA.Hash.PackageHash).To(Equal("5534399abed19a3c93b0e638811a5ba6d07e68f6782e2b40aaf2b09c408a3154")) Expect(assertionA.Hash.PackageHash).To(Equal("726635a86f03483c432e33d80ba85443cf30453960826bd813d816786f712bcf"), assertionA.Hash.PackageHash)
Expect(assertionA.Hash.PackageHash).ToNot(Equal(expectedPackageHash)) Expect(assertionA.Hash.PackageHash).ToNot(Equal(expectedPackageHash), assertionA.Hash.PackageHash)
b := &types.Package{Name: "b", Category: "test", Version: "1.0"} b := &types.Package{Name: "b", Category: "test", Version: "1.0"}
assertionB := packageHash.Dependencies.Search(b.GetFingerPrint()) assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())
Expect(assertionB.Hash.PackageHash).To(Equal("b4b61939260263582da1dfa5289182a0a7570ef8658f3b01b1997fe5d8a95e49")) Expect(assertionB.Hash.PackageHash).To(Equal("c5b87e16b2ecafc67e671d8e2c38adf4c4a6eed2a80180229d5892d52e81779b"), assertionB.Hash.PackageHash)
hashB, err := packageHash.DependencyBuildImage(b) hashB, err := packageHash.DependencyBuildImage(b)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(hashB).To(Equal("fc6fdd4bd62d51fc06c2c22e8bc56543727a2340220972594e28c623ea3a9c6c")) Expect(hashB).To(Equal("9ece11c782e862e366ab4b42fdaaea9d89abe41ff4d9ed1bd24c81f6041bc9da"), hashB)
}) })
}) })

208
pkg/compiler/options.go Normal file
View File

@ -0,0 +1,208 @@
// Copyright © 2022 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compiler
import (
"runtime"
"github.com/mudler/luet/pkg/api/core/types"
)
// newDefaultCompiler builds the baseline compiler configuration:
// cache images under "luet/cache", no pushing or pulling, artifacts
// left uncompressed, intermediate images kept after the build,
// concurrency equal to the number of logical CPUs, and a single-core
// simple solver.
func newDefaultCompiler() *types.CompilerOptions {
	defaults := types.CompilerOptions{
		PushImageRepository: "luet/cache",
		PullFirst:           false,
		Push:                false,
		CompressionType:     types.None,
		KeepImg:             true,
		Concurrency:         runtime.NumCPU(),
		OnlyDeps:            false,
		NoDeps:              false,
		SolverOptions: types.LuetSolverOptions{
			SolverOptions: types.SolverOptions{Concurrency: 1, Type: types.SolverSingleCoreSimple},
		},
	}
	return &defaults
}
// WithOptions replaces the whole configuration with opt.
//
// NOTE: the previous implementation did `cfg = opt`, which only
// rebinds the local parameter inside the closure and leaves the
// caller's options completely untouched (a silent no-op). Copying the
// pointed-to value makes the override actually take effect while
// keeping the caller's pointer identity intact.
func WithOptions(opt *types.CompilerOptions) func(cfg *types.CompilerOptions) error {
	return func(cfg *types.CompilerOptions) error {
		*cfg = *opt
		return nil
	}
}
// WithRuntimeDatabase sets the runtime package database consulted
// during compilation.
func WithRuntimeDatabase(db types.PackageDatabase) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.RuntimeDatabase = db
		return nil
	}
}
// WithFinalRepository sets the repository where final images of built
// artifacts are pushed.
func WithFinalRepository(r string) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.PushFinalImagesRepository = r
		return nil
	}
}
// EnableGenerateFinalImages turns on generation of final container
// images after a build.
func EnableGenerateFinalImages(cfg *types.CompilerOptions) error {
	cfg.GenerateFinalImages = true
	return nil
}

// EnablePushFinalImages turns on pushing of final images
// (presumably to PushFinalImagesRepository — confirm in the compiler).
func EnablePushFinalImages(cfg *types.CompilerOptions) error {
	cfg.PushFinalImages = true
	return nil
}

// ForcePushFinalImages marks final-image pushes as forced
// (NOTE(review): likely overwrites images already present remotely —
// verify against the push implementation).
func ForcePushFinalImages(cfg *types.CompilerOptions) error {
	cfg.PushFinalImagesForce = true
	return nil
}
// WithBackendType selects which container backend implementation is
// used to drive builds.
func WithBackendType(r string) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.BackendType = r
		return nil
	}
}

// WithTemplateFolder sets the folders scanned for build templates.
func WithTemplateFolder(r []string) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.TemplatesFolder = r
		return nil
	}
}

// WithBuildValues sets the files supplying values for template rendering.
func WithBuildValues(r []string) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.BuildValuesFile = r
		return nil
	}
}

// WithPullRepositories sets the list of repositories images may be
// pulled from.
func WithPullRepositories(r []string) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.PullImageRepository = r
		return nil
	}
}
// WithPushRepository sets the image reference cache images are pushed
// to. If no pull repositories were configured yet, the previously
// configured push repository is kept as a pull source first, so
// already-published cache layers remain resolvable.
func WithPushRepository(r string) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		if len(o.PullImageRepository) == 0 {
			o.PullImageRepository = []string{o.PushImageRepository}
		}
		o.PushImageRepository = r
		return nil
	}
}
// BackendArgs sets extra arguments passed through to the build backend.
func BackendArgs(r []string) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.BackendArgs = r
		return nil
	}
}
// PullFirst toggles pulling images before attempting to build them.
func PullFirst(b bool) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.PullFirst = b
		return nil
	}
}

// KeepImg toggles keeping intermediate images after the build.
func KeepImg(b bool) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.KeepImg = b
		return nil
	}
}

// Rebuild toggles rebuilding even when results already exist.
func Rebuild(b bool) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.Rebuild = b
		return nil
	}
}

// PushImages toggles pushing built images.
func PushImages(b bool) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.Push = b
		return nil
	}
}

// Wait toggles the Wait option (NOTE(review): semantics — likely
// waiting for images to be available remotely — should be confirmed
// against the compiler implementation).
func Wait(b bool) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.Wait = b
		return nil
	}
}

// OnlyDeps toggles compiling only the dependencies of the target.
func OnlyDeps(b bool) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.OnlyDeps = b
		return nil
	}
}

// OnlyTarget toggles restricting the build to the target package only.
func OnlyTarget(b bool) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.PackageTargetOnly = b
		return nil
	}
}

// NoDeps toggles skipping dependency compilation entirely.
func NoDeps(b bool) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.NoDeps = b
		return nil
	}
}
// Concurrency sets how many compilations run in parallel.
//
// Any non-positive value falls back to the number of logical CPUs.
// The original guard only special-cased 0, which let negative values
// through as an invalid concurrency level; `i <= 0` keeps the 0
// behavior identical and additionally sanitizes negatives.
func Concurrency(i int) func(cfg *types.CompilerOptions) error {
	return func(cfg *types.CompilerOptions) error {
		if i <= 0 {
			i = runtime.NumCPU()
		}
		cfg.Concurrency = i
		return nil
	}
}
// WithCompressionType selects the compression applied to produced
// artifacts (e.g. types.None, types.GZip).
func WithCompressionType(t types.CompressionImplementation) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.CompressionType = t
		return nil
	}
}

// WithSolverOptions sets the solver configuration used for dependency
// resolution.
func WithSolverOptions(c types.LuetSolverOptions) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.SolverOptions = c
		return nil
	}
}

// WithContext sets the application context carried through the build.
func WithContext(c types.Context) func(cfg *types.CompilerOptions) error {
	return func(o *types.CompilerOptions) error {
		o.Context = c
		return nil
	}
}

View File

@ -1,9 +0,0 @@
package compression
type Implementation string
const (
None Implementation = "none" // e.g. tar for standard packages
GZip Implementation = "gzip"
Zstandard Implementation = "zstd"
)

View File

@ -1,260 +0,0 @@
// Copyright © 2019-2021 Ettore Di Giacinto <mudler@sabayon.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package options
import (
"runtime"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler/types/compression"
)
type Compiler struct {
PushImageRepository string
PullImageRepository []string
PullFirst, KeepImg, Push bool
Concurrency int
CompressionType compression.Implementation
Wait bool
OnlyDeps bool
NoDeps bool
SolverOptions types.LuetSolverOptions
BuildValuesFile []string
BuildValues []map[string]interface{}
PackageTargetOnly bool
Rebuild bool
BackendArgs []string
BackendType string
// TemplatesFolder. should default to tree/templates
TemplatesFolder []string
// Tells wether to push final container images after building
PushFinalImages bool
PushFinalImagesForce bool
GenerateFinalImages bool
// Image repository to push to
PushFinalImagesRepository string
RuntimeDatabase types.PackageDatabase
Context types.Context
}
func NewDefaultCompiler() *Compiler {
return &Compiler{
PushImageRepository: "luet/cache",
PullFirst: false,
Push: false,
CompressionType: compression.None,
KeepImg: true,
Concurrency: runtime.NumCPU(),
OnlyDeps: false,
NoDeps: false,
SolverOptions: types.LuetSolverOptions{SolverOptions: types.SolverOptions{Concurrency: 1, Type: types.SolverSingleCoreSimple}},
}
}
type Option func(cfg *Compiler) error
func (cfg *Compiler) Apply(opts ...Option) error {
for _, opt := range opts {
if opt == nil {
continue
}
if err := opt(cfg); err != nil {
return err
}
}
return nil
}
func WithOptions(opt *Compiler) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg = opt
return nil
}
}
func WithRuntimeDatabase(db types.PackageDatabase) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.RuntimeDatabase = db
return nil
}
}
// WithFinalRepository Sets the final repository where to push
// images of built artifacts
func WithFinalRepository(r string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.PushFinalImagesRepository = r
return nil
}
}
func EnableGenerateFinalImages(cfg *Compiler) error {
cfg.GenerateFinalImages = true
return nil
}
func EnablePushFinalImages(cfg *Compiler) error {
cfg.PushFinalImages = true
return nil
}
func ForcePushFinalImages(cfg *Compiler) error {
cfg.PushFinalImagesForce = true
return nil
}
func WithBackendType(r string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.BackendType = r
return nil
}
}
func WithTemplateFolder(r []string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.TemplatesFolder = r
return nil
}
}
func WithBuildValues(r []string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.BuildValuesFile = r
return nil
}
}
func WithPullRepositories(r []string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.PullImageRepository = r
return nil
}
}
// WithPushRepository Sets the image reference where to push
// cache images
func WithPushRepository(r string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
if len(cfg.PullImageRepository) == 0 {
cfg.PullImageRepository = []string{cfg.PushImageRepository}
}
cfg.PushImageRepository = r
return nil
}
}
func BackendArgs(r []string) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.BackendArgs = r
return nil
}
}
func PullFirst(b bool) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.PullFirst = b
return nil
}
}
func KeepImg(b bool) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.KeepImg = b
return nil
}
}
func Rebuild(b bool) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.Rebuild = b
return nil
}
}
func PushImages(b bool) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.Push = b
return nil
}
}
func Wait(b bool) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.Wait = b
return nil
}
}
func OnlyDeps(b bool) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.OnlyDeps = b
return nil
}
}
func OnlyTarget(b bool) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.PackageTargetOnly = b
return nil
}
}
func NoDeps(b bool) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.NoDeps = b
return nil
}
}
func Concurrency(i int) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
if i == 0 {
i = runtime.NumCPU()
}
cfg.Concurrency = i
return nil
}
}
func WithCompressionType(t compression.Implementation) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.CompressionType = t
return nil
}
}
func WithSolverOptions(c types.LuetSolverOptions) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.SolverOptions = c
return nil
}
}
func WithContext(c types.Context) func(cfg *Compiler) error {
return func(cfg *Compiler) error {
cfg.Context = c
return nil
}
}

View File

@ -1,28 +0,0 @@
// Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package compilerspec_test
import (
"testing"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
func TestSpec(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Spec Suite")
}

View File

@ -24,7 +24,6 @@ import (
"github.com/mudler/luet/pkg/api/core/context" "github.com/mudler/luet/pkg/api/core/context"
"github.com/mudler/luet/pkg/api/core/types/artifact" "github.com/mudler/luet/pkg/api/core/types/artifact"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
fileHelper "github.com/mudler/luet/pkg/helpers/file" fileHelper "github.com/mudler/luet/pkg/helpers/file"
. "github.com/mudler/luet/pkg/installer/client" . "github.com/mudler/luet/pkg/installer/client"
@ -61,7 +60,7 @@ var _ = Describe("Docker client", func() {
It("Downloads artifacts", func() { It("Downloads artifacts", func() {
f, err := c.DownloadArtifact(&artifact.PackageArtifact{ f, err := c.DownloadArtifact(&artifact.PackageArtifact{
Path: "test.tar", Path: "test.tar",
CompileSpec: &compilerspec.LuetCompilationSpec{ CompileSpec: &types.LuetCompilationSpec{
Package: &types.Package{ Package: &types.Package{
Name: "c", Name: "c",
Category: "test", Category: "test",

View File

@ -25,9 +25,7 @@ import (
"github.com/mudler/luet/pkg/api/core/types" "github.com/mudler/luet/pkg/api/core/types"
compiler "github.com/mudler/luet/pkg/compiler" compiler "github.com/mudler/luet/pkg/compiler"
backend "github.com/mudler/luet/pkg/compiler/backend" backend "github.com/mudler/luet/pkg/compiler/backend"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
"github.com/mudler/luet/pkg/compiler/types/options" "github.com/mudler/luet/pkg/compiler/types/options"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
fileHelper "github.com/mudler/luet/pkg/helpers/file" fileHelper "github.com/mudler/luet/pkg/helpers/file"
pkg "github.com/mudler/luet/pkg/database" pkg "github.com/mudler/luet/pkg/database"
@ -230,7 +228,7 @@ urls:
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
treeFile := NewDefaultTreeRepositoryFile() treeFile := NewDefaultTreeRepositoryFile()
treeFile.SetCompressionType(compression.None) treeFile.SetCompressionType(types.None)
repo.SetRepositoryFile(REPOFILE_TREE_KEY, treeFile) repo.SetRepositoryFile(REPOFILE_TREE_KEY, treeFile)
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
Expect(repo.GetName()).To(Equal("test")) Expect(repo.GetName()).To(Equal("test"))
@ -622,7 +620,7 @@ urls:
spec2.SetOutputPath(tmpdir) spec2.SetOutputPath(tmpdir)
spec3.SetOutputPath(tmpdir) spec3.SetOutputPath(tmpdir)
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3)) _, errs := c.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec2, spec3))
Expect(errs).To(BeEmpty()) Expect(errs).To(BeEmpty())
@ -738,7 +736,7 @@ urls:
spec4.SetOutputPath(tmpdir) spec4.SetOutputPath(tmpdir)
spec5.SetOutputPath(tmpdir) spec5.SetOutputPath(tmpdir)
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec4, spec5)) _, errs := c.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec2, spec4, spec5))
Expect(errs).To(BeEmpty()) Expect(errs).To(BeEmpty())
@ -862,7 +860,7 @@ urls:
spec3.SetOutputPath(tmpdir) spec3.SetOutputPath(tmpdir)
spec6.SetOutputPath(tmpdir) spec6.SetOutputPath(tmpdir)
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3, spec4, spec5, spec6)) _, errs := c.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec2, spec3, spec4, spec5, spec6))
Expect(errs).To(BeEmpty()) Expect(errs).To(BeEmpty())
@ -995,11 +993,11 @@ urls:
spec2.SetOutputPath(tmpdirnewrepo) spec2.SetOutputPath(tmpdirnewrepo)
spec3.SetOutputPath(tmpdir) spec3.SetOutputPath(tmpdir)
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec3)) _, errs := c.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec3))
Expect(errs).To(BeEmpty()) Expect(errs).To(BeEmpty())
_, errs = c2.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec2)) _, errs = c2.CompileParallel(false, types.NewLuetCompilationspecs(spec2))
Expect(errs).To(BeEmpty()) Expect(errs).To(BeEmpty())
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_old_repo") repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_old_repo")
@ -1110,7 +1108,7 @@ urls:
backend.NewSimpleDockerBackend(ctx), backend.NewSimpleDockerBackend(ctx),
generalRecipe.GetDatabase(), generalRecipe.GetDatabase(),
options.Concurrency(2), options.Concurrency(2),
options.WithCompressionType(compression.GZip), options.WithCompressionType(types.GZip),
) )
spec, err := c.FromPackage(&types.Package{Name: "b", Category: "test", Version: "1.0"}) spec, err := c.FromPackage(&types.Package{Name: "b", Category: "test", Version: "1.0"})
@ -1129,7 +1127,7 @@ urls:
spec2.SetOutputPath(tmpdir) spec2.SetOutputPath(tmpdir)
spec3.SetOutputPath(tmpdir) spec3.SetOutputPath(tmpdir)
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3)) _, errs := c.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec2, spec3))
Expect(errs).To(BeEmpty()) Expect(errs).To(BeEmpty())
@ -1274,7 +1272,7 @@ urls:
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(),
options.Concurrency(2), options.Concurrency(2),
options.WithCompressionType(compression.GZip)) options.WithCompressionType(types.GZip))
spec, err := c.FromPackage(&types.Package{Name: "b", Category: "test", Version: "1.0"}) spec, err := c.FromPackage(&types.Package{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -1291,7 +1289,7 @@ urls:
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
spec2.SetOutputPath(tmpdir) spec2.SetOutputPath(tmpdir)
spec3.SetOutputPath(tmpdir) spec3.SetOutputPath(tmpdir)
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3)) _, errs := c.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec2, spec3))
Expect(errs).To(BeEmpty()) Expect(errs).To(BeEmpty())
@ -1382,7 +1380,7 @@ urls:
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3)) Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(3))
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(),
options.WithCompressionType(compression.GZip)) options.WithCompressionType(types.GZip))
spec, err := c.FromPackage(&types.Package{Name: "b", Category: "test", Version: "1.0"}) spec, err := c.FromPackage(&types.Package{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred()) Expect(err).ToNot(HaveOccurred())
@ -1396,7 +1394,7 @@ urls:
defer os.RemoveAll(tmpdir) // clean up defer os.RemoveAll(tmpdir) // clean up
spec.SetOutputPath(tmpdir) spec.SetOutputPath(tmpdir)
spec3.SetOutputPath(tmpdir) spec3.SetOutputPath(tmpdir)
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec3)) _, errs := c.CompileParallel(false, types.NewLuetCompilationspecs(spec, spec3))
Expect(errs).To(BeEmpty()) Expect(errs).To(BeEmpty())
@ -1486,7 +1484,7 @@ urls:
defer os.RemoveAll(tmpdir2) // clean up defer os.RemoveAll(tmpdir2) // clean up
spec.SetOutputPath(tmpdir2) spec.SetOutputPath(tmpdir2)
_, errs = c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec)) _, errs = c.CompileParallel(false, types.NewLuetCompilationspecs(spec))
Expect(errs).To(BeEmpty()) Expect(errs).To(BeEmpty())

View File

@ -29,7 +29,6 @@ import (
"github.com/mudler/luet/pkg/api/core/template" "github.com/mudler/luet/pkg/api/core/template"
artifact "github.com/mudler/luet/pkg/api/core/types/artifact" artifact "github.com/mudler/luet/pkg/api/core/types/artifact"
compression "github.com/mudler/luet/pkg/compiler/types/compression"
fileHelper "github.com/mudler/luet/pkg/helpers/file" fileHelper "github.com/mudler/luet/pkg/helpers/file"
"go.uber.org/multierr" "go.uber.org/multierr"
@ -60,9 +59,9 @@ const (
) )
type LuetRepositoryFile struct { type LuetRepositoryFile struct {
FileName string `json:"filename"` FileName string `json:"filename"`
CompressionType compression.Implementation `json:"compressiontype,omitempty"` CompressionType types.CompressionImplementation `json:"compressiontype,omitempty"`
Checksums artifact.Checksums `json:"checksums,omitempty"` Checksums artifact.Checksums `json:"checksums,omitempty"`
} }
type LuetSystemRepository struct { type LuetSystemRepository struct {
@ -205,21 +204,21 @@ func (m *LuetSystemRepositoryMetadata) ToArtifactIndex() (ans compiler.ArtifactI
func NewDefaultTreeRepositoryFile() LuetRepositoryFile { func NewDefaultTreeRepositoryFile() LuetRepositoryFile {
return LuetRepositoryFile{ return LuetRepositoryFile{
FileName: TREE_TARBALL, FileName: TREE_TARBALL,
CompressionType: compression.GZip, CompressionType: types.GZip,
} }
} }
func NewDefaultCompilerTreeRepositoryFile() LuetRepositoryFile { func NewDefaultCompilerTreeRepositoryFile() LuetRepositoryFile {
return LuetRepositoryFile{ return LuetRepositoryFile{
FileName: COMPILERTREE_TARBALL, FileName: COMPILERTREE_TARBALL,
CompressionType: compression.GZip, CompressionType: types.GZip,
} }
} }
func NewDefaultMetaRepositoryFile() LuetRepositoryFile { func NewDefaultMetaRepositoryFile() LuetRepositoryFile {
return LuetRepositoryFile{ return LuetRepositoryFile{
FileName: REPOSITORY_METAFILE + ".tar", FileName: REPOSITORY_METAFILE + ".tar",
CompressionType: compression.None, CompressionType: types.None,
} }
} }
@ -240,14 +239,14 @@ func (f *LuetRepositoryFile) GetFileName() string {
// SetCompressionType sets the compression type of the repository file. // SetCompressionType sets the compression type of the repository file.
// Each repository can ship arbitrary file that will be downloaded by the client // Each repository can ship arbitrary file that will be downloaded by the client
// in case of need, this sets the compression type that the client will use to uncompress the artifact // in case of need, this sets the compression type that the client will use to uncompress the artifact
func (f *LuetRepositoryFile) SetCompressionType(c compression.Implementation) { func (f *LuetRepositoryFile) SetCompressionType(c types.CompressionImplementation) {
f.CompressionType = c f.CompressionType = c
} }
// GetCompressionType gets the compression type of the repository file. // GetCompressionType gets the compression type of the repository file.
// Each repository can ship arbitrary file that will be downloaded by the client // Each repository can ship arbitrary file that will be downloaded by the client
// in case of need, this gets the compression type that the client will use to uncompress the artifact // in case of need, this gets the compression type that the client will use to uncompress the artifact
func (f *LuetRepositoryFile) GetCompressionType() compression.Implementation { func (f *LuetRepositoryFile) GetCompressionType() types.CompressionImplementation {
return f.CompressionType return f.CompressionType
} }
@ -272,11 +271,11 @@ func GenerateRepository(p ...RepositoryOption) (*LuetSystemRepository, error) {
c := RepositoryConfig{} c := RepositoryConfig{}
c.Apply(p...) c.Apply(p...)
btr := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false)) btr := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false), c.compilerParser...)
runtimeTree := pkg.NewInMemoryDatabase(false) runtimeTree := pkg.NewInMemoryDatabase(false)
tempTree := pkg.NewInMemoryDatabase(false) tempTree := pkg.NewInMemoryDatabase(false)
temptr := tree.NewInstallerRecipe(tempTree) temptr := tree.NewInstallerRecipe(tempTree, c.runtimeParser...)
for _, treeDir := range c.Tree { for _, treeDir := range c.Tree {
if err := temptr.Load(treeDir); err != nil { if err := temptr.Load(treeDir); err != nil {
@ -291,7 +290,7 @@ func GenerateRepository(p ...RepositoryOption) (*LuetSystemRepository, error) {
// instead of local tree // instead of local tree
repodb := pkg.NewInMemoryDatabase(false) repodb := pkg.NewInMemoryDatabase(false)
generalRecipe := tree.NewCompilerRecipe(repodb) generalRecipe := tree.NewCompilerRecipe(repodb, c.compilerParser...)
if c.FromRepository { if c.FromRepository {
if _, err := LoadBuildTree(generalRecipe, repodb, c.context); err != nil { if _, err := LoadBuildTree(generalRecipe, repodb, c.context); err != nil {
@ -354,7 +353,7 @@ func GenerateRepository(p ...RepositoryOption) (*LuetSystemRepository, error) {
repo := &LuetSystemRepository{ repo := &LuetSystemRepository{
LuetRepository: types.NewLuetRepository(c.Name, c.Type, c.Description, c.Urls, c.Priority, true, false), LuetRepository: types.NewLuetRepository(c.Name, c.Type, c.Description, c.Urls, c.Priority, true, false),
Tree: tree.NewInstallerRecipe(runtimeTree), Tree: tree.NewInstallerRecipe(runtimeTree, c.runtimeParser...),
BuildTree: btr, BuildTree: btr,
RepositoryFiles: map[string]LuetRepositoryFile{}, RepositoryFiles: map[string]LuetRepositoryFile{},
PushImages: c.PushImages, PushImages: c.PushImages,

View File

@ -18,6 +18,7 @@ package installer
import ( import (
"github.com/mudler/luet/pkg/api/core/types" "github.com/mudler/luet/pkg/api/core/types"
"github.com/mudler/luet/pkg/compiler" "github.com/mudler/luet/pkg/compiler"
"github.com/mudler/luet/pkg/tree"
) )
type RepositoryOption func(cfg *RepositoryConfig) error type RepositoryOption func(cfg *RepositoryConfig) error
@ -34,6 +35,9 @@ type RepositoryConfig struct {
context types.Context context types.Context
PushImages, Force, FromRepository, FromMetadata bool PushImages, Force, FromRepository, FromMetadata bool
compilerParser []tree.FileParser
runtimeParser []tree.FileParser
} }
// Apply applies the given options to the config, returning the first error // Apply applies the given options to the config, returning the first error
@ -57,6 +61,20 @@ func WithContext(c types.Context) func(cfg *RepositoryConfig) error {
} }
} }
func WithRuntimeParser(parsers ...tree.FileParser) RepositoryOption {
return func(cfg *RepositoryConfig) error {
cfg.runtimeParser = append(cfg.runtimeParser, parsers...)
return nil
}
}
func WithCompilerParser(parsers ...tree.FileParser) RepositoryOption {
return func(cfg *RepositoryConfig) error {
cfg.compilerParser = append(cfg.compilerParser, parsers...)
return nil
}
}
func WithDatabase(b types.PackageDatabase) func(cfg *RepositoryConfig) error { func WithDatabase(b types.PackageDatabase) func(cfg *RepositoryConfig) error {
return func(cfg *RepositoryConfig) error { return func(cfg *RepositoryConfig) error {
cfg.DB = b cfg.DB = b

View File

@ -30,7 +30,6 @@ import (
"github.com/mudler/luet/pkg/compiler" "github.com/mudler/luet/pkg/compiler"
backend "github.com/mudler/luet/pkg/compiler/backend" backend "github.com/mudler/luet/pkg/compiler/backend"
"github.com/mudler/luet/pkg/compiler/types/options" "github.com/mudler/luet/pkg/compiler/types/options"
compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
pkg "github.com/mudler/luet/pkg/database" pkg "github.com/mudler/luet/pkg/database"
fileHelper "github.com/mudler/luet/pkg/helpers/file" fileHelper "github.com/mudler/luet/pkg/helpers/file"
. "github.com/mudler/luet/pkg/installer" . "github.com/mudler/luet/pkg/installer"
@ -536,7 +535,7 @@ urls:
a, err = c.DownloadArtifact(&artifact.PackageArtifact{ a, err = c.DownloadArtifact(&artifact.PackageArtifact{
Path: "test.tar", Path: "test.tar",
CompileSpec: &compilerspec.LuetCompilationSpec{ CompileSpec: &types.LuetCompilationSpec{
Package: &types.Package{ Package: &types.Package{
Name: "b", Name: "b",
Category: "test", Category: "test",
@ -608,7 +607,7 @@ urls:
a, err = c.DownloadArtifact(&artifact.PackageArtifact{ a, err = c.DownloadArtifact(&artifact.PackageArtifact{
Path: "test.tar", Path: "test.tar",
CompileSpec: &compilerspec.LuetCompilationSpec{ CompileSpec: &types.LuetCompilationSpec{
Package: &types.Package{ Package: &types.Package{
Name: "a", Name: "a",
Category: "test", Category: "test",
@ -629,7 +628,7 @@ urls:
&LuetSystemRepository{ &LuetSystemRepository{
Index: compiler.ArtifactIndex{ Index: compiler.ArtifactIndex{
&artifact.PackageArtifact{ &artifact.PackageArtifact{
CompileSpec: &compilerspec.LuetCompilationSpec{ CompileSpec: &types.LuetCompilationSpec{
Package: &types.Package{}, Package: &types.Package{},
}, },
Path: "bar", Path: "bar",
@ -653,7 +652,7 @@ urls:
Index: compiler.ArtifactIndex{ Index: compiler.ArtifactIndex{
&artifact.PackageArtifact{ &artifact.PackageArtifact{
Path: "foo", Path: "foo",
CompileSpec: &compilerspec.LuetCompilationSpec{ CompileSpec: &types.LuetCompilationSpec{
Package: &types.Package{ Package: &types.Package{
Name: "foo", Name: "foo",
Category: "bar", Category: "bar",
@ -663,7 +662,7 @@ urls:
}, },
&artifact.PackageArtifact{ &artifact.PackageArtifact{
Path: "baz", Path: "baz",
CompileSpec: &compilerspec.LuetCompilationSpec{ CompileSpec: &types.LuetCompilationSpec{
Package: &types.Package{ Package: &types.Package{
Name: "foo", Name: "foo",
Category: "baz", Category: "baz",

View File

@ -35,12 +35,14 @@ const (
CompilerDefinitionFile = "build.yaml" CompilerDefinitionFile = "build.yaml"
) )
var DefaultCompilerParsers = []FileParser{
BuildCollectionParser,
BuildDefinitionParser,
}
func NewCompilerRecipe(d types.PackageDatabase, fp ...FileParser) Builder { func NewCompilerRecipe(d types.PackageDatabase, fp ...FileParser) Builder {
if len(fp) == 0 { if len(fp) == 0 {
fp = []FileParser{ fp = DefaultCompilerParsers
BuildCollectionParser,
BuildDefinitionParser,
}
} }
return &CompilerRecipe{Recipe: Recipe{Database: d}, fileParsers: fp} return &CompilerRecipe{Recipe: Recipe{Database: d}, fileParsers: fp}
} }

View File

@ -0,0 +1,62 @@
// Copyright © 2022 Ettore Di Giacinto <mudler@luet.io>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package tree
import (
"path/filepath"
"strings"
"github.com/mudler/luet/pkg/api/core/types"
"github.com/pkg/errors"
)
// RuntimeDockerfileParser registers a runtime package for every Dockerfile
// encountered while walking a tree directory. The package name is implied
// from the directory holding the Dockerfile; any entry whose name does not
// contain "Dockerfile" is ignored.
func RuntimeDockerfileParser(srcDir, currentpath, name string, templates []string, db types.PackageDatabase) error {
	if !strings.Contains(name, "Dockerfile") {
		// Not a Dockerfile: nothing to register for this entry.
		return nil
	}

	dir := filepath.Dir(currentpath)
	pkg := &types.Package{
		Name: filepath.Base(dir),
		// Path is set only internally when tree is loaded from disk
		Path:    dir,
		TreeDir: srcDir,
	}
	if _, err := db.CreatePackage(pkg); err != nil {
		return errors.Wrap(err, "Error creating package "+currentpath)
	}
	return nil
}
// BuildDockerfileParser registers a build-time package for every Dockerfile
// encountered while walking a tree directory. Like the runtime variant, the
// package name is implied from the containing directory, but the Dockerfile
// contents are additionally attached to the package so the compiler can
// build the image from it.
func BuildDockerfileParser(srcDir, currentpath, name string, templates []string, db types.PackageDatabase) error {
	if !strings.Contains(name, "Dockerfile") {
		// Not a Dockerfile: nothing to register for this entry.
		return nil
	}

	dir := filepath.Dir(currentpath)

	// Simply imply the name package from the directory name
	// TODO: Read specific labels from dockerfile as we do read the image already
	p := &types.Package{
		Name: filepath.Base(dir),
		// Path is set only internally when tree is loaded from disk
		Path:    dir,
		TreeDir: srcDir,
	}
	if err := p.SetOriginalDockerfile(currentpath); err != nil {
		return errors.Wrap(err, "Error reading file "+currentpath)
	}

	if _, err := db.CreatePackage(p); err != nil {
		return errors.Wrap(err, "Error creating package "+currentpath)
	}
	return nil
}

View File

@ -26,6 +26,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"github.com/mudler/luet/pkg/api/core/template"
"github.com/mudler/luet/pkg/api/core/types" "github.com/mudler/luet/pkg/api/core/types"
fileHelper "github.com/mudler/luet/pkg/helpers/file" fileHelper "github.com/mudler/luet/pkg/helpers/file"
@ -36,12 +37,14 @@ const (
FinalizerFile = "finalize.yaml" FinalizerFile = "finalize.yaml"
) )
var DefaultInstallerParsers = []FileParser{
RuntimeCollectionParser,
RuntimeDefinitionParser,
}
func NewInstallerRecipe(db types.PackageDatabase, fp ...FileParser) Builder { func NewInstallerRecipe(db types.PackageDatabase, fp ...FileParser) Builder {
if len(fp) == 0 { if len(fp) == 0 {
fp = []FileParser{ fp = DefaultInstallerParsers
RuntimeCollectionParser,
RuntimeDefinitionParser,
}
} }
return &InstallerRecipe{Database: db, fileParsers: fp} return &InstallerRecipe{Database: db, fileParsers: fp}
} }
@ -87,20 +90,24 @@ func (r *InstallerRecipe) Load(path string) error {
r.SourcePath = append(r.SourcePath, path) r.SourcePath = append(r.SourcePath, path)
c, err := template.FilesInDir(template.FindPossibleTemplatesDir(path))
if err != nil {
return err
}
//r.Tree().SetPackageSet(pkg.NewBoltDatabase(tmpfile.Name())) //r.Tree().SetPackageSet(pkg.NewBoltDatabase(tmpfile.Name()))
// TODO: Handle cleaning after? Cleanup implemented in GetPackageSet().Clean() // TODO: Handle cleaning after? Cleanup implemented in GetPackageSet().Clean()
// the function that handles each file or dir // the function that handles each file or dir
var ff = func(currentpath string, info os.FileInfo, err error) error { var ff = func(currentpath string, info os.FileInfo, err error) error {
for _, p := range r.fileParsers { for _, p := range r.fileParsers {
if err := p(path, currentpath, info.Name(), []string{}, r.Database); err != nil { if err := p(path, currentpath, info.Name(), c, r.Database); err != nil {
return err return err
} }
} }
return nil return nil
} }
err := filepath.Walk(path, ff) err = filepath.Walk(path, ff)
if err != nil { if err != nil {
return err return err
} }

View File

@ -36,10 +36,7 @@ import (
func NewGeneralRecipe(db types.PackageDatabase, fp ...FileParser) Builder { func NewGeneralRecipe(db types.PackageDatabase, fp ...FileParser) Builder {
if len(fp) == 0 { if len(fp) == 0 {
fp = []FileParser{ fp = DefaultInstallerParsers
RuntimeCollectionParser,
RuntimeDefinitionParser,
}
} }
return &Recipe{Database: db, fileParsers: fp} return &Recipe{Database: db, fileParsers: fp}
} }

View File

@ -0,0 +1,3 @@
FROM alpine
RUN apk add curl

View File

@ -0,0 +1,69 @@
#!/bin/bash
# Integration test: build and install a package defined directly by a
# Dockerfile (the new --dockerfiles tree parser), exercising luet build,
# create-repo, config and install end to end under shUnit2.

export LUET_NOLOCK=true

# Create a scratch workspace shared by every test in this file.
oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

# Remove the scratch workspace after the whole suite has run.
oneTimeTearDown() {
rm -rf "$tmpdir"
}

# Build the "curl" package from the Dockerfile-based fixture tree.
testBuild() {
mkdir $tmpdir/testbuild
luet build --tree "$ROOT_DIR/tests/fixtures/dockerfiles" --dockerfiles --destination $tmpdir/testbuild --compression gzip curl
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
# Dockerfile-derived packages carry no category/version, hence "curl--".
assertTrue 'create package' "[ -e '$tmpdir/testbuild/curl--.package.tar.gz' ]"
}

# Publish the artifacts built above as a disk-type repository.
testRepo() {
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --dockerfiles --tree "$ROOT_DIR/tests/fixtures/dockerfiles" \
--output $tmpdir/testbuild \
--packages $tmpdir/testbuild \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--type disk > /dev/null
createst=$?
assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}

# Write a client configuration pointing at the repository created above.
testConfig() {
mkdir $tmpdir/testrootfs
cat <<EOF > $tmpdir/luet.yaml
general:
debug: true
system:
rootfs: $tmpdir/testrootfs
database_path: "/"
database_engine: "boltdb"
config_from_host: true
repositories:
- name: "main"
type: "disk"
enable: true
urls:
- "$tmpdir/testbuild"
EOF
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"
}

# Install the package into the test rootfs and verify its payload landed.
testInstall() {
luet install -y --config $tmpdir/luet.yaml curl
#luet install -y --config $tmpdir/luet.yaml test/c@1.0 > /dev/null
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed' "[ -e '$tmpdir/testrootfs/usr/bin/curl' ]"
}

# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2

View File

@ -51,9 +51,9 @@ testBuild() {
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]" assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]" assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]" assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
assertContains 'Does use the upstream cache without specifying it test/c' "$build_output" "Images available remotely for test/c-1.0 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:d6a82b43c97322cfc549176f54b459d6e6b4a7c756ba5bcd17f1775469ad42c7" assertContains 'Does use the upstream cache without specifying it test/c' "$build_output" "Images available remotely for test/c-1.0 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:5387bd29accbd644df2b9d064c19451cd7a0ba57583a225af8ef76b79fb07511"
assertContains 'Does use the upstream cache without specifying it test/z' "$build_output" "Images available remotely for test/z-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:e324d35eca913bde850b6fd130496b3b347f0090d5bbed900d4b64b837df89d8" assertContains 'Does use the upstream cache without specifying it test/z' "$build_output" "Images available remotely for test/z-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:44aa6020c74536c8eb3bb501e0f69c68c63c071ebfb5da7c395655f78114ea83"
assertContains 'Does use the upstream cache without specifying it test/interpolated' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:bec91b2b88dfeb68c9cad762a99a35233f7a38722573c4982d9b2168aac5992e" assertContains 'Does use the upstream cache without specifying it test/interpolated' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:3229bfee7cb1774e92f9b669ecf6c97c58a70ecb941fa2b1d8a32198a75a76f0"
} }
testRepo() { testRepo() {

View File

@ -56,8 +56,8 @@ EOF
assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]" assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]" assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]" assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
assertNotContains 'Does NOT use the upstream cache without specifying it' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:bec91b2b88dfeb68c9cad762a99a35233f7a38722573c4982d9b2168aac5992e" assertNotContains 'Does NOT use the upstream cache without specifying it' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:"
assertContains 'Does generate a new hash as values changed build.yaml for test/interpolated-1.0+2 package image' "$build_output" "Building image luet/cache:e0a392a824a56f720af104df1e9c79cb4cb2af58a8bab728979891554476c6ff done" assertContains 'Does generate a new hash as values changed build.yaml for test/interpolated-1.0+2 package image' "$build_output" "Images not available for test/interpolated-1.0+2"
} }
testRepo() { testRepo() {

5
vendor/github.com/Azure/go-ansiterm/go.mod generated vendored Normal file
View File

@ -0,0 +1,5 @@
module github.com/Azure/go-ansiterm
go 1.16
require golang.org/x/sys v0.0.0-20210616094352-59db8d763f22

2
vendor/github.com/Azure/go-ansiterm/go.sum generated vendored Normal file
View File

@ -0,0 +1,2 @@
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

View File

@ -10,6 +10,7 @@ import (
"syscall" "syscall"
"github.com/Azure/go-ansiterm" "github.com/Azure/go-ansiterm"
windows "golang.org/x/sys/windows"
) )
// Windows keyboard constants // Windows keyboard constants
@ -162,15 +163,28 @@ func ensureInRange(n int16, min int16, max int16) int16 {
func GetStdFile(nFile int) (*os.File, uintptr) { func GetStdFile(nFile int) (*os.File, uintptr) {
var file *os.File var file *os.File
switch nFile {
case syscall.STD_INPUT_HANDLE: // syscall uses negative numbers
// windows package uses very big uint32
// Keep these switches split so we don't have to convert ints too much.
switch uint32(nFile) {
case windows.STD_INPUT_HANDLE:
file = os.Stdin file = os.Stdin
case syscall.STD_OUTPUT_HANDLE: case windows.STD_OUTPUT_HANDLE:
file = os.Stdout file = os.Stdout
case syscall.STD_ERROR_HANDLE: case windows.STD_ERROR_HANDLE:
file = os.Stderr file = os.Stderr
default: default:
panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) switch nFile {
case syscall.STD_INPUT_HANDLE:
file = os.Stdin
case syscall.STD_OUTPUT_HANDLE:
file = os.Stdout
case syscall.STD_ERROR_HANDLE:
file = os.Stderr
default:
panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
}
} }
fd, err := syscall.GetStdHandle(nFile) fd, err := syscall.GetStdHandle(nFile)

View File

@ -1,3 +1,38 @@
# Binaries for programs and plugins
*.exe *.exe
.idea *.dll
.vscode *.so
*.dylib
# Ignore vscode setting files
.vscode/
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
# Ignore gcs bin directory
service/bin/
service/pkg/
*.img
*.vhd
*.tar.gz
# Make stuff
.rootfs-done
bin/*
rootfs/*
*.o
/build/
deps/*
out/*
.idea/
.vscode/

99
vendor/github.com/Microsoft/hcsshim/.golangci.yml generated vendored Normal file
View File

@ -0,0 +1,99 @@
run:
timeout: 8m
linters:
enable:
- stylecheck
linters-settings:
stylecheck:
# https://staticcheck.io/docs/checks
checks: ["all"]
issues:
# This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like
# (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go
# friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't
# supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms,
# while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change.
exclude-rules:
- path: layer.go
linters:
- stylecheck
Text: "ST1003:"
- path: hcsshim.go
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\hcs\\schema2\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\wclayer\\
linters:
- stylecheck
Text: "ST1003:"
- path: hcn\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\hcs\\schema1\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\hns\\
linters:
- stylecheck
Text: "ST1003:"
- path: ext4\\internal\\compactext4\\
linters:
- stylecheck
Text: "ST1003:"
- path: ext4\\internal\\format\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\guestrequest\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\guest\\prot\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\windevice\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\winapi\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\vmcompute\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\regstate\\
linters:
- stylecheck
Text: "ST1003:"
- path: internal\\hcserror\\
linters:
- stylecheck
Text: "ST1003:"

87
vendor/github.com/Microsoft/hcsshim/Makefile generated vendored Normal file
View File

@ -0,0 +1,87 @@
BASE:=base.tar.gz
GO:=go
GO_FLAGS:=-ldflags "-s -w" # strip Go binaries
CGO_ENABLED:=0
GOMODVENDOR:=
CFLAGS:=-O2 -Wall
LDFLAGS:=-static -s # strip C binaries
GO_FLAGS_EXTRA:=
ifeq "$(GOMODVENDOR)" "1"
GO_FLAGS_EXTRA += -mod=vendor
endif
GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA)
SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST))))
# The link aliases for gcstools
GCS_TOOLS=\
generichook
.PHONY: all always rootfs test
all: out/initrd.img out/rootfs.tar.gz
clean:
find -name '*.o' -print0 | xargs -0 -r rm
rm -rf bin deps rootfs out
test:
cd $(SRCROOT) && go test -v ./internal/guest/...
out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile
@mkdir -p out
rm -rf rootfs
mkdir -p rootfs/bin/
cp bin/init rootfs/
cp bin/vsockexec rootfs/bin/
cp bin/cmd/gcs rootfs/bin/
cp bin/cmd/gcstools rootfs/bin/
for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done
git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \
git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch
tar -zcf $@ -C rootfs .
rm -rf rootfs
out/rootfs.tar.gz: out/initrd.img
rm -rf rootfs-conv
mkdir rootfs-conv
gunzip -c out/initrd.img | (cd rootfs-conv && cpio -imd)
tar -zcf $@ -C rootfs-conv .
rm -rf rootfs-conv
out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh
$(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed
gzip -c out/initrd.img.uncompressed > $@
rm out/initrd.img.uncompressed
-include deps/cmd/gcs.gomake
-include deps/cmd/gcstools.gomake
# Implicit rule for includes that define Go targets.
%.gomake: $(SRCROOT)/Makefile
@mkdir -p $(dir $@)
@/bin/echo $(@:deps/%.gomake=bin/%): $(SRCROOT)/hack/gomakedeps.sh > $@.new
@/bin/echo -e '\t@mkdir -p $$(dir $$@) $(dir $@)' >> $@.new
@/bin/echo -e '\t$$(GO_BUILD) -o $$@.new $$(SRCROOT)/$$(@:bin/%=%)' >> $@.new
@/bin/echo -e '\tGO="$(GO)" $$(SRCROOT)/hack/gomakedeps.sh $$@ $$(SRCROOT)/$$(@:bin/%=%) $$(GO_FLAGS) $$(GO_FLAGS_EXTRA) > $(@:%.gomake=%.godeps).new' >> $@.new
@/bin/echo -e '\tmv $(@:%.gomake=%.godeps).new $(@:%.gomake=%.godeps)' >> $@.new
@/bin/echo -e '\tmv $$@.new $$@' >> $@.new
@/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new
mv $@.new $@
VPATH=$(SRCROOT)
bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o
@mkdir -p bin
$(CC) $(LDFLAGS) -o $@ $^
bin/init: init/init.o vsockexec/vsock.o
@mkdir -p bin
$(CC) $(LDFLAGS) -o $@ $^
%.o: %.c
@mkdir -p $(dir $@)
$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<

View File

@ -2,13 +2,67 @@
[![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster) [![Build status](https://github.com/microsoft/hcsshim/actions/workflows/ci.yml/badge.svg?branch=master)](https://github.com/microsoft/hcsshim/actions?query=branch%3Amaster)
This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS). This package contains the Golang interface for using the Windows [Host Compute Service](https://techcommunity.microsoft.com/t5/containers/introducing-the-host-compute-service-hcs/ba-p/382332) (HCS) to launch and manage [Windows Containers](https://docs.microsoft.com/en-us/virtualization/windowscontainers/about/). It also contains other helpers and functions for managing Windows Containers such as the Golang interface for the Host Network Service (HNS), as well as code for the [guest agent](./internal/guest/README.md) (commonly referred to as the GCS or Guest Compute Service in the codebase) used to support running Linux Hyper-V containers.
It is primarily used in the [Moby Project](https://github.com/moby/moby), but it can be freely used by other projects as well. It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd](https://github.com/containerd/containerd) projects, but it can be freely used by other projects as well.
## Building
While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).
### Linux Hyper-V Container Guest Agent
To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs.
```powershell
C:\> $env:GOOS="linux"
C:\> go build .\cmd\gcs\
```
or on a Linux machine
```sh
> go build ./cmd/gcs
```
If you want it to be packaged inside of a rootfs to boot with alongside all of the other tools then you'll need to provide a rootfs that it can be packaged inside of. An easy way is to export the rootfs of a container.
```sh
docker pull busybox
docker run --name base_image_container busybox
docker export base_image_container | gzip > base.tar.gz
BASE=./base.tar.gz
make all
```
If the build is successful, in the `./out` folder you should see:
```sh
> ls ./out/
delta.tar.gz initrd.img rootfs.tar.gz
```
### Containerd Shim
For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md.
Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.
```powershell
C:\> $env:GOOS="windows"
C:\> go build .\cmd\containerd-shim-runhcs-v1
```
Then place the binary in the same directory that Containerd is located at in your environment. A default Containerd configuration file can be generated by running:
```powershell
.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii
```
This config file will already have the shim set as the default runtime for cri interactions.
To trial using the shim out with ctr.exe:
```powershell
C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!"
```
## Contributing ## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com. the rights to use your contribution. For details, visit https://cla.microsoft.com.
@ -16,7 +70,27 @@ When you submit a pull request, a CLA-bot will automatically determine whether y
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA. provided by the bot. You will only need to do this once across all repos using our CLA.
We also ask that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to certify they either authored the work themselves or otherwise have permission to use it in this project. We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to
certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for
more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure
that all commits in a given PR are signed-off.
### Test Directory (Important to note)
This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this
project is being vendored/used as a library. Some of these dependencies were only being used for tests, so the /test directory in this project also has
its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file
has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project
(which is the repo itself on your disk).
```
replace (
github.com/Microsoft/hcsshim => ../
)
```
Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the
CI in this project will check if the files are out of date and will fail if this is true.
## Code of Conduct ## Code of Conduct

View File

@ -3,26 +3,34 @@ module github.com/Microsoft/hcsshim
go 1.13 go 1.13
require ( require (
github.com/BurntSushi/toml v0.3.1
github.com/Microsoft/go-winio v0.4.17 github.com/Microsoft/go-winio v0.4.17
github.com/cenkalti/backoff/v4 v4.1.1 github.com/cenkalti/backoff/v4 v4.1.1
github.com/containerd/cgroups v1.0.1 github.com/containerd/cgroups v1.0.1
github.com/containerd/console v1.0.2 github.com/containerd/console v1.0.2
github.com/containerd/containerd v1.4.9 github.com/containerd/containerd v1.5.7
github.com/containerd/continuity v0.1.0 // indirect
github.com/containerd/fifo v1.0.0 // indirect
github.com/containerd/go-runc v1.0.0 github.com/containerd/go-runc v1.0.0
github.com/containerd/ttrpc v1.1.0 github.com/containerd/ttrpc v1.1.0
github.com/containerd/typeurl v1.0.2 github.com/containerd/typeurl v1.0.2
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d github.com/golang/mock v1.6.0
github.com/google/go-cmp v0.5.6
github.com/google/go-containerregistry v0.5.1
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3
github.com/mattn/go-shellwords v1.0.6
github.com/opencontainers/runc v1.0.2
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.8.1 github.com/sirupsen/logrus v1.8.1
github.com/urfave/cli v1.22.2 github.com/urfave/cli v1.22.2
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae
go.etcd.io/bbolt v1.3.6
go.opencensus.io v0.22.3 go.opencensus.io v0.22.3
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
golang.org/x/sys v0.0.0-20210324051608-47abb6519492 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
google.golang.org/grpc v1.33.2 golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
gotest.tools/v3 v3.0.3 // indirect google.golang.org/grpc v1.40.0
) )
replace ( replace (

File diff suppressed because it is too large Load Diff

View File

@ -78,6 +78,13 @@ var (
// ErrNotSupported is an error encountered when hcs doesn't support the request // ErrNotSupported is an error encountered when hcs doesn't support the request
ErrPlatformNotSupported = errors.New("unsupported platform request") ErrPlatformNotSupported = errors.New("unsupported platform request")
// ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped.
ErrProcessAlreadyStopped = syscall.Errno(0x8037011f)
// ErrInvalidHandle is an error that can be encountrered when querying the properties of a compute system when the handle to that
// compute system has already been closed.
ErrInvalidHandle = syscall.Errno(0x6)
) )
type ErrorEvent struct { type ErrorEvent struct {
@ -249,6 +256,14 @@ func IsNotExist(err error) bool {
err == ErrElementNotFound err == ErrElementNotFound
} }
// IsErrorInvalidHandle checks whether the error is the result of an operation carried
// out on a handle that is invalid/closed. This error popped up while trying to query
// stats on a container in the process of being stopped.
func IsErrorInvalidHandle(err error) bool {
err = getInnerError(err)
return err == ErrInvalidHandle
}
// IsAlreadyClosed checks if an error is caused by the Container or Process having been // IsAlreadyClosed checks if an error is caused by the Container or Process having been
// already closed by a call to the Close() method. // already closed by a call to the Close() method.
func IsAlreadyClosed(err error) bool { func IsAlreadyClosed(err error) bool {
@ -281,6 +296,7 @@ func IsTimeout(err error) bool {
func IsAlreadyStopped(err error) bool { func IsAlreadyStopped(err error) bool {
err = getInnerError(err) err = getInnerError(err)
return err == ErrVmcomputeAlreadyStopped || return err == ErrVmcomputeAlreadyStopped ||
err == ErrProcessAlreadyStopped ||
err == ErrElementNotFound err == ErrElementNotFound
} }

View File

@ -3,7 +3,9 @@ package hcs
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"errors"
"io" "io"
"os"
"sync" "sync"
"syscall" "syscall"
"time" "time"
@ -16,16 +18,17 @@ import (
// ContainerError is an error encountered in HCS // ContainerError is an error encountered in HCS
type Process struct { type Process struct {
handleLock sync.RWMutex handleLock sync.RWMutex
handle vmcompute.HcsProcess handle vmcompute.HcsProcess
processID int processID int
system *System system *System
hasCachedStdio bool hasCachedStdio bool
stdioLock sync.Mutex stdioLock sync.Mutex
stdin io.WriteCloser stdin io.WriteCloser
stdout io.ReadCloser stdout io.ReadCloser
stderr io.ReadCloser stderr io.ReadCloser
callbackNumber uintptr callbackNumber uintptr
killSignalDelivered bool
closedWaitOnce sync.Once closedWaitOnce sync.Once
waitBlock chan struct{} waitBlock chan struct{}
@ -149,12 +152,45 @@ func (process *Process) Kill(ctx context.Context) (bool, error) {
return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) return false, makeProcessError(process, operation, ErrAlreadyClosed, nil)
} }
if process.killSignalDelivered {
// A kill signal has already been sent to this process. Sending a second
// one offers no real benefit, as processes cannot stop themselves from
// being terminated, once a TerminateProcess has been issued. Sending a
// second kill may result in a number of errors (two of which detailed bellow)
// and which we can avoid handling.
return true, nil
}
resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle) resultJSON, err := vmcompute.HcsTerminateProcess(ctx, process.handle)
if err != nil {
// We still need to check these two cases, as processes may still be killed by an
// external actor (human operator, OOM, random script etc).
if errors.Is(err, os.ErrPermission) || IsAlreadyStopped(err) {
// There are two cases where it should be safe to ignore an error returned
// by HcsTerminateProcess. The first one is cause by the fact that
// HcsTerminateProcess ends up calling TerminateProcess in the context
// of a container. According to the TerminateProcess documentation:
// https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-terminateprocess#remarks
// After a process has terminated, call to TerminateProcess with open
// handles to the process fails with ERROR_ACCESS_DENIED (5) error code.
// It's safe to ignore this error here. HCS should always have permissions
// to kill processes inside any container. So an ERROR_ACCESS_DENIED
// is unlikely to be anything else than what the ending remarks in the
// documentation states.
//
// The second case is generated by hcs itself, if for any reason HcsTerminateProcess
// is called twice in a very short amount of time. In such cases, hcs may return
// HCS_E_PROCESS_ALREADY_STOPPED.
return true, nil
}
}
events := processHcsResult(ctx, resultJSON) events := processHcsResult(ctx, resultJSON)
delivered, err := process.processSignalResult(ctx, err) delivered, err := process.processSignalResult(ctx, err)
if err != nil { if err != nil {
err = makeProcessError(process, operation, err, events) err = makeProcessError(process, operation, err, events)
} }
process.killSignalDelivered = delivered
return delivered, err return delivered, err
} }

View File

@ -27,4 +27,10 @@ type Attachment struct {
CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"` CaptureIoAttributionContext bool `json:"CaptureIoAttributionContext,omitempty"`
ReadOnly bool `json:"ReadOnly,omitempty"` ReadOnly bool `json:"ReadOnly,omitempty"`
SupportCompressedVolumes bool `json:"SupportCompressedVolumes,omitempty"`
AlwaysAllowSparseFiles bool `json:"AlwaysAllowSparseFiles,omitempty"`
ExtensibleVirtualDiskType string `json:"ExtensibleVirtualDiskType,omitempty"`
} }

View File

@ -31,4 +31,6 @@ type Container struct {
RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"` RegistryChanges *RegistryChanges `json:"RegistryChanges,omitempty"`
AssignedDevices []Device `json:"AssignedDevices,omitempty"` AssignedDevices []Device `json:"AssignedDevices,omitempty"`
AdditionalDeviceNamespace *ContainerDefinitionDevice `json:"AdditionalDeviceNamespace,omitempty"`
} }

View File

@ -14,5 +14,5 @@ type CpuGroupConfig struct {
Affinity *CpuGroupAffinity `json:"Affinity,omitempty"` Affinity *CpuGroupAffinity `json:"Affinity,omitempty"`
GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"` GroupProperties []CpuGroupProperty `json:"GroupProperties,omitempty"`
// Hypervisor CPU group IDs exposed to clients // Hypervisor CPU group IDs exposed to clients
HypervisorGroupId int32 `json:"HypervisorGroupId,omitempty"` HypervisorGroupId uint64 `json:"HypervisorGroupId,omitempty"`
} }

View File

@ -12,9 +12,9 @@ package hcsschema
type DeviceType string type DeviceType string
const ( const (
ClassGUID DeviceType = "ClassGuid" ClassGUID DeviceType = "ClassGuid"
DeviceInstance DeviceType = "DeviceInstance" DeviceInstanceID DeviceType = "DeviceInstance"
GPUMirror DeviceType = "GpuMirror" GPUMirror DeviceType = "GpuMirror"
) )
type Device struct { type Device struct {
@ -22,6 +22,6 @@ type Device struct {
Type DeviceType `json:"Type,omitempty"` Type DeviceType `json:"Type,omitempty"`
// The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid. // The interface class guid of the device interfaces to assign to the container. Only used when Type is ClassGuid.
InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"` InterfaceClassGuid string `json:"InterfaceClassGuid,omitempty"`
// The location path of the device to assign to the container. Only used when Type is DeviceInstance. // The location path of the device to assign to the container. Only used when Type is DeviceInstanceID.
LocationPath string `json:"LocationPath,omitempty"` LocationPath string `json:"LocationPath,omitempty"`
} }

View File

@ -0,0 +1,14 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type ContainerDefinitionDevice struct {
DeviceExtension []DeviceExtension `json:"device_extension,omitempty"`
}

View File

@ -0,0 +1,15 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type DeviceCategory struct {
Name string `json:"name,omitempty"`
InterfaceClass []InterfaceClass `json:"interface_class,omitempty"`
}

View File

@ -0,0 +1,15 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type DeviceExtension struct {
DeviceCategory *DeviceCategory `json:"device_category,omitempty"`
Namespace *DeviceExtensionNamespace `json:"namespace,omitempty"`
}

View File

@ -0,0 +1,17 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type DeviceInstance struct {
Id string `json:"id,omitempty"`
LocationPath string `json:"location_path,omitempty"`
PortName string `json:"port_name,omitempty"`
InterfaceClass []InterfaceClass `json:"interface_class,omitempty"`
}

View File

@ -0,0 +1,16 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type DeviceNamespace struct {
RequiresDriverstore bool `json:"requires_driverstore,omitempty"`
DeviceCategory []DeviceCategory `json:"device_category,omitempty"`
DeviceInstance []DeviceInstance `json:"device_instance,omitempty"`
}

View File

@ -0,0 +1,16 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type InterfaceClass struct {
Type_ string `json:"type,omitempty"`
Identifier string `json:"identifier,omitempty"`
Recurse bool `json:"recurse,omitempty"`
}

View File

@ -0,0 +1,15 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type DeviceExtensionNamespace struct {
Ob *ObjectNamespace `json:"ob,omitempty"`
Device *DeviceNamespace `json:"device,omitempty"`
}

View File

@ -0,0 +1,18 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type ObjectDirectory struct {
Name string `json:"name,omitempty"`
Clonesd string `json:"clonesd,omitempty"`
Shadow string `json:"shadow,omitempty"`
Symlink []ObjectSymlink `json:"symlink,omitempty"`
Objdir []ObjectDirectory `json:"objdir,omitempty"`
}

View File

@ -0,0 +1,16 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type ObjectNamespace struct {
Shadow string `json:"shadow,omitempty"`
Symlink []ObjectSymlink `json:"symlink,omitempty"`
Objdir []ObjectDirectory `json:"objdir,omitempty"`
}

View File

@ -0,0 +1,18 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type ObjectSymlink struct {
Name string `json:"name,omitempty"`
Path string `json:"path,omitempty"`
Scope string `json:"scope,omitempty"`
Pathtoclone string `json:"pathtoclone,omitempty"`
AccessMask int32 `json:"access_mask,omitempty"`
}

View File

@ -0,0 +1,15 @@
/*
* HCS API
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* API version: 2.4
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package hcsschema
type VirtualPMemMapping struct {
HostPath string `json:"HostPath,omitempty"`
ImageFormat string `json:"ImageFormat,omitempty"`
}

View File

@ -20,6 +20,7 @@ type HNSEndpoint struct {
IPv6Address net.IP `json:",omitempty"` IPv6Address net.IP `json:",omitempty"`
DNSSuffix string `json:",omitempty"` DNSSuffix string `json:",omitempty"`
DNSServerList string `json:",omitempty"` DNSServerList string `json:",omitempty"`
DNSDomain string `json:",omitempty"`
GatewayAddress string `json:",omitempty"` GatewayAddress string `json:",omitempty"`
GatewayAddressV6 string `json:",omitempty"` GatewayAddressV6 string `json:",omitempty"`
EnableInternalDNS bool `json:",omitempty"` EnableInternalDNS bool `json:",omitempty"`

View File

@ -22,9 +22,9 @@ const (
type NatPolicy struct { type NatPolicy struct {
Type PolicyType `json:"Type"` Type PolicyType `json:"Type"`
Protocol string Protocol string `json:",omitempty"`
InternalPort uint16 InternalPort uint16 `json:",omitempty"`
ExternalPort uint16 ExternalPort uint16 `json:",omitempty"`
} }
type QosPolicy struct { type QosPolicy struct {
@ -88,20 +88,20 @@ const (
type ACLPolicy struct { type ACLPolicy struct {
Type PolicyType `json:"Type"` Type PolicyType `json:"Type"`
Id string `json:"Id,omitempty"` Id string `json:"Id,omitempty"`
Protocol uint16 Protocol uint16 `json:",omitempty"`
Protocols string `json:"Protocols,omitempty"` Protocols string `json:"Protocols,omitempty"`
InternalPort uint16 InternalPort uint16 `json:",omitempty"`
Action ActionType Action ActionType
Direction DirectionType Direction DirectionType
LocalAddresses string LocalAddresses string `json:",omitempty"`
RemoteAddresses string RemoteAddresses string `json:",omitempty"`
LocalPorts string `json:"LocalPorts,omitempty"` LocalPorts string `json:"LocalPorts,omitempty"`
LocalPort uint16 LocalPort uint16 `json:",omitempty"`
RemotePorts string `json:"RemotePorts,omitempty"` RemotePorts string `json:"RemotePorts,omitempty"`
RemotePort uint16 RemotePort uint16 `json:",omitempty"`
RuleType RuleType `json:"RuleType,omitempty"` RuleType RuleType `json:"RuleType,omitempty"`
Priority uint16 Priority uint16 `json:",omitempty"`
ServiceName string ServiceName string `json:",omitempty"`
} }
type Policy struct { type Policy struct {

View File

@ -21,7 +21,7 @@ func ActivateLayer(ctx context.Context, path string) (err error) {
err = activateLayer(&stdDriverInfo, path) err = activateLayer(&stdDriverInfo, path)
if err != nil { if err != nil {
return hcserror.New(err, title+" - failed", "") return hcserror.New(err, title, "")
} }
return nil return nil
} }

View File

@ -21,7 +21,7 @@ func CreateLayer(ctx context.Context, path, parent string) (err error) {
err = createLayer(&stdDriverInfo, path, parent) err = createLayer(&stdDriverInfo, path, parent)
if err != nil { if err != nil {
return hcserror.New(err, title+" - failed", "") return hcserror.New(err, title, "")
} }
return nil return nil
} }

View File

@ -28,7 +28,7 @@ func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []str
err = createSandboxLayer(&stdDriverInfo, path, 0, layers) err = createSandboxLayer(&stdDriverInfo, path, 0, layers)
if err != nil { if err != nil {
return hcserror.New(err, title+" - failed", "") return hcserror.New(err, title, "")
} }
return nil return nil
} }

View File

@ -19,7 +19,7 @@ func DestroyLayer(ctx context.Context, path string) (err error) {
err = destroyLayer(&stdDriverInfo, path) err = destroyLayer(&stdDriverInfo, path)
if err != nil { if err != nil {
return hcserror.New(err, title+" - failed", "") return hcserror.New(err, title, "")
} }
return nil return nil
} }

View File

@ -25,7 +25,7 @@ func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error
err = expandSandboxSize(&stdDriverInfo, path, size) err = expandSandboxSize(&stdDriverInfo, path, size)
if err != nil { if err != nil {
return hcserror.New(err, title+" - failed", "") return hcserror.New(err, title, "")
} }
// Manually expand the volume now in order to work around bugs in 19H1 and // Manually expand the volume now in order to work around bugs in 19H1 and

View File

@ -35,7 +35,7 @@ func ExportLayer(ctx context.Context, path string, exportFolderPath string, pare
err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers) err = exportLayer(&stdDriverInfo, path, exportFolderPath, layers)
if err != nil { if err != nil {
return hcserror.New(err, title+" - failed", "") return hcserror.New(err, title, "")
} }
return nil return nil
} }

View File

@ -27,7 +27,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
log.G(ctx).Debug("Calling proc (1)") log.G(ctx).Debug("Calling proc (1)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil) err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, nil)
if err != nil { if err != nil {
return "", hcserror.New(err, title+" - failed", "(first call)") return "", hcserror.New(err, title, "(first call)")
} }
// Allocate a mount path of the returned length. // Allocate a mount path of the returned length.
@ -41,7 +41,7 @@ func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) {
log.G(ctx).Debug("Calling proc (2)") log.G(ctx).Debug("Calling proc (2)")
err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0]) err = getLayerMountPath(&stdDriverInfo, path, &mountPathLength, &mountPathp[0])
if err != nil { if err != nil {
return "", hcserror.New(err, title+" - failed", "(second call)") return "", hcserror.New(err, title, "(second call)")
} }
mountPath := syscall.UTF16ToString(mountPathp[0:]) mountPath := syscall.UTF16ToString(mountPathp[0:])

View File

@ -21,7 +21,7 @@ func GetSharedBaseImages(ctx context.Context) (_ string, err error) {
var buffer *uint16 var buffer *uint16
err = getBaseImages(&buffer) err = getBaseImages(&buffer)
if err != nil { if err != nil {
return "", hcserror.New(err, title+" - failed", "") return "", hcserror.New(err, title, "")
} }
imageData := interop.ConvertAndFreeCoTaskMemString(buffer) imageData := interop.ConvertAndFreeCoTaskMemString(buffer)
span.AddAttributes(trace.StringAttribute("imageData", imageData)) span.AddAttributes(trace.StringAttribute("imageData", imageData))

View File

@ -20,7 +20,7 @@ func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error
err = grantVmAccess(vmid, filepath) err = grantVmAccess(vmid, filepath)
if err != nil { if err != nil {
return hcserror.New(err, title+" - failed", "") return hcserror.New(err, title, "")
} }
return nil return nil
} }

View File

@ -36,7 +36,7 @@ func ImportLayer(ctx context.Context, path string, importFolderPath string, pare
err = importLayer(&stdDriverInfo, path, importFolderPath, layers) err = importLayer(&stdDriverInfo, path, importFolderPath, layers)
if err != nil { if err != nil {
return hcserror.New(err, title+" - failed", "") return hcserror.New(err, title, "")
} }
return nil return nil
} }

View File

@ -21,7 +21,7 @@ func LayerExists(ctx context.Context, path string) (_ bool, err error) {
var exists uint32 var exists uint32
err = layerExists(&stdDriverInfo, path, &exists) err = layerExists(&stdDriverInfo, path, &exists)
if err != nil { if err != nil {
return false, hcserror.New(err, title+" - failed", "") return false, hcserror.New(err, title, "")
} }
span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0)) span.AddAttributes(trace.BoolAttribute("layer-exists", exists != 0))
return exists != 0, nil return exists != 0, nil

View File

@ -76,7 +76,7 @@ func readTombstones(path string) (map[string]([]string), error) {
defer tf.Close() defer tf.Close()
s := bufio.NewScanner(tf) s := bufio.NewScanner(tf)
if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" {
return nil, errors.New("Invalid tombstones file") return nil, errors.New("invalid tombstones file")
} }
ts := make(map[string]([]string)) ts := make(map[string]([]string))

View File

@ -17,12 +17,12 @@ func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) {
ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
defer span.End() defer span.End()
defer func() { oc.SetSpanStatus(span, err) }() defer func() { oc.SetSpanStatus(span, err) }()
span.AddAttributes(trace.StringAttribute("name", name)) span.AddAttributes(trace.StringAttribute("objectName", name))
var id guid.GUID var id guid.GUID
err = nameToGuid(name, &id) err = nameToGuid(name, &id)
if err != nil { if err != nil {
return guid.GUID{}, hcserror.New(err, title+" - failed", "") return guid.GUID{}, hcserror.New(err, title, "")
} }
span.AddAttributes(trace.StringAttribute("guid", id.String())) span.AddAttributes(trace.StringAttribute("guid", id.String()))
return id, nil return id, nil

View File

@ -38,7 +38,7 @@ func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (
defer prepareLayerLock.Unlock() defer prepareLayerLock.Unlock()
err = prepareLayer(&stdDriverInfo, path, layers) err = prepareLayer(&stdDriverInfo, path, layers)
if err != nil { if err != nil {
return hcserror.New(err, title+" - failed", "") return hcserror.New(err, title, "")
} }
return nil return nil
} }

View File

@ -19,7 +19,7 @@ func UnprepareLayer(ctx context.Context, path string) (err error) {
err = unprepareLayer(&stdDriverInfo, path) err = unprepareLayer(&stdDriverInfo, path)
if err != nil { if err != nil {
return hcserror.New(err, title+" - failed", "") return hcserror.New(err, title, "")
} }
return nil return nil
} }

View File

@ -0,0 +1,44 @@
package winapi
import (
"unsafe"
"golang.org/x/sys/windows"
)
const PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
// CreatePseudoConsole creates a windows pseudo console.
func CreatePseudoConsole(size windows.Coord, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) error {
// We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), hInput, hOutput, 0, hpcon)
}
// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
func ResizePseudoConsole(hpcon windows.Handle, size windows.Coord) error {
// We need this wrapper as the function takes a COORD struct and not a pointer to one, so we need to cast to something beforehand.
return resizePseudoConsole(hpcon, *((*uint32)(unsafe.Pointer(&size))))
}
// HRESULT WINAPI CreatePseudoConsole(
// _In_ COORD size,
// _In_ HANDLE hInput,
// _In_ HANDLE hOutput,
// _In_ DWORD dwFlags,
// _Out_ HPCON* phPC
// );
//
//sys createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) = kernel32.CreatePseudoConsole
// void WINAPI ClosePseudoConsole(
// _In_ HPCON hPC
// );
//
//sys ClosePseudoConsole(hpc windows.Handle) = kernel32.ClosePseudoConsole
// HRESULT WINAPI ResizePseudoConsole(
// _In_ HPCON hPC ,
// _In_ COORD size
// );
//
//sys resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole

View File

@ -1,27 +1,4 @@
package winapi package winapi
// VOID RtlMoveMemory(
// _Out_ VOID UNALIGNED *Destination,
// _In_ const VOID UNALIGNED *Source,
// _In_ SIZE_T Length
// );
//sys RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) = kernel32.RtlMoveMemory
//sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc //sys LocalAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
//sys LocalFree(ptr uintptr) = kernel32.LocalFree //sys LocalFree(ptr uintptr) = kernel32.LocalFree
// BOOL QueryWorkingSet(
// HANDLE hProcess,
// PVOID pv,
// DWORD cb
// );
//sys QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSet
type PSAPI_WORKING_SET_INFORMATION struct {
NumberOfEntries uintptr
WorkingSetInfo [1]PSAPI_WORKING_SET_BLOCK
}
type PSAPI_WORKING_SET_BLOCK struct {
Flags uintptr
}

View File

@ -2,9 +2,7 @@ package winapi
const PROCESS_ALL_ACCESS uint32 = 2097151 const PROCESS_ALL_ACCESS uint32 = 2097151
// DWORD GetProcessImageFileNameW( const (
// HANDLE hProcess, PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
// LPWSTR lpImageFileName, PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
// DWORD nSize )
// );
//sys GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) = kernel32.GetProcessImageFileNameW

View File

@ -20,36 +20,41 @@ func Uint16BufferToSlice(buffer *uint16, bufferLength int) (result []uint16) {
return return
} }
// UnicodeString corresponds to UNICODE_STRING win32 struct defined here
// https://docs.microsoft.com/en-us/windows/win32/api/ntdef/ns-ntdef-_unicode_string
type UnicodeString struct { type UnicodeString struct {
Length uint16 Length uint16
MaximumLength uint16 MaximumLength uint16
Buffer *uint16 Buffer *uint16
} }
// NTSTRSAFE_UNICODE_STRING_MAX_CCH is a constant defined in ntstrsafe.h. This value
// denotes the maximum number of wide chars a path can have.
const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767
//String converts a UnicodeString to a golang string //String converts a UnicodeString to a golang string
func (uni UnicodeString) String() string { func (uni UnicodeString) String() string {
// UnicodeString is not guaranteed to be null terminated, therefore // UnicodeString is not guaranteed to be null terminated, therefore
// use the UnicodeString's Length field // use the UnicodeString's Length field
return syscall.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2))) return windows.UTF16ToString(Uint16BufferToSlice(uni.Buffer, int(uni.Length/2)))
} }
// NewUnicodeString allocates a new UnicodeString and copies `s` into // NewUnicodeString allocates a new UnicodeString and copies `s` into
// the buffer of the new UnicodeString. // the buffer of the new UnicodeString.
func NewUnicodeString(s string) (*UnicodeString, error) { func NewUnicodeString(s string) (*UnicodeString, error) {
// Get length of original `s` to use in the UnicodeString since the `buf`
// created later will have an additional trailing null character
length := len(s)
if length > 32767 {
return nil, syscall.ENAMETOOLONG
}
buf, err := windows.UTF16FromString(s) buf, err := windows.UTF16FromString(s)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if len(buf) > NTSTRSAFE_UNICODE_STRING_MAX_CCH {
return nil, syscall.ENAMETOOLONG
}
uni := &UnicodeString{ uni := &UnicodeString{
Length: uint16(length * 2), // The length is in bytes and should not include the trailing null character.
MaximumLength: uint16(length * 2), Length: uint16((len(buf) - 1) * 2),
MaximumLength: uint16((len(buf) - 1) * 2),
Buffer: &buf[0], Buffer: &buf[0],
} }
return uni, nil return uni, nil

View File

@ -2,4 +2,4 @@
// be thought of as an extension to golang.org/x/sys/windows. // be thought of as an extension to golang.org/x/sys/windows.
package winapi package winapi
//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go //go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go

View File

@ -37,13 +37,15 @@ func errnoErr(e syscall.Errno) error {
} }
var ( var (
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modntdll = windows.NewLazySystemDLL("ntdll.dll") modntdll = windows.NewLazySystemDLL("ntdll.dll")
modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll")
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
modpsapi = windows.NewLazySystemDLL("psapi.dll")
modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll")
procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole")
procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole")
procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole")
procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation")
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
procSearchPathW = modkernel32.NewProc("SearchPathW") procSearchPathW = modkernel32.NewProc("SearchPathW")
@ -57,11 +59,8 @@ var (
procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject")
procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject")
procLogonUserW = modadvapi32.NewProc("LogonUserW") procLogonUserW = modadvapi32.NewProc("LogonUserW")
procRtlMoveMemory = modkernel32.NewProc("RtlMoveMemory")
procLocalAlloc = modkernel32.NewProc("LocalAlloc") procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree") procLocalFree = modkernel32.NewProc("LocalFree")
procQueryWorkingSet = modpsapi.NewProc("QueryWorkingSet")
procGetProcessImageFileNameW = modkernel32.NewProc("GetProcessImageFileNameW")
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
@ -74,6 +73,33 @@ var (
procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError")
) )
func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) {
r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func ClosePseudoConsole(hpc windows.Handle) {
syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0)
return
}
func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0)
if int32(r0) < 0 {
if r0&0x1fff0000 == 0x00070000 {
r0 &= 0xffff
}
hr = syscall.Errno(r0)
}
return
}
func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) { func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
status = uint32(r0) status = uint32(r0)
@ -219,18 +245,6 @@ func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uin
return return
} }
func RtlMoveMemory(destination *byte, source *byte, length uintptr) (err error) {
r1, _, e1 := syscall.Syscall(procRtlMoveMemory.Addr(), 3, uintptr(unsafe.Pointer(destination)), uintptr(unsafe.Pointer(source)), uintptr(length))
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func LocalAlloc(flags uint32, size int) (ptr uintptr) { func LocalAlloc(flags uint32, size int) (ptr uintptr) {
r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0)
ptr = uintptr(r0) ptr = uintptr(r0)
@ -242,31 +256,6 @@ func LocalFree(ptr uintptr) {
return return
} }
func QueryWorkingSet(handle windows.Handle, pv uintptr, cb uint32) (err error) {
r1, _, e1 := syscall.Syscall(procQueryWorkingSet.Addr(), 3, uintptr(handle), uintptr(pv), uintptr(cb))
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func GetProcessImageFileName(hProcess windows.Handle, imageFileName *uint16, nSize uint32) (size uint32, err error) {
r0, _, e1 := syscall.Syscall(procGetProcessImageFileNameW.Addr(), 3, uintptr(hProcess), uintptr(unsafe.Pointer(imageFileName)), uintptr(nSize))
size = uint32(r0)
if size == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
amount = uint32(r0) amount = uint32(r0)

View File

@ -35,4 +35,16 @@ const (
// V20H2 corresponds to Windows Server 20H2 (semi-annual channel). // V20H2 corresponds to Windows Server 20H2 (semi-annual channel).
V20H2 = 19042 V20H2 = 19042
// V21H1 corresponds to Windows Server 21H1 (semi-annual channel).
V21H1 = 19043
// V21H2Win10 corresponds to Windows 10 (November 2021 Update).
V21H2Win10 = 19044
// V21H2Server corresponds to Windows Server 2022 (ltsc2022).
V21H2Server = 20348
// V21H2Win11 corresponds to Windows 11 (original release).
V21H2Win11 = 22000
) )

29
vendor/github.com/asottile/dockerfile/.coveragerc generated vendored Normal file
View File

@ -0,0 +1,29 @@
[run]
branch = True
source =
.
omit =
.tox/*
/usr/*
setup.py
# Don't complain if non-runnable code isn't run
*/__main__.py
[report]
exclude_lines =
# Have to re-enable the standard pragma
\#\s*pragma: no cover
# Don't complain if tests don't hit defensive assertion code:
^\s*raise AssertionError\b
^\s*raise NotImplementedError\b
^\s*return NotImplemented\b
^\s*raise$
# Don't complain if non-runnable code isn't run:
^if __name__ == ['"]__main__['"]:$
[html]
directory = coverage-html
# vim:ft=dosini

9
vendor/github.com/asottile/dockerfile/.gitignore generated vendored Normal file
View File

@ -0,0 +1,9 @@
*.pyc
/*.egg-info
/.coverage
/.eggs
/.mypy_cache
/.pytest_cache
/.tox
/build
/dist

3
vendor/github.com/asottile/dockerfile/.gitmodules generated vendored Normal file
View File

@ -0,0 +1,3 @@
[submodule "vendor/github.com/moby/buildkit"]
path = vendor/github.com/moby/buildkit
url = https://github.com/moby/buildkit

View File

@ -0,0 +1,48 @@
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.5.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: debug-statements
- id: name-tests-test
- id: requirements-txt-fixer
- repo: https://gitlab.com/pycqa/flake8
rev: 3.7.9
hooks:
- id: flake8
- repo: https://github.com/pre-commit/mirrors-autopep8
rev: v1.5
hooks:
- id: autopep8
- repo: https://github.com/asottile/reorder_python_imports
rev: v1.9.0
hooks:
- id: reorder-python-imports
args: [--py3-plus]
- repo: https://github.com/asottile/pyupgrade
rev: v1.26.2
hooks:
- id: pyupgrade
args: [--py36-plus]
- repo: https://github.com/asottile/add-trailing-comma
rev: v1.5.0
hooks:
- id: add-trailing-comma
args: [--py36-plus]
- repo: https://github.com/asottile/setup-cfg-fmt
rev: v1.6.0
hooks:
- id: setup-cfg-fmt
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v0.761
hooks:
- id: mypy
- repo: local
hooks:
- id: gofmt
name: gofmt
language: system
entry: gofmt -l -w
files: \.go$

19
vendor/github.com/asottile/dockerfile/LICENSE generated vendored Normal file
View File

@ -0,0 +1,19 @@
Copyright (c) 2017 Anthony Sottile
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

4
vendor/github.com/asottile/dockerfile/MANIFEST.in generated vendored Normal file
View File

@ -0,0 +1,4 @@
include *.go pylib/*.c pylib/*.go
recursive-include vendor/github.com/moby/buildkit/frontend/dockerfile/command *.go
recursive-include vendor/github.com/moby/buildkit/frontend/dockerfile/parser *.go
global-exclude *_test.go

79
vendor/github.com/asottile/dockerfile/README.md generated vendored Normal file
View File

@ -0,0 +1,79 @@
[![Build Status](https://asottile.visualstudio.com/asottile/_apis/build/status/asottile.dockerfile?branchName=master)](https://asottile.visualstudio.com/asottile/_build/latest?definitionId=14&branchName=master)
[![Build status](https://ci.appveyor.com/api/projects/status/l5kj12ysd49xul1l?svg=true)](https://ci.appveyor.com/project/asottile/dockerfile)
dockerfile
==========
The goal of this repository is to provide a wrapper around
[docker/docker](https://github.com/docker/docker)'s parser for dockerfiles.
## python library
### Installation
This project uses [setuptools-golang](https://github.com/asottile/setuptools-golang)
when built from source. To build from source you'll need a go compiler.
If you're using linux and sufficiently new pip (>=8.1) you should be able to
just download prebuilt manylinux1 wheels.
```
pip install dockerfile
```
### Usage
There's three api functions provided by this library:
#### `dockerfile.all_cmds()`
List all of the known dockerfile cmds.
```python
>>> dockerfile.all_cmds()
('add', 'arg', 'cmd', 'copy', 'entrypoint', 'env', 'expose', 'from', 'healthcheck', 'label', 'maintainer', 'onbuild', 'run', 'shell', 'stopsignal', 'user', 'volume', 'workdir')
```
#### `dockerfile.parse_file(filename)`
Parse a Dockerfile by filename.
Returns a `tuple` of `dockerfile.Command` objects representing each layer of
the Dockerfile.
Possible exceptions:
- `dockerfile.GoIOError`: The file could not be opened.
- `dockerfile.ParseError`: The Dockerfile was not parseable.
```python
>>> pprint.pprint(dockerfile.parse_file('testfiles/Dockerfile.ok'))
(Command(cmd='from', sub_cmd=None, json=False, original='FROM ubuntu:xenial', start_line=1, flags=(), value=('ubuntu:xenial',)),
Command(cmd='cmd', sub_cmd=None, json=True, original='CMD ["echo", "hi"]', start_line=2, flags=(), value=('echo', 'hi')))
```
#### `dockerfile.parse_string(s)`
Parse a dockerfile using a string.
Returns a `tuple` of `dockerfile.Command` objects representing each layer of
the Dockerfile.
Possible exceptions:
- `dockerfile.ParseError`: The Dockerfile was not parseable.
```python
>>> dockerfile.parse_string('FROM ubuntu:xenial')
(Command(cmd='from', sub_cmd=None, json=False, original='FROM ubuntu:xenial', start_line=1, flags=(), value=('ubuntu:xenial',)),)
```
## go library
Slightly more convenient than the api provided by docker/docker? Might not be
terribly useful -- the main point of this repository was a python wrapper.
### Installation
```
go get github.com/asottile/dockerfile
```
### Usage
[godoc](https://godoc.org/github.com/asottile/dockerfile)

16
vendor/github.com/asottile/dockerfile/appveyor.yml generated vendored Normal file
View File

@ -0,0 +1,16 @@
environment:
matrix:
- PYTHON: 'C:\Python37'
install:
- 'SET PATH=%PYTHON%;%PYTHON%\Scripts;C:\MinGW\bin;C:\go-x86\bin;%PATH%'
- 'SET GOROOT=C:\go-x86'
- git submodule update --init
- pip install pytest .
# Not a C# project
build: false
test_script: pytest tests
cache: '%LOCALAPPDATA%\pip\cache'

View File

@ -0,0 +1,42 @@
trigger:
branches:
include: [master, test-me-*]
tags:
include: ['*']
resources:
repositories:
- repository: self
checkoutOptions:
submodules: true
- repository: asottile
type: github
endpoint: github
name: asottile/azure-pipeline-templates
ref: refs/tags/v1.0.1
jobs:
- template: job--pre-commit.yml@asottile
- template: job--go-test.yml@asottile
parameters:
go_versions: ['1.12.17', '1.13.8']
os: linux
tests: '.' # only test the top level
- template: job--python-tox.yml@asottile
parameters:
toxenvs: [py36]
os: linux
name_postfix: _go_1_12
pre_test:
- task: GoTool@0
inputs:
version: '1.12.17'
- template: job--python-tox.yml@asottile
parameters:
toxenvs: [pypy3, py36, py37, py38]
os: linux
name_postfix: _go_1_13
pre_test:
- task: GoTool@0
inputs:
version: '1.13.8'

95
vendor/github.com/asottile/dockerfile/parse.go generated vendored Normal file
View File

@ -0,0 +1,95 @@
package dockerfile
import (
"io"
"os"
"sort"
"github.com/moby/buildkit/frontend/dockerfile/command"
"github.com/moby/buildkit/frontend/dockerfile/parser"
)
// Represents a single line (layer) in a Dockerfile.
// For example `FROM ubuntu:xenial`
type Command struct {
Cmd string // lowercased command name (ex: `from`)
SubCmd string // for ONBUILD only this holds the sub-command
Json bool // whether the value is written in json form
Original string // The original source line
StartLine int // The original source line number which starts this command
EndLine int // The original source line number which ends this command
Flags []string // Any flags such as `--from=...` for `COPY`.
Value []string // The contents of the command (ex: `ubuntu:xenial`)
}
// A failure in opening a file for reading.
type IOError struct {
Msg string
}
func (e IOError) Error() string {
return e.Msg
}
// A failure in parsing the file as a dockerfile.
type ParseError struct {
Msg string
}
func (e ParseError) Error() string {
return e.Msg
}
// List all legal cmds in a dockerfile
func AllCmds() []string {
var ret []string
for k := range command.Commands {
ret = append(ret, k)
}
sort.Strings(ret)
return ret
}
// Parse a Dockerfile from a reader. A ParseError may occur.
func ParseReader(file io.Reader) ([]Command, error) {
res, err := parser.Parse(file)
if err != nil {
return nil, ParseError{err.Error()}
}
var ret []Command
for _, child := range res.AST.Children {
cmd := Command{
Cmd: child.Value,
Original: child.Original,
StartLine: child.StartLine,
EndLine: child.EndLine,
Flags: child.Flags,
}
// Only happens for ONBUILD
if child.Next != nil && len(child.Next.Children) > 0 {
cmd.SubCmd = child.Next.Children[0].Value
child = child.Next.Children[0]
}
cmd.Json = child.Attributes["json"]
for n := child.Next; n != nil; n = n.Next {
cmd.Value = append(cmd.Value, n.Value)
}
ret = append(ret, cmd)
}
return ret, nil
}
// Parse a Dockerfile from a filename. An IOError or ParseError may occur.
func ParseFile(filename string) ([]Command, error) {
file, err := os.Open(filename)
if err != nil {
return nil, IOError{err.Error()}
}
defer file.Close()
return ParseReader(file)
}

View File

@ -0,0 +1,4 @@
coverage
pre-commit
pytest
setuptools-golang>=1.0.0

38
vendor/github.com/asottile/dockerfile/setup.cfg generated vendored Normal file
View File

@ -0,0 +1,38 @@
[metadata]
name = dockerfile
version = 3.1.0
description = Parse a dockerfile into a high-level representation using the official go parser.
long_description = file: README.md
long_description_content_type = text/markdown
url = https://github.com/asottile/dockerfile
author = Anthony Sottile
author_email = asottile@umich.edu
license = MIT
license_file = LICENSE
classifiers =
License :: OSI Approved :: MIT License
Programming Language :: Python :: 3
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
[options]
python_requires = >=3.6.1
setup_requires =
setuptools-golang>=1.7.0
[mypy]
check_untyped_defs = true
disallow_any_generics = true
disallow_incomplete_defs = true
disallow_untyped_defs = true
no_implicit_optional = true
[mypy-testing.*]
disallow_untyped_defs = false
[mypy-tests.*]
disallow_untyped_defs = false

Some files were not shown because too many files have changed in this diff Show More