Compare commits

...

19 Commits

Author SHA1 Message Date
Ettore Di Giacinto
01638567a7 Tag 0.21.0 2021-12-16 00:22:17 +01:00
Ettore Di Giacinto
fbe9b038dd 🔧 Consider removals when appending packages to be uninstalled 2021-12-15 21:11:21 +01:00
Ettore Di Giacinto
0a90129e34 🔧 Restore tree imglist hash output
Fixes #271
2021-12-15 18:38:47 +01:00
Ettore Di Giacinto
b05b00c615 🔧 🎨 Enhance package upgrade strategy order
Enhance package upgrade ordering during swap by taking into account the
files shipped by packages.

This change also introduces a new method for clients to get the
underlying cache data, which the installer consumes to fix the progress bar display
2021-12-15 18:04:45 +01:00
Ettore Di Giacinto
938d41fe9e 🔧 Allow performing oscheck automatically after upgrades 2021-12-12 12:23:30 +01:00
Ettore Di Giacinto
163bd77d27 🔧 Emit post/pre upgrade events 2021-12-12 10:45:28 +01:00
Ettore Di Giacinto
309f5c0559 📒 update vendor/ 2021-12-07 18:26:35 +01:00
Ettore Di Giacinto
1f6d0cc66c 🆕 Update go-pluggable 2021-12-07 18:23:49 +01:00
Ettore Di Giacinto
07e37ea059 🔧 Add luet reinstall --installed
Fixes #273
2021-12-07 18:22:05 +01:00
Ettore Di Giacinto
432b1db116 🆕 Tag 0.20.13 2021-12-06 21:47:12 +01:00
Ettore Di Giacinto
8e16d3abd3 🔧 Use ImageID for generating dockerfile names
It is safer, and plays better with buildx
2021-12-06 21:46:15 +01:00
Ettore Di Giacinto
1f29fdd680 🔧 Add oscheck
Fixes #50
2021-12-05 23:22:56 +01:00
Ettore Di Giacinto
da85a7306f 🔧 Consistently use Tempdir in compiler 2021-12-04 21:48:43 +01:00
Ettore Di Giacinto
78307eef57 🔧 Add contextual logging accessors 2021-12-04 21:40:32 +01:00
Ettore Di Giacinto
e11521ddce 📒 Update CONTRIBUTING 2021-12-04 21:35:59 +01:00
Ettore Di Giacinto
1e6aca0ba1 🔧 CLI: add quiet mode 2021-12-04 21:35:34 +01:00
Ettore Di Giacinto
79e98af604 Handle error if we can't generate a compilation spec from a package 2021-11-27 21:12:14 +01:00
Ettore Di Giacinto
71d5b03382 Tag 0.20.12 2021-11-25 15:04:16 +01:00
Ettore Di Giacinto
a02ab16510 Don't load requires while parsing compilespecs that consume final images
Otherwise, when other packages depend on them, we would try to compile the
full tree instead of reconstructing the image that is the result of a join,
while keeping the revdep tree invariant
2021-11-25 14:18:15 +01:00
85 changed files with 1161 additions and 3619 deletions

View File

@@ -17,7 +17,7 @@ Join us in [slack](https://luet.slack.com/join/shared_invite/enQtOTQxMjcyNDQ0MDU
## All Code Changes Happen Through Pull Requests
Pull requests are the best way to propose changes to the codebase. We actively welcome your pull requests:
1. Fork the repo you want to contribute to and create your branch from `develop`.
1. Fork the repo you want to contribute to and create your branch from `master`.
2. If you've added code that should be tested, add tests.
3. If you've changed APIs, update the [documentation](https://github.com/Luet-lab/docs).
4. Ensure the test suite passes.

cmd/oscheck.go Normal file (+140 lines)
View File

@@ -0,0 +1,140 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"fmt"
"os"
"strings"
"github.com/mudler/luet/pkg/api/core/types"
installer "github.com/mudler/luet/pkg/installer"
pkg "github.com/mudler/luet/pkg/package"
"github.com/mudler/luet/pkg/solver"
"github.com/mudler/luet/cmd/util"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var osCheckCmd = &cobra.Command{
Use: "oscheck",
Short: "Checks package integrity",
Long: `List installed packages whose files are missing from the system.
$ luet oscheck
To reinstall packages in the list:
$ luet oscheck --reinstall
`,
Aliases: []string{"i"},
PreRun: func(cmd *cobra.Command, args []string) {
util.BindSystemFlags(cmd)
util.BindSolverFlags(cmd)
viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
viper.BindPFlag("force", cmd.Flags().Lookup("force"))
viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
},
Run: func(cmd *cobra.Command, args []string) {
force := viper.GetBool("force")
onlydeps := viper.GetBool("onlydeps")
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
util.SetSystemConfig(util.DefaultContext)
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
packs := system.OSCheck()
if !util.DefaultContext.Config.General.Quiet {
if len(packs) == 0 {
util.DefaultContext.Success("All good!")
os.Exit(0)
} else {
util.DefaultContext.Info("The following packages are missing files or are incomplete:")
for _, p := range packs {
util.DefaultContext.Info(p.HumanReadableString())
}
}
} else {
var s []string
for _, p := range packs {
s = append(s, p.HumanReadableString())
}
fmt.Println(strings.Join(s, " "))
}
reinstall, _ := cmd.Flags().GetBool("reinstall")
if reinstall {
// Strip version for reinstall
toInstall := pkg.Packages{}
for _, p := range packs {
c := p.Clone()
c.SetVersion(">=0")
toInstall = append(toInstall, c)
}
util.SetSolverConfig(util.DefaultContext)
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
util.DefaultContext.Debug("Solver", util.DefaultContext.Config.GetSolverOptions().CompactString())
// Load config protect configs
util.DefaultContext.Config.LoadConfigProtect(util.DefaultContext)
inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
Concurrency: util.DefaultContext.Config.GetGeneral().Concurrency,
SolverOptions: *util.DefaultContext.Config.GetSolverOptions(),
NoDeps: true,
Force: force,
OnlyDeps: onlydeps,
PreserveSystemEssentialData: true,
Ask: !yes,
DownloadOnly: downloadOnly,
Context: util.DefaultContext,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
})
err := inst.Swap(packs, toInstall, system)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
}
},
}
func init() {
osCheckCmd.Flags().String("system-dbpath", "", "System db path")
osCheckCmd.Flags().String("system-target", "", "System rootpath")
osCheckCmd.Flags().String("system-engine", "", "System DB engine")
osCheckCmd.Flags().Bool("reinstall", false, "Reinstall packages that fail the check")
osCheckCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+types.AvailableResolvers+" )")
osCheckCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
osCheckCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
osCheckCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
osCheckCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
osCheckCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
osCheckCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
osCheckCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
osCheckCmd.Flags().Bool("download-only", false, "Download only")
RootCmd.AddCommand(osCheckCmd)
}
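Taken together, the command reduces to a check-then-swap loop: probe the system with OSCheck, relax the version constraint of every broken package to ">=0" so the solver may pick any available version, and hand both lists to the installer. A condensed sketch of that flow, assuming the installer and pkg APIs used above, with inst and system as constructed in the Run function and error handling elided:

// Sketch: reinstall whatever OSCheck reports as broken.
packs := system.OSCheck() // installed packages with missing files
toInstall := pkg.Packages{}
for _, p := range packs {
	c := p.Clone()
	c.SetVersion(">=0") // any available version satisfies the reinstall
	toInstall = append(toInstall, c)
}
// Swap uninstalls the broken packages and installs the relaxed set in place.
if err := inst.Swap(packs, toInstall, system); err != nil {
	util.DefaultContext.Fatal("Error: " + err.Error())
}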

View File

@@ -52,18 +52,10 @@ var reinstallCmd = &cobra.Command{
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
installed, _ := cmd.Flags().GetBool("installed")
util.SetSystemConfig(util.DefaultContext)
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toUninstall = append(toUninstall, pack)
toAdd = append(toAdd, pack)
}
util.SetSolverConfig(util.DefaultContext)
util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
@@ -87,6 +79,25 @@ var reinstallCmd = &cobra.Command{
})
system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}
if installed {
for _, p := range system.Database.World() {
toUninstall = append(toUninstall, p)
c := p.Clone()
c.SetVersion(">=0")
toAdd = append(toAdd, c)
}
} else {
for _, a := range args {
pack, err := helpers.ParsePackageStr(a)
if err != nil {
util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
}
toUninstall = append(toUninstall, pack)
toAdd = append(toAdd, pack)
}
}
err := inst.Swap(toUninstall, toAdd, system)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
@@ -107,6 +118,7 @@ func init() {
reinstallCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
reinstallCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
reinstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
reinstallCmd.Flags().Bool("installed", false, "Reinstall installed packages")
reinstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
reinstallCmd.Flags().Bool("download-only", false, "Download only")

View File

@@ -30,7 +30,7 @@ var cfgFile string
var Verbose bool
const (
LuetCLIVersion = "0.20.11"
LuetCLIVersion = "0.21.0"
LuetEnvPrefix = "LUET"
)

View File

@@ -371,6 +371,10 @@ Search can also return results in the terminal in different ways: as terminal ou
default:
if tableMode {
t.Render()
} else if util.DefaultContext.Config.General.Quiet {
for _, tt := range results.Packages {
fmt.Printf("%s/%s-%s\n", tt.Category, tt.Name, tt.Version)
}
} else {
l.Render()
}

View File

@@ -102,7 +102,12 @@ func NewTreeImageCommand() *cobra.Command {
}
ht := compiler.NewHashTree(reciper.GetDatabase())
hashtree, err := ht.Query(luetCompiler, spec)
copy, err := compiler.CompilerFinalImages(luetCompiler)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}
hashtree, err := ht.Query(copy, spec)
if err != nil {
util.DefaultContext.Fatal("Error: " + err.Error())
}

View File

@@ -43,6 +43,8 @@ var upgradeCmd = &cobra.Command{
universe, _ := cmd.Flags().GetBool("universe")
clean, _ := cmd.Flags().GetBool("clean")
sync, _ := cmd.Flags().GetBool("sync")
osCheck, _ := cmd.Flags().GetBool("oscheck")
yes := viper.GetBool("yes")
downloadOnly, _ := cmd.Flags().GetBool("download-only")
@@ -67,6 +69,7 @@ var upgradeCmd = &cobra.Command{
UpgradeNewRevisions: sync,
PreserveSystemEssentialData: true,
Ask: !yes,
AutoOSCheck: osCheck,
DownloadOnly: downloadOnly,
PackageRepositories: util.DefaultContext.Config.SystemRepositories,
Context: util.DefaultContext,
@@ -97,6 +100,7 @@ func init() {
upgradeCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
upgradeCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
upgradeCmd.Flags().Bool("download-only", false, "Download only")
upgradeCmd.Flags().Bool("oscheck", false, "Automatically perform oscheck after upgrades")
RootCmd.AddCommand(upgradeCmd)
}

View File

@@ -156,11 +156,15 @@ func DisplayVersionBanner(c *types.Context, banner func(), version func() string
}
}
if display {
banner()
pterm.DefaultCenter.Print(version())
for _, l := range license {
pterm.DefaultCenter.Print(l)
if c.Config.General.Quiet {
pterm.Info.Printf("Luet %s\n", version())
pterm.Info.Println(strings.Join(license, "\n"))
} else {
banner()
pterm.DefaultCenter.Print(version())
for _, l := range license {
pterm.DefaultCenter.Print(l)
}
}
}
}

View File

@@ -120,6 +120,7 @@ func setDefaults(viper *viper.Viper) {
viper.SetDefault("general.concurrency", runtime.NumCPU())
viper.SetDefault("general.debug", false)
viper.SetDefault("general.quiet", false)
viper.SetDefault("general.show_build_output", false)
viper.SetDefault("general.fatal_warnings", false)
viper.SetDefault("general.http_timeout", 360)
@@ -159,7 +160,8 @@ func InitViper(ctx *types.Context, RootCmd *cobra.Command) {
cobra.OnInitialize(initConfig)
pflags := RootCmd.PersistentFlags()
pflags.StringVar(&cfgFile, "config", "", "config file (default is $HOME/.luet.yaml)")
pflags.BoolP("debug", "d", false, "verbose output")
pflags.BoolP("debug", "d", false, "debug output")
pflags.BoolP("quiet", "q", false, "quiet output")
pflags.Bool("fatal", false, "Enables Warnings to exit")
pflags.Bool("enable-logfile", false, "Enable log to file")
pflags.Bool("no-spinner", false, "Disable spinner.")
@@ -188,6 +190,7 @@ func InitViper(ctx *types.Context, RootCmd *cobra.Command) {
viper.BindPFlag("general.concurrency", pflags.Lookup("concurrency"))
viper.BindPFlag("general.debug", pflags.Lookup("debug"))
viper.BindPFlag("general.quiet", pflags.Lookup("quiet"))
viper.BindPFlag("general.fatal_warnings", pflags.Lookup("fatal"))
viper.BindPFlag("general.same_owner", pflags.Lookup("same-owner"))
viper.BindPFlag("plugin", pflags.Lookup("plugin"))

go.mod (2 lines changed)
View File

@@ -42,7 +42,7 @@ require (
github.com/moby/moby v20.10.9+incompatible
github.com/moby/sys/mount v0.2.0 // indirect
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d
github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290
github.com/onsi/ginkgo v1.16.4
github.com/onsi/gomega v1.16.0

go.sum (4 lines changed)
View File

@@ -810,8 +810,8 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d h1:fKh+rvwZQCA+TPzK0EMwwbqhjvRHaQ6H8AsVU1Wt+NQ=
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d/go.mod h1:puRUWSwyecW2V355tKncwPVPRAjQBduPsFjG0mrV/Nw=
github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d h1:NKvvf/q1dWDde+yg5cMiU5EuYZ2jNuKs/9hb8xod8A0=
github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d/go.mod h1:WmKcT8ONmhDQIqQ+HxU+tkGWjzBEyY/KFO8LTGCu4AI=
github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e h1:CZI+kJW2+WjZXLWWnVzi6NDQ6SfwSfeNqq5d1iDiwyY=
github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e/go.mod h1:WmKcT8ONmhDQIqQ+HxU+tkGWjzBEyY/KFO8LTGCu4AI=
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290 h1:426hFyXMpXeqIeGJn2cGAW9ogvM2Jf+Jv23gtVPvBLM=
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290/go.mod h1:uP5BBgFxq2wNWo7n1vnY5SSbgL0WDshVJrOO12tZ/lA=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=

View File

@@ -2,6 +2,7 @@ package bus
import (
"fmt"
"github.com/mudler/go-pluggable"
"github.com/mudler/luet/pkg/api/core/types"
)
@@ -13,6 +14,10 @@ var (
EventPackageInstall pluggable.EventType = "package.install"
// EventPackageUnInstall is the event fired when a new package is being uninstalled
EventPackageUnInstall pluggable.EventType = "package.uninstall"
// EventPreUpgrade is the event fired before an upgrade is attempted
EventPreUpgrade pluggable.EventType = "package.pre.upgrade"
// EventPostUpgrade is the event fired after an upgrade is done
EventPostUpgrade pluggable.EventType = "package.post.upgrade"
// Package build
@@ -62,6 +67,8 @@ var Manager *Bus = &Bus{
EventPackageInstall,
EventPackageUnInstall,
EventPackagePreBuild,
EventPreUpgrade,
EventPostUpgrade,
EventPackagePreBuildArtifact,
EventPackagePostBuildArtifact,
EventPackagePostBuild,

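For context, the installer publishes these two events around the swap performed during an upgrade (see the checkAndUpgrade hunk further down): subscribers receive the uninstall and install sets, and EventPostUpgrade additionally carries the error, if any. A sketch of the emitting side, mirroring that hunk (uninstall, toInstall, and the swap call are taken from the installer code below):

// Sketch: fire pre/post upgrade events around the swap.
bus.Manager.Publish(bus.EventPreUpgrade, struct{ Uninstall, Install pkg.Packages }{Uninstall: uninstall, Install: toInstall})
err = l.swap(o, r, uninstall, toInstall, s)
bus.Manager.Publish(bus.EventPostUpgrade, struct {
	Error              error
	Uninstall, Install pkg.Packages
}{Uninstall: uninstall, Install: toInstall, Error: err})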
View File

@@ -61,6 +61,7 @@ type LuetGeneralConfig struct {
ShowBuildOutput bool `yaml:"show_build_output,omitempty" mapstructure:"show_build_output"`
FatalWarns bool `yaml:"fatal_warnings,omitempty" mapstructure:"fatal_warnings"`
HTTPTimeout int `yaml:"http_timeout,omitempty" mapstructure:"http_timeout"`
Quiet bool `yaml:"quiet" mapstructure:"quiet"`
}
type LuetSolverOptions struct {
@@ -137,7 +138,7 @@ func (sc *LuetSystemConfig) GetSystemRepoDatabaseDirPath() string {
return dbpath
}
func (sc *LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) {
func (sc *LuetSystemConfig) GetSystemPkgsCacheDirPath() (p string) {
var cachepath string
if sc.PkgsCachePath != "" {
cachepath = sc.PkgsCachePath
@@ -147,11 +148,13 @@ func (sc *LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) {
}
if filepath.IsAbs(cachepath) {
ans = cachepath
p = cachepath
} else {
ans = filepath.Join(sc.GetSystemRepoDatabaseDirPath(), cachepath)
p = filepath.Join(sc.GetSystemRepoDatabaseDirPath(), cachepath)
}
sc.PkgsCachePath = cachepath // Be consistent with the path we set
return
}

View File

@@ -48,6 +48,7 @@ type Context struct {
Config *LuetConfig
IsTerminal bool
NoSpinner bool
name string
s *pterm.SpinnerPrinter
spinnerLock *sync.Mutex
@@ -72,6 +73,12 @@ func NewContext() *Context {
}
}
func (c *Context) WithName(name string) *Context {
newc := c.Copy()
newc.name = name
return newc
}
func (c *Context) Copy() *Context {
configCopy := *c.Config
@@ -112,6 +119,11 @@ func (c *Context) Init() (err error) {
c.NoColor()
}
if c.Config.General.Quiet {
c.NoColor()
pterm.DisableStyling()
}
c.Debug("Colors", c.Config.GetLogging().Color)
c.Debug("Logging level", c.Config.GetLogging().Level)
c.Debug("Debug mode", c.Config.GetGeneral().Debug)
@@ -306,6 +318,10 @@ func (c *Context) Msg(level LogLevel, ln bool, msg ...interface{}) {
levelMsg = re.ReplaceAllString(levelMsg, "")
}
if c.name != "" {
levelMsg = fmt.Sprintf("[%s] %s", c.name, levelMsg)
}
if c.z != nil {
c.log2File(level, message)
}
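A minimal usage sketch of the new contextual logging accessor, under the assumption (consistent with the hunk above) that WithName copies the context and Msg prefixes every message logged through the copy with the name in brackets:

// Sketch: derive a named sub-context; its log lines gain a "[installer]" prefix.
ctx := types.NewContext()
installerCtx := ctx.WithName("installer")
installerCtx.Info("resolving packages") // rendered roughly as: [installer] resolving packages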

View File

@@ -239,19 +239,6 @@ func (cs *LuetCompiler) unpackFs(concurrency int, keepPermissions bool, p *compi
return nil, err
}
// artifact.ImageToArtifact(
// cs.Options.Context,
// img,
// cs.Options.CompressionType,
// p.Rel(p.GetPackage().GetFingerPrint()+".package.tar"),
// image.ExtractFiles(
// cs.Options.Context,
// strings.TrimLeft(p.GetPackageDir(), "/"),
// p.GetIncludes(),
// p.GetExcludes(),
// ),
// )
// TODO: Trim includes/excludes from "/" ?
_, rootfs, err := image.Extract(
cs.Options.Context,
img,
@@ -286,11 +273,11 @@ func (cs *LuetCompiler) unpackFs(concurrency int, keepPermissions bool, p *compi
func (cs *LuetCompiler) unpackDelta(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec, builderOpts, runnerOpts backend.Options) (*artifact.PackageArtifact, error) {
rootfs, err := ioutil.TempDir(p.GetOutputPath(), "rootfs")
rootfs, err := cs.Options.Context.Config.System.TempDir("rootfs")
if err != nil {
return nil, errors.Wrap(err, "Could not create tempdir")
}
defer os.RemoveAll(rootfs) // clean up
defer os.RemoveAll(rootfs)
pkgTag := ":package: " + p.GetPackage().HumanReadableString()
if cs.Options.PullFirst {
@@ -384,7 +371,7 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
}
// First we create the builder image
if err := p.WriteBuildImageDefinition(filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+"-builder.dockerfile")); err != nil {
if err := p.WriteBuildImageDefinition(filepath.Join(buildDir, p.GetPackage().ImageID()+"-builder.dockerfile")); err != nil {
return builderOpts, runnerOpts, errors.Wrap(err, "Could not generate image definition")
}
@@ -399,21 +386,21 @@ func (cs *LuetCompiler) buildPackageImage(image, buildertaggedImage, packageImag
// steps in prelude are == 0 those are equivalent.
// Then we write the step image, which uses the builder one
if err := p.WriteStepImageDefinition(buildertaggedImage, filepath.Join(buildDir, p.GetPackage().GetFingerPrint()+".dockerfile")); err != nil {
if err := p.WriteStepImageDefinition(buildertaggedImage, filepath.Join(buildDir, p.GetPackage().ImageID()+".dockerfile")); err != nil {
return builderOpts, runnerOpts, errors.Wrap(err, "Could not generate image definition")
}
builderOpts = backend.Options{
ImageName: buildertaggedImage,
SourcePath: buildDir,
DockerFileName: p.GetPackage().GetFingerPrint() + "-builder.dockerfile",
DockerFileName: p.GetPackage().ImageID() + "-builder.dockerfile",
Destination: p.Rel(p.GetPackage().GetFingerPrint() + "-builder.image.tar"),
BackendArgs: cs.Options.BackendArgs,
}
runnerOpts = backend.Options{
ImageName: packageImage,
SourcePath: buildDir,
DockerFileName: p.GetPackage().GetFingerPrint() + ".dockerfile",
DockerFileName: p.GetPackage().ImageID() + ".dockerfile",
Destination: p.Rel(p.GetPackage().GetFingerPrint() + ".image.tar"),
BackendArgs: cs.Options.BackendArgs,
}
@@ -471,11 +458,11 @@ func (cs *LuetCompiler) genArtifact(p *compilerspec.LuetCompilationSpec, builder
if p.EmptyPackage() {
fakePackage := p.Rel(p.GetPackage().GetFingerPrint() + ".package.tar")
rootfs, err = ioutil.TempDir(p.GetOutputPath(), "rootfs")
rootfs, err = cs.Options.Context.Config.System.TempDir("rootfs")
if err != nil {
return nil, errors.Wrap(err, "Could not create tempdir")
}
defer os.RemoveAll(rootfs) // clean up
defer os.RemoveAll(rootfs)
a := artifact.NewPackageArtifact(fakePackage)
a.CompressionType = cs.Options.CompressionType
@@ -952,11 +939,11 @@ func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool
}
// otherwise, generate it and push it aside
joinDir, err := ioutil.TempDir(p.GetOutputPath(), "join")
joinDir, err := cs.Options.Context.Config.System.TempDir("join")
if err != nil {
return errors.Wrap(err, "could not create tempdir for joining images")
}
defer os.RemoveAll(joinDir) // clean up
defer os.RemoveAll(joinDir)
for _, p := range fromPackages {
cs.Options.Context.Info(joinTag, ":arrow_right_hook:", p.HumanReadableString(), ":leaves:")
@@ -991,11 +978,11 @@ func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool
}
}
artifactDir, err := ioutil.TempDir(p.GetOutputPath(), "artifact")
artifactDir, err := cs.Options.Context.Config.System.TempDir("join")
if err != nil {
return errors.Wrap(err, "could not create tempdir for final artifact")
}
defer os.RemoveAll(joinDir) // clean up
defer os.RemoveAll(artifactDir)
cs.Options.Context.Info(joinTag, ":droplet: generating artifact for source image of", p.GetPackage().HumanReadableString())
@@ -1066,6 +1053,36 @@ func (cs *LuetCompiler) resolveMultiStageImages(concurrency int, keepPermissions
return nil
}
func CompilerFinalImages(cs *LuetCompiler) (*LuetCompiler, error) {
// When computing the hash tree, we need to take into consideration
// that packages requiring final images have to be seen as packages without deps.
// This is because we don't really want to calculate their deptree, as
// it is already handled when we create the images in resolveFinalImages().
c := *cs
copy := &c
memDB := pkg.NewInMemoryDatabase(false)
// Create a copy to avoid races
dbCopy := pkg.NewInMemoryDatabase(false)
err := cs.Database.Clone(dbCopy)
if err != nil {
return nil, errors.Wrap(err, "failed cloning db")
}
for _, p := range dbCopy.World() {
copy := p.Clone()
spec, err := cs.FromPackage(p)
if err != nil {
return nil, errors.Wrap(err, "failed getting compile spec for package "+p.HumanReadableString())
}
if spec.RequiresFinalImages {
copy.Requires([]*pkg.DefaultPackage{})
}
memDB.CreatePackage(copy)
}
copy.Database = memDB
return copy, nil
}
func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateFinalArtifact *bool, generateDependenciesFinalArtifact *bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
cs.Options.Context.Info(":package: Compiling", p.GetPackage().HumanReadableString(), ".... :coffee:")
@@ -1089,8 +1106,11 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
}
ht := NewHashTree(cs.Database)
packageHashTree, err := ht.Query(cs, p)
copy, err := CompilerFinalImages(cs)
if err != nil {
return nil, err
}
packageHashTree, err := ht.Query(copy, p)
if err != nil {
return nil, errors.Wrap(err, "failed querying hashtree")
}
@@ -1148,6 +1168,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
buildTarget := !cs.Options.OnlyDeps
if buildDeps {
cs.Options.Context.Info(":deciduous_tree: Build dependencies for " + p.GetPackage().HumanReadableString())
for _, assertion := range dependencies { //highly dependent on the order
depsN++
@@ -1163,6 +1184,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
return nil, errors.Wrap(err, "Error while generating compilespec for "+assertion.Package.GetName())
}
compileSpec.BuildOptions.PullImageRepository = append(compileSpec.BuildOptions.PullImageRepository, p.BuildOptions.PullImageRepository...)
cs.Options.Context.Debug("PullImage repos:", compileSpec.BuildOptions.PullImageRepository)
compileSpec.SetOutputPath(p.GetOutputPath())
@@ -1319,11 +1341,12 @@ func (cs *LuetCompiler) templatePackage(vals []map[string]interface{}, pack pkg.
} else {
bv := cs.Options.BuildValuesFile
if len(vals) > 0 {
valuesdir, err := ioutil.TempDir("", "genvalues")
valuesdir, err := cs.Options.Context.Config.System.TempDir("genvalues")
if err != nil {
return nil, errors.Wrap(err, "Could not create tempdir")
}
defer os.RemoveAll(valuesdir) // clean up
defer os.RemoveAll(valuesdir)
for _, b := range vals {
out, err := yaml.Marshal(b)
if err != nil {

View File

@@ -64,11 +64,13 @@ func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.
c.context.Spinner()
defer c.context.SpinnerStop()
resultingArtifact := a.ShallowCopy()
artifactName := path.Base(a.Path)
downloaded := false
resultingArtifact, err := c.CacheGet(a)
if err == nil {
return resultingArtifact, nil
}
// TODO:
// Files are in URI/packagename:version (GetPackageImageName() method)
// use downloadAndExtract .. and generate an archive to consume. Checksums should already be checked while downloading the image
@@ -79,71 +81,57 @@ func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.
// We discard checksums, which are checked during pull and unpack by containerd
resultingArtifact.Checksums = artifact.Checksums{}
// Check if file is already in cache
fileName, err := c.Cache.Get(resultingArtifact)
if err == nil {
resultingArtifact = a
resultingArtifact.Path = fileName
resultingArtifact.Checksums = artifact.Checksums{}
c.context.Debug("Use artifact", artifactName, "from cache.")
} else {
temp, err := c.context.Config.GetSystem().TempDir("image")
if err != nil {
return nil, err
}
defer os.RemoveAll(temp)
temp, err := c.context.Config.GetSystem().TempDir("image")
tempArtifact, err := c.context.Config.GetSystem().TempFile("artifact")
if err != nil {
return nil, err
}
defer os.RemoveAll(tempArtifact.Name())
for _, uri := range c.RepoData.Urls {
imageName := fmt.Sprintf("%s:%s", uri, a.CompileSpec.GetPackage().ImageID())
c.context.Info("Downloading image", imageName)
// imageName := fmt.Sprintf("%s/%s", uri, artifact.GetCompileSpec().GetPackage().GetPackageImageName())
info, err := docker.DownloadAndExtractDockerImage(c.context, imageName, temp, c.auth, c.RepoData.Verify)
if err != nil {
return nil, err
c.context.Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
continue
}
defer os.RemoveAll(temp)
tempArtifact, err := c.context.Config.GetSystem().TempFile("artifact")
c.context.Info(
fmt.Sprintf("Image: %s. Pulled: %s. Size: %s",
imageName,
info.Target.Digest,
units.BytesSize(float64(info.Target.Size)),
),
)
c.context.Debug("\nCompressing result ", filepath.Join(temp), "to", tempArtifact.Name())
resultingArtifact.Path = tempArtifact.Name() // First set to cache file
err = resultingArtifact.Compress(temp, 1)
if err != nil {
return nil, err
}
defer os.RemoveAll(tempArtifact.Name())
for _, uri := range c.RepoData.Urls {
imageName := fmt.Sprintf("%s:%s", uri, a.CompileSpec.GetPackage().ImageID())
c.context.Info("Downloading image", imageName)
// imageName := fmt.Sprintf("%s/%s", uri, artifact.GetCompileSpec().GetPackage().GetPackageImageName())
info, err := docker.DownloadAndExtractDockerImage(c.context, imageName, temp, c.auth, c.RepoData.Verify)
if err != nil {
c.context.Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
continue
}
c.context.Info(fmt.Sprintf("Pulled: %s", info.Target.Digest))
c.context.Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.Target.Size))))
c.context.Debug("\nCompressing result ", filepath.Join(temp), "to", tempArtifact.Name())
resultingArtifact.Path = tempArtifact.Name() // First set to cache file
err = resultingArtifact.Compress(temp, 1)
if err != nil {
c.context.Error(fmt.Sprintf("Failed compressing package %s: %s", imageName, err.Error()))
continue
}
_, _, err = c.Cache.Put(resultingArtifact)
if err != nil {
c.context.Error(fmt.Sprintf("Failed storing package %s in cache: %s", imageName, err.Error()))
continue
}
fileName, err := c.Cache.Get(resultingArtifact)
if err != nil {
c.context.Error(fmt.Sprintf("Failed getting package %s from cache: %s", imageName, err.Error()))
continue
}
resultingArtifact.Path = fileName // Cache is persistent. tempArtifact is not
downloaded = true
break
c.context.Error(fmt.Sprintf("Failed compressing package %s: %s", imageName, err.Error()))
continue
}
if !downloaded {
return nil, errors.Wrap(err, "no image available from repositories")
_, _, err = c.Cache.Put(resultingArtifact)
if err != nil {
c.context.Error(fmt.Sprintf("Failed storing package %s in cache: %s", imageName, err.Error()))
continue
}
downloaded = true
return c.CacheGet(resultingArtifact)
}
if !downloaded {
return nil, errors.Wrap(err, "no image available from repositories")
}
return resultingArtifact, nil
@@ -194,3 +182,29 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
return file.Name(), err
}
func (c *DockerClient) CacheGet(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
resultingArtifact := a.ShallowCopy()
// TODO:
// Files are in URI/packagename:version (GetPackageImageName() method)
// use downloadAndExtract .. and generate an archive to consume. Checksums should already be checked while downloading the image
// with the above functions, because Docker images already contain such metadata
// - Check how verification is done when calling DownloadArtifact outside, similarly we need to check DownloadFile, and how verification
// is done in such cases (see repository.go)
// We discard checksums, which are checked during pull and unpack by containerd
resultingArtifact.Checksums = artifact.Checksums{}
// Check if file is already in cache
fileName, err := c.Cache.Get(resultingArtifact)
if err == nil {
artifactName := path.Base(a.Path)
c.context.Debug("Use artifact", artifactName, "from cache.")
resultingArtifact = a
resultingArtifact.Path = fileName
resultingArtifact.Checksums = artifact.Checksums{}
}
return resultingArtifact, err
}
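The refactor above makes the download path cache-first: DownloadArtifact probes CacheGet and returns immediately on a hit; on a miss it walks the repository URLs, logging and skipping a failing mirror instead of aborting the whole download, and on success it returns the artifact through the persistent cache rather than a temp file. A condensed sketch of that control flow, where fetchAndCache is a hypothetical helper standing in for the pull, compress, and Cache.Put steps shown above:

// Sketch of the cache-first download loop in DownloadArtifact.
if hit, err := c.CacheGet(a); err == nil {
	return hit, nil // cache hit: no network access needed
}
var lastErr error
for _, uri := range c.RepoData.Urls {
	art, err := c.fetchAndCache(uri, a) // hypothetical: pull image, compress, Cache.Put
	if err != nil {
		lastErr = err
		continue // try the next mirror
	}
	return art, nil // path points at the persistent cache, not a temp file
}
return nil, errors.Wrap(lastErr, "no image available from repositories")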

View File

@@ -166,32 +166,33 @@ func (c *HttpClient) DownloadFile(p string) (string, error) {
return file.Name(), nil
}
func (c *HttpClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
func (c *HttpClient) CacheGet(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
newart := a.ShallowCopy()
artifactName := path.Base(a.Path)
fileName, err := c.Cache.Get(a)
newart.Path = fileName
return newart, err
}
func (c *HttpClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
artifactName := path.Base(a.Path)
newart, err := c.CacheGet(a)
// Check if file is already in cache
if err == nil {
newart.Path = fileName
c.context.Debug("Use artifact", artifactName, "from cache.")
} else {
d, err := c.DownloadFile(artifactName)
if err != nil {
return nil, errors.Wrapf(err, "failed downloading %s", artifactName)
}
defer os.RemoveAll(d)
newart.Path = d
c.Cache.Put(newart)
fileName, err := c.Cache.Get(newart)
if err != nil {
return nil, errors.Wrapf(err, "failed getting file from cache %v", newart)
}
newart.Path = fileName
return newart, nil
}
return newart, nil
d, err := c.DownloadFile(artifactName)
if err != nil {
return nil, errors.Wrapf(err, "failed downloading %s", artifactName)
}
defer os.RemoveAll(d)
newart.Path = d
c.Cache.Put(newart)
return c.CacheGet(newart)
}

View File

@@ -44,32 +44,32 @@ func (c *LocalClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.P
var err error
artifactName := path.Base(a.Path)
newart := a.ShallowCopy()
fileName, err := c.Cache.Get(a)
newart, err := c.CacheGet(a)
// Check if file is already in cache
if err == nil {
newart.Path = fileName
c.context.Debug("Use artifact", artifactName, "from cache.")
} else {
d, err := c.DownloadFile(artifactName)
if err != nil {
return nil, errors.Wrapf(err, "failed downloading %s", artifactName)
}
defer os.RemoveAll(d)
newart.Path = d
c.Cache.Put(newart)
fileName, err := c.Cache.Get(newart)
if err != nil {
return nil, errors.Wrapf(err, "failed getting file from cache %v", newart)
}
newart.Path = fileName
return newart, nil
}
return newart, nil
d, err := c.DownloadFile(artifactName)
if err != nil {
return nil, errors.Wrapf(err, "failed downloading %s", artifactName)
}
defer os.RemoveAll(d)
newart.Path = d
c.Cache.Put(newart)
return c.CacheGet(newart)
}
func (c *LocalClient) CacheGet(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
newart := a.ShallowCopy()
fileName, err := c.Cache.Get(a)
newart.Path = fileName
return newart, err
}
func (c *LocalClient) DownloadFile(name string) (string, error) {

View File

@@ -53,6 +53,7 @@ type LuetInstallerOptions struct {
DownloadOnly bool
Relaxed bool
PackageRepositories types.LuetRepositories
AutoOSCheck bool
Context *types.Context
}
@@ -267,13 +268,16 @@ func (l *LuetInstaller) swap(o Option, syncedRepos Repositories, toRemove pkg.Pa
return nil
}
ops := l.getOpsWithOptions(toRemove, match, Option{
ops, err := l.getOpsWithOptions(toRemove, match, Option{
Force: o.Force,
NoDeps: false,
OnlyDeps: o.OnlyDeps,
RunFinalizers: false,
CheckFileConflicts: false,
}, o, syncedRepos, packages, assertions, allRepos)
}, o, syncedRepos, packages, assertions, allRepos, s)
if err != nil {
return errors.Wrap(err, "failed computing installer options")
}
err = l.runOps(ops, s)
if err != nil {
@@ -317,8 +321,8 @@ type installOperation struct {
// installerOp is the operation that is sent to the
// upgradeWorker's channel (todo)
type installerOp struct {
Uninstall operation
Install installOperation
Uninstall []operation
Install []installOperation
}
func (l *LuetInstaller) runOps(ops []installerOp, s *System) error {
@@ -349,13 +353,12 @@ func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, systemLock
for p := range c {
if p.Uninstall.Package != nil {
for _, pp := range p.Uninstall {
l.Options.Context.Debug("Replacing package inplace")
toUninstall, uninstall, err := l.generateUninstallFn(p.Uninstall.Option, s, p.Uninstall.Package)
toUninstall, uninstall, err := l.generateUninstallFn(pp.Option, s, pp.Package)
if err != nil {
l.Options.Context.Error("Failed to generate Uninstall function for" + err.Error())
l.Options.Context.Debug("Skipping uninstall, failed to generate uninstall function, error: " + err.Error())
continue
//return errors.Wrap(err, "while computing uninstall")
}
systemLock.Lock()
err = uninstall()
@@ -364,22 +367,21 @@ func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, systemLock
if err != nil {
l.Options.Context.Error("Failed uninstall for ", packsToList(toUninstall))
continue
//return errors.Wrap(err, "uninstalling "+packsToList(toUninstall))
}
}
if p.Install.Package != nil {
artMatch := p.Install.Matches[p.Install.Package.GetFingerPrint()]
ass := p.Install.Assertions.Search(p.Install.Package.GetFingerPrint())
packageToInstall, _ := p.Install.Packages.Find(p.Install.Package.GetPackageName())
for _, pp := range p.Install {
artMatch := pp.Matches[pp.Package.GetFingerPrint()]
ass := pp.Assertions.Search(pp.Package.GetFingerPrint())
packageToInstall, _ := pp.Packages.Find(pp.Package.GetPackageName())
systemLock.Lock()
err := l.install(
p.Install.Option,
p.Install.Reposiories,
map[string]ArtifactMatch{p.Install.Package.GetFingerPrint(): artMatch},
pp.Option,
pp.Reposiories,
map[string]ArtifactMatch{pp.Package.GetFingerPrint(): artMatch},
pkg.Packages{packageToInstall},
solver.PackagesAssertions{*ass},
p.Install.Database,
pp.Database,
s,
)
systemLock.Unlock()
@@ -395,35 +397,76 @@ func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, systemLock
// checks whether we can uninstall and install in place, and composes installer worker ops
func (l *LuetInstaller) getOpsWithOptions(
toUninstall pkg.Packages, installMatch map[string]ArtifactMatch, installOpt, uninstallOpt Option,
syncedRepos Repositories, toInstall pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase) []installerOp {
syncedRepos Repositories, toInstall pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase, s *System) ([]installerOp, error) {
l.Options.Context.Debug("Computing installation order")
resOps := []installerOp{}
insertPackage := func(install pkg.Package, uninstall ...pkg.Package) {
uOpts := []operation{}
for _, u := range uninstall {
uOpts = append(uOpts, operation{Package: u, Option: uninstallOpt})
}
resOps = append(resOps, installerOp{
Uninstall: uOpts,
Install: []installOperation{{
operation: operation{
Package: install,
Option: installOpt,
},
Matches: installMatch,
Packages: toInstall,
Reposiories: syncedRepos,
Assertions: solution,
Database: allRepos,
}},
})
}
removals := make(map[string]interface{})
for _, match := range installMatch {
if pack, err := toUninstall.Find(match.Package.GetPackageName()); err == nil {
resOps = append(resOps, installerOp{
Uninstall: operation{Package: pack, Option: uninstallOpt},
Install: installOperation{
operation: operation{
Package: match.Package,
Option: installOpt,
},
Matches: installMatch,
Packages: toInstall,
Reposiories: syncedRepos,
Assertions: solution,
Database: allRepos,
},
})
a, err := l.getPackage(match, l.Options.Context)
if err != nil && !l.Options.Force {
return nil, errors.Wrap(err, "Failed downloading package")
}
files, err := a.FileList()
if err != nil && !l.Options.Force {
return nil, errors.Wrapf(err, "Could not get filelist for %s", a.CompileSpec.Package.HumanReadableString())
}
var foundPackages []pkg.Package
for _, f := range files {
if exists, p, _ := s.ExistsPackageFile(f); exists {
_, err := toUninstall.Find(p.GetPackageName())
if err == nil {
// The package being installed has a file that
// is going to be removed by another package
foundPackages = append(foundPackages, p)
}
}
}
foundPackages = pkg.Packages(foundPackages).Unique()
if len(foundPackages) > 0 {
if pack, err := toUninstall.Find(match.Package.GetPackageName()); err == nil {
foundPackages = append(foundPackages, pack)
}
toRemove := []pkg.Package{}
for _, p := range foundPackages {
if _, ok := removals[p.GetPackageName()]; !ok {
toRemove = append(toRemove, p)
removals[p.GetPackageName()] = nil
}
}
insertPackage(match.Package, toRemove...)
} else if pack, err := toUninstall.Find(match.Package.GetPackageName()); err == nil {
if _, ok := removals[pack.GetPackageName()]; !ok {
insertPackage(match.Package, pack)
removals[pack.GetPackageName()] = nil
}
} else {
resOps = append(resOps, installerOp{
Install: installOperation{
operation: operation{Package: match.Package, Option: installOpt},
Matches: installMatch,
Reposiories: syncedRepos,
Packages: toInstall,
Assertions: solution,
Database: allRepos,
},
})
insertPackage(match.Package)
}
}
@@ -434,24 +477,24 @@ func (l *LuetInstaller) getOpsWithOptions(
if match.Package.GetPackageName() == p.GetPackageName() {
found = true
}
}
if !found {
resOps = append(resOps, installerOp{
Uninstall: operation{Package: p, Option: uninstallOpt},
})
if _, ok := removals[p.GetPackageName()]; !ok {
resOps = append(resOps, installerOp{
Uninstall: []operation{{Package: p, Option: uninstallOpt}},
})
removals[p.GetPackageName()] = nil
}
}
}
return resOps
return resOps, nil
}
func (l *LuetInstaller) checkAndUpgrade(r Repositories, s *System) error {
// Spinner(32)
uninstall, toInstall, err := l.computeUpgrade(r, s)
if err != nil {
return errors.Wrap(err, "failed computing upgrade")
}
// SpinnerStop()
if len(toInstall) == 0 && len(uninstall) == 0 {
l.Options.Context.Info("Nothing to upgrade")
@@ -487,7 +530,34 @@ func (l *LuetInstaller) checkAndUpgrade(r Repositories, s *System) error {
}
}
return l.swap(o, r, uninstall, toInstall, s)
bus.Manager.Publish(bus.EventPreUpgrade, struct{ Uninstall, Install pkg.Packages }{Uninstall: uninstall, Install: toInstall})
err = l.swap(o, r, uninstall, toInstall, s)
bus.Manager.Publish(bus.EventPostUpgrade, struct {
Error error
Uninstall, Install pkg.Packages
}{Uninstall: uninstall, Install: toInstall, Error: err})
if err != nil {
return err
}
if l.Options.AutoOSCheck {
l.Options.Context.Info("Performing automatic oscheck")
packs := s.OSCheck()
if len(packs) > 0 {
p := ""
for _, r := range packs {
p += " " + r.HumanReadableString()
}
l.Options.Context.Info("The following packages require reinstallation: " + p)
return l.swap(o, r, packs, packs, s)
}
l.Options.Context.Info("OSCheck done")
}
return err
}
func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
@@ -566,6 +636,21 @@ func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
func (l *LuetInstaller) download(syncedRepos Repositories, toDownload map[string]ArtifactMatch) error {
// Don't attempt to download stuff that is already in cache
missArtifacts := false
for _, m := range toDownload {
c := m.Repository.Client(l.Options.Context)
_, err := c.CacheGet(m.Artifact)
if err != nil {
missArtifacts = true
}
}
if !missArtifacts {
l.Options.Context.Debug("Packages already in cache, skipping download")
return nil
}
// Download packages into cache in parallel.
all := make(chan ArtifactMatch)
@@ -595,7 +680,6 @@ func (l *LuetInstaller) download(syncedRepos Repositories, toDownload map[string
}
close(all)
wg.Wait()
return nil
}
@@ -786,7 +870,7 @@ func (l *LuetInstaller) checkFileconflicts(toInstall map[string]ArtifactMatch, c
filesToInstall := []string{}
for _, m := range toInstall {
a, err := l.downloadPackage(m, l.Options.Context)
a, err := l.getPackage(m, l.Options.Context)
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Failed downloading package")
}
@@ -882,11 +966,11 @@ func (l *LuetInstaller) install(o Option, syncedRepos Repositories, toInstall ma
return s.ExecuteFinalizers(l.Options.Context, toFinalize)
}
func (l *LuetInstaller) downloadPackage(a ArtifactMatch, ctx *types.Context) (*artifact.PackageArtifact, error) {
func (l *LuetInstaller) getPackage(a ArtifactMatch, ctx *types.Context) (artifact *artifact.PackageArtifact, err error) {
cli := a.Repository.Client(ctx)
artifact, err := cli.DownloadArtifact(a.Artifact)
artifact, err = cli.DownloadArtifact(a.Artifact)
if err != nil {
return nil, errors.Wrap(err, "Error on download artifact")
}
@@ -900,7 +984,7 @@ func (l *LuetInstaller) downloadPackage(a ArtifactMatch, ctx *types.Context) (*a
func (l *LuetInstaller) installPackage(m ArtifactMatch, s *System) error {
a, err := l.downloadPackage(m, l.Options.Context)
a, err := l.getPackage(m, l.Options.Context)
if err != nil && !l.Options.Force {
return errors.Wrap(err, "Failed downloading package")
}
@@ -925,7 +1009,7 @@ func (l *LuetInstaller) downloadWorker(i int, wg *sync.WaitGroup, pb *pterm.Prog
for p := range c {
// TODO: Keep trace of what was added from the tar, and save it into system
_, err := l.downloadPackage(p, ctx)
_, err := l.getPackage(p, ctx)
if err != nil {
l.Options.Context.Error("Failed downloading package "+p.Package.GetName(), err.Error())
return errors.Wrap(err, "Failed downloading package "+p.Package.GetName())

View File

@@ -654,6 +654,7 @@ urls:
Expect(err).ToNot(HaveOccurred())
inst := NewLuetInstaller(LuetInstallerOptions{
Concurrency: 1, Context: ctx,
Relaxed: true,
PackageRepositories: types.LuetRepositories{*repo2.LuetRepository},
})
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
@@ -702,6 +703,254 @@ urls:
})
It("Compute the correct upgrade order", func() {
tmpdir, err := ioutil.TempDir("", "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
err = generalRecipe.Load("../../tests/fixtures/upgrade_complex")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2))
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
spec2, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.1"})
Expect(err).ToNot(HaveOccurred())
spec4, err := c.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"})
Expect(err).ToNot(HaveOccurred())
spec5, err := c.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.2"})
Expect(err).ToNot(HaveOccurred())
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
tmpdir, err = ioutil.TempDir("", "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
spec.SetOutputPath(tmpdir)
spec2.SetOutputPath(tmpdir)
spec4.SetOutputPath(tmpdir)
spec5.SetOutputPath(tmpdir)
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec4, spec5))
Expect(errs).To(BeEmpty())
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_complex")
Expect(err).ToNot(HaveOccurred())
Expect(repo.GetName()).To(Equal("test"))
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
err = repo.Write(ctx, tmpdir, false, false)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
Expect(repo.GetType()).To(Equal("disk"))
fakeroot, err := ioutil.TempDir("", "fakeroot")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(fakeroot) // clean up
repo2, err := NewLuetSystemRepositoryFromYaml([]byte(`
name: "test"
type: "disk"
enable: true
urls:
- "`+tmpdir+`"
`), pkg.NewInMemoryDatabase(false))
Expect(err).ToNot(HaveOccurred())
inst := NewLuetInstaller(LuetInstallerOptions{
Concurrency: 1, Context: ctx,
Relaxed: true,
PackageRepositories: types.LuetRepositories{*repo2.LuetRepository},
})
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
Expect(repo.GetType()).To(Equal("disk"))
bolt, err := ioutil.TempDir("", "db")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(bolt) // clean up
systemDB := pkg.NewBoltDatabase(filepath.Join(bolt, "db.db"))
system := &System{Database: systemDB, Target: fakeroot}
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
Expect(len(system.Database.GetPackages())).To(Equal(1))
p, err := system.Database.GetPackage(system.Database.GetPackages()[0])
Expect(err).ToNot(HaveOccurred())
Expect(p.GetName()).To(Equal("b"))
files, err := systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(files).To(Equal([]string{"test5", "test6"}))
Expect(err).ToNot(HaveOccurred())
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}}, system)
Expect(err).ToNot(HaveOccurred())
files, err = systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"})
Expect(files).To(Equal([]string{"test3", "test4"}))
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test3"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test4"))).To(BeTrue())
err = inst.Upgrade(system)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test3"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test4"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
})
It("Compute the correct upgrade order with a package replacing multiple ones", func() {
tmpdir, err := ioutil.TempDir("", "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
err = generalRecipe.Load("../../tests/fixtures/upgrade_complex_multiple")
Expect(err).ToNot(HaveOccurred())
Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(6))
c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2))
spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
spec2, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.1"})
Expect(err).ToNot(HaveOccurred())
spec3, err := c.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.1"})
Expect(err).ToNot(HaveOccurred())
spec4, err := c.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"})
Expect(err).ToNot(HaveOccurred())
spec5, err := c.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.2"})
Expect(err).ToNot(HaveOccurred())
spec6, err := c.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.2"})
Expect(err).ToNot(HaveOccurred())
Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
tmpdir, err = ioutil.TempDir("", "tree")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(tmpdir) // clean up
spec.SetOutputPath(tmpdir)
spec2.SetOutputPath(tmpdir)
spec4.SetOutputPath(tmpdir)
spec5.SetOutputPath(tmpdir)
spec3.SetOutputPath(tmpdir)
spec6.SetOutputPath(tmpdir)
_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3, spec4, spec5, spec6))
Expect(errs).To(BeEmpty())
repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_complex_multiple")
Expect(err).ToNot(HaveOccurred())
Expect(repo.GetName()).To(Equal("test"))
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
err = repo.Write(ctx, tmpdir, false, false)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
Expect(repo.GetType()).To(Equal("disk"))
fakeroot, err := ioutil.TempDir("", "fakeroot")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(fakeroot) // clean up
repo2, err := NewLuetSystemRepositoryFromYaml([]byte(`
name: "test"
type: "disk"
enable: true
urls:
- "`+tmpdir+`"
`), pkg.NewInMemoryDatabase(false))
Expect(err).ToNot(HaveOccurred())
inst := NewLuetInstaller(LuetInstallerOptions{
Concurrency: 1, Context: ctx,
Relaxed: true,
PackageRepositories: types.LuetRepositories{*repo2.LuetRepository},
})
Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
Expect(repo.GetType()).To(Equal("disk"))
bolt, err := ioutil.TempDir("", "db")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(bolt) // clean up
systemDB := pkg.NewBoltDatabase(filepath.Join(bolt, "db.db"))
system := &System{Database: systemDB, Target: fakeroot}
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(err).ToNot(HaveOccurred())
Expect(len(system.Database.GetPackages())).To(Equal(1))
p, err := system.Database.GetPackage(system.Database.GetPackages()[0])
Expect(err).ToNot(HaveOccurred())
Expect(p.GetName()).To(Equal("b"))
files, err := systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
Expect(files).To(Equal([]string{"test5", "test6"}))
Expect(err).ToNot(HaveOccurred())
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.1"}}, system)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test1"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test2"))).To(BeTrue())
err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}}, system)
Expect(err).ToNot(HaveOccurred())
files, err = systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"})
Expect(files).To(Equal([]string{"test3", "test4"}))
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test3"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test4"))).To(BeTrue())
err = inst.Upgrade(system)
Expect(err).ToNot(HaveOccurred())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test1"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test2"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test3"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test4"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
})
It("Handles package drops", func() {
//repo:=NewLuetSystemRepository()

View File

@@ -23,6 +23,7 @@ import (
type Client interface {
DownloadArtifact(*artifact.PackageArtifact) (*artifact.PackageArtifact, error)
DownloadFile(string) (string, error)
CacheGet(*artifact.PackageArtifact) (*artifact.PackageArtifact, error)
}
type Repositories []*LuetSystemRepository
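With CacheGet part of the interface, the installer can probe any client's cache before deciding whether a download pass is needed at all (see the download() hunk above). A stub satisfying the extended interface might look like this; nullClient is a hypothetical type, for illustration only:

// Hypothetical stub implementing the extended Client interface.
type nullClient struct{}

func (nullClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
	return nil, errors.New("not implemented")
}

func (nullClient) DownloadFile(name string) (string, error) {
	return "", errors.New("not implemented")
}

func (nullClient) CacheGet(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
	return nil, errors.New("cache miss") // always report a miss
}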

View File

@@ -1,6 +1,8 @@
package installer
import (
"os"
"path/filepath"
"sync"
"github.com/hashicorp/go-multierror"
@@ -12,9 +14,10 @@ import (
)
type System struct {
Database pkg.PackageDatabase
Target string
fileIndex map[string]pkg.Package
Database pkg.PackageDatabase
Target string
fileIndex map[string]pkg.Package
fileIndexPackages map[string]pkg.Package
sync.Mutex
}
@@ -22,6 +25,21 @@ func (s *System) World() (pkg.Packages, error) {
return s.Database.World(), nil
}
func (s *System) OSCheck() (notFound pkg.Packages) {
s.buildFileIndex()
s.Lock()
defer s.Unlock()
for f, p := range s.fileIndex {
if _, err := os.Lstat(filepath.Join(s.Target, f)); err != nil {
if _, err := s.Database.FindPackage(p); err == nil {
notFound = append(notFound, p)
}
}
}
notFound = notFound.Unique()
return
}
func (s *System) ExecuteFinalizers(ctx *types.Context, packs []pkg.Package) error {
var errs error
executedFinalizer := map[string]bool{}
@@ -56,15 +74,28 @@ func (s *System) ExecuteFinalizers(ctx *types.Context, packs []pkg.Package) erro
}
func (s *System) buildFileIndex() {
// XXX: Replace with cache
s.Lock()
defer s.Unlock()
// Check if cache is empty or if it got modified
if s.fileIndex == nil { //|| len(s.Database.GetPackages()) != len(s.fileIndex) {
if s.fileIndex == nil {
s.fileIndex = make(map[string]pkg.Package)
}
if s.fileIndexPackages == nil {
s.fileIndexPackages = make(map[string]pkg.Package)
}
// Check if cache is empty or if it got modified
if len(s.Database.GetPackages()) != len(s.fileIndexPackages) {
s.fileIndexPackages = make(map[string]pkg.Package)
for _, p := range s.Database.World() {
files, _ := s.Database.GetPackageFiles(p)
for _, f := range files {
s.fileIndex[f] = p
if _, ok := s.fileIndexPackages[p.GetPackageName()]; !ok {
files, _ := s.Database.GetPackageFiles(p)
for _, f := range files {
s.fileIndex[f] = p
}
s.fileIndexPackages[p.GetPackageName()] = p
}
}
}
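OSCheck builds the file index once, stats every indexed file under Target, and reports the owning package of anything missing, deduplicated. A minimal usage sketch, assuming the installer.System type above; db stands for any pkg.PackageDatabase:

// Sketch: report installed packages with missing files under the target root.
system := &installer.System{Database: db, Target: "/"}
for _, p := range system.OSCheck() {
	fmt.Println("incomplete package:", p.HumanReadableString())
}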

View File

@@ -19,6 +19,10 @@ import (
// . "github.com/mudler/luet/pkg/installer"
"io/ioutil"
"os"
"path/filepath"
. "github.com/mudler/luet/pkg/installer"
pkg "github.com/mudler/luet/pkg/package"
@@ -66,5 +70,18 @@ var _ = Describe("System", func() {
Expect(err).ToNot(HaveOccurred())
Expect(p).To(Equal(b))
})
It("detect missing files", func() {
dir, err := ioutil.TempDir("", "test")
Expect(err).ToNot(HaveOccurred())
defer os.RemoveAll(dir)
s.Target = dir
notfound := s.OSCheck()
Expect(len(notfound)).To(Equal(2))
ioutil.WriteFile(filepath.Join(dir, "f"), []byte{}, os.ModePerm)
ioutil.WriteFile(filepath.Join(dir, "foo"), []byte{}, os.ModePerm)
notfound = s.OSCheck()
Expect(len(notfound)).To(Equal(1))
})
})
})

View File

@@ -174,6 +174,7 @@ func (r *CompilerRecipe) Load(path string) error {
 				filepath.Dir(currentpath))
 		}
 		pack.Requires(packbuild.GetRequires())
+		pack.Conflicts(packbuild.GetConflicts())
 	}

View File

@@ -6,4 +6,8 @@ requires:
   category: "test"
   version: ">=0"
-requires_final_images: true
+requires_final_images: true
+steps:
+- |
+  /bin/sh -c "if [ -e /usr/bin/generate.sh ]; then ls -liah /usr/bin && exit 1; fi"

View File

@@ -6,8 +6,9 @@ requires:
 prelude:
 - echo foo > /test
 - echo bar > /test2
+- cp -rf generate.sh /usr/bin/
 steps:
 - echo artifact5 > /newc
 - echo artifact6 > /newnewc
 - chmod +x generate.sh
-- ./generate.sh
+- ./generate.sh

View File

@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6

View File

@@ -0,0 +1,4 @@
category: "test"
name: "a"
version: "1.2"

View File

@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4

View File

@@ -0,0 +1,3 @@
category: "test"
name: "a"
version: "1.1"

View File

@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4

View File

@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.1"

View File

@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6

View File

@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"

View File

@@ -0,0 +1,5 @@
image: "alpine"
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- echo artifact5 > /test2

View File

@@ -0,0 +1,4 @@
category: "test"
name: "a"
version: "1.2"

View File

@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4

View File

@@ -0,0 +1,3 @@
category: "test"
name: "a"
version: "1.1"

View File

@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4

View File

@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.1"

View File

@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6

View File

@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"

View File

@@ -0,0 +1,3 @@
image: "alpine"
steps:
- echo artifact5 > /test1

View File

@@ -0,0 +1,4 @@
category: "test"
name: "c"
version: "1.2"

View File

@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact3 > /test1
- echo artifact4 > /test2

View File

@@ -0,0 +1,3 @@
category: "test"
name: "c"
version: "1.1"

View File

@@ -0,0 +1,10 @@
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo c > /c
- echo c > /cd
requires:
- category: "test"
name: "a"
version: ">=1.0"

View File

@@ -0,0 +1,9 @@
category: "test"
name: "c"
version: "1.0"
# Boom?
requires:
- category: "test"
name: "a"
version: ">=0.1"

View File

@@ -0,0 +1,11 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4
requires:
- category: "test"
name: "b"
version: "1.0"

View File

@@ -0,0 +1,8 @@
category: "test"
name: "a"
version: "1.1"
requires:
- category: "test"
name: "b"
version: ">=0.1"

View File

@@ -0,0 +1,11 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact3 > /testaa
- echo artifact4 > /testaa2
requires:
- category: "test"
name: "b"
version: "1.0"

View File

@@ -0,0 +1,3 @@
category: "test"
name: "a"
version: "1.0"

View File

@@ -0,0 +1,11 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact3 > /testlatest
- echo artifact4 > /testlatest2
requires:
- category: "test"
name: "b"
version: "1.0"

View File

@@ -0,0 +1,3 @@
category: "test"
name: "a"
version: "1.2"

View File

@@ -0,0 +1,9 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /newc
- echo artifact6 > /newnewc
- chmod +x generate.sh
- ./generate.sh

View File

@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.1"

View File

@@ -0,0 +1 @@
echo generated > /sonewc

View File

@@ -0,0 +1,9 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- chmod +x generate.sh
- ./generate.sh

View File

@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"

View File

@@ -0,0 +1 @@
echo generated > /artifact42

View File

@@ -0,0 +1,9 @@
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo z > /z
requires:
- category: "test"
name: "a"
version: ">=1.0"

View File

@@ -0,0 +1,3 @@
category: "test"
name: "z"
version: "1.0"

View File

@@ -85,17 +85,17 @@ EOF
 }
 testInstall() {
-    luet install -y --config $tmpdir/luet.yaml test/b@1.0
+    luet install -y --relax --config $tmpdir/luet.yaml test/b@1.0
     installst=$?
     assertEquals 'install test successfully' "$installst" "0"
     assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/test5' ]"
-    luet install -y --config $tmpdir/luet.yaml test/a@1.0
+    luet install -y --relax --config $tmpdir/luet.yaml test/a@1.0
     assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
     installst=$?
     assertEquals 'install test successfully' "$installst" "0"
-    luet install -y --config $tmpdir/luet.yaml test/c@1.0
+    luet install -y --relax --config $tmpdir/luet.yaml test/c@1.0
     installst=$?
     assertEquals 'install test successfully' "$installst" "0"
     assertTrue 'package installed C' "[ -e '$tmpdir/testrootfs/c' ]"

View File

@@ -0,0 +1,131 @@
#!/bin/bash
export LUET_NOLOCK=true
oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}
oneTimeTearDown() {
rm -rf "$tmpdir"
}
testBuild() {
mkdir $tmpdir/testbuild
luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/b@1.0
buildst=$?
assertTrue 'create package B 1.0' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.gz' ]"
assertEquals 'builds successfully' "$buildst" "0"
luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/b@1.1
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package B 1.1' "[ -e '$tmpdir/testbuild/b-test-1.1.package.tar.gz' ]"
luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/a@1.0
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package A 1.0' "[ -e '$tmpdir/testbuild/a-test-1.0.package.tar.gz' ]"
luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/a@1.1
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package A 1.1' "[ -e '$tmpdir/testbuild/a-test-1.1.package.tar.gz' ]"
luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/a@1.2
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package A 1.2' "[ -e '$tmpdir/testbuild/a-test-1.2.package.tar.gz' ]"
luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/z@1.0
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package Z 1.0' "[ -e '$tmpdir/testbuild/z-test-1.0.package.tar.gz' ]"
luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/c@1.0
buildst=$?
assertEquals 'builds successfully' "$buildst" "0"
assertTrue 'create package C 1.0' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.gz' ]"
}
testRepo() {
assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
luet create-repo --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" \
--output $tmpdir/testbuild \
--packages $tmpdir/testbuild \
--name "test" \
--descr "Test Repo" \
--urls $tmpdir/testrootfs \
--type http
createst=$?
assertEquals 'create repo successfully' "$createst" "0"
assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}
testConfig() {
mkdir $tmpdir/testrootfs
cat <<EOF > $tmpdir/luet.yaml
general:
debug: true
system:
rootfs: $tmpdir/testrootfs
database_path: "/"
database_engine: "boltdb"
config_from_host: true
repositories:
- name: "main"
type: "disk"
enable: true
urls:
- "$tmpdir/testbuild"
EOF
luet config --config $tmpdir/luet.yaml
res=$?
assertEquals 'config test successfully' "$res" "0"
}
testInstall() {
luet install -y --relax --config $tmpdir/luet.yaml test/b@1.0
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/test5' ]"
luet install -y --relax --config $tmpdir/luet.yaml test/z@1.0
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed Z' "[ -e '$tmpdir/testrootfs/z' ]"
luet install -y --relax --config $tmpdir/luet.yaml test/a@1.0
assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
installst=$?
assertEquals 'install test successfully' "$installst" "0"
luet install -y --relax --config $tmpdir/luet.yaml test/c@1.0
installst=$?
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package installed C' "[ -e '$tmpdir/testrootfs/c' ]"
}
testUpgrade() {
rm -rf $tmpdir/testrootfs/z
assertTrue 'package Z corrupted' "[ ! -e '$tmpdir/testrootfs/z' ]"
upgrade=$(luet --config $tmpdir/luet.yaml upgrade --oscheck -y)
installst=$?
echo "$upgrade"
assertEquals 'install test successfully' "$installst" "0"
assertTrue 'package uninstalled B' "[ ! -e '$tmpdir/testrootfs/test5' ]"
assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/newc' ]"
assertTrue 'package uninstalled A' "[ ! -e '$tmpdir/testrootfs/testaa' ]"
assertTrue 'package Z restored' "[ -e '$tmpdir/testrootfs/z' ]"
assertTrue 'package installed new A' "[ -e '$tmpdir/testrootfs/testlatest' ]"
assertNotContains 'does not contain test/c@1.0' "$upgrade" 'test/c-1.0'
assertNotContains 'does not attempt to download test/c@1.0' "$upgrade" 'test/c-1.0 downloaded'
}
# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
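The script above corrupts an installed package (it removes test/z's file), runs luet upgrade --oscheck, and asserts that the missing file is restored while test/c is neither listed nor re-downloaded. A rough sketch of that repair flow under assumed names (only OSCheck corresponds to a method shown earlier in this changeset; Reinstall and the interfaces are hypothetical):

package example

// pkgRef, system and reinstaller are assumptions made for this sketch,
// not luet's actual API.
type pkgRef interface{ GetPackageName() string }

type system interface{ OSCheck() []pkgRef }

type reinstaller interface{ Reinstall(pkgs ...pkgRef) error }

// upgradeWithOSCheck runs an upgrade, then reinstalls any package whose
// files turn out to be missing from the rootfs.
func upgradeWithOSCheck(s system, r reinstaller, upgrade func() error) error {
	if err := upgrade(); err != nil {
		return err
	}
	if broken := s.OSCheck(); len(broken) > 0 {
		return r.Reinstall(broken...)
	}
	return nil
}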

View File

@@ -1 +0,0 @@
This code provides helper functions for dealing with archive files.

File diff suppressed because it is too large

View File

@@ -1,100 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) {
if format == OverlayWhiteoutFormat {
if inUserNS {
return nil, errors.New("specifying OverlayWhiteoutFormat is not allowed in userns")
}
return overlayWhiteoutConverter{}, nil
}
return nil, nil
}
type overlayWhiteoutConverter struct {
}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
dir, filename := filepath.Split(hdr.Name)
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
hdr.Mode = 0600
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
if fi.Mode()&os.ModeDir != 0 {
// convert opaque dirs to AUFS format by writing an empty file with the prefix
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
if err != nil {
return nil, err
}
if len(opaque) == 1 && opaque[0] == 'y' {
if hdr.Xattrs != nil {
delete(hdr.Xattrs, "trusted.overlay.opaque")
}
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
wo = &tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
}
}
return
}
func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
if base == WhiteoutOpaqueDir {
err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
if err != nil {
return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir)
}
// don't write the file itself
return false, err
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
}
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
return true, nil
}
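The deleted converter translates between overlayfs and AUFS whiteout conventions: a 0:0 character device becomes a regular file with a ".wh." prefix, and an opaque directory is marked with a ".wh..wh..opq" entry. As a small illustration of the naming rule ConvertWrite applies (a sketch, not code from this repository):

package example

import "path/filepath"

// whiteoutPrefix mirrors the WhiteoutPrefix constant referenced in the
// deleted code above: ".wh.<name>" marks <name> as deleted in a layer.
const whiteoutPrefix = ".wh."

// whiteoutFor returns the AUFS whiteout path that hides the given file,
// the same rename ConvertWrite performs on overlay character devices.
func whiteoutFor(path string) string {
	dir, name := filepath.Split(path)
	return filepath.Join(dir, whiteoutPrefix+name)
}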

View File

@@ -1,7 +0,0 @@
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) {
return nil, nil
}

View File

@@ -1,115 +0,0 @@
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"errors"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/containerd/containerd/sys"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return srcPath
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) string {
return p // already unix-style
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
// Currently go does not fill in the major/minors
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
}
}
return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
inode = s.Ino
}
return
}
func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
}
return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= unix.S_IFBLK
case tar.TypeChar:
mode |= unix.S_IFCHR
case tar.TypeFifo:
mode |= unix.S_IFIFO
}
err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
if errors.Is(err, syscall.EPERM) && sys.RunningInUserNS() {
// In most cases, cannot create a device if running in user namespace
err = nil
}
return err
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
return nil
}

View File

@@ -1,67 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"os"
"path/filepath"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/longpath"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return longpath.AddPrefix(srcPath)
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) string {
return filepath.ToSlash(p)
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
// perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows
permPart |= 0111
permPart &= 0755
return noPermPart | permPart
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
// do nothing. no notion of Rdev, Nlink in stat on Windows
return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
// do nothing. no notion of Inode in stat on Windows
return
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
// no notion of file ownership mapping yet on Windows
return idtools.Identity{UID: 0, GID: 0}, nil
}

View File

@@ -1,445 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// ChangeType represents the change type.
type ChangeType int
const (
// ChangeModify represents the modify operation.
ChangeModify = iota
// ChangeAdd represents the add operation.
ChangeAdd
// ChangeDelete represents the delete operation.
ChangeDelete
)
func (c ChangeType) String() string {
switch c {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
}
return ""
}
// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int { return len(c) }
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// Gnu tar doesn't have sub-second mtime precision. The go tar
// writer (1.10+) does when using PAX format, but we round times to seconds
// to ensure archives have the same hashes for backwards compatibility.
// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
//
// Non-sub-second is problematic when we apply changes via tar
// files. We handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
return a.Equal(b) ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
func sameFsTimeSpec(a, b syscall.Timespec) bool {
return a.Sec == b.Sec &&
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
)
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
if sc != nil {
if skip, err := sc(path); skip {
return err
}
}
change := Change{
Path: path,
}
deletedFile, err := dc(rw, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
if deletedFile != "" {
change.Path = deletedFile
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
}
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
parent := filepath.Dir(path)
if _, ok := changedDirs[parent]; !ok && parent != "/" {
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
changedDirs[parent] = struct{}{}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
}
// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
stat *system.StatT
children map[string]*FileInfo
capability []byte
added bool
}
// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
parent := info
if path == string(os.PathSeparator) {
return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
for _, elem := range pathElements {
if elem != "" {
child := parent.children[elem]
if child == nil {
return nil
}
parent = child
}
}
return parent
}
func (info *FileInfo) path() string {
if info.parent == nil {
// As this runs on the daemon side, file paths are OS specific.
return string(os.PathSeparator)
}
return filepath.Join(info.parent.path(), info.name)
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
sizeAtEntry := len(*changes)
if oldInfo == nil {
// add
change := Change{
Path: info.path(),
Kind: ChangeAdd,
}
*changes = append(*changes, change)
info.added = true
}
// We make a copy so we can modify it to detect additions
// also, we only recurse on the old dir if the new info is a directory
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
}
for name, newChild := range info.children {
oldChild := oldChildren[name]
if oldChild != nil {
// change?
oldStat := oldChild.stat
newStat := newChild.stat
// Note: We can't compare inode or ctime or blocksize here, because these change
// when copying a file into a container. However, that is not generally a problem
// because any content change will change mtime, and any status change should
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
if statDifferent(oldStat, newStat) ||
!bytes.Equal(oldChild.capability, newChild.capability) {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
}
*changes = append(*changes, change)
newChild.added = true
}
// Remove from copy so we can detect deletions
delete(oldChildren, name)
}
newChild.addChanges(oldChild, changes)
}
for _, oldChild := range oldChildren {
// delete
change := Change{
Path: oldChild.path(),
Kind: ChangeDelete,
}
*changes = append(*changes, change)
}
// If there were changes inside this directory, we need to add it, even if the directory
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
// As this runs on the daemon side, file paths are OS specific.
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
change := Change{
Path: info.path(),
Kind: ChangeModify,
}
// Let's insert the directory entry before the recently added entries located inside this dir
*changes = append(*changes, change) // just to resize the slice, will be overwritten
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
}
// Changes add changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
info.addChanges(oldInfo, &changes)
return changes
}
func newRootFileInfo() *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
root := &FileInfo{
name: string(os.PathSeparator),
children: make(map[string]*FileInfo),
}
return root
}
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
var (
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
emptyDir, err := ioutil.TempDir("", "empty")
if err != nil {
return nil, err
}
defer os.Remove(emptyDir)
oldDir = emptyDir
}
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
if err != nil {
return nil, err
}
return newRoot.Changes(oldRoot), nil
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
var (
size int64
sf = make(map[uint64]struct{})
)
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, err := os.Lstat(file)
if err != nil {
logrus.Errorf("Can not stat %q: %s", file, err)
continue
}
if fileInfo != nil && !fileInfo.IsDir() {
if hasHardlinks(fileInfo) {
inode := getIno(fileInfo)
if _, ok := sf[inode]; !ok {
size += fileInfo.Size()
sf[inode] = struct{}{}
}
} else {
size += fileInfo.Size()
}
}
}
}
return size
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
reader, writer := io.Pipe()
go func() {
ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
sort.Sort(changesByPath(changes))
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: timestamp,
AccessTime: timestamp,
ChangeTime: timestamp,
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
logrus.Debugf("Can't write whiteout header: %s", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", path, err)
}
}
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close layer: %s", err)
}
if err := writer.Close(); err != nil {
logrus.Debugf("failed close Changes writer: %s", err)
}
}()
return reader, nil
}
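changes.go carried the public layer-diff entry points of the vendored archive package; the same API remains available upstream in github.com/docker/docker/pkg/archive. For reference, a minimal use of ChangesDirs exactly as declared in the deleted source (the directory paths are placeholders):

package example

import (
	"fmt"

	"github.com/docker/docker/pkg/archive"
)

// diffDirs lists the Add/Modify/Delete changes between two directory
// trees, using the ChangesDirs signature shown in the deleted file.
func diffDirs(newDir, oldDir string) error {
	changes, err := archive.ChangesDirs(newDir, oldDir)
	if err != nil {
		return err
	}
	for _, c := range changes {
		fmt.Println(c.String()) // e.g. "A /etc/new-file"
	}
	return nil
}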

View File

@@ -1,286 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"bytes"
"fmt"
"os"
"path/filepath"
"sort"
"syscall"
"unsafe"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
dir1 string
dir2 string
root1 *FileInfo
root2 *FileInfo
}
// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generating a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
w := &walker{
dir1: dir1,
dir2: dir2,
root1: newRootFileInfo(),
root2: newRootFileInfo(),
}
i1, err := os.Lstat(w.dir1)
if err != nil {
return nil, nil, err
}
i2, err := os.Lstat(w.dir2)
if err != nil {
return nil, nil, err
}
if err := w.walk("/", i1, i2); err != nil {
return nil, nil, err
}
return w.root1, w.root2, nil
}
// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
if fi == nil {
return nil
}
parent := root.LookUp(filepath.Dir(path))
if parent == nil {
return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
}
info := &FileInfo{
name: filepath.Base(path),
children: make(map[string]*FileInfo),
parent: parent,
}
cpath := filepath.Join(dir, path)
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
if err != nil {
return err
}
info.stat = stat
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
parent.children[info.name] = info
return nil
}
// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
// Register these nodes with the return trees, unless we're still at the
// (already-created) roots:
if path != "/" {
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
return err
}
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
return err
}
}
is1Dir := i1 != nil && i1.IsDir()
is2Dir := i2 != nil && i2.IsDir()
sameDevice := false
if i1 != nil && i2 != nil {
si1 := i1.Sys().(*syscall.Stat_t)
si2 := i2.Sys().(*syscall.Stat_t)
if si1.Dev == si2.Dev {
sameDevice = true
}
}
// If these files are both non-existent, or leaves (non-dirs), we are done.
if !is1Dir && !is2Dir {
return nil
}
// Fetch the names of all the files contained in both directories being walked:
var names1, names2 []nameIno
if is1Dir {
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
if err != nil {
return err
}
}
if is2Dir {
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
if err != nil {
return err
}
}
// We have lists of the files contained in both parallel directories, sorted
// in the same order. Walk them in parallel, generating a unique merged list
// of all items present in either or both directories.
var names []string
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
ni1 := names1[ix1]
ni2 := names2[ix2]
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
case -1: // ni1 < ni2 -- advance ni1
// we will not encounter ni1 in names2
names = append(names, ni1.name)
ix1++
case 0: // ni1 == ni2
if ni1.ino != ni2.ino || !sameDevice {
names = append(names, ni1.name)
}
ix1++
ix2++
case 1: // ni1 > ni2 -- advance ni2
// we will not encounter ni2 in names1
names = append(names, ni2.name)
ix2++
}
}
for ix1 < len(names1) {
names = append(names, names1[ix1].name)
ix1++
}
for ix2 < len(names2) {
names = append(names, names2[ix2].name)
ix2++
}
// For each of the names present in either or both of the directories being
// iterated, stat the name under each root, and recurse the pair of them:
for _, name := range names {
fname := filepath.Join(path, name)
var cInfo1, cInfo2 os.FileInfo
if is1Dir {
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if is2Dir {
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
return err
}
}
return nil
}
// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
name string
ino uint64
}
type nameInoSlice []nameIno
func (s nameInoSlice) Len() int { return len(s) }
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
var (
size = 100
buf = make([]byte, 4096)
nbuf int
bufp int
nb int
)
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
defer f.Close()
names = make([]nameIno, 0, size) // Empty with room to grow.
for {
// Refill the buffer if necessary
if bufp >= nbuf {
bufp = 0
nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
if nbuf < 0 {
nbuf = 0
}
if err != nil {
return nil, os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
break // EOF
}
}
// Drain the buffer
nb, names = parseDirent(buf[bufp:nbuf], names)
bufp += nb
}
sl := nameInoSlice(names)
sort.Sort(sl)
return sl, nil
}
// parseDirent is a minor modification of unix.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
origlen := len(buf)
for len(buf) > 0 {
dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
buf = buf[dirent.Reclen:]
if dirent.Ino == 0 { // File absent in directory.
continue
}
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
var name = string(bytes[0:clen(bytes[:])])
if name == "." || name == ".." { // Useless names
continue
}
names = append(names, nameIno{name, dirent.Ino})
}
return origlen - len(buf), names
}
func clen(n []byte) int {
for i := 0; i < len(n); i++ {
if n[i] == 0 {
return i
}
}
return len(n)
}
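The deleted walker avoids lstat(2) calls by working from the inodes that getdents(2) already returns: when a name appears in both trees with the same inode on the same device, the whole subtree is pruned. The core merge of the two sorted name lists can be sketched in isolation (illustrative code, not part of this repository):

package example

// entry pairs a directory entry name with its inode, as readdirnames
// does above.
type entry struct {
	name string
	ino  uint64
}

// mergeNames walks two sorted entry lists in parallel and returns the
// union of names, skipping entries whose inodes match on the same device,
// which is the pruning trick the deleted walker uses.
func mergeNames(a, b []entry, sameDevice bool) []string {
	var out []string
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i].name < b[j].name:
			out = append(out, a[i].name)
			i++
		case a[i].name > b[j].name:
			out = append(out, b[j].name)
			j++
		default: // same name in both trees
			if a[i].ino != b[j].ino || !sameDevice {
				out = append(out, a[i].name)
			}
			i++
			j++
		}
	}
	for ; i < len(a); i++ {
		out = append(out, a[i].name)
	}
	for ; j < len(b); j++ {
		out = append(out, b[j].name)
	}
	return out
}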

View File

@@ -1,97 +0,0 @@
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/system"
)
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
var (
oldRoot, newRoot *FileInfo
err1, err2 error
errs = make(chan error, 2)
)
go func() {
oldRoot, err1 = collectFileInfo(oldDir)
errs <- err1
}()
go func() {
newRoot, err2 = collectFileInfo(newDir)
errs <- err2
}()
// block until both routines have returned
for i := 0; i < 2; i++ {
if err := <-errs; err != nil {
return nil, nil, err
}
}
return oldRoot, newRoot, nil
}
func collectFileInfo(sourceDir string) (*FileInfo, error) {
root := newRootFileInfo()
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
relPath = filepath.Join(string(os.PathSeparator), relPath)
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
// Temporary workaround. If the returned path starts with two backslashes,
// trim it down to a single backslash. Only relevant on Windows.
if runtime.GOOS == "windows" {
if strings.HasPrefix(relPath, `\\`) {
relPath = relPath[1:]
}
}
if relPath == string(os.PathSeparator) {
return nil
}
parent := root.LookUp(filepath.Dir(relPath))
if parent == nil {
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
}
info := &FileInfo{
name: filepath.Base(relPath),
children: make(map[string]*FileInfo),
parent: parent,
}
s, err := system.Lstat(path)
if err != nil {
return err
}
info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
return nil
})
if err != nil {
return nil, err
}
return root, nil
}

View File

@@ -1,43 +0,0 @@
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
import (
"os"
"syscall"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, its not a good measure of change
if oldStat.Mode() != newStat.Mode() ||
oldStat.UID() != newStat.UID() ||
oldStat.GID() != newStat.GID() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size or modification time for dirs, its not a good
// measure of change. See https://github.com/moby/moby/issues/9874
// for a description of the issue with modification time, and
// https://github.com/moby/moby/pull/11422 for the change.
// (Note that in the Windows implementation of this function,
// modification time IS taken as a change). See
// https://github.com/moby/moby/pull/37982 for more information.
(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
}
func getIno(fi os.FileInfo) uint64 {
return fi.Sys().(*syscall.Stat_t).Ino
}
func hasHardlinks(fi os.FileInfo) bool {
return fi.Sys().(*syscall.Stat_t).Nlink > 1
}

View File

@@ -1,34 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"os"
"github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Note there is slight difference between the Linux and Windows
// implementations here. Due to https://github.com/moby/moby/issues/9874,
// and the fix at https://github.com/moby/moby/pull/11422, Linux does not
// consider a change to the directory time as a change. Windows on NTFS
// does. See https://github.com/moby/moby/pull/37982 for more information.
if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) ||
oldStat.Mode() != newStat.Mode() ||
oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode().IsDir()
}
func getIno(fi os.FileInfo) (inode uint64) {
return
}
func hasHardlinks(fi os.FileInfo) bool {
return false
}

View File

@@ -1,480 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// Errors used or returned by this file.
var (
ErrNotDirectory = errors.New("not a directory")
ErrDirNotExists = errors.New("no such directory")
ErrCannotCopyDir = errors.New("cannot copy directory")
ErrInvalidCopySource = errors.New("invalid copy source content")
)
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in the separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
// Ensure paths are in platform semantics
cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
originalPath = strings.Replace(originalPath, "/", string(sep), -1)
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
if !hasTrailingPathSeparator(cleanedPath, sep) {
// Add a separator if it doesn't already end with one (a cleaned
// path would only end in a separator if it is the root).
cleanedPath += string(sep)
}
cleanedPath += "."
}
if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
cleanedPath += string(sep)
}
return cleanedPath
}
// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string, sep byte) bool {
return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
}
// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string, sep byte) bool {
return len(path) > 0 && path[len(path)-1] == sep
}
// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
return filepath.Base(path) == "."
}
// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
cleanedPath := filepath.Clean(filepath.FromSlash(path))
if specifiesCurrentDir(path) {
cleanedPath += string(os.PathSeparator) + "."
}
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
sourcePath = normalizePath(sourcePath)
if _, err = os.Lstat(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
return
}
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
opts := TarResourceRebaseOpts(sourceBase, rebaseName)
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
return TarWithOptions(sourceDir, opts)
}
// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase
// parameters to be sent to TarWithOptions (the TarOptions struct)
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
filter := []string{sourceBase}
return &TarOptions{
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
}
}
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
Path string
Exists bool
IsDir bool
RebaseName string
}
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
// normalize the file path and then evaluate the symbol link
// we will use the target file instead of the symbol link if
// followLink is set
path = normalizePath(path)
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
if err != nil {
return CopyInfo{}, err
}
stat, err := os.Lstat(resolvedPath)
if err != nil {
return CopyInfo{}, err
}
return CopyInfo{
Path: resolvedPath,
Exists: true,
IsDir: stat.IsDir(),
RebaseName: rebaseName,
}, nil
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
path = normalizePath(path)
originalPath := path
stat, err := os.Lstat(path)
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
// The path exists and is not a symlink.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// While the path is a symlink.
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
if n > maxSymlinkIter {
// Don't follow symlinks more than this arbitrary number of times.
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
}
// The path is a symbolic link. We need to evaluate it so that the
// destination of the copy operation is the link target and not the
// link itself. This is notably different than CopyInfoSourcePath which
// only evaluates symlinks before the last appearing path separator.
// Also note that it is okay if the last path element is a broken
// symlink as the copy operation should create the target.
var linkTarget string
linkTarget, err = os.Readlink(path)
if err != nil {
return CopyInfo{}, err
}
if !system.IsAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := SplitPathDirEntry(path)
linkTarget = filepath.Join(dstParent, linkTarget)
}
path = linkTarget
stat, err = os.Lstat(path)
}
if err != nil {
// It's okay if the destination path doesn't exist. We can still
// continue the copy operation if the parent directory exists.
if !os.IsNotExist(err) {
return CopyInfo{}, err
}
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(path)
parentDirStat, err := os.Stat(dstParent)
if err != nil {
return CopyInfo{}, err
}
if !parentDirStat.IsDir() {
return CopyInfo{}, ErrNotDirectory
}
return CopyInfo{Path: path}, nil
}
// The path exists after resolving symlinks.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
// Ensure in platform semantics
srcInfo.Path = normalizePath(srcInfo.Path)
dstInfo.Path = normalizePath(dstInfo.Path)
// Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
_, srcBase := SplitPathDirEntry(srcInfo.Path)
switch {
case dstInfo.Exists && dstInfo.IsDir:
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
// you cannot copy a directory to an existing file location.
return "", nil, ErrCannotCopyDir
case dstInfo.Exists:
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
// the destination path instead, and when it is, the directory that is
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case assertsDirectory(dstInfo.Path, os.PathSeparator):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
// error condition since you cannot create a directory from a file
// source.
return "", nil, ErrDirNotExists
default:
// The last remaining case is when the destination does not exist, is
// not asserted to be a directory, and the source content is not an
// archive of a directory. It this case, the destination file will need
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
if oldBase == string(os.PathSeparator) {
// If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator
// that all paths will start with.
oldBase = ""
}
rebased, w := io.Pipe()
go func() {
srcTar := tar.NewReader(srcContent)
rebasedTar := tar.NewWriter(w)
for {
hdr, err := srcTar.Next()
if err == io.EOF {
// Signals end of archive.
rebasedTar.Close()
w.Close()
return
}
if err != nil {
w.CloseWithError(err)
return
}
// srcContent tar stream, as served by TarWithOptions(), is
// definitely in PAX format, but tar.Next() mistakenly guesses it
// as USTAR, which creates a problem: if the newBase is >100
// characters long, WriteHeader() returns an error like
// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
//
// To fix, set the format to PAX here. See docker/for-linux issue #484.
hdr.Format = tar.FormatPAX
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
if hdr.Typeflag == tar.TypeLink {
hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
}
if err = rebasedTar.WriteHeader(hdr); err != nil {
w.CloseWithError(err)
return
}
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
w.CloseWithError(err)
return
}
}
}()
return rebased
}
// TODO @gupta-ak. These might have to be changed in the future to be
// continuity driver aware as well to support LCOW.
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
var (
srcInfo CopyInfo
err error
)
// Ensure in platform semantics
srcPath = normalizePath(srcPath)
dstPath = normalizePath(dstPath)
// Clean the source and destination paths.
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
return err
}
content, err := TarResource(srcInfo)
if err != nil {
return err
}
defer content.Close()
return CopyTo(content, srcInfo, dstPath)
}
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
// The destination path need not exist, but CopyInfoDestinationPath will
// ensure that at least the parent directory exists.
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
if err != nil {
return err
}
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
if err != nil {
return err
}
defer copyArchive.Close()
options := &TarOptions{
NoLchown: true,
NoOverwriteDirNonDir: true,
}
return Untar(copyArchive, dstDir, options)
}
// ResolveHostSourcePath decides real path need to be copied with parameters such as
// whether to follow symbol link or not, if followLink is true, resolvedPath will return
// link target of any symbol link file, else it will only resolve symlink of directory
// but return symbol link file itself without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
if followLink {
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
} else {
dirPath, basePath := filepath.Split(path)
// if not follow symbol link, then resolve symbol link of parent dir
var resolvedDirPath string
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
if err != nil {
return
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
if hasTrailingPathSeparator(path, os.PathSeparator) &&
filepath.Base(path) != filepath.Base(resolvedPath) {
rebaseName = filepath.Base(path)
}
}
return resolvedPath, rebaseName, nil
}
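The follow/no-follow distinction builds on the standard-library primitives used above; a sketch assuming a writable temp dir (errors elided for brevity):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, _ := os.MkdirTemp("", "resolve")
	defer os.RemoveAll(dir)

	target := filepath.Join(dir, "target")
	link := filepath.Join(dir, "link")
	_ = os.WriteFile(target, []byte("x"), 0o644)
	_ = os.Symlink(target, link)

	// followLink == true: the full path is resolved, including the final
	// component, so the symlink disappears from the result.
	resolved, _ := filepath.EvalSymlinks(link)
	fmt.Println(resolved) // .../target

	// followLink == false: only the parent directory is resolved; the base
	// name is re-joined unresolved, as in the else branch above.
	parent, _ := filepath.EvalSymlinks(filepath.Dir(link))
	fmt.Println(parent + string(filepath.Separator) + filepath.Base(link)) // .../link
}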
// GetRebaseName normalizes and compares path and resolvedPath, and returns
// the completed resolved path and the rebased file name.
func GetRebaseName(path, resolvedPath string) (string, string) {
// resolvedPath will have been cleaned (no trailing path separators or dot), so
// we can manually re-append them below where needed.
var rebaseName string
if specifiesCurrentDir(path) &&
!specifiesCurrentDir(resolvedPath) {
resolvedPath += string(filepath.Separator) + "."
}
if hasTrailingPathSeparator(path, os.PathSeparator) &&
!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
resolvedPath += string(filepath.Separator)
}
if filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
return resolvedPath, rebaseName
}


@@ -1,11 +0,0 @@
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.ToSlash(path)
}


@@ -1,9 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.FromSlash(path)
}


@@ -1,260 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// UnpackLayer unpacks `layer` into `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
unpackedPaths := make(map[string]struct{})
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
// Windows does not support filenames with colons in them. Ignore
// these files. This is not a problem though (although it might
// appear that it is). Let's suppose a client is running docker pull.
// The daemon it points to is Windows. Would it make sense for the
// client to be doing a docker pull Ubuntu for example (which has files
// with colons in the name under /usr/share/man/man3)? No, absolutely
// not as it would really only make sense that they were pulling a
// Windows image. However, for development, it is necessary to be able
// to pull Linux images which are in the repository.
//
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
// image but has it inadvertently tagged as Windows.
if runtime.GOOS == "windows" {
if strings.Contains(hdr.Name, ":") {
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
continue
}
}
// Note: these operations are platform specific, so the slash must be too.
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
// Not the root directory, ensure that the parent directory exists.
// This happened in some tests where an image had a tarfile without any
// parent directories.
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = system.MkdirAll(parentPath, 0600)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in it so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
return 0, err
}
}
if hdr.Name != WhiteoutOpaqueDir {
continue
}
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return 0, err
}
// Note: these operations are platform specific, so the slash must be too.
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
base := filepath.Base(path)
if strings.HasPrefix(base, WhiteoutPrefix) {
dir := filepath.Dir(path)
if base == WhiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
} else {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
}
} else {
// If path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
trBuf.Reset(tr)
srcData := io.Reader(trBuf)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := remapIDs(idMapping, srcHdr); err != nil {
return 0, err
}
if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end, so that creating files
// inside them does not modify the directory mtime again
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
}
return size, nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
// applyLayerHandler does the bulk of the work for ApplyLayer and
// ApplyUncompressedLayer, optionally skipping the DecompressStream call
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
if runtime.GOOS != "windows" {
oldmask, err := system.Umask(0)
if err != nil {
return 0, err
}
defer system.Umask(oldmask)
}
if decompress {
decompLayer, err := DecompressStream(layer)
if err != nil {
return 0, err
}
defer decompLayer.Close()
layer = decompLayer
}
return UnpackLayer(dest, layer, options)
}
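A hypothetical application of a layer tarball (the file name and destination directory are invented):

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	layer, err := os.Open("/tmp/layer.tar.gz") // may be compressed or plain tar
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// ApplyLayer decompresses if needed, unpacks into the root filesystem
	// honouring whiteout entries, and returns the unpacked size in bytes.
	n, err := archive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes", n)
}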


@@ -1,16 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = (1 << 30) - 2
return
}
return syscall.NsecToTimespec(time.UnixNano())
}


@@ -1,16 +0,0 @@
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
nsec := int64(0)
if !time.IsZero() {
nsec = time.UnixNano()
}
return syscall.NsecToTimespec(nsec)
}


@@ -1,23 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// WhiteoutPrefix marks a file as a whiteout: when the prefix is followed by
// a filename, that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix marks a whiteout with a special meaning, not one that
// removes an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
// WhiteoutLinkDir is a directory AUFS uses for storing hardlinks to files in
// other layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
// WhiteoutOpaqueDir means the directory has been made opaque: readdir calls
// on this directory do not fall through to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
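As a worked example of the naming scheme, a self-contained sketch that decodes a plain whiteout entry the way UnpackLayer above does (the helper name is invented; meta entries with the double prefix are special-cased separately, as in UnpackLayer):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const whiteoutPrefix = ".wh." // mirrors WhiteoutPrefix above

// deletionTarget reports the path a whiteout entry marks as deleted,
// or ok == false if the entry is not a whiteout at all.
func deletionTarget(name string) (target string, ok bool) {
	dir, base := filepath.Split(name)
	if !strings.HasPrefix(base, whiteoutPrefix) {
		return "", false
	}
	return filepath.Join(dir, strings.TrimPrefix(base, whiteoutPrefix)), true
}

func main() {
	fmt.Println(deletionTarget("usr/bin/.wh.vi")) // usr/bin/vi true
	fmt.Println(deletionTarget("usr/bin/vi"))     // "" false
}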


@@ -1,59 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"bytes"
"io"
)
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world"
// * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
files := parseStringPairs(input...)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
func parseStringPairs(input ...string) (output [][2]string) {
output = make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
if i+1 < len(input) {
pair[1] = input[i+1]
}
output = append(output, pair)
}
return
}
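A quick round-trip of Generate, reading the archive back with archive/tar (assuming this vendored package is importable under its canonical path):

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	r, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(hdr.Name, hdr.Size) // foo.txt 11, then emptyfile 0
	}
}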


@@ -99,7 +99,9 @@ func relativeToCwd(p string) (string, error) {
 func (m *Manager) insertPlugin(p Plugin) {
 	for _, i := range m.Plugins {
-		if i.Executable == p.Executable {
+		// We don't want any ambiguity here.
+		// Binary plugins must be unique in PATH and Name
+		if i.Executable == p.Executable || i.Name == p.Name {
 			return
 		}
 	}

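Condensed, the new invariant is: a plugin is skipped when either its executable path or its name is already registered. A stand-alone sketch (this Plugin struct is a stand-in for go-pluggable's own type):

type Plugin struct{ Name, Executable string }

// unique reports whether p can be registered, i.e. it collides with no
// already-registered plugin, either by executable path or by name.
func unique(existing []Plugin, p Plugin) bool {
	for _, i := range existing {
		if i.Executable == p.Executable || i.Name == p.Name {
			return false
		}
	}
	return true
}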
vendor/modules.txt

@@ -139,7 +139,6 @@ github.com/docker/docker/api/types/versions
 github.com/docker/docker/api/types/volume
 github.com/docker/docker/client
 github.com/docker/docker/errdefs
-github.com/docker/docker/pkg/archive
 github.com/docker/docker/pkg/fileutils
 github.com/docker/docker/pkg/homedir
 github.com/docker/docker/pkg/idtools
@@ -341,7 +340,7 @@ github.com/morikuni/aec
 # github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
 ## explicit
 github.com/mudler/cobra-extensions
-# github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d
+# github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e
 ## explicit
 github.com/mudler/go-pluggable
 # github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290