Mirror of https://github.com/mudler/luet.git (synced 2025-09-02 07:45:02 +00:00)
Compare commits (15 commits):

fd90e0d627
20d01e43c7
ed63236516
50b23095b2
9665bc1481
37f4289cdd
01638567a7
fbe9b038dd
0a90129e34
b05b00c615
938d41fe9e
163bd77d27
309f5c0559
1f6d0cc66c
07e37ea059
@@ -100,6 +100,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
 	helpers.CheckErr(err)
 	force := viper.GetBool("force-push")
 	imagePush := viper.GetBool("push-images")
+	snapshotID, _ := cmd.Flags().GetString("snapshot-id")

 	opts := []installer.RepositoryOption{
 		installer.WithSource(viper.GetString("packages")),
@@ -163,7 +164,7 @@ Create a repository from the metadata description defined in the luet.yaml confi
 	if metaName != "" {
 		metaFile.SetFileName(metaName)
 	}

+	repo.SetSnapshotID(snapshotID)
 	repo.SetRepositoryFile(installer.REPOFILE_TREE_KEY, treeFile)
 	repo.SetRepositoryFile(installer.REPOFILE_META_KEY, metaFile)

@@ -197,6 +198,7 @@ func init() {
 	createrepoCmd.Flags().String("meta-compression", "none", "Compression alg: none, gzip, zstd")
 	createrepoCmd.Flags().String("meta-filename", installer.REPOSITORY_METAFILE+".tar", "Repository metadata filename")
 	createrepoCmd.Flags().Bool("from-repositories", false, "Consume the user-defined repositories to pull specfiles from")
+	createrepoCmd.Flags().String("snapshot-id", "", "Unique ID to use when creating repository snapshots")

 	RootCmd.AddCommand(createrepoCmd)
 }
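The new --snapshot-id flag threads a caller-chosen snapshot name down to the repository generator. A minimal sketch of the fallback behavior, mirroring the getGenerator hunk further down in this compare (the format string is Go's reference time layout):

    // Sketch: an empty snapshot ID falls back to a timestamp-based one,
    // exactly as getGenerator does below.
    if snapshotID == "" {
        snapshotID = time.Now().Format("20060102150405") // e.g. "20211207153045"
    }
    repo.SetSnapshotID(snapshotID)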
@@ -52,18 +52,10 @@ var reinstallCmd = &cobra.Command{
 		yes := viper.GetBool("yes")

 		downloadOnly, _ := cmd.Flags().GetBool("download-only")
+		installed, _ := cmd.Flags().GetBool("installed")

 		util.SetSystemConfig(util.DefaultContext)

-		for _, a := range args {
-			pack, err := helpers.ParsePackageStr(a)
-			if err != nil {
-				util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
-			}
-			toUninstall = append(toUninstall, pack)
-			toAdd = append(toAdd, pack)
-		}
-
 		util.SetSolverConfig(util.DefaultContext)

 		util.DefaultContext.Config.GetSolverOptions().Implementation = solver.SingleCoreSimple
@@ -87,6 +79,25 @@ var reinstallCmd = &cobra.Command{
 		})

 		system := &installer.System{Database: util.DefaultContext.Config.GetSystemDB(), Target: util.DefaultContext.Config.GetSystem().Rootfs}

+		if installed {
+			for _, p := range system.Database.World() {
+				toUninstall = append(toUninstall, p)
+				c := p.Clone()
+				c.SetVersion(">=0")
+				toAdd = append(toAdd, c)
+			}
+		} else {
+			for _, a := range args {
+				pack, err := helpers.ParsePackageStr(a)
+				if err != nil {
+					util.DefaultContext.Fatal("Invalid package string ", a, ": ", err.Error())
+				}
+				toUninstall = append(toUninstall, pack)
+				toAdd = append(toAdd, pack)
+			}
+		}
+
 		err := inst.Swap(toUninstall, toAdd, system)
 		if err != nil {
 			util.DefaultContext.Fatal("Error: " + err.Error())
@@ -107,6 +118,7 @@ func init() {
 	reinstallCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
 	reinstallCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
 	reinstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
+	reinstallCmd.Flags().Bool("installed", false, "Reinstall installed packages")
 	reinstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
 	reinstallCmd.Flags().Bool("download-only", false, "Download only")
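The new --installed branch swaps every package in the system database for a version-relaxed clone; ">=0" matches any version, so the solver is free to pick whatever the repositories currently provide. A condensed sketch of that branch with explanatory comments:

    // Sketch of the --installed path: reinstall the whole installed world.
    for _, p := range system.Database.World() {
        toUninstall = append(toUninstall, p) // drop the currently installed version
        c := p.Clone()
        c.SetVersion(">=0")      // relax the constraint: any available version is fine
        toAdd = append(toAdd, c) // let the solver repick it from the repositories
    }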
@@ -1,5 +1,6 @@
 // Copyright © 2019 Ettore Di Giacinto <mudler@gentoo.org>
 //                  Daniele Rondina <geaaru@sabayonlinux.org>
+// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
 //
 // This program is free software; you can redistribute it and/or modify
 // it under the terms of the GNU General Public License as published by
@@ -24,7 +25,7 @@ import (
 )

 func NewRepoUpdateCommand() *cobra.Command {
-	var ans = &cobra.Command{
+	var repoUpdate = &cobra.Command{
 		Use:   "update [repo1] [repo2] [OPTIONS]",
 		Short: "Update a specific cached repository or all cached repositories.",
 		Example: `
@@ -72,8 +73,8 @@ $> luet repo update repo1 repo2
 		},
 	}

-	ans.Flags().BoolP("ignore-errors", "i", false, "Ignore errors on sync repositories.")
-	ans.Flags().BoolP("force", "f", false, "Force resync.")
+	repoUpdate.Flags().BoolP("ignore-errors", "i", false, "Ignore errors on sync repositories.")
+	repoUpdate.Flags().BoolP("force", "f", true, "Force resync.")

-	return ans
+	return repoUpdate
 }
@@ -30,7 +30,7 @@ var cfgFile string
 var Verbose bool

 const (
-	LuetCLIVersion = "0.20.13"
+	LuetCLIVersion = "0.21.2"
 	LuetEnvPrefix  = "LUET"
 )
@@ -102,7 +102,12 @@ func NewTreeImageCommand() *cobra.Command {
 			}

 			ht := compiler.NewHashTree(reciper.GetDatabase())
-			hashtree, err := ht.Query(luetCompiler, spec)

+			copy, err := compiler.CompilerFinalImages(luetCompiler)
+			if err != nil {
+				util.DefaultContext.Fatal("Error: " + err.Error())
+			}
+			hashtree, err := ht.Query(copy, spec)
 			if err != nil {
 				util.DefaultContext.Fatal("Error: " + err.Error())
 			}
@@ -43,6 +43,8 @@ var upgradeCmd = &cobra.Command{
 		universe, _ := cmd.Flags().GetBool("universe")
 		clean, _ := cmd.Flags().GetBool("clean")
 		sync, _ := cmd.Flags().GetBool("sync")
+		osCheck, _ := cmd.Flags().GetBool("oscheck")
+
 		yes := viper.GetBool("yes")
 		downloadOnly, _ := cmd.Flags().GetBool("download-only")

@@ -67,6 +69,7 @@ var upgradeCmd = &cobra.Command{
 			UpgradeNewRevisions:         sync,
 			PreserveSystemEssentialData: true,
 			Ask:                         !yes,
+			AutoOSCheck:                 osCheck,
 			DownloadOnly:                downloadOnly,
 			PackageRepositories:         util.DefaultContext.Config.SystemRepositories,
 			Context:                     util.DefaultContext,
@@ -97,6 +100,7 @@ func init() {
 	upgradeCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
 	upgradeCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
 	upgradeCmd.Flags().Bool("download-only", false, "Download only")
+	upgradeCmd.Flags().Bool("oscheck", false, "Perform automatically oschecks after upgrades")

 	RootCmd.AddCommand(upgradeCmd)
 }
go.mod
@@ -42,7 +42,7 @@ require (
 	github.com/moby/moby v20.10.9+incompatible
 	github.com/moby/sys/mount v0.2.0 // indirect
 	github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
-	github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d
+	github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e
 	github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290
 	github.com/onsi/ginkgo v1.16.4
 	github.com/onsi/gomega v1.16.0
go.sum
@@ -810,8 +810,8 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P
 github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
 github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d h1:fKh+rvwZQCA+TPzK0EMwwbqhjvRHaQ6H8AsVU1Wt+NQ=
 github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d/go.mod h1:puRUWSwyecW2V355tKncwPVPRAjQBduPsFjG0mrV/Nw=
-github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d h1:NKvvf/q1dWDde+yg5cMiU5EuYZ2jNuKs/9hb8xod8A0=
-github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d/go.mod h1:WmKcT8ONmhDQIqQ+HxU+tkGWjzBEyY/KFO8LTGCu4AI=
+github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e h1:CZI+kJW2+WjZXLWWnVzi6NDQ6SfwSfeNqq5d1iDiwyY=
+github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e/go.mod h1:WmKcT8ONmhDQIqQ+HxU+tkGWjzBEyY/KFO8LTGCu4AI=
 github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290 h1:426hFyXMpXeqIeGJn2cGAW9ogvM2Jf+Jv23gtVPvBLM=
 github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290/go.mod h1:uP5BBgFxq2wNWo7n1vnY5SSbgL0WDshVJrOO12tZ/lA=
 github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -2,6 +2,7 @@ package bus

 import (
+	"fmt"

 	"github.com/mudler/go-pluggable"
 	"github.com/mudler/luet/pkg/api/core/types"
 )
@@ -13,6 +14,10 @@
 	EventPackageInstall pluggable.EventType = "package.install"
 	// EventPackageUnInstall is the event fired when a new package is being uninstalled
 	EventPackageUnInstall pluggable.EventType = "package.uninstall"
+	// EventPreUpgrade is the event fired before an upgrade is attempted
+	EventPreUpgrade pluggable.EventType = "package.pre.upgrade"
+	// EventPostUpgrade is the event fired after an upgrade is done
+	EventPostUpgrade pluggable.EventType = "package.post.upgrade"

 	// Package build
@@ -62,6 +67,8 @@ var Manager *Bus = &Bus{
 		EventPackageInstall,
 		EventPackageUnInstall,
 		EventPackagePreBuild,
+		EventPreUpgrade,
+		EventPostUpgrade,
 		EventPackagePreBuildArtifact,
 		EventPackagePostBuildArtifact,
 		EventPackagePostBuild,
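External plugins see the two new events through luet's plugin bus. A hypothetical sketch of a consumer (the payload field names follow the anonymous structs published in the installer hunk further down; the convention that go-pluggable hands plugins the event name and a JSON payload as arguments is an assumption, not shown in this diff):

    // Hypothetical plugin sketch; invocation convention assumed.
    package main

    import (
        "encoding/json"
        "fmt"
        "os"
    )

    func main() {
        if len(os.Args) < 3 || os.Args[1] != "package.pre.upgrade" {
            return
        }
        var payload struct {
            Uninstall, Install []json.RawMessage // package sets from the event payload
        }
        if err := json.Unmarshal([]byte(os.Args[2]), &payload); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Printf("upgrade: removing %d, installing %d packages\n",
            len(payload.Uninstall), len(payload.Install))
    }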
@@ -138,7 +138,7 @@ func (sc *LuetSystemConfig) GetSystemRepoDatabaseDirPath() string {
 	return dbpath
 }

-func (sc *LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) {
+func (sc *LuetSystemConfig) GetSystemPkgsCacheDirPath() (p string) {
 	var cachepath string
 	if sc.PkgsCachePath != "" {
 		cachepath = sc.PkgsCachePath
@@ -148,11 +148,13 @@ func (sc *LuetSystemConfig) GetSystemPkgsCacheDirPath() (ans string) {
 	}

 	if filepath.IsAbs(cachepath) {
-		ans = cachepath
+		p = cachepath
 	} else {
-		ans = filepath.Join(sc.GetSystemRepoDatabaseDirPath(), cachepath)
+		p = filepath.Join(sc.GetSystemRepoDatabaseDirPath(), cachepath)
 	}

+	sc.PkgsCachePath = cachepath // Be consistent with the path we set
+
 	return
 }
@@ -1053,30 +1053,7 @@ func (cs *LuetCompiler) resolveMultiStageImages(concurrency int, keepPermissions
 	return nil
 }

-func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateFinalArtifact *bool, generateDependenciesFinalArtifact *bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
-	cs.Options.Context.Info(":package: Compiling", p.GetPackage().HumanReadableString(), ".... :coffee:")
-
-	//Before multistage : join - same as multistage, but keep artifacts, join them, create a new one and generate a final image.
-	// When the image is there, use it as a source here, in place of GetImage().
-	if err := cs.resolveFinalImages(concurrency, keepPermissions, p); err != nil {
-		return nil, errors.Wrap(err, "while resolving join images")
-	}
-
-	if err := cs.resolveMultiStageImages(concurrency, keepPermissions, p); err != nil {
-		return nil, errors.Wrap(err, "while resolving multi-stage images")
-	}
-
-	cs.Options.Context.Debug(fmt.Sprintf("%s: has images %t, empty package: %t", p.GetPackage().HumanReadableString(), p.HasImageSource(), p.EmptyPackage()))
-	if !p.HasImageSource() && !p.EmptyPackage() {
-		return nil,
-			fmt.Errorf(
-				"%s is invalid: package has no dependencies and no seed image supplied while it has steps defined",
-				p.GetPackage().GetFingerPrint(),
-			)
-	}
-
-	ht := NewHashTree(cs.Database)
-
+func CompilerFinalImages(cs *LuetCompiler) (*LuetCompiler, error) {
 	// When computing the hash tree, we need to take into consideration
 	// that packages that require final images have to be seen as packages without deps
 	// This is because we don't really want to calculate the deptree of them as
@@ -1103,7 +1080,36 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
 		memDB.CreatePackage(copy)
 	}
 	copy.Database = memDB
+	return copy, nil
+}
+
+func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateFinalArtifact *bool, generateDependenciesFinalArtifact *bool, p *compilerspec.LuetCompilationSpec) (*artifact.PackageArtifact, error) {
+	cs.Options.Context.Info(":package: Compiling", p.GetPackage().HumanReadableString(), ".... :coffee:")
+
+	//Before multistage : join - same as multistage, but keep artifacts, join them, create a new one and generate a final image.
+	// When the image is there, use it as a source here, in place of GetImage().
+	if err := cs.resolveFinalImages(concurrency, keepPermissions, p); err != nil {
+		return nil, errors.Wrap(err, "while resolving join images")
+	}
+
+	if err := cs.resolveMultiStageImages(concurrency, keepPermissions, p); err != nil {
+		return nil, errors.Wrap(err, "while resolving multi-stage images")
+	}
+
+	cs.Options.Context.Debug(fmt.Sprintf("%s: has images %t, empty package: %t", p.GetPackage().HumanReadableString(), p.HasImageSource(), p.EmptyPackage()))
+	if !p.HasImageSource() && !p.EmptyPackage() {
+		return nil,
+			fmt.Errorf(
+				"%s is invalid: package has no dependencies and no seed image supplied while it has steps defined",
+				p.GetPackage().GetFingerPrint(),
+			)
+	}
+
+	ht := NewHashTree(cs.Database)
+	copy, err := CompilerFinalImages(cs)
+	if err != nil {
+		return nil, err
+	}
+	packageHashTree, err := ht.Query(copy, p)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed querying hashtree")
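Both call sites of the extracted helper follow the same shape: CompilerFinalImages returns a compiler copy whose in-memory database strips the dependencies of packages that request final images, and the hash tree is then queried through that copy. A usage sketch mirroring the two call sites in this compare:

    // Usage sketch of the extracted helper:
    copy, err := compiler.CompilerFinalImages(luetCompiler)
    if err != nil {
        return err
    }
    // Query the hash tree against the dependency-stripped view, so
    // final-image packages hash as if they had no deptree.
    hashtree, err := ht.Query(copy, spec)
    if err != nil {
        return err
    }
    _ = hashtree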
@@ -64,11 +64,13 @@ func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.
 	c.context.Spinner()
 	defer c.context.SpinnerStop()

-	resultingArtifact := a.ShallowCopy()
 	artifactName := path.Base(a.Path)

 	downloaded := false

+	resultingArtifact, err := c.CacheGet(a)
+	if err == nil {
+		return resultingArtifact, nil
+	}
+
 	// TODO:
 	// Files are in URI/packagename:version (GetPackageImageName() method)
 	// use downloadAndExtract .. and egenrate an archive to consume. Checksum should be already checked while downloading the image
@@ -79,71 +81,57 @@ func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.
 	// We discard checksum, that are checked while during pull and unpack by containerd
 	resultingArtifact.Checksums = artifact.Checksums{}

-	// Check if file is already in cache
-	fileName, err := c.Cache.Get(resultingArtifact)
-	// Check if file is already in cache
-	if err == nil {
-		resultingArtifact = a
-		resultingArtifact.Path = fileName
-		resultingArtifact.Checksums = artifact.Checksums{}
-		c.context.Debug("Use artifact", artifactName, "from cache.")
-	} else {
-		temp, err := c.context.Config.GetSystem().TempDir("image")
-		if err != nil {
-			return nil, err
-		}
-		defer os.RemoveAll(temp)
-
-		tempArtifact, err := c.context.Config.GetSystem().TempFile("artifact")
-		if err != nil {
-			return nil, err
-		}
-		defer os.RemoveAll(tempArtifact.Name())
-		for _, uri := range c.RepoData.Urls {
-
-			imageName := fmt.Sprintf("%s:%s", uri, a.CompileSpec.GetPackage().ImageID())
-			c.context.Info("Downloading image", imageName)
-
-			// imageName := fmt.Sprintf("%s/%s", uri, artifact.GetCompileSpec().GetPackage().GetPackageImageName())
-			info, err := docker.DownloadAndExtractDockerImage(c.context, imageName, temp, c.auth, c.RepoData.Verify)
-			if err != nil {
-				c.context.Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
-				continue
-			}
-
-			c.context.Info(fmt.Sprintf("Pulled: %s", info.Target.Digest))
-			c.context.Info(fmt.Sprintf("Size: %s", units.BytesSize(float64(info.Target.Size))))
-			c.context.Debug("\nCompressing result ", filepath.Join(temp), "to", tempArtifact.Name())
-
-			resultingArtifact.Path = tempArtifact.Name() // First set to cache file
-			err = resultingArtifact.Compress(temp, 1)
-			if err != nil {
-				c.context.Error(fmt.Sprintf("Failed compressing package %s: %s", imageName, err.Error()))
-				continue
-			}
-
-			_, _, err = c.Cache.Put(resultingArtifact)
-			if err != nil {
-				c.context.Error(fmt.Sprintf("Failed storing package %s from cache: %s", imageName, err.Error()))
-				continue
-			}
-
-			fileName, err := c.Cache.Get(resultingArtifact)
-			if err != nil {
-				c.context.Error(fmt.Sprintf("Failed getting package %s from cache: %s", imageName, err.Error()))
-				continue
-			}
-
-			resultingArtifact.Path = fileName // Cache is persistent. tempArtifact is not
-
-			downloaded = true
-			break
-		}
-
-		if !downloaded {
-			return nil, errors.Wrap(err, "no image available from repositories")
-		}
+	temp, err := c.context.Config.GetSystem().TempDir("image")
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(temp)
+
+	tempArtifact, err := c.context.Config.GetSystem().TempFile("artifact")
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(tempArtifact.Name())
+	for _, uri := range c.RepoData.Urls {
+
+		imageName := fmt.Sprintf("%s:%s", uri, a.CompileSpec.GetPackage().ImageID())
+		c.context.Info("Downloading image", imageName)
+
+		// imageName := fmt.Sprintf("%s/%s", uri, artifact.GetCompileSpec().GetPackage().GetPackageImageName())
+		info, err := docker.DownloadAndExtractDockerImage(c.context, imageName, temp, c.auth, c.RepoData.Verify)
+		if err != nil {
+			c.context.Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
+			continue
+		}
+
+		c.context.Info(
+			fmt.Sprintf("Image: %s. Pulled: %s. Size: %s",
+				imageName,
+				info.Target.Digest,
+				units.BytesSize(float64(info.Target.Size)),
+			),
+		)
+		c.context.Debug("\nCompressing result ", filepath.Join(temp), "to", tempArtifact.Name())
+
+		resultingArtifact.Path = tempArtifact.Name() // First set to cache file
+		err = resultingArtifact.Compress(temp, 1)
+		if err != nil {
+			c.context.Error(fmt.Sprintf("Failed compressing package %s: %s", imageName, err.Error()))
+			continue
+		}
+
+		_, _, err = c.Cache.Put(resultingArtifact)
+		if err != nil {
+			c.context.Error(fmt.Sprintf("Failed storing package %s from cache: %s", imageName, err.Error()))
+			continue
+		}
+		downloaded = true
+
+		return c.CacheGet(resultingArtifact)
 	}

 	if !downloaded {
 		return nil, errors.Wrap(err, "no image available from repositories")
 	}

 	return resultingArtifact, nil
@@ -194,3 +182,29 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {

 	return file.Name(), err
 }
+
+func (c *DockerClient) CacheGet(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
+	resultingArtifact := a.ShallowCopy()
+	// TODO:
+	// Files are in URI/packagename:version (GetPackageImageName() method)
+	// use downloadAndExtract .. and egenrate an archive to consume. Checksum should be already checked while downloading the image
+	// with the above functions, because Docker images already contain such metadata
+	// - Check how verification is done when calling DownloadArtifact outside, similarly we need to check DownloadFile, and how verification
+	// is done in such cases (see repository.go)
+
+	// We discard checksum, that are checked while during pull and unpack by containerd
+	resultingArtifact.Checksums = artifact.Checksums{}
+
+	// Check if file is already in cache
+	fileName, err := c.Cache.Get(resultingArtifact)
+	// Check if file is already in cache
+	if err == nil {
+		artifactName := path.Base(a.Path)
+		c.context.Debug("Use artifact", artifactName, "from cache.")
+		resultingArtifact = a
+		resultingArtifact.Path = fileName
+		resultingArtifact.Checksums = artifact.Checksums{}
+	}
+
+	return resultingArtifact, err
+}
@@ -166,32 +166,33 @@ func (c *HttpClient) DownloadFile(p string) (string, error) {
 	return file.Name(), nil
 }

-func (c *HttpClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
-	newart := a.ShallowCopy()
-	artifactName := path.Base(a.Path)
-
-	fileName, err := c.Cache.Get(a)
-	// Check if file is already in cache
-	if err == nil {
-		newart.Path = fileName
-		c.context.Debug("Use artifact", artifactName, "from cache.")
-	} else {
-		d, err := c.DownloadFile(artifactName)
-		if err != nil {
-			return nil, errors.Wrapf(err, "failed downloading %s", artifactName)
-		}
-
-		defer os.RemoveAll(d)
-		newart.Path = d
-		c.Cache.Put(newart)
-
-		fileName, err := c.Cache.Get(newart)
-		if err != nil {
-			return nil, errors.Wrapf(err, "failed getting file from cache %v", newart)
-		}
-
-		newart.Path = fileName
-	}
-
-	return newart, nil
+func (c *HttpClient) CacheGet(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
+	newart := a.ShallowCopy()
+
+	fileName, err := c.Cache.Get(a)
+
+	newart.Path = fileName
+
+	return newart, err
+}
+
+func (c *HttpClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
+	artifactName := path.Base(a.Path)
+
+	newart, err := c.CacheGet(a)
+	// Check if file is already in cache
+	if err == nil {
+		c.context.Debug("Use artifact", artifactName, "from cache.")
+		return newart, nil
+	}
+
+	d, err := c.DownloadFile(artifactName)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed downloading %s", artifactName)
+	}
+
+	defer os.RemoveAll(d)
+	newart.Path = d
+	c.Cache.Put(newart)
+
+	return c.CacheGet(newart)
 }
@@ -44,32 +44,32 @@ func (c *LocalClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.P
 	var err error

 	artifactName := path.Base(a.Path)
-	newart := a.ShallowCopy()
-
-	fileName, err := c.Cache.Get(a)
+	newart, err := c.CacheGet(a)
 	// Check if file is already in cache
 	if err == nil {
-		newart.Path = fileName
 		c.context.Debug("Use artifact", artifactName, "from cache.")
-	} else {
-		d, err := c.DownloadFile(artifactName)
-		if err != nil {
-			return nil, errors.Wrapf(err, "failed downloading %s", artifactName)
-		}
-		defer os.RemoveAll(d)
-
-		newart.Path = d
-		c.Cache.Put(newart)
-
-		fileName, err := c.Cache.Get(newart)
-		if err != nil {
-			return nil, errors.Wrapf(err, "failed getting file from cache %v", newart)
-		}
-
-		newart.Path = fileName
-		return newart, nil
-	}
-
-	return newart, nil
+		return newart, nil
+	}
+
+	d, err := c.DownloadFile(artifactName)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed downloading %s", artifactName)
+	}
+	defer os.RemoveAll(d)
+
+	newart.Path = d
+	c.Cache.Put(newart)
+
+	return c.CacheGet(newart)
+}
+
+func (c *LocalClient) CacheGet(a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
+	newart := a.ShallowCopy()
+	fileName, err := c.Cache.Get(a)
+
+	newart.Path = fileName
+
+	return newart, err
 }

 func (c *LocalClient) DownloadFile(name string) (string, error) {
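All three clients (Docker, HTTP, local) now converge on the same cache-first contract: try CacheGet, download only on a miss, then Put and re-read through the cache so the returned path is the persistent one. A sketch of the shared shape, using only the Client methods shown in this compare:

    // Cache-first sketch over the extended Client interface:
    func fetch(c installer.Client, a *artifact.PackageArtifact) (*artifact.PackageArtifact, error) {
        if cached, err := c.CacheGet(a); err == nil {
            return cached, nil // hit: the cached path is already persistent
        }
        // miss: DownloadArtifact populates the cache and re-reads it, so
        // callers never end up holding a path into a temporary directory.
        return c.DownloadArtifact(a)
    }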
@@ -29,7 +29,6 @@ import (
 	"github.com/mudler/luet/pkg/api/core/bus"
 	"github.com/mudler/luet/pkg/api/core/types"
 	artifact "github.com/mudler/luet/pkg/api/core/types/artifact"
-	"github.com/mudler/luet/pkg/helpers"
 	fileHelper "github.com/mudler/luet/pkg/helpers/file"
 	"github.com/mudler/luet/pkg/helpers/match"
 	pkg "github.com/mudler/luet/pkg/package"
@@ -53,6 +52,7 @@ type LuetInstallerOptions struct {
 	DownloadOnly bool
 	Relaxed      bool
 	PackageRepositories types.LuetRepositories
+	AutoOSCheck bool

 	Context *types.Context
 }
@@ -267,13 +267,30 @@ func (l *LuetInstaller) swap(o Option, syncedRepos Repositories, toRemove pkg.Pa
 		return nil
 	}

-	ops := l.getOpsWithOptions(toRemove, match, Option{
+	ops, err := l.getOpsWithOptions(toRemove, match, Option{
 		Force:              o.Force,
 		NoDeps:             false,
 		OnlyDeps:           o.OnlyDeps,
 		RunFinalizers:      false,
 		CheckFileConflicts: false,
-	}, o, syncedRepos, packages, assertions, allRepos)
+	}, o, syncedRepos, packages, assertions, allRepos, s)
+	if err != nil {
+		return errors.Wrap(err, "failed computing installer options")
+	}
+
+	l.Options.Context.Info("Computed operations")
+
+	for _, o := range ops {
+		toUninstall := ""
+		toInstall := ""
+		for _, u := range o.Uninstall {
+			toUninstall += fmt.Sprintf(" %s", u.Package.HumanReadableString())
+		}
+		for _, u := range o.Install {
+			toInstall += fmt.Sprintf(" %s", u.Package.HumanReadableString())
+		}
+		l.Options.Context.Info(fmt.Sprintf("%s -> %s", toUninstall, toInstall))
+	}

 	err = l.runOps(ops, s)
 	if err != nil {
@@ -317,8 +334,8 @@ type installOperation struct {
 // installerOp is the operation that is sent to the
 // upgradeWorker's channel (todo)
 type installerOp struct {
-	Uninstall operation
-	Install   installOperation
+	Uninstall []operation
+	Install   []installOperation
 }

 func (l *LuetInstaller) runOps(ops []installerOp, s *System) error {
@@ -349,13 +366,12 @@ func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, systemLock

 	for p := range c {

-		if p.Uninstall.Package != nil {
+		for _, pp := range p.Uninstall {
 			l.Options.Context.Debug("Replacing package inplace")
-			toUninstall, uninstall, err := l.generateUninstallFn(p.Uninstall.Option, s, p.Uninstall.Package)
+			toUninstall, uninstall, err := l.generateUninstallFn(pp.Option, s, pp.Package)
 			if err != nil {
-				l.Options.Context.Error("Failed to generate Uninstall function for" + err.Error())
+				l.Options.Context.Debug("Skipping uninstall, fail to generate uninstall function, error: " + err.Error())
 				continue
-				//return errors.Wrap(err, "while computing uninstall")
 			}
 			systemLock.Lock()
 			err = uninstall()
@@ -364,22 +380,21 @@ func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, systemLock
 			if err != nil {
 				l.Options.Context.Error("Failed uninstall for ", packsToList(toUninstall))
 				continue
-				//return errors.Wrap(err, "uninstalling "+packsToList(toUninstall))
 			}
 		}
-		if p.Install.Package != nil {
-			artMatch := p.Install.Matches[p.Install.Package.GetFingerPrint()]
-			ass := p.Install.Assertions.Search(p.Install.Package.GetFingerPrint())
-			packageToInstall, _ := p.Install.Packages.Find(p.Install.Package.GetPackageName())
+		for _, pp := range p.Install {
+			artMatch := pp.Matches[pp.Package.GetFingerPrint()]
+			ass := pp.Assertions.Search(pp.Package.GetFingerPrint())
+			packageToInstall, _ := pp.Packages.Find(pp.Package.GetPackageName())

 			systemLock.Lock()
 			err := l.install(
-				p.Install.Option,
-				p.Install.Reposiories,
-				map[string]ArtifactMatch{p.Install.Package.GetFingerPrint(): artMatch},
+				pp.Option,
+				pp.Reposiories,
+				map[string]ArtifactMatch{pp.Package.GetFingerPrint(): artMatch},
 				pkg.Packages{packageToInstall},
 				solver.PackagesAssertions{*ass},
-				p.Install.Database,
+				pp.Database,
 				s,
 			)
 			systemLock.Unlock()
@@ -395,35 +410,140 @@ func (l *LuetInstaller) installerOpWorker(i int, wg *sync.WaitGroup, systemLock
 // checks wheter we can uninstall and install in place and compose installer worker ops
 func (l *LuetInstaller) getOpsWithOptions(
 	toUninstall pkg.Packages, installMatch map[string]ArtifactMatch, installOpt, uninstallOpt Option,
-	syncedRepos Repositories, toInstall pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase) []installerOp {
+	syncedRepos Repositories, toInstall pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase, s *System) ([]installerOp, error) {

+	l.Options.Context.Debug("Computing installation order")
 	resOps := []installerOp{}

+	insertPackage := func(install []pkg.Package, uninstall ...pkg.Package) {
+		if len(install) == 0 && len(uninstall) == 0 {
+			return
+		}
+		uOpts := []operation{}
+		for _, u := range uninstall {
+			uOpts = append(uOpts, operation{Package: u, Option: uninstallOpt})
+		}
+		iOpts := []installOperation{}
+		for _, u := range install {
+			iOpts = append(iOpts, installOperation{
+				operation: operation{
+					Package: u,
+					Option:  installOpt,
+				},
+				Matches:     installMatch,
+				Packages:    toInstall,
+				Reposiories: syncedRepos,
+				Assertions:  solution,
+				Database:    allRepos,
+			})
+		}
+		resOps = append(resOps, installerOp{
+			Uninstall: uOpts,
+			Install:   iOpts,
+		})
+	}
+
+	removals := make(map[string]interface{})
+	additions := make(map[string]interface{})
+
+	remove := func(p pkg.Package) {
+		removals[p.GetPackageName()] = nil
+	}
+
+	add := func(p pkg.Package) {
+		additions[p.GetPackageName()] = nil
+	}
+
+	added := func(p pkg.Package) bool {
+		_, exists := additions[p.GetPackageName()]
+		return exists
+	}
+
+	removed := func(p pkg.Package) bool {
+		_, exists := removals[p.GetPackageName()]
+		return exists
+	}
+
+	findToBeInstalled := func(p pkg.Package) pkg.Package {
+		for _, m := range installMatch {
+			if m.Package.GetPackageName() == p.GetPackageName() {
+				return m.Package
+			}
+		}
+		return nil
+	}
+
+	fileIndex := s.FileIndex()
+
 	for _, match := range installMatch {
-		if pack, err := toUninstall.Find(match.Package.GetPackageName()); err == nil {
-			resOps = append(resOps, installerOp{
-				Uninstall: operation{Package: pack, Option: uninstallOpt},
-				Install: installOperation{
-					operation: operation{
-						Package: match.Package,
-						Option:  installOpt,
-					},
-					Matches:     installMatch,
-					Packages:    toInstall,
-					Reposiories: syncedRepos,
-					Assertions:  solution,
-					Database:    allRepos,
-				},
-			})
-		} else {
-			resOps = append(resOps, installerOp{
-				Install: installOperation{
-					operation:   operation{Package: match.Package, Option: installOpt},
-					Matches:     installMatch,
-					Reposiories: syncedRepos,
-					Packages:    toInstall,
-					Assertions:  solution,
-					Database:    allRepos,
-				},
-			})
+		a, err := l.getPackage(match, l.Options.Context)
+		if err != nil && !l.Options.Force {
+			return nil, errors.Wrap(err, "Failed downloading package")
+		}
+		files, err := a.FileList()
+		if err != nil && !l.Options.Force {
+			return nil, errors.Wrapf(err, "Could not get filelist for %s", a.CompileSpec.Package.HumanReadableString())
+		}
+
+		l.Options.Context.Debug(match.Package.HumanReadableString(), "files", len(files))
+
+		foundPackages := make(map[string]pkg.Package)
+	FILES:
+		for _, f := range files {
+			if p, exists := fileIndex[f]; exists {
+				if _, exists := foundPackages[p.HumanReadableString()]; exists {
+					continue FILES
+				}
+
+				_, err := toUninstall.Find(p.GetPackageName())
+				if err == nil {
+					l.Options.Context.Debug(p.HumanReadableString(), "files", f, "matches")
+
+					// Packages that is being installed have a file that
+					// is going to be removed by another package
+					foundPackages[p.HumanReadableString()] = p
+				}
+			}
+		}
+		l.Options.Context.Debug(match.Package.HumanReadableString(), "found", len(foundPackages), "packages")
+		if len(foundPackages) > 0 {
+			if pack, err := toUninstall.Find(match.Package.GetPackageName()); err == nil {
+				foundPackages[pack.HumanReadableString()] = pack
+			}
+
+			toInstall := []pkg.Package{}
+			if !added(match.Package) {
+				toInstall = append(toInstall, match.Package)
+				add(match.Package)
+			}
+			toRemove := []pkg.Package{}
+			for _, p := range foundPackages {
+				if !removed(p) {
+					toRemove = append(toRemove, p)
+					if pp := findToBeInstalled(p); pp != nil && !added(pp) {
+						toInstall = append(toInstall, pp)
+						add(pp)
+					}
+					remove(p)
+				}
+			}
+			// toInstall needs to have:
+			// equivalent of what was found in foundPackages AND
+			// was not already added to installation
+			insertPackage(toInstall, toRemove...)
+		} else if pack, err := toUninstall.Find(match.Package.GetPackageName()); err == nil {
+			if !removed(pack) {
+				toInstall := []pkg.Package{}
+				if !added(match.Package) {
+					toInstall = append(toInstall, match.Package)
+					add(match.Package)
+				}
+				insertPackage(toInstall, pack)
+				remove(pack)
+			}
+		} else if !added(match.Package) {
+			insertPackage([]pkg.Package{match.Package})
+			add(match.Package)
 		}
 	}
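The ordering pass walks each artifact's file list against the system file index: when a package being installed ships a file currently owned by a package being removed, both are batched into one grouped operation, and the name-keyed removals/additions sets ensure nothing is scheduled twice. A minimal runnable sketch of that dedup bookkeeping, with hypothetical package names:

    // Runnable sketch of the additions/removals bookkeeping used above.
    package main

    import "fmt"

    func main() {
        additions := map[string]interface{}{}
        added := func(name string) bool { _, ok := additions[name]; return ok }
        add := func(name string) { additions[name] = nil }

        for _, p := range []string{"test/a", "test/b", "test/a"} {
            if added(p) {
                continue // already scheduled by an earlier grouped op
            }
            add(p)
            fmt.Println("schedule install:", p)
        }
        // Prints test/a and test/b once; the duplicate match is skipped.
    }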
@@ -434,24 +554,24 @@ func (l *LuetInstaller) getOpsWithOptions(
 			if match.Package.GetPackageName() == p.GetPackageName() {
 				found = true
 			}
 		}
 		if !found {
-			resOps = append(resOps, installerOp{
-				Uninstall: operation{Package: p, Option: uninstallOpt},
-			})
+			if _, ok := removals[p.GetPackageName()]; !ok {
+				resOps = append(resOps, installerOp{
+					Uninstall: []operation{{Package: p, Option: uninstallOpt}},
+				})
+				removals[p.GetPackageName()] = nil
+			}
 		}
 	}
-	return resOps
+	return resOps, nil
 }

 func (l *LuetInstaller) checkAndUpgrade(r Repositories, s *System) error {
-	// Spinner(32)
 	uninstall, toInstall, err := l.computeUpgrade(r, s)
 	if err != nil {
 		return errors.Wrap(err, "failed computing upgrade")
 	}
-	// SpinnerStop()

 	if len(toInstall) == 0 && len(uninstall) == 0 {
 		l.Options.Context.Info("Nothing to upgrade")
@@ -487,7 +607,34 @@ func (l *LuetInstaller) checkAndUpgrade(r Repositories, s *System) error {
 		}
 	}

-	return l.swap(o, r, uninstall, toInstall, s)
+	bus.Manager.Publish(bus.EventPreUpgrade, struct{ Uninstall, Install pkg.Packages }{Uninstall: uninstall, Install: toInstall})
+
+	err = l.swap(o, r, uninstall, toInstall, s)
+
+	bus.Manager.Publish(bus.EventPostUpgrade, struct {
+		Error              error
+		Uninstall, Install pkg.Packages
+	}{Uninstall: uninstall, Install: toInstall, Error: err})
+
+	if err != nil {
+		return err
+	}
+
+	if l.Options.AutoOSCheck {
+		l.Options.Context.Info("Performing automatic oscheck")
+		packs := s.OSCheck()
+		if len(packs) > 0 {
+			p := ""
+			for _, r := range packs {
+				p += " " + r.HumanReadableString()
+			}
+			l.Options.Context.Info("Following packages requires reinstallation: " + p)
+			return l.swap(o, r, packs, packs, s)
+		}
+		l.Options.Context.Info("OSCheck done")
+	}
+
+	return err
 }

 func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {
@@ -566,6 +713,21 @@ func (l *LuetInstaller) Install(cp pkg.Packages, s *System) error {

 func (l *LuetInstaller) download(syncedRepos Repositories, toDownload map[string]ArtifactMatch) error {

+	// Don't attempt to download stuff that is already in cache
+	missArtifacts := false
+	for _, m := range toDownload {
+		c := m.Repository.Client(l.Options.Context)
+		_, err := c.CacheGet(m.Artifact)
+		if err != nil {
+			missArtifacts = true
+		}
+	}
+
+	if !missArtifacts {
+		l.Options.Context.Debug("Packages already in cache, skipping download")
+		return nil
+	}
+
 	// Download packages into cache in parallel.
 	all := make(chan ArtifactMatch)

@@ -595,7 +757,6 @@ func (l *LuetInstaller) download(syncedRepos Repositories, toDownload map[string
 	}
 	close(all)
 	wg.Wait()
-
 	return nil
 }
@@ -784,9 +945,11 @@ func (l *LuetInstaller) checkFileconflicts(toInstall map[string]ArtifactMatch, c
 	l.Options.Context.Info("Checking for file conflicts..")
 	defer s.Clean() // Release memory

-	filesToInstall := []string{}
+	filesToInstall := map[string]interface{}{}
 	for _, m := range toInstall {
-		a, err := l.downloadPackage(m, l.Options.Context)
+		l.Options.Context.Debug("Checking file conflicts for", m.Package.HumanReadableString())
+
+		a, err := l.getPackage(m, l.Options.Context)
 		if err != nil && !l.Options.Force {
 			return errors.Wrap(err, "Failed downloading package")
 		}
@@ -796,7 +959,7 @@ func (l *LuetInstaller) checkFileconflicts(toInstall map[string]ArtifactMatch, c
 		}

 		for _, f := range files {
-			if helpers.Contains(filesToInstall, f) {
+			if _, ok := filesToInstall[f]; ok {
 				return fmt.Errorf(
 					"file conflict between packages to be installed",
 				)
@@ -815,9 +978,10 @@ func (l *LuetInstaller) checkFileconflicts(toInstall map[string]ArtifactMatch, c
 				)
 			}
+			filesToInstall[f] = nil
 		}
-		filesToInstall = append(filesToInstall, files...)
 	}
 	l.Options.Context.Info("Done checking for file conflicts..")

 	return nil
 }
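Switching filesToInstall from a slice to a map replaces the linear helpers.Contains scan with a constant-time membership test, which matters because the check runs once per file of every package in the transaction. The idiom in isolation:

    // Set-membership idiom used above: O(1) per lookup instead of a scan.
    seen := map[string]interface{}{}
    for _, f := range files {
        if _, ok := seen[f]; ok {
            return fmt.Errorf("file conflict between packages to be installed")
        }
        seen[f] = nil // record ownership as we go
    }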
@@ -882,11 +1046,11 @@ func (l *LuetInstaller) install(o Option, syncedRepos Repositories, toInstall ma
 	return s.ExecuteFinalizers(l.Options.Context, toFinalize)
 }

-func (l *LuetInstaller) downloadPackage(a ArtifactMatch, ctx *types.Context) (*artifact.PackageArtifact, error) {
+func (l *LuetInstaller) getPackage(a ArtifactMatch, ctx *types.Context) (artifact *artifact.PackageArtifact, err error) {

 	cli := a.Repository.Client(ctx)

-	artifact, err := cli.DownloadArtifact(a.Artifact)
+	artifact, err = cli.DownloadArtifact(a.Artifact)
 	if err != nil {
 		return nil, errors.Wrap(err, "Error on download artifact")
 	}
@@ -900,7 +1064,7 @@ func (l *LuetInstaller) downloadPackage(a ArtifactMatch, ctx *types.Context) (*a

 func (l *LuetInstaller) installPackage(m ArtifactMatch, s *System) error {

-	a, err := l.downloadPackage(m, l.Options.Context)
+	a, err := l.getPackage(m, l.Options.Context)
 	if err != nil && !l.Options.Force {
 		return errors.Wrap(err, "Failed downloading package")
 	}
@@ -925,7 +1089,7 @@ func (l *LuetInstaller) downloadWorker(i int, wg *sync.WaitGroup, pb *pterm.Prog

 	for p := range c {
 		// TODO: Keep trace of what was added from the tar, and save it into system
-		_, err := l.downloadPackage(p, ctx)
+		_, err := l.getPackage(p, ctx)
 		if err != nil {
 			l.Options.Context.Error("Failed downloading package "+p.Package.GetName(), err.Error())
 			return errors.Wrap(err, "Failed downloading package "+p.Package.GetName())
@@ -654,6 +654,7 @@ urls:
 			Expect(err).ToNot(HaveOccurred())
 			inst := NewLuetInstaller(LuetInstallerOptions{
 				Concurrency: 1, Context: ctx,
+				Relaxed:     true,
 				PackageRepositories: types.LuetRepositories{*repo2.LuetRepository},
 			})
 			Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
@@ -702,6 +703,254 @@ urls:

 		})

+		It("Compute the correct upgrade order", func() {
+			tmpdir, err := ioutil.TempDir("", "tree")
+			Expect(err).ToNot(HaveOccurred())
+			defer os.RemoveAll(tmpdir) // clean up
+
+			generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
+
+			err = generalRecipe.Load("../../tests/fixtures/upgrade_complex")
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(4))
+
+			c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2))
+
+			spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
+			Expect(err).ToNot(HaveOccurred())
+			spec2, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.1"})
+			Expect(err).ToNot(HaveOccurred())
+
+			spec4, err := c.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"})
+			Expect(err).ToNot(HaveOccurred())
+			spec5, err := c.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.2"})
+			Expect(err).ToNot(HaveOccurred())
+			Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
+
+			tmpdir, err = ioutil.TempDir("", "tree")
+			Expect(err).ToNot(HaveOccurred())
+			defer os.RemoveAll(tmpdir) // clean up
+
+			spec.SetOutputPath(tmpdir)
+			spec2.SetOutputPath(tmpdir)
+			spec4.SetOutputPath(tmpdir)
+			spec5.SetOutputPath(tmpdir)
+
+			_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec4, spec5))
+
+			Expect(errs).To(BeEmpty())
+
+			repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_complex")
+			Expect(err).ToNot(HaveOccurred())
+			Expect(repo.GetName()).To(Equal("test"))
+			Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
+			Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
+			Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
+			err = repo.Write(ctx, tmpdir, false, false)
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
+			Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
+			Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
+			Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
+			Expect(repo.GetType()).To(Equal("disk"))
+
+			fakeroot, err := ioutil.TempDir("", "fakeroot")
+			Expect(err).ToNot(HaveOccurred())
+			defer os.RemoveAll(fakeroot) // clean up
+
+			repo2, err := NewLuetSystemRepositoryFromYaml([]byte(`
+name: "test"
+type: "disk"
+enable: true
+urls:
+  - "`+tmpdir+`"
+`), pkg.NewInMemoryDatabase(false))
+			Expect(err).ToNot(HaveOccurred())
+			inst := NewLuetInstaller(LuetInstallerOptions{
+				Concurrency: 1, Context: ctx,
+				Relaxed:             true,
+				PackageRepositories: types.LuetRepositories{*repo2.LuetRepository},
+			})
+			Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
+			Expect(repo.GetType()).To(Equal("disk"))
+
+			bolt, err := ioutil.TempDir("", "db")
+			Expect(err).ToNot(HaveOccurred())
+			defer os.RemoveAll(bolt) // clean up
+
+			systemDB := pkg.NewBoltDatabase(filepath.Join(bolt, "db.db"))
+			system := &System{Database: systemDB, Target: fakeroot}
+
+			err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
+			_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(len(system.Database.GetPackages())).To(Equal(1))
+			p, err := system.Database.GetPackage(system.Database.GetPackages()[0])
+			Expect(err).ToNot(HaveOccurred())
+			Expect(p.GetName()).To(Equal("b"))
+
+			files, err := systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
+			Expect(files).To(Equal([]string{"test5", "test6"}))
+			Expect(err).ToNot(HaveOccurred())
+
+			err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}}, system)
+			Expect(err).ToNot(HaveOccurred())
+
+			files, err = systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"})
+			Expect(files).To(Equal([]string{"test3", "test4"}))
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test3"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test4"))).To(BeTrue())
+
+			err = inst.Upgrade(system)
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test3"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test4"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
+
+		})
+
+		It("Compute the correct upgrade order with a package replacing multiple ones", func() {
+			tmpdir, err := ioutil.TempDir("", "tree")
+			Expect(err).ToNot(HaveOccurred())
+			defer os.RemoveAll(tmpdir) // clean up
+
+			generalRecipe := tree.NewCompilerRecipe(pkg.NewInMemoryDatabase(false))
+
+			err = generalRecipe.Load("../../tests/fixtures/upgrade_complex_multiple")
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(len(generalRecipe.GetDatabase().GetPackages())).To(Equal(6))
+
+			c := compiler.NewLuetCompiler(backend.NewSimpleDockerBackend(ctx), generalRecipe.GetDatabase(), options.Concurrency(2))
+
+			spec, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
+			Expect(err).ToNot(HaveOccurred())
+			spec2, err := c.FromPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.1"})
+			Expect(err).ToNot(HaveOccurred())
+			spec3, err := c.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.1"})
+			Expect(err).ToNot(HaveOccurred())
+
+			spec4, err := c.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"})
+			Expect(err).ToNot(HaveOccurred())
+			spec5, err := c.FromPackage(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.2"})
+			Expect(err).ToNot(HaveOccurred())
+			spec6, err := c.FromPackage(&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.2"})
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(spec.GetPackage().GetPath()).ToNot(Equal(""))
+
+			tmpdir, err = ioutil.TempDir("", "tree")
+			Expect(err).ToNot(HaveOccurred())
+			defer os.RemoveAll(tmpdir) // clean up
+
+			spec.SetOutputPath(tmpdir)
+			spec2.SetOutputPath(tmpdir)
+			spec4.SetOutputPath(tmpdir)
+			spec5.SetOutputPath(tmpdir)
+			spec3.SetOutputPath(tmpdir)
+			spec6.SetOutputPath(tmpdir)
+
+			_, errs := c.CompileParallel(false, compilerspec.NewLuetCompilationspecs(spec, spec2, spec3, spec4, spec5, spec6))
+
+			Expect(errs).To(BeEmpty())
+
+			repo, err := stubRepo(tmpdir, "../../tests/fixtures/upgrade_complex_multiple")
+			Expect(err).ToNot(HaveOccurred())
+			Expect(repo.GetName()).To(Equal("test"))
+			Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).ToNot(BeTrue())
+			Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).ToNot(BeTrue())
+			Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).ToNot(BeTrue())
+			err = repo.Write(ctx, tmpdir, false, false)
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(fileHelper.Exists(spec.Rel("repository.yaml"))).To(BeTrue())
+			Expect(fileHelper.Exists(spec.Rel(TREE_TARBALL + ".gz"))).To(BeTrue())
+			Expect(fileHelper.Exists(spec.Rel(REPOSITORY_METAFILE + ".tar"))).To(BeTrue())
+			Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
+			Expect(repo.GetType()).To(Equal("disk"))
+
+			fakeroot, err := ioutil.TempDir("", "fakeroot")
+			Expect(err).ToNot(HaveOccurred())
+			defer os.RemoveAll(fakeroot) // clean up
+
+			repo2, err := NewLuetSystemRepositoryFromYaml([]byte(`
+name: "test"
+type: "disk"
+enable: true
+urls:
+  - "`+tmpdir+`"
+`), pkg.NewInMemoryDatabase(false))
+			Expect(err).ToNot(HaveOccurred())
+			inst := NewLuetInstaller(LuetInstallerOptions{
+				Concurrency: 1, Context: ctx,
+				Relaxed:             true,
+				PackageRepositories: types.LuetRepositories{*repo2.LuetRepository},
+			})
+			Expect(repo.GetUrls()[0]).To(Equal(tmpdir))
+			Expect(repo.GetType()).To(Equal("disk"))
+
+			bolt, err := ioutil.TempDir("", "db")
+			Expect(err).ToNot(HaveOccurred())
+			defer os.RemoveAll(bolt) // clean up
+
+			systemDB := pkg.NewBoltDatabase(filepath.Join(bolt, "db.db"))
+			system := &System{Database: systemDB, Target: fakeroot}
+
+			err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}}, system)
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
+			_, err = systemDB.FindPackage(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(len(system.Database.GetPackages())).To(Equal(1))
+			p, err := system.Database.GetPackage(system.Database.GetPackages()[0])
+			Expect(err).ToNot(HaveOccurred())
+			Expect(p.GetName()).To(Equal("b"))
+
+			files, err := systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"})
+			Expect(files).To(Equal([]string{"test5", "test6"}))
+			Expect(err).ToNot(HaveOccurred())
+
+			err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "c", Category: "test", Version: "1.1"}}, system)
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test1"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test2"))).To(BeTrue())
+
+			err = inst.Install([]pkg.Package{&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}}, system)
+			Expect(err).ToNot(HaveOccurred())
+
+			files, err = systemDB.GetPackageFiles(&pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"})
+			Expect(files).To(Equal([]string{"test3", "test4"}))
+			Expect(err).ToNot(HaveOccurred())
+
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test3"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test4"))).To(BeTrue())
+
+			err = inst.Upgrade(system)
+			Expect(err).ToNot(HaveOccurred())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test1"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test2"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test3"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test4"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test5"))).To(BeTrue())
+			Expect(fileHelper.Exists(filepath.Join(fakeroot, "test6"))).To(BeTrue())
+
+		})
+
 		It("Handles package drops", func() {
 			//repo:=NewLuetSystemRepository()
@@ -23,6 +23,7 @@ import (
 type Client interface {
 	DownloadArtifact(*artifact.PackageArtifact) (*artifact.PackageArtifact, error)
 	DownloadFile(string) (string, error)
+	CacheGet(*artifact.PackageArtifact) (*artifact.PackageArtifact, error)
 }

 type Repositories []*LuetSystemRepository
@@ -74,7 +74,7 @@ type LuetSystemRepository struct {
 	PushImages bool `json:"-"`
 	ForcePush  bool `json:"-"`

-	imagePrefix string
+	imagePrefix, snapshotID string
 }

 type LuetSystemRepositoryMetadata struct {
@@ -379,7 +379,7 @@ func (r *LuetSystemRepository) SetPriority(n int) {
 }

 func (r *LuetSystemRepository) initialize(ctx *types.Context, src string) error {
-	generator, err := r.getGenerator(ctx)
+	generator, err := r.getGenerator(ctx, r.snapshotID)
 	if err != nil {
 		return errors.Wrap(err, "while constructing repository generator")
 	}
@@ -430,6 +430,11 @@ func (r *LuetSystemRepository) SetType(p string) {
 	r.LuetRepository.Type = p
 }

+// Sets snapshot ID
+func (r *LuetSystemRepository) SetSnapshotID(i string) {
+	r.snapshotID = i
+}
+
 func (r *LuetSystemRepository) GetVerify() bool {
 	return r.LuetRepository.Verify
 }
@@ -705,11 +710,15 @@ type RepositoryGenerator interface {
 	Initialize(string, pkg.PackageDatabase) ([]*artifact.PackageArtifact, error)
 }

-func (r *LuetSystemRepository) getGenerator(ctx *types.Context) (RepositoryGenerator, error) {
+func (r *LuetSystemRepository) getGenerator(ctx *types.Context, snapshotID string) (RepositoryGenerator, error) {
+	if snapshotID == "" {
+		snapshotID = time.Now().Format("20060102150405")
+	}
+
 	var rg RepositoryGenerator
 	switch r.GetType() {
 	case DiskRepositoryType, HttpRepositoryType:
-		rg = &localRepositoryGenerator{context: ctx}
+		rg = &localRepositoryGenerator{context: ctx, snapshotID: snapshotID}
 	case DockerRepositoryType:
 		rg = &dockerRepositoryGenerator{
 			b: r.Backend,
@@ -717,6 +726,7 @@ func (r *LuetSystemRepository) getGenerator(ctx *types.Context) (RepositoryGener
 			imagePush: r.PushImages,
 			force:     r.ForcePush,
 			context:   ctx,
+			snapshotID: snapshotID,
 		}
 	default:
 		return nil, errors.New("invalid repository type")
@@ -726,7 +736,7 @@ func (r *LuetSystemRepository) getGenerator(ctx *types.Context) (RepositoryGener

 // Write writes the repository metadata to the supplied destination
 func (r *LuetSystemRepository) Write(ctx *types.Context, dst string, resetRevision, force bool) error {
-	rg, err := r.getGenerator(ctx)
+	rg, err := r.getGenerator(ctx, r.snapshotID)
 	if err != nil {
 		return err
 	}
@@ -846,7 +856,20 @@ func (r *LuetSystemRepository) Sync(ctx *types.Context, force bool) (*LuetSystem
|
||||
var repoUpdated bool = false
|
||||
var treefs, metafs string
|
||||
|
||||
repobasedir := ctx.Config.GetSystem().GetRepoDatabaseDirPath(r.GetName())
|
||||
|
||||
toTimeSync := false
|
||||
dat, err := ioutil.ReadFile(filepath.Join(repobasedir, "SYNCTIME"))
|
||||
if err == nil {
|
||||
parsed, _ := time.Parse(time.RFC3339, string(dat))
|
||||
if time.Now().After(parsed.Add(24 * time.Hour)) {
|
||||
toTimeSync = true
|
||||
ctx.Debug(r.Name, "is old, refresh is suggested")
|
||||
}
|
||||
}
|
||||
|
||||
ctx.Debug("Sync of the repository", r.Name, "in progress...")
|
||||
|
||||
c := r.Client(ctx)
|
||||
if c == nil {
|
||||
return nil, errors.New("no client could be generated from repository")
|
||||
@@ -854,20 +877,29 @@ func (r *LuetSystemRepository) Sync(ctx *types.Context, force bool) (*LuetSystem

	repositoryReferenceID := r.referenceID()

-	// Retrieve remote repository.yaml for retrieve revision and date
-	file, err := c.DownloadFile(repositoryReferenceID)
-	if err != nil {
-		return nil, errors.Wrap(err, "while downloading "+repositoryReferenceID)
-	}
+	var downloadedRepoMeta *LuetSystemRepository
+	var file string
+	repoFile := filepath.Join(repobasedir, repositoryReferenceID)

-	repobasedir := ctx.Config.GetSystem().GetRepoDatabaseDirPath(r.GetName())
-	downloadedRepoMeta, err := r.ReadSpecFile(file)
-	if err != nil {
-		return nil, err
-	}
+	_, repoExistsErr := os.Stat(repoFile)
+	if toTimeSync || force || os.IsNotExist(repoExistsErr) {
+		// Retrieve remote repository.yaml for retrieve revision and date
+		file, err = c.DownloadFile(repositoryReferenceID)
+		if err != nil {
+			return nil, errors.Wrap(err, "while downloading "+repositoryReferenceID)
+		}
+		downloadedRepoMeta, err = r.ReadSpecFile(file)
+		if err != nil {
+			return nil, err
+		}
+		defer os.RemoveAll(file)
+	} else {
+		downloadedRepoMeta, err = r.ReadSpecFile(repoFile)
+		if err != nil {
+			return nil, err
+		}
+		repoUpdated = true
+	}
-	// Remove temporary file that contains repository.yaml
-	// Example: /tmp/HttpClient236052003
-	defer os.RemoveAll(file)

	if r.Cached {
		if !force {
@@ -958,8 +990,8 @@ func (r *LuetSystemRepository) Sync(ctx *types.Context, force bool) (*LuetSystem
			downloadedRepoMeta.GetRevision(),
			time.Unix(tsec, 0).String()))

	} else {
		ctx.Info("Repository", downloadedRepoMeta.GetName(), "is already up to date.")
+		now := time.Now().Format(time.RFC3339)
+		ioutil.WriteFile(filepath.Join(repobasedir, "SYNCTIME"), []byte(now), os.ModePerm)
	}

	meta, err := NewLuetSystemRepositoryMetadata(
@@ -983,11 +1015,14 @@ func (r *LuetSystemRepository) Sync(ctx *types.Context, force bool) (*LuetSystem
	// e.g. locally we can override the type (disk), or priority
	// while remotely it could be advertized differently
	r.fill(downloadedRepoMeta)
-	ctx.Info(
-		fmt.Sprintf(":information_source: Repository: %s Priority: %d Type: %s",
-			downloadedRepoMeta.GetName(),
-			downloadedRepoMeta.GetPriority(),
-			downloadedRepoMeta.GetType()))

+	if !repoUpdated {
+		ctx.Info(
+			fmt.Sprintf(":information_source: Repository: %s Priority: %d Type: %s",
+				downloadedRepoMeta.GetName(),
+				downloadedRepoMeta.GetPriority(),
+				downloadedRepoMeta.GetType()))
+	}
	return downloadedRepoMeta, nil
}
@@ -37,10 +37,10 @@ import (
)

type dockerRepositoryGenerator struct {
-	b                compiler.CompilerBackend
-	imagePrefix      string
-	imagePush, force bool
-	context          *types.Context
+	b                       compiler.CompilerBackend
+	imagePrefix, snapshotID string
+	imagePush, force        bool
+	context                 *types.Context
}

func (l *dockerRepositoryGenerator) Initialize(path string, db pkg.PackageDatabase) ([]*artifact.PackageArtifact, error) {
@@ -270,8 +270,7 @@ func (d *dockerRepositoryGenerator) Generate(r *LuetSystemRepository, imagePrefi
	// Create a named snapshot and push it.
	// It edits the metadata pointing at the repository files associated with the snapshot
	// And copies the new files
-	id := time.Now().Format("20060102150405")
-	artifacts, snapshotRepoFile, err := r.Snapshot(id, repoTemp)
+	artifacts, snapshotRepoFile, err := r.Snapshot(d.snapshotID, repoTemp)
	if err != nil {
		return errors.Wrap(err, "while creating snapshot")
	}
@@ -32,7 +32,10 @@ import (
	"github.com/pkg/errors"
)

-type localRepositoryGenerator struct{ context *types.Context }
+type localRepositoryGenerator struct {
+	context    *types.Context
+	snapshotID string
+}

func (l *localRepositoryGenerator) Initialize(path string, db pkg.PackageDatabase) ([]*artifact.PackageArtifact, error) {
	return buildPackageIndex(l.context, path, db)
@@ -124,7 +127,7 @@ func (g *localRepositoryGenerator) Generate(r *LuetSystemRepository, dst string,
	// Create named snapshot.
	// It edits the metadata pointing at the repository files associated with the snapshot
	// And copies the new files
-	if _, _, err := r.Snapshot(time.Now().Format("20060102150405"), dst); err != nil {
+	if _, _, err := r.Snapshot(g.snapshotID, dst); err != nil {
		return errors.Wrap(err, "while creating snapshot")
	}
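With this hunk both generator kinds carry the caller's snapshot ID instead of minting their own timestamp at snapshot time. A trimmed-down model of that dispatch (types and names are simplified stand-ins, not luet's actual API):

package main

import (
	"errors"
	"fmt"
)

type repositoryGenerator interface{ Describe() string }

type localGenerator struct{ snapshotID string }
type dockerGenerator struct{ snapshotID string }

func (l localGenerator) Describe() string  { return "local snapshot " + l.snapshotID }
func (d dockerGenerator) Describe() string { return "docker snapshot " + d.snapshotID }

// getGenerator models the type switch above: the snapshot ID is resolved
// once by the caller and threaded into whichever generator matches the
// repository type.
func getGenerator(repoType, snapshotID string) (repositoryGenerator, error) {
	switch repoType {
	case "disk", "http":
		return localGenerator{snapshotID: snapshotID}, nil
	case "docker":
		return dockerGenerator{snapshotID: snapshotID}, nil
	default:
		return nil, errors.New("invalid repository type")
	}
}

func main() {
	g, _ := getGenerator("disk", "release-1")
	fmt.Println(g.Describe()) // local snapshot release-1
}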
@@ -14,9 +14,10 @@ import (
)

type System struct {
-	Database  pkg.PackageDatabase
-	Target    string
-	fileIndex map[string]pkg.Package
+	Database          pkg.PackageDatabase
+	Target            string
+	fileIndex         map[string]pkg.Package
+	fileIndexPackages map[string]pkg.Package
	sync.Mutex
}

@@ -30,7 +31,9 @@ func (s *System) OSCheck() (notFound pkg.Packages) {
	defer s.Unlock()
	for f, p := range s.fileIndex {
		if _, err := os.Lstat(filepath.Join(s.Target, f)); err != nil {
-			notFound = append(notFound, p)
+			if _, err := s.Database.FindPackage(p); err == nil {
+				notFound = append(notFound, p)
+			}
		}
	}
	notFound = notFound.Unique()
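The OSCheck change makes the scan double-check the database before reporting a package: a file-index entry whose owner is no longer installed is skipped. A standalone model of the walk (string-keyed maps stand in for luet's package types; names are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// missingOwners stats every indexed file under target and returns the
// packages owning files that are gone, keeping only packages the
// stillInstalled predicate confirms, and deduplicating like Unique() does.
func missingOwners(target string, fileIndex map[string]string, stillInstalled func(string) bool) []string {
	seen := map[string]bool{}
	var notFound []string
	for f, p := range fileIndex {
		if _, err := os.Lstat(filepath.Join(target, f)); err != nil {
			if stillInstalled(p) && !seen[p] {
				seen[p] = true
				notFound = append(notFound, p)
			}
		}
	}
	return notFound
}

func main() {
	idx := map[string]string{"z": "test/z", "c": "test/c"}
	fmt.Println(missingOwners("/tmp/testrootfs", idx, func(string) bool { return true }))
}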
@@ -74,14 +77,24 @@ func (s *System) buildFileIndex() {
	// XXX: Replace with cache
	s.Lock()
	defer s.Unlock()
-	// Check if cache is empty or if it got modified
-	if s.fileIndex == nil { //|| len(s.Database.GetPackages()) != len(s.fileIndex) {

+	if s.fileIndex == nil {
		s.fileIndex = make(map[string]pkg.Package)
	}

+	if s.fileIndexPackages == nil {
+		s.fileIndexPackages = make(map[string]pkg.Package)
+	}
+
+	// Check if cache is empty or if it got modified
+	if len(s.Database.GetPackages()) != len(s.fileIndexPackages) {
+		s.fileIndexPackages = make(map[string]pkg.Package)
		for _, p := range s.Database.World() {
			files, _ := s.Database.GetPackageFiles(p)
			for _, f := range files {
				s.fileIndex[f] = p
			}
+			s.fileIndexPackages[p.GetPackageName()] = p
		}
	}
}
@@ -92,6 +105,13 @@ func (s *System) Clean() {
	s.fileIndex = nil
}

+func (s *System) FileIndex() map[string]pkg.Package {
+	s.buildFileIndex()
+	s.Lock()
+	defer s.Unlock()
+	return s.fileIndex
+}
+
func (s *System) ExistsPackageFile(file string) (bool, pkg.Package, error) {
	s.buildFileIndex()
	s.Lock()
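The rebuild condition is now keyed on package count: whenever the database holds a different number of packages than the cached fileIndexPackages map, both indexes are repopulated. A simplified model of that invalidation rule (a plain map stands in for the package database):

package main

import "fmt"

type system struct {
	packages          map[string][]string // package name -> owned files (stands in for the DB)
	fileIndex         map[string]string   // file -> owning package
	fileIndexPackages map[string]bool     // packages reflected in fileIndex
}

// buildFileIndex mirrors the hunk above: lazily allocate both maps, then
// rebuild only when the cached package count diverges from the database.
func (s *system) buildFileIndex() {
	if s.fileIndex == nil {
		s.fileIndex = map[string]string{}
	}
	if s.fileIndexPackages == nil {
		s.fileIndexPackages = map[string]bool{}
	}
	if len(s.packages) != len(s.fileIndexPackages) {
		s.fileIndexPackages = map[string]bool{}
		for p, files := range s.packages {
			for _, f := range files {
				s.fileIndex[f] = p
			}
			s.fileIndexPackages[p] = true
		}
	}
}

func main() {
	s := &system{packages: map[string][]string{"test/a": {"/test3"}}}
	s.buildFileIndex()
	s.packages["test/b"] = []string{"/test5"} // count changes, so the next call rebuilds
	s.buildFileIndex()
	fmt.Println(s.fileIndex["/test5"]) // test/b
}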
tests/fixtures/upgrade_complex/cat/a/a-1.2/build.yaml (new file)
@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6

tests/fixtures/upgrade_complex/cat/a/a-1.2/definition.yaml (new file)
@@ -0,0 +1,4 @@
category: "test"
name: "a"
version: "1.2"

tests/fixtures/upgrade_complex/cat/a/a/build.yaml (new file)
@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4

tests/fixtures/upgrade_complex/cat/a/a/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "a"
version: "1.1"

tests/fixtures/upgrade_complex/cat/b-1.1/build.yaml (new file)
@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4

tests/fixtures/upgrade_complex/cat/b-1.1/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.1"

tests/fixtures/upgrade_complex/cat/b/build.yaml (new file)
@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6

tests/fixtures/upgrade_complex/cat/b/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"
tests/fixtures/upgrade_complex_multiple/cat/a/a-1.2/build.yaml (new file)
@@ -0,0 +1,5 @@
image: "alpine"
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- echo artifact5 > /test2

tests/fixtures/upgrade_complex_multiple/cat/a/a-1.2/definition.yaml (new file)
@@ -0,0 +1,4 @@
category: "test"
name: "a"
version: "1.2"

tests/fixtures/upgrade_complex_multiple/cat/a/a/build.yaml (new file)
@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4

tests/fixtures/upgrade_complex_multiple/cat/a/a/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "a"
version: "1.1"

tests/fixtures/upgrade_complex_multiple/cat/b-1.1/build.yaml (new file)
@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4

tests/fixtures/upgrade_complex_multiple/cat/b-1.1/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.1"

tests/fixtures/upgrade_complex_multiple/cat/b/build.yaml (new file)
@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6

tests/fixtures/upgrade_complex_multiple/cat/b/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"

tests/fixtures/upgrade_complex_multiple/cat/c/c-1.2/build.yaml (new file)
@@ -0,0 +1,3 @@
image: "alpine"
steps:
- echo artifact5 > /test1

tests/fixtures/upgrade_complex_multiple/cat/c/c-1.2/definition.yaml (new file)
@@ -0,0 +1,4 @@
category: "test"
name: "c"
version: "1.2"

tests/fixtures/upgrade_complex_multiple/cat/c/c/build.yaml (new file)
@@ -0,0 +1,4 @@
image: "alpine"
steps:
- echo artifact3 > /test1
- echo artifact4 > /test2

tests/fixtures/upgrade_complex_multiple/cat/c/c/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "c"
version: "1.1"
tests/fixtures/upgrade_integration_oscheck/c/build.yaml (new file)
@@ -0,0 +1,10 @@
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo c > /c
- echo c > /cd
requires:
- category: "test"
  name: "a"
  version: ">=1.0"

tests/fixtures/upgrade_integration_oscheck/c/definition.yaml (new file)
@@ -0,0 +1,9 @@
category: "test"
name: "c"
version: "1.0"
# Boom?

requires:
- category: "test"
  name: "a"
  version: ">=0.1"

tests/fixtures/upgrade_integration_oscheck/cat/a/a/build.yaml (new file)
@@ -0,0 +1,11 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact3 > /test3
- echo artifact4 > /test4
requires:
- category: "test"
  name: "b"
  version: "1.0"

tests/fixtures/upgrade_integration_oscheck/cat/a/a/definition.yaml (new file)
@@ -0,0 +1,8 @@
category: "test"
name: "a"
version: "1.1"
requires:
- category: "test"
  name: "b"
  version: ">=0.1"

tests/fixtures/upgrade_integration_oscheck/cat/a/a1.0/build.yaml (new file)
@@ -0,0 +1,11 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact3 > /testaa
- echo artifact4 > /testaa2
requires:
- category: "test"
  name: "b"
  version: "1.0"

tests/fixtures/upgrade_integration_oscheck/cat/a/a1.0/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "a"
version: "1.0"

tests/fixtures/upgrade_integration_oscheck/cat/a/alatest/build.yaml (new file)
@@ -0,0 +1,11 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact3 > /testlatest
- echo artifact4 > /testlatest2
requires:
- category: "test"
  name: "b"
  version: "1.0"

tests/fixtures/upgrade_integration_oscheck/cat/a/alatest/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "a"
version: "1.2"

tests/fixtures/upgrade_integration_oscheck/cat/b-1.1/build.yaml (new file)
@@ -0,0 +1,9 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /newc
- echo artifact6 > /newnewc
- chmod +x generate.sh
- ./generate.sh

tests/fixtures/upgrade_integration_oscheck/cat/b-1.1/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.1"

tests/fixtures/upgrade_integration_oscheck/cat/b-1.1/generate.sh (new file)
@@ -0,0 +1 @@
echo generated > /sonewc

tests/fixtures/upgrade_integration_oscheck/cat/b/build.yaml (new file)
@@ -0,0 +1,9 @@
image: "alpine"
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo artifact5 > /test5
- echo artifact6 > /test6
- chmod +x generate.sh
- ./generate.sh

tests/fixtures/upgrade_integration_oscheck/cat/b/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "b"
version: "1.0"

tests/fixtures/upgrade_integration_oscheck/cat/b/generate.sh (new file)
@@ -0,0 +1 @@
echo generated > /artifact42

tests/fixtures/upgrade_integration_oscheck/z/build.yaml (new file)
@@ -0,0 +1,9 @@
prelude:
- echo foo > /test
- echo bar > /test2
steps:
- echo z > /z
requires:
- category: "test"
  name: "a"
  version: ">=1.0"

tests/fixtures/upgrade_integration_oscheck/z/definition.yaml (new file)
@@ -0,0 +1,3 @@
category: "test"
name: "z"
version: "1.0"
@@ -85,17 +85,17 @@ EOF
}

testInstall() {
-    luet install -y --config $tmpdir/luet.yaml test/b@1.0
+    luet install -y --relax --config $tmpdir/luet.yaml test/b@1.0
    installst=$?
    assertEquals 'install test successfully' "$installst" "0"
    assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/test5' ]"

-    luet install -y --config $tmpdir/luet.yaml test/a@1.0
+    luet install -y --relax --config $tmpdir/luet.yaml test/a@1.0
    assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
    installst=$?
    assertEquals 'install test successfully' "$installst" "0"

-    luet install -y --config $tmpdir/luet.yaml test/c@1.0
+    luet install -y --relax --config $tmpdir/luet.yaml test/c@1.0
    installst=$?
    assertEquals 'install test successfully' "$installst" "0"
    assertTrue 'package installed C' "[ -e '$tmpdir/testrootfs/c' ]"
tests/integration/05_upgrade_oscheck.sh (new executable file)
@@ -0,0 +1,131 @@
#!/bin/bash

export LUET_NOLOCK=true

oneTimeSetUp() {
    export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
    rm -rf "$tmpdir"
}

testBuild() {
    mkdir $tmpdir/testbuild
    luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/b@1.0
    buildst=$?
    assertTrue 'create package B 1.0' "[ -e '$tmpdir/testbuild/b-test-1.0.package.tar.gz' ]"
    assertEquals 'builds successfully' "$buildst" "0"

    luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/b@1.1
    buildst=$?
    assertEquals 'builds successfully' "$buildst" "0"
    assertTrue 'create package B 1.1' "[ -e '$tmpdir/testbuild/b-test-1.1.package.tar.gz' ]"

    luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/a@1.0
    buildst=$?
    assertEquals 'builds successfully' "$buildst" "0"
    assertTrue 'create package A 1.0' "[ -e '$tmpdir/testbuild/a-test-1.0.package.tar.gz' ]"

    luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/a@1.1
    buildst=$?
    assertEquals 'builds successfully' "$buildst" "0"

    assertTrue 'create package A 1.1' "[ -e '$tmpdir/testbuild/a-test-1.1.package.tar.gz' ]"

    luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/a@1.2
    buildst=$?
    assertEquals 'builds successfully' "$buildst" "0"
    assertTrue 'create package A 1.2' "[ -e '$tmpdir/testbuild/a-test-1.2.package.tar.gz' ]"

    luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/z@1.0
    buildst=$?
    assertEquals 'builds successfully' "$buildst" "0"
    assertTrue 'create package Z 1.0' "[ -e '$tmpdir/testbuild/z-test-1.0.package.tar.gz' ]"

    luet build --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" --destination $tmpdir/testbuild --compression gzip test/c@1.0
    buildst=$?
    assertEquals 'builds successfully' "$buildst" "0"
    assertTrue 'create package C 1.0' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.gz' ]"
}

testRepo() {
    assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
    luet create-repo --tree "$ROOT_DIR/tests/fixtures/upgrade_integration_oscheck" \
    --output $tmpdir/testbuild \
    --packages $tmpdir/testbuild \
    --name "test" \
    --descr "Test Repo" \
    --urls $tmpdir/testrootfs \
    --type http

    createst=$?
    assertEquals 'create repo successfully' "$createst" "0"
    assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}

testConfig() {
    mkdir $tmpdir/testrootfs
    cat <<EOF > $tmpdir/luet.yaml
general:
  debug: true
system:
  rootfs: $tmpdir/testrootfs
  database_path: "/"
  database_engine: "boltdb"
config_from_host: true
repositories:
  - name: "main"
    type: "disk"
    enable: true
    urls:
      - "$tmpdir/testbuild"
EOF
    luet config --config $tmpdir/luet.yaml
    res=$?
    assertEquals 'config test successfully' "$res" "0"
}

testInstall() {
    luet install -y --relax --config $tmpdir/luet.yaml test/b@1.0
    installst=$?
    assertEquals 'install test successfully' "$installst" "0"
    assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/test5' ]"

    luet install -y --relax --config $tmpdir/luet.yaml test/z@1.0
    installst=$?
    assertEquals 'install test successfully' "$installst" "0"
    assertTrue 'package installed Z' "[ -e '$tmpdir/testrootfs/z' ]"

    luet install -y --relax --config $tmpdir/luet.yaml test/a@1.0
    assertTrue 'package installed A' "[ -e '$tmpdir/testrootfs/testaa' ]"
    installst=$?
    assertEquals 'install test successfully' "$installst" "0"

    luet install -y --relax --config $tmpdir/luet.yaml test/c@1.0
    installst=$?
    assertEquals 'install test successfully' "$installst" "0"
    assertTrue 'package installed C' "[ -e '$tmpdir/testrootfs/c' ]"
}

testUpgrade() {
    rm -rf $tmpdir/testrootfs/z
    assertTrue 'package Z corrupted' "[ ! -e '$tmpdir/testrootfs/z' ]"

    upgrade=$(luet --config $tmpdir/luet.yaml upgrade --oscheck -y)
    installst=$?
    echo "$upgrade"
    assertEquals 'install test successfully' "$installst" "0"
    assertTrue 'package uninstalled B' "[ ! -e '$tmpdir/testrootfs/test5' ]"
    assertTrue 'package installed B' "[ -e '$tmpdir/testrootfs/newc' ]"
    assertTrue 'package uninstalled A' "[ ! -e '$tmpdir/testrootfs/testaa' ]"
    assertTrue 'package Z restored' "[ -e '$tmpdir/testrootfs/z' ]"
    assertTrue 'package installed new A' "[ -e '$tmpdir/testrootfs/testlatest' ]"
    assertNotContains 'does not contain test/c@1.0' "$upgrade" 'test/c-1.0'
    assertNotContains 'does not attempt to download test/c@1.0' "$upgrade" 'test/c-1.0 downloaded'
}

# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
vendor/github.com/docker/docker/pkg/archive/README.md (generated, vendored; deleted)
@@ -1 +0,0 @@
-This code provides helper functions for dealing with archive files.
vendor/github.com/docker/docker/pkg/archive/archive.go (generated, vendored; 1322 lines deleted)
File diff suppressed because it is too large
vendor/github.com/docker/docker/pkg/archive/archive_linux.go (generated, vendored; deleted)
@@ -1,100 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "archive/tar"
    "os"
    "path/filepath"
    "strings"

    "github.com/docker/docker/pkg/system"
    "github.com/pkg/errors"
    "golang.org/x/sys/unix"
)

func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) {
    if format == OverlayWhiteoutFormat {
        if inUserNS {
            return nil, errors.New("specifying OverlayWhiteoutFormat is not allowed in userns")
        }
        return overlayWhiteoutConverter{}, nil
    }
    return nil, nil
}

type overlayWhiteoutConverter struct {
}

func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
    // convert whiteouts to AUFS format
    if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
        // we just rename the file and make it normal
        dir, filename := filepath.Split(hdr.Name)
        hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
        hdr.Mode = 0600
        hdr.Typeflag = tar.TypeReg
        hdr.Size = 0
    }

    if fi.Mode()&os.ModeDir != 0 {
        // convert opaque dirs to AUFS format by writing an empty file with the prefix
        opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
        if err != nil {
            return nil, err
        }
        if len(opaque) == 1 && opaque[0] == 'y' {
            if hdr.Xattrs != nil {
                delete(hdr.Xattrs, "trusted.overlay.opaque")
            }

            // create a header for the whiteout file
            // it should inherit some properties from the parent, but be a regular file
            wo = &tar.Header{
                Typeflag:   tar.TypeReg,
                Mode:       hdr.Mode & int64(os.ModePerm),
                Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir),
                Size:       0,
                Uid:        hdr.Uid,
                Uname:      hdr.Uname,
                Gid:        hdr.Gid,
                Gname:      hdr.Gname,
                AccessTime: hdr.AccessTime,
                ChangeTime: hdr.ChangeTime,
            }
        }
    }

    return
}

func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
    base := filepath.Base(path)
    dir := filepath.Dir(path)

    // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
    if base == WhiteoutOpaqueDir {
        err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
        if err != nil {
            return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir)
        }
        // don't write the file itself
        return false, err
    }

    // if a file was deleted and we are using overlay, we need to create a character device
    if strings.HasPrefix(base, WhiteoutPrefix) {
        originalBase := base[len(WhiteoutPrefix):]
        originalPath := filepath.Join(dir, originalBase)

        if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
            return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
        }
        if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
            return false, err
        }

        // don't write the file itself
        return false, nil
    }

    return true, nil
}
vendor/github.com/docker/docker/pkg/archive/archive_other.go (generated, vendored; deleted)
@@ -1,7 +0,0 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) {
    return nil, nil
}
vendor/github.com/docker/docker/pkg/archive/archive_unix.go (generated, vendored; deleted)
@@ -1,115 +0,0 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
    "archive/tar"
    "errors"
    "os"
    "path/filepath"
    "strings"
    "syscall"

    "github.com/containerd/containerd/sys"
    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/system"
    "golang.org/x/sys/unix"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
    return srcPath
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
    return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) string {
    return p // already unix-style
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.

func chmodTarEntry(perm os.FileMode) os.FileMode {
    return perm // noop for unix as golang APIs provide perm bits correctly
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
    s, ok := stat.(*syscall.Stat_t)

    if ok {
        // Currently go does not fill in the major/minors
        if s.Mode&unix.S_IFBLK != 0 ||
            s.Mode&unix.S_IFCHR != 0 {
            hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
            hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
        }
    }

    return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
    s, ok := stat.(*syscall.Stat_t)

    if ok {
        inode = s.Ino
    }

    return
}

func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
    s, ok := stat.(*syscall.Stat_t)

    if !ok {
        return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
    }
    return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
    mode := uint32(hdr.Mode & 07777)
    switch hdr.Typeflag {
    case tar.TypeBlock:
        mode |= unix.S_IFBLK
    case tar.TypeChar:
        mode |= unix.S_IFCHR
    case tar.TypeFifo:
        mode |= unix.S_IFIFO
    }

    err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
    if errors.Is(err, syscall.EPERM) && sys.RunningInUserNS() {
        // In most cases, cannot create a device if running in user namespace
        err = nil
    }
    return err
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
    if hdr.Typeflag == tar.TypeLink {
        if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
            if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
                return err
            }
        }
    } else if hdr.Typeflag != tar.TypeSymlink {
        if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
            return err
        }
    }
    return nil
}
vendor/github.com/docker/docker/pkg/archive/archive_windows.go (generated, vendored; deleted)
@@ -1,67 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "archive/tar"
    "os"
    "path/filepath"

    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/longpath"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
    return longpath.AddPrefix(srcPath)
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
    return filepath.Join(srcPath, include)
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) string {
    return filepath.ToSlash(p)
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
    // perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
    permPart := perm & os.ModePerm
    noPermPart := perm &^ os.ModePerm
    // Add the x bit: make everything +x from windows
    permPart |= 0111
    permPart &= 0755

    return noPermPart | permPart
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
    // do nothing. no notion of Rdev, Nlink in stat on Windows
    return
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {
    // do nothing. no notion of Inode in stat on Windows
    return
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
    return nil
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
    return nil
}

func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
    // no notion of file ownership mapping yet on Windows
    return idtools.Identity{UID: 0, GID: 0}, nil
}
vendor/github.com/docker/docker/pkg/archive/changes.go (generated, vendored; deleted)
@@ -1,445 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
    "archive/tar"
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "path/filepath"
    "sort"
    "strings"
    "syscall"
    "time"

    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/pools"
    "github.com/docker/docker/pkg/system"
    "github.com/sirupsen/logrus"
)

// ChangeType represents the change type.
type ChangeType int

const (
    // ChangeModify represents the modify operation.
    ChangeModify = iota
    // ChangeAdd represents the add operation.
    ChangeAdd
    // ChangeDelete represents the delete operation.
    ChangeDelete
)

func (c ChangeType) String() string {
    switch c {
    case ChangeModify:
        return "C"
    case ChangeAdd:
        return "A"
    case ChangeDelete:
        return "D"
    }
    return ""
}

// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
    Path string
    Kind ChangeType
}

func (change *Change) String() string {
    return fmt.Sprintf("%s %s", change.Kind, change.Path)
}

// for sort.Sort
type changesByPath []Change

func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int           { return len(c) }
func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }

// Gnu tar doesn't have sub-second mtime precision. The go tar
// writer (1.10+) does when using PAX format, but we round times to seconds
// to ensure archives have the same hashes for backwards compatibility.
// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
//
// Non-sub-second is problematic when we apply changes via tar
// files. We handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
    return a.Equal(b) ||
        (a.Unix() == b.Unix() &&
            (a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func sameFsTimeSpec(a, b syscall.Timespec) bool {
    return a.Sec == b.Sec &&
        (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}

// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
    return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}

func aufsMetadataSkip(path string) (skip bool, err error) {
    skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
    if err != nil {
        skip = true
    }
    return
}

func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
    f := filepath.Base(path)

    // If there is a whiteout, then the file was removed
    if strings.HasPrefix(f, WhiteoutPrefix) {
        originalFile := f[len(WhiteoutPrefix):]
        return filepath.Join(filepath.Dir(path), originalFile), nil
    }

    return "", nil
}

type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)

func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
    var (
        changes     []Change
        changedDirs = make(map[string]struct{})
    )

    err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
        if err != nil {
            return err
        }

        // Rebase path
        path, err = filepath.Rel(rw, path)
        if err != nil {
            return err
        }

        // As this runs on the daemon side, file paths are OS specific.
        path = filepath.Join(string(os.PathSeparator), path)

        // Skip root
        if path == string(os.PathSeparator) {
            return nil
        }

        if sc != nil {
            if skip, err := sc(path); skip {
                return err
            }
        }

        change := Change{
            Path: path,
        }

        deletedFile, err := dc(rw, path, f)
        if err != nil {
            return err
        }

        // Find out what kind of modification happened
        if deletedFile != "" {
            change.Path = deletedFile
            change.Kind = ChangeDelete
        } else {
            // Otherwise, the file was added
            change.Kind = ChangeAdd

            // ...Unless it already existed in a top layer, in which case, it's a modification
            for _, layer := range layers {
                stat, err := os.Stat(filepath.Join(layer, path))
                if err != nil && !os.IsNotExist(err) {
                    return err
                }
                if err == nil {
                    // The file existed in the top layer, so that's a modification

                    // However, if it's a directory, maybe it wasn't actually modified.
                    // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
                    if stat.IsDir() && f.IsDir() {
                        if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
                            // Both directories are the same, don't record the change
                            return nil
                        }
                    }
                    change.Kind = ChangeModify
                    break
                }
            }
        }

        // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
        // This block is here to ensure the change is recorded even if the
        // modify time, mode and size of the parent directory in the rw and ro layers are all equal.
        // Check https://github.com/docker/docker/pull/13590 for details.
        if f.IsDir() {
            changedDirs[path] = struct{}{}
        }
        if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
            parent := filepath.Dir(path)
            if _, ok := changedDirs[parent]; !ok && parent != "/" {
                changes = append(changes, Change{Path: parent, Kind: ChangeModify})
                changedDirs[parent] = struct{}{}
            }
        }

        // Record change
        changes = append(changes, change)
        return nil
    })
    if err != nil && !os.IsNotExist(err) {
        return nil, err
    }
    return changes, nil
}

// FileInfo describes the information of a file.
type FileInfo struct {
    parent     *FileInfo
    name       string
    stat       *system.StatT
    children   map[string]*FileInfo
    capability []byte
    added      bool
}

// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
    // As this runs on the daemon side, file paths are OS specific.
    parent := info
    if path == string(os.PathSeparator) {
        return info
    }

    pathElements := strings.Split(path, string(os.PathSeparator))
    for _, elem := range pathElements {
        if elem != "" {
            child := parent.children[elem]
            if child == nil {
                return nil
            }
            parent = child
        }
    }
    return parent
}

func (info *FileInfo) path() string {
    if info.parent == nil {
        // As this runs on the daemon side, file paths are OS specific.
        return string(os.PathSeparator)
    }
    return filepath.Join(info.parent.path(), info.name)
}

func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {

    sizeAtEntry := len(*changes)

    if oldInfo == nil {
        // add
        change := Change{
            Path: info.path(),
            Kind: ChangeAdd,
        }
        *changes = append(*changes, change)
        info.added = true
    }

    // We make a copy so we can modify it to detect additions
    // also, we only recurse on the old dir if the new info is a directory
    // otherwise any previous delete/change is considered recursive
    oldChildren := make(map[string]*FileInfo)
    if oldInfo != nil && info.isDir() {
        for k, v := range oldInfo.children {
            oldChildren[k] = v
        }
    }

    for name, newChild := range info.children {
        oldChild := oldChildren[name]
        if oldChild != nil {
            // change?
            oldStat := oldChild.stat
            newStat := newChild.stat
            // Note: We can't compare inode or ctime or blocksize here, because these change
            // when copying a file into a container. However, that is not generally a problem
            // because any content change will change mtime, and any status change should
            // be visible when actually comparing the stat fields. The only time this
            // breaks down is if some code intentionally hides a change by setting
            // back mtime
            if statDifferent(oldStat, newStat) ||
                !bytes.Equal(oldChild.capability, newChild.capability) {
                change := Change{
                    Path: newChild.path(),
                    Kind: ChangeModify,
                }
                *changes = append(*changes, change)
                newChild.added = true
            }

            // Remove from copy so we can detect deletions
            delete(oldChildren, name)
        }

        newChild.addChanges(oldChild, changes)
    }
    for _, oldChild := range oldChildren {
        // delete
        change := Change{
            Path: oldChild.path(),
            Kind: ChangeDelete,
        }
        *changes = append(*changes, change)
    }

    // If there were changes inside this directory, we need to add it, even if the directory
    // itself wasn't changed. This is needed to properly save and restore filesystem permissions.
    // As this runs on the daemon side, file paths are OS specific.
    if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
        change := Change{
            Path: info.path(),
            Kind: ChangeModify,
        }
        // Let's insert the directory entry before the recently added entries located inside this dir
        *changes = append(*changes, change) // just to resize the slice, will be overwritten
        copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
        (*changes)[sizeAtEntry] = change
    }

}

// Changes add changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
    var changes []Change

    info.addChanges(oldInfo, &changes)

    return changes
}

func newRootFileInfo() *FileInfo {
    // As this runs on the daemon side, file paths are OS specific.
    root := &FileInfo{
        name:     string(os.PathSeparator),
        children: make(map[string]*FileInfo),
    }
    return root
}

// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
    var (
        oldRoot, newRoot *FileInfo
    )
    if oldDir == "" {
        emptyDir, err := ioutil.TempDir("", "empty")
        if err != nil {
            return nil, err
        }
        defer os.Remove(emptyDir)
        oldDir = emptyDir
    }
    oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
    if err != nil {
        return nil, err
    }

    return newRoot.Changes(oldRoot), nil
}

// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
    var (
        size int64
        sf   = make(map[uint64]struct{})
    )
    for _, change := range changes {
        if change.Kind == ChangeModify || change.Kind == ChangeAdd {
            file := filepath.Join(newDir, change.Path)
            fileInfo, err := os.Lstat(file)
            if err != nil {
                logrus.Errorf("Can not stat %q: %s", file, err)
                continue
            }

            if fileInfo != nil && !fileInfo.IsDir() {
                if hasHardlinks(fileInfo) {
                    inode := getIno(fileInfo)
                    if _, ok := sf[inode]; !ok {
                        size += fileInfo.Size()
                        sf[inode] = struct{}{}
                    }
                } else {
                    size += fileInfo.Size()
                }
            }
        }
    }
    return size
}

// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
    reader, writer := io.Pipe()
    go func() {
        ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)

        // this buffer is needed for the duration of this piped stream
        defer pools.BufioWriter32KPool.Put(ta.Buffer)

        sort.Sort(changesByPath(changes))

        // In general we log errors here but ignore them because
        // during e.g. a diff operation the container can continue
        // mutating the filesystem and we can see transient errors
        // from this
        for _, change := range changes {
            if change.Kind == ChangeDelete {
                whiteOutDir := filepath.Dir(change.Path)
                whiteOutBase := filepath.Base(change.Path)
                whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
                timestamp := time.Now()
                hdr := &tar.Header{
                    Name:       whiteOut[1:],
                    Size:       0,
                    ModTime:    timestamp,
                    AccessTime: timestamp,
                    ChangeTime: timestamp,
                }
                if err := ta.TarWriter.WriteHeader(hdr); err != nil {
                    logrus.Debugf("Can't write whiteout header: %s", err)
                }
            } else {
                path := filepath.Join(dir, change.Path)
                if err := ta.addTarFile(path, change.Path[1:]); err != nil {
                    logrus.Debugf("Can't add file %s to tar: %s", path, err)
                }
            }
        }

        // Make sure to check the error on Close.
        if err := ta.TarWriter.Close(); err != nil {
            logrus.Debugf("Can't close layer: %s", err)
        }
        if err := writer.Close(); err != nil {
            logrus.Debugf("failed close Changes writer: %s", err)
        }
    }()
    return reader, nil
}
286
vendor/github.com/docker/docker/pkg/archive/changes_linux.go
generated
vendored
286
vendor/github.com/docker/docker/pkg/archive/changes_linux.go
generated
vendored
@@ -1,286 +0,0 @@
|
||||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// walker is used to implement collectFileInfoForChanges on linux. Where this
|
||||
// method in general returns the entire contents of two directory trees, we
|
||||
// optimize some FS calls out on linux. In particular, we take advantage of the
|
||||
// fact that getdents(2) returns the inode of each file in the directory being
|
||||
// walked, which, when walking two trees in parallel to generate a list of
|
||||
// changes, can be used to prune subtrees without ever having to lstat(2) them
|
||||
// directly. Eliminating stat calls in this way can save up to seconds on large
|
||||
// images.
|
||||
type walker struct {
|
||||
dir1 string
|
||||
dir2 string
|
||||
root1 *FileInfo
|
||||
root2 *FileInfo
|
||||
}
|
||||
|
||||
// collectFileInfoForChanges returns a complete representation of the trees
|
||||
// rooted at dir1 and dir2, with one important exception: any subtree or
|
||||
// leaf where the inode and device numbers are an exact match between dir1
|
||||
// and dir2 will be pruned from the results. This method is *only* to be used
|
||||
// to generating a list of changes between the two directories, as it does not
|
||||
// reflect the full contents.
|
||||
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
|
||||
w := &walker{
|
||||
dir1: dir1,
|
||||
dir2: dir2,
|
||||
root1: newRootFileInfo(),
|
||||
root2: newRootFileInfo(),
|
||||
}
|
||||
|
||||
i1, err := os.Lstat(w.dir1)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
i2, err := os.Lstat(w.dir2)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if err := w.walk("/", i1, i2); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return w.root1, w.root2, nil
|
||||
}
|
||||
|
||||
// Given a FileInfo, its path info, and a reference to the root of the tree
|
||||
// being constructed, register this file with the tree.
|
||||
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
|
||||
if fi == nil {
|
||||
return nil
|
||||
}
|
||||
parent := root.LookUp(filepath.Dir(path))
|
||||
if parent == nil {
|
||||
return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
|
||||
}
|
||||
info := &FileInfo{
|
||||
name: filepath.Base(path),
|
||||
		children: make(map[string]*FileInfo),
		parent:   parent,
	}
	cpath := filepath.Join(dir, path)
	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
	if err != nil {
		return err
	}
	info.stat = stat
	info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
	parent.children[info.name] = info
	return nil
}

// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
	// Register these nodes with the return trees, unless we're still at the
	// (already-created) roots:
	if path != "/" {
		if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
			return err
		}
		if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
			return err
		}
	}

	is1Dir := i1 != nil && i1.IsDir()
	is2Dir := i2 != nil && i2.IsDir()

	sameDevice := false
	if i1 != nil && i2 != nil {
		si1 := i1.Sys().(*syscall.Stat_t)
		si2 := i2.Sys().(*syscall.Stat_t)
		if si1.Dev == si2.Dev {
			sameDevice = true
		}
	}

	// If these files are both non-existent, or leaves (non-dirs), we are done.
	if !is1Dir && !is2Dir {
		return nil
	}

	// Fetch the names of all the files contained in both directories being walked:
	var names1, names2 []nameIno
	if is1Dir {
		names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}
	if is2Dir {
		names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}

	// We have lists of the files contained in both parallel directories, sorted
	// in the same order. Walk them in parallel, generating a unique merged list
	// of all items present in either or both directories.
	var names []string
	ix1 := 0
	ix2 := 0

	for {
		if ix1 >= len(names1) {
			break
		}
		if ix2 >= len(names2) {
			break
		}

		ni1 := names1[ix1]
		ni2 := names2[ix2]

		switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
		case -1: // ni1 < ni2 -- advance ni1
			// we will not encounter ni1 in names2
			names = append(names, ni1.name)
			ix1++
		case 0: // ni1 == ni2
			if ni1.ino != ni2.ino || !sameDevice {
				names = append(names, ni1.name)
			}
			ix1++
			ix2++
		case 1: // ni1 > ni2 -- advance ni2
			// we will not encounter ni2 in names1
			names = append(names, ni2.name)
			ix2++
		}
	}
	for ix1 < len(names1) {
		names = append(names, names1[ix1].name)
		ix1++
	}
	for ix2 < len(names2) {
		names = append(names, names2[ix2].name)
		ix2++
	}

	// For each of the names present in either or both of the directories being
	// iterated, stat the name under each root, and recurse the pair of them:
	for _, name := range names {
		fname := filepath.Join(path, name)
		var cInfo1, cInfo2 os.FileInfo
		if is1Dir {
			cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if is2Dir {
			cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if err = w.walk(fname, cInfo1, cInfo2); err != nil {
			return err
		}
	}
	return nil
}

// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
	name string
	ino  uint64
}

type nameInoSlice []nameIno

func (s nameInoSlice) Len() int           { return len(s) }
func (s nameInoSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }

// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
	var (
		size = 100
		buf  = make([]byte, 4096)
		nbuf int
		bufp int
		nb   int
	)

	f, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	names = make([]nameIno, 0, size) // Empty with room to grow.
	for {
		// Refill the buffer if necessary
		if bufp >= nbuf {
			bufp = 0
			nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
			if nbuf < 0 {
				nbuf = 0
			}
			if err != nil {
				return nil, os.NewSyscallError("readdirent", err)
			}
			if nbuf <= 0 {
				break // EOF
			}
		}

		// Drain the buffer
		nb, names = parseDirent(buf[bufp:nbuf], names)
		bufp += nb
	}

	sl := nameInoSlice(names)
	sort.Sort(sl)
	return sl, nil
}

// parseDirent is a minor modification of unix.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
	origlen := len(buf)
	for len(buf) > 0 {
		dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
		buf = buf[dirent.Reclen:]
		if dirent.Ino == 0 { // File absent in directory.
			continue
		}
		bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
		var name = string(bytes[0:clen(bytes[:])])
		if name == "." || name == ".." { // Useless names
			continue
		}
		names = append(names, nameIno{name, dirent.Ino})
	}
	return origlen - len(buf), names
}

func clen(n []byte) int {
	for i := 0; i < len(n); i++ {
		if n[i] == 0 {
			return i
		}
	}
	return len(n)
}
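For illustration, a minimal standalone sketch of the two-pointer merge that (*walker).walk performs over its two sorted directory listings (names and main function here are hypothetical, not part of the vendored code; the vendored version additionally drops pairs whose name and inode match on the same device, which is the early-pruning step):

package main

import "fmt"

// mergeNames returns the union of two sorted name lists, mirroring the
// two-pointer scan in (*walker).walk: names unique to either side are kept,
// names present on both sides are emitted once.
func mergeNames(a, b []string) []string {
	var out []string
	i, j := 0, 0
	for i < len(a) && j < len(b) {
		switch {
		case a[i] < b[j]: // only in a
			out = append(out, a[i])
			i++
		case a[i] > b[j]: // only in b
			out = append(out, b[j])
			j++
		default: // in both
			out = append(out, a[i])
			i++
			j++
		}
	}
	out = append(out, a[i:]...)
	return append(out, b[j:]...)
}

func main() {
	fmt.Println(mergeNames([]string{"a", "c", "d"}, []string{"b", "c"}))
	// Output: [a b c d]
}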
97 vendor/github.com/docker/docker/pkg/archive/changes_other.go (generated, vendored)
@@ -1,97 +0,0 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/pkg/system"
)

func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
	var (
		oldRoot, newRoot *FileInfo
		err1, err2       error
		errs             = make(chan error, 2)
	)
	go func() {
		oldRoot, err1 = collectFileInfo(oldDir)
		errs <- err1
	}()
	go func() {
		newRoot, err2 = collectFileInfo(newDir)
		errs <- err2
	}()

	// block until both routines have returned
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return nil, nil, err
		}
	}

	return oldRoot, newRoot, nil
}

func collectFileInfo(sourceDir string) (*FileInfo, error) {
	root := newRootFileInfo()

	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		relPath, err := filepath.Rel(sourceDir, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		relPath = filepath.Join(string(os.PathSeparator), relPath)

		// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
		// Temporary workaround. If the returned path starts with two backslashes,
		// trim it down to a single backslash. Only relevant on Windows.
		if runtime.GOOS == "windows" {
			if strings.HasPrefix(relPath, `\\`) {
				relPath = relPath[1:]
			}
		}

		if relPath == string(os.PathSeparator) {
			return nil
		}

		parent := root.LookUp(filepath.Dir(relPath))
		if parent == nil {
			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
		}

		info := &FileInfo{
			name:     filepath.Base(relPath),
			children: make(map[string]*FileInfo),
			parent:   parent,
		}

		s, err := system.Lstat(path)
		if err != nil {
			return err
		}
		info.stat = s

		info.capability, _ = system.Lgetxattr(path, "security.capability")

		parent.children[info.name] = info

		return nil
	})
	if err != nil {
		return nil, err
	}
	return root, nil
}
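collectFileInfoForChanges scans both directories concurrently and joins on a buffered error channel. A minimal sketch of that fan-out/fan-in shape, with placeholder task functions (names here are hypothetical):

package main

import (
	"errors"
	"fmt"
)

// runBoth runs two tasks concurrently and returns the first error seen,
// mirroring the buffered-channel join used by collectFileInfoForChanges.
// The channel is buffered to 2 so a late goroutine never blocks on send.
func runBoth(f, g func() error) error {
	errs := make(chan error, 2)
	go func() { errs <- f() }()
	go func() { errs <- g() }()
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runBoth(
		func() error { return nil },
		func() error { return errors.New("scan failed") },
	)
	fmt.Println(err) // scan failed
}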
43 vendor/github.com/docker/docker/pkg/archive/changes_unix.go (generated, vendored)
@@ -1,43 +0,0 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"os"
	"syscall"

	"github.com/docker/docker/pkg/system"
	"golang.org/x/sys/unix"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	// Don't look at size for dirs, its not a good measure of change
	if oldStat.Mode() != newStat.Mode() ||
		oldStat.UID() != newStat.UID() ||
		oldStat.GID() != newStat.GID() ||
		oldStat.Rdev() != newStat.Rdev() ||
		// Don't look at size or modification time for dirs, its not a good
		// measure of change. See https://github.com/moby/moby/issues/9874
		// for a description of the issue with modification time, and
		// https://github.com/moby/moby/pull/11422 for the change.
		// (Note that in the Windows implementation of this function,
		// modification time IS taken as a change). See
		// https://github.com/moby/moby/pull/37982 for more information.
		(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
}

func getIno(fi os.FileInfo) uint64 {
	return fi.Sys().(*syscall.Stat_t).Ino
}

func hasHardlinks(fi os.FileInfo) bool {
	return fi.Sys().(*syscall.Stat_t).Nlink > 1
}
34 vendor/github.com/docker/docker/pkg/archive/changes_windows.go (generated, vendored)
@@ -1,34 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"os"

	"github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	// Note there is slight difference between the Linux and Windows
	// implementations here. Due to https://github.com/moby/moby/issues/9874,
	// and the fix at https://github.com/moby/moby/pull/11422, Linux does not
	// consider a change to the directory time as a change. Windows on NTFS
	// does. See https://github.com/moby/moby/pull/37982 for more information.

	if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) ||
		oldStat.Mode() != newStat.Mode() ||
		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode().IsDir()
}

func getIno(fi os.FileInfo) (inode uint64) {
	return
}

func hasHardlinks(fi os.FileInfo) bool {
	return false
}
480 vendor/github.com/docker/docker/pkg/archive/copy.go (generated, vendored)
@@ -1,480 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"errors"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
)

// Errors used or returned by this file.
var (
	ErrNotDirectory      = errors.New("not a directory")
	ErrDirNotExists      = errors.New("no such directory")
	ErrCannotCopyDir     = errors.New("cannot copy directory")
	ErrInvalidCopySource = errors.New("invalid copy source content")
)

// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in the separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
	// Ensure paths are in platform semantics
	cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
	originalPath = strings.Replace(originalPath, "/", string(sep), -1)

	if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
		if !hasTrailingPathSeparator(cleanedPath, sep) {
			// Add a separator if it doesn't already end with one (a cleaned
			// path would only end in a separator if it is the root).
			cleanedPath += string(sep)
		}
		cleanedPath += "."
	}

	if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
		cleanedPath += string(sep)
	}

	return cleanedPath
}

// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string, sep byte) bool {
	return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
}

// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string, sep byte) bool {
	return len(path) > 0 && path[len(path)-1] == sep
}

// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
	return filepath.Base(path) == "."
}

// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
	cleanedPath := filepath.Clean(filepath.FromSlash(path))

	if specifiesCurrentDir(path) {
		cleanedPath += string(os.PathSeparator) + "."
	}

	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}

// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}

// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
	sourcePath = normalizePath(sourcePath)
	if _, err = os.Lstat(sourcePath); err != nil {
		// Catches the case where the source does not exist or is not a
		// directory if asserted to be a directory, as this also causes an
		// error.
		return
	}

	// Separate the source path between its directory and
	// the entry in that directory which we are archiving.
	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
	opts := TarResourceRebaseOpts(sourceBase, rebaseName)

	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
	return TarWithOptions(sourceDir, opts)
}

// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase
// parameters to be sent to TarWithOptions (the TarOptions struct)
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
	filter := []string{sourceBase}
	return &TarOptions{
		Compression:      Uncompressed,
		IncludeFiles:     filter,
		IncludeSourceDir: true,
		RebaseNames: map[string]string{
			sourceBase: rebaseName,
		},
	}
}

// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
	Path       string
	Exists     bool
	IsDir      bool
	RebaseName string
}

// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
	// normalize the file path and then evaluate the symbol link
	// we will use the target file instead of the symbol link if
	// followLink is set
	path = normalizePath(path)

	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
	if err != nil {
		return CopyInfo{}, err
	}

	stat, err := os.Lstat(resolvedPath)
	if err != nil {
		return CopyInfo{}, err
	}

	return CopyInfo{
		Path:       resolvedPath,
		Exists:     true,
		IsDir:      stat.IsDir(),
		RebaseName: rebaseName,
	}, nil
}

// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
	path = normalizePath(path)
	originalPath := path

	stat, err := os.Lstat(path)

	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
		// The path exists and is not a symlink.
		return CopyInfo{
			Path:   path,
			Exists: true,
			IsDir:  stat.IsDir(),
		}, nil
	}

	// While the path is a symlink.
	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			// Don't follow symlinks more than this arbitrary number of times.
			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
		}

		// The path is a symbolic link. We need to evaluate it so that the
		// destination of the copy operation is the link target and not the
		// link itself. This is notably different than CopyInfoSourcePath which
		// only evaluates symlinks before the last appearing path separator.
		// Also note that it is okay if the last path element is a broken
		// symlink as the copy operation should create the target.
		var linkTarget string

		linkTarget, err = os.Readlink(path)
		if err != nil {
			return CopyInfo{}, err
		}

		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := SplitPathDirEntry(path)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		path = linkTarget
		stat, err = os.Lstat(path)
	}

	if err != nil {
		// It's okay if the destination path doesn't exist. We can still
		// continue the copy operation if the parent directory exists.
		if !os.IsNotExist(err) {
			return CopyInfo{}, err
		}

		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(path)

		parentDirStat, err := os.Stat(dstParent)
		if err != nil {
			return CopyInfo{}, err
		}
		if !parentDirStat.IsDir() {
			return CopyInfo{}, ErrNotDirectory
		}

		return CopyInfo{Path: path}, nil
	}

	// The path exists after resolving symlinks.
	return CopyInfo{
		Path:   path,
		Exists: true,
		IsDir:  stat.IsDir(),
	}, nil
}

// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
	// Ensure in platform semantics
	srcInfo.Path = normalizePath(srcInfo.Path)
	dstInfo.Path = normalizePath(dstInfo.Path)

	// Separate the destination path between its directory and base
	// components in case the source archive contents need to be rebased.
	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
	_, srcBase := SplitPathDirEntry(srcInfo.Path)

	switch {
	case dstInfo.Exists && dstInfo.IsDir:
		// The destination exists as a directory. No alteration
		// to srcContent is needed as its contents can be
		// simply extracted to the destination directory.
		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
	case dstInfo.Exists && srcInfo.IsDir:
		// The destination exists as some type of file and the source
		// content is a directory. This is an error condition since
		// you cannot copy a directory to an existing file location.
		return "", nil, ErrCannotCopyDir
	case dstInfo.Exists:
		// The destination exists as some type of file and the source content
		// is also a file. The source content entry will have to be renamed to
		// have a basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case srcInfo.IsDir:
		// The destination does not exist and the source content is an archive
		// of a directory. The archive should be extracted to the parent of
		// the destination path instead, and when it is, the directory that is
		// created as a result should take the name of the destination path.
		// The source content entries will have to be renamed to have a
		// basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case assertsDirectory(dstInfo.Path, os.PathSeparator):
		// The destination does not exist and is asserted to be created as a
		// directory, but the source content is not a directory. This is an
		// error condition since you cannot create a directory from a file
		// source.
		return "", nil, ErrDirNotExists
	default:
		// The last remaining case is when the destination does not exist, is
		// not asserted to be a directory, and the source content is not an
		// archive of a directory. It this case, the destination file will need
		// to be created when the archive is extracted and the source content
		// entry will have to be renamed to have a basename which matches the
		// destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	}

}

// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
	if oldBase == string(os.PathSeparator) {
		// If oldBase specifies the root directory, use an empty string as
		// oldBase instead so that newBase doesn't replace the path separator
		// that all paths will start with.
		oldBase = ""
	}

	rebased, w := io.Pipe()

	go func() {
		srcTar := tar.NewReader(srcContent)
		rebasedTar := tar.NewWriter(w)

		for {
			hdr, err := srcTar.Next()
			if err == io.EOF {
				// Signals end of archive.
				rebasedTar.Close()
				w.Close()
				return
			}
			if err != nil {
				w.CloseWithError(err)
				return
			}

			// srcContent tar stream, as served by TarWithOptions(), is
			// definitely in PAX format, but tar.Next() mistakenly guesses it
			// as USTAR, which creates a problem: if the newBase is >100
			// characters long, WriteHeader() returns an error like
			// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
			//
			// To fix, set the format to PAX here. See docker/for-linux issue #484.
			hdr.Format = tar.FormatPAX
			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
			if hdr.Typeflag == tar.TypeLink {
				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
			}

			if err = rebasedTar.WriteHeader(hdr); err != nil {
				w.CloseWithError(err)
				return
			}

			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
				w.CloseWithError(err)
				return
			}
		}
	}()

	return rebased
}

// TODO @gupta-ak. These might have to be changed in the future to be
// continuity driver aware as well to support LCOW.

// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
	var (
		srcInfo CopyInfo
		err     error
	)

	// Ensure in platform semantics
	srcPath = normalizePath(srcPath)
	dstPath = normalizePath(dstPath)

	// Clean the source and destination paths.
	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)

	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
		return err
	}

	content, err := TarResource(srcInfo)
	if err != nil {
		return err
	}
	defer content.Close()

	return CopyTo(content, srcInfo, dstPath)
}

// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
	// The destination path need not exist, but CopyInfoDestinationPath will
	// ensure that at least the parent directory exists.
	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
	if err != nil {
		return err
	}

	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
	if err != nil {
		return err
	}
	defer copyArchive.Close()

	options := &TarOptions{
		NoLchown:             true,
		NoOverwriteDirNonDir: true,
	}

	return Untar(copyArchive, dstDir, options)
}

// ResolveHostSourcePath decides real path need to be copied with parameters such as
// whether to follow symbol link or not, if followLink is true, resolvedPath will return
// link target of any symbol link file, else it will only resolve symlink of directory
// but return symbol link file itself without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
	if followLink {
		resolvedPath, err = filepath.EvalSymlinks(path)
		if err != nil {
			return
		}

		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
	} else {
		dirPath, basePath := filepath.Split(path)

		// if not follow symbol link, then resolve symbol link of parent dir
		var resolvedDirPath string
		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
		if err != nil {
			return
		}
		// resolvedDirPath will have been cleaned (no trailing path separators) so
		// we can manually join it with the base path element.
		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
		if hasTrailingPathSeparator(path, os.PathSeparator) &&
			filepath.Base(path) != filepath.Base(resolvedPath) {
			rebaseName = filepath.Base(path)
		}
	}
	return resolvedPath, rebaseName, nil
}

// GetRebaseName normalizes and compares path and resolvedPath,
// return completed resolved path and rebased file name
func GetRebaseName(path, resolvedPath string) (string, string) {
	// linkTarget will have been cleaned (no trailing path separators and dot) so
	// we can manually join it with them
	var rebaseName string
	if specifiesCurrentDir(path) &&
		!specifiesCurrentDir(resolvedPath) {
		resolvedPath += string(filepath.Separator) + "."
	}

	if hasTrailingPathSeparator(path, os.PathSeparator) &&
		!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
		resolvedPath += string(filepath.Separator)
	}

	if filepath.Base(path) != filepath.Base(resolvedPath) {
		// In the case where the path had a trailing separator and a symlink
		// evaluation has changed the last path component, we will need to
		// rebase the name in the archive that is being copied to match the
		// originally requested name.
		rebaseName = filepath.Base(path)
	}
	return resolvedPath, rebaseName
}
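A hedged usage sketch of the copy API above (paths are hypothetical; CopyResource resolves symlinks in the source path when its third argument is true):

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Copy /tmp/src into /tmp/dst, following symlinks in the source path.
	// A trailing "/." on the source would copy the directory *contents*
	// instead of the directory itself, which is exactly the distinction
	// PreserveTrailingDotOrSeparator keeps intact through path cleaning.
	if err := archive.CopyResource("/tmp/src", "/tmp/dst", true); err != nil {
		log.Fatal(err)
	}
}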
11 vendor/github.com/docker/docker/pkg/archive/copy_unix.go (generated, vendored)
@@ -1,11 +0,0 @@
// +build !windows

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.ToSlash(path)
}
9 vendor/github.com/docker/docker/pkg/archive/copy_windows.go (generated, vendored)
@@ -1,9 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.FromSlash(path)
}
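For reference, the two normalizePath variants just wrap the stdlib slash converters, which only rewrite the platform separator; a quick illustration:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	fmt.Println(filepath.ToSlash(`a\b\c`))   // on Windows: a/b/c; on Unix the string is unchanged
	fmt.Println(filepath.FromSlash("a/b/c")) // on Windows: a\b\c; on Unix unchanged
}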
260 vendor/github.com/docker/docker/pkg/archive/diff.go (generated, vendored)
@@ -1,260 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
	"github.com/sirupsen/logrus"
)

// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
	tr := tar.NewReader(layer)
	trBuf := pools.BufioReader32KPool.Get(tr)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	unpackedPaths := make(map[string]struct{})

	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)

	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return 0, err
		}

		size += hdr.Size

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		// Windows does not support filenames with colons in them. Ignore
		// these files. This is not a problem though (although it might
		// appear that it is). Let's suppose a client is running docker pull.
		// The daemon it points to is Windows. Would it make sense for the
		// client to be doing a docker pull Ubuntu for example (which has files
		// with colons in the name under /usr/share/man/man3)? No, absolutely
		// not as it would really only make sense that they were pulling a
		// Windows image. However, for development, it is necessary to be able
		// to pull Linux images which are in the repository.
		//
		// TODO Windows. Once the registry is aware of what images are Windows-
		// specific or Linux-specific, this warning should be changed to an error
		// to cater for the situation where someone does manage to upload a Linux
		// image but have it tagged as Windows inadvertently.
		if runtime.GOOS == "windows" {
			if strings.Contains(hdr.Name, ":") {
				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
				continue
			}
		}

		// Note as these operations are platform specific, so must the slash be.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)

			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0600)
				if err != nil {
					return 0, err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
			// We don't want this directory, but we need the files in them so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return 0, err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
					return 0, err
				}
			}

			if hdr.Name != WhiteoutOpaqueDir {
				continue
			}
		}
		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return 0, err
		}

		// Note as these operations are platform specific, so must the slash be.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}
		base := filepath.Base(path)

		if strings.HasPrefix(base, WhiteoutPrefix) {
			dir := filepath.Dir(path)
			if base == WhiteoutOpaqueDir {
				_, err := os.Lstat(dir)
				if err != nil {
					return 0, err
				}
				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							err = nil // parent was deleted
						}
						return err
					}
					if path == dir {
						return nil
					}
					if _, exists := unpackedPaths[path]; !exists {
						err := os.RemoveAll(path)
						return err
					}
					return nil
				})
				if err != nil {
					return 0, err
				}
			} else {
				originalBase := base[len(WhiteoutPrefix):]
				originalPath := filepath.Join(dir, originalBase)
				if err := os.RemoveAll(originalPath); err != nil {
					return 0, err
				}
			}
		} else {
			// If path exits we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return 0, err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return 0, fmt.Errorf("Invalid aufs hardlink")
				}
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return 0, err
				}
				defer tmpFile.Close()
				srcData = tmpFile
			}

			if err := remapIDs(idMapping, srcHdr); err != nil {
				return 0, err
			}

			if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil {
				return 0, err
			}

			// Directory mtimes must be handled at the end to avoid further
			// file creation in them to modify the directory mtime
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
			unpackedPaths[path] = struct{}{}
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return 0, err
		}
	}

	return size, nil
}

// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
	return applyLayerHandler(dest, layer, &TarOptions{}, true)
}

// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
	return applyLayerHandler(dest, layer, options, false)
}

// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
	dest = filepath.Clean(dest)

	// We need to be able to set any perms
	if runtime.GOOS != "windows" {
		oldmask, err := system.Umask(0)
		if err != nil {
			return 0, err
		}
		defer system.Umask(oldmask)
	}

	if decompress {
		decompLayer, err := DecompressStream(layer)
		if err != nil {
			return 0, err
		}
		defer decompLayer.Close()
		layer = decompLayer
	}
	return UnpackLayer(dest, layer, options)
}
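A minimal sketch of applying a layer diff with the API above (file and directory names are hypothetical; ApplyLayer handles decompression itself):

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	layer, err := os.Open("layer.tar.gz") // may be compressed or plain tar
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// Apply the layer's adds, changes, and whiteout deletions to the rootfs.
	size, err := archive.ApplyLayer("/tmp/rootfs", layer)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes of layer content", size)
}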
16 vendor/github.com/docker/docker/pkg/archive/time_linux.go (generated, vendored)
@@ -1,16 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	if time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = (1 << 30) - 2
		return
	}
	return syscall.NsecToTimespec(time.UnixNano())
}
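The zero time is encoded as the kernel's UTIME_OMIT sentinel, (1 << 30) - 2 in the nanoseconds field, which tells utimensat(2) to leave that timestamp untouched. A quick self-contained check of the mapping (the function is copied locally so the snippet runs on its own):

package main

import (
	"fmt"
	"syscall"
	"time"
)

func timeToTimespec(t time.Time) (ts syscall.Timespec) {
	if t.IsZero() {
		ts.Sec = 0
		ts.Nsec = (1 << 30) - 2 // UTIME_OMIT: kernel skips this timestamp
		return
	}
	return syscall.NsecToTimespec(t.UnixNano())
}

func main() {
	fmt.Println(timeToTimespec(time.Time{}))       // {0 1073741822} -> omit
	fmt.Println(timeToTimespec(time.Unix(1, 500))) // {1 500}
}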
16 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go (generated, vendored)
@@ -1,16 +0,0 @@
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	nsec := int64(0)
	if !time.IsZero() {
		nsec = time.UnixNano()
	}
	return syscall.NsecToTimespec(nsec)
}
23 vendor/github.com/docker/docker/pkg/archive/whiteouts.go (generated, vendored)
@@ -1,23 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"

// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.

// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."

// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"

// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
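A small sketch of how a consumer interprets these markers when flattening layers (the interpret helper is hypothetical; the constants are redeclared locally so the snippet is self-contained):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const (
	WhiteoutPrefix     = ".wh."
	WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
	WhiteoutOpaqueDir  = WhiteoutMetaPrefix + ".opq"
)

// interpret reports what a layer entry asks the extractor to do.
func interpret(name string) string {
	base := filepath.Base(name)
	switch {
	case base == WhiteoutOpaqueDir:
		return "hide lower-layer contents of " + filepath.Dir(name)
	case strings.HasPrefix(base, WhiteoutMetaPrefix):
		return "AUFS metadata, skip"
	case strings.HasPrefix(base, WhiteoutPrefix):
		return "delete " + filepath.Join(filepath.Dir(name), base[len(WhiteoutPrefix):])
	default:
		return "extract " + name
	}
}

func main() {
	fmt.Println(interpret("etc/.wh.passwd"))   // delete etc/passwd
	fmt.Println(interpret("etc/.wh..wh..opq")) // hide lower-layer contents of etc
	fmt.Println(interpret("etc/hosts"))        // extract etc/hosts
}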
59 vendor/github.com/docker/docker/pkg/archive/wrap.go (generated, vendored)
@@ -1,59 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"

import (
	"archive/tar"
	"bytes"
	"io"
)

// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world"
// * ./empty with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
	files := parseStringPairs(input...)
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, file := range files {
		name, content := file[0], file[1]
		hdr := &tar.Header{
			Name: name,
			Size: int64(len(content)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return buf, nil
}

func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for i := 0; i < len(input); i += 2 {
		var pair [2]string
		pair[0] = input[i]
		if i+1 < len(input) {
			pair[1] = input[i+1]
		}
		output = append(output, pair)
	}
	return
}
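Usage of Generate follows the example in its doc comment; a quick sketch that writes the resulting archive to a file (output file name hypothetical):

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Two entries: foo.txt with content, plus a trailing unpaired name
	// that becomes an empty file.
	r, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	out, err := os.Create("bundle.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, r); err != nil {
		log.Fatal(err)
	}
}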
4 vendor/github.com/mudler/go-pluggable/manager.go (generated, vendored)
@@ -99,7 +99,9 @@ func relativeToCwd(p string) (string, error) {
 
 func (m *Manager) insertPlugin(p Plugin) {
 	for _, i := range m.Plugins {
-		if i.Executable == p.Executable {
+		// We don't want any ambiguity here.
+		// Binary plugins must be unique in PATH and Name
+		if i.Executable == p.Executable || i.Name == p.Name {
 			return
 		}
 	}
3 vendor/modules.txt (vendored)
@@ -139,7 +139,6 @@ github.com/docker/docker/api/types/versions
 github.com/docker/docker/api/types/volume
 github.com/docker/docker/client
 github.com/docker/docker/errdefs
-github.com/docker/docker/pkg/archive
 github.com/docker/docker/pkg/fileutils
 github.com/docker/docker/pkg/homedir
 github.com/docker/docker/pkg/idtools
@@ -341,7 +340,7 @@ github.com/morikuni/aec
 # github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
 ## explicit
 github.com/mudler/cobra-extensions
-# github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d
+# github.com/mudler/go-pluggable v0.0.0-20211206135551-9263b05c562e
 ## explicit
 github.com/mudler/go-pluggable
 # github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290