Compare commits

...

19 Commits

Author SHA1 Message Date
Ettore Di Giacinto
29ec19a8a1 Tag 0.17.0 2021-07-11 11:55:35 +02:00
Ettore Di Giacinto
440e07c418 Display version banner only on specific commands
Avoid cumbersome text in output that can be parsed by machines

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-11 11:00:59 +02:00
Ettore Di Giacinto
acf74f5896 config_protect: don't protect files during uninstall by default
Otherwise, during uninstall we would retain the protected files. We
introduced a specific flag to pass during uninstall, but for now we
choose simplicity and the expected default first.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-11 10:40:36 +02:00
Ettore Di Giacinto
1ee1894ffa ci: split integration/unit tests 2021-07-09 17:15:17 +02:00
Ettore Di Giacinto
c8573f9535 Adapt hash tests
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 17:15:17 +02:00
Ettore Di Giacinto
76b70ebeb4 Add dirhash to package signatures
While this breaks current hashing, it also ties the spec content to the
hash: if we change something in the spec folder, the package hash
changes.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 12:37:38 +02:00
Ettore Di Giacinto
2efb17a06c Add specific field to resolve final images
This commit starts deprecation of `join` keyword in favor of
`requires_final_images` as boolean in the compilation spec.

The change is driven by two reasons: syntax and guaranteeing unique hashes.

- when computing a hash, the hashtree analyzes the requires field of
  each spec, ignoring the join field
- the join field doesn't add much value. Having it separate suggests
  that a spec can contain both `requires` and `join`, but that's not
  actually true: we just act differently on the same list.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 12:37:38 +02:00
Ettore Di Giacinto
64ab3711ca Display version and license on each run
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-07-09 12:25:55 +02:00
Ettore Di Giacinto
eb5d7ba35b Merge branch 'master' into develop 2021-07-09 12:25:04 +02:00
Daniele Rondina
b6b91cfd7a compiler: Avoid generation of delta if there are only prelude steps (#232) 2021-07-09 12:23:32 +02:00
Daniele Rondina
4d8a9a544b cmd/database/create: Avoid creation of the same package multiple times (#222) 2021-06-25 13:09:32 +02:00
Ettore Di Giacinto
654b5b48cd Tag 0.16.7 2021-06-17 07:55:01 +02:00
Ettore Di Giacinto
92e18d5782 Support priv/unpriv image extraction
Optionally add back privileged extraction, which can be enabled with
LUET_PRIVILEGED_EXTRACT=true

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-06-16 23:30:28 +02:00
Ettore Di Giacinto
8780e4f16f Unpack using our moby fork
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-06-16 19:16:02 +02:00
Ettore Di Giacinto
a7b4ae67c9 Tag 0.16.6 2021-06-04 16:23:51 +02:00
Ettore Di Giacinto
68edfd58e7 Keep emitting plugin events 2021-06-04 12:08:57 +02:00
Ettore Di Giacinto
0658020c60 Update go.mod and vendor 2021-06-04 11:15:10 +02:00
Ettore Di Giacinto
c3b552103f Return a specific struct for the image
And drop imgworker
2021-06-04 11:09:41 +02:00
Darren Shepherd
796967cc9d Allow luet install to work inside a non-privileged container
This switches from using the containerd snapshotter to the
go-containerregistry library, which requires no additional privileges
beyond root file system access.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
2021-06-04 10:29:57 +02:00
27 changed files with 1243 additions and 252 deletions

View File

@@ -1,8 +1,46 @@
 on: push
+concurrency:
+  group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
+  cancel-in-progress: true
 name: Build and release on push
 jobs:
-  release:
-    name: Test and Release
+  tests-integration:
+    name: Integration tests
+    runs-on: ubuntu-latest
+    steps:
+    - name: Install Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.14.x
+    - name: Checkout code
+      uses: actions/checkout@v2
+    - name: setup-docker
+      uses: docker-practice/actions-setup-docker@0.0.1
+    - name: Login to quay
+      run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
+    - name: Install deps
+      run: |
+        sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
+        sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
+        sudo chmod a+x "/usr/bin/img"
+    - name: Login to quay with img
+      run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
+    - name: Tests with Img backend
+      run: |
+        sudo -E env "PATH=$PATH" \
+          env "LUET_BACKEND=img" \
+          make test-integration
+    - name: Tests
+      run: |
+        sudo -E \
+          env "PATH=$PATH" \
+          env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
+          env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
+          env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
+          make test-integration
+  tests-unit:
+    name: Unit tests
     runs-on: ubuntu-latest
     steps:
     - name: Install Go
@@ -22,13 +60,6 @@ jobs:
        sudo chmod a+x "/usr/bin/img"
     - name: Build test
       run: sudo -E env "PATH=$PATH" make multiarch-build-small
-    - name: Login to quay with img
-      run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
-    - name: Tests with Img backend
-      run: |
-        sudo -E env "PATH=$PATH" \
-          env "LUET_BACKEND=img" \
-          make test-integration
     - name: Tests
       run: |
        sudo -E \
@@ -36,7 +67,28 @@ jobs:
          env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
          env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
          env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
-         make test-integration test-coverage
+         make test-coverage
+  release:
+    name: Test and Release
+    needs: ["tests-integration","tests-unit"]
+    runs-on: ubuntu-latest
+    steps:
+    - name: Install Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.14.x
+    - name: Checkout code
+      uses: actions/checkout@v2
+    - name: setup-docker
+      uses: docker-practice/actions-setup-docker@0.0.1
+    - name: Login to quay
+      run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
+    - name: Install deps
+      run: |
+        sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
+        sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
+        sudo chmod a+x "/usr/bin/img"
     - name: Build
       run: sudo -E env "PATH=$PATH" make multiarch-build-small && sudo chmod -R 777 release/
     - name: Release

View File

@@ -2,7 +2,31 @@
 on: pull_request
 name: Build and Test
 jobs:
-  test:
+  tests-integration:
+    strategy:
+      matrix:
+        go-version: [1.14.x]
+        platform: [ubuntu-latest]
+    runs-on: ${{ matrix.platform }}
+    steps:
+    - name: Install Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: ${{ matrix.go-version }}
+    - name: Checkout code
+      uses: actions/checkout@v2
+    - name: setup-docker
+      uses: docker-practice/actions-setup-docker@0.0.1
+    - name: Install deps
+      run: |
+        sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
+        sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
+        sudo chmod a+x "/usr/bin/img"
+    - name: Tests with Img backend
+      run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
+    - name: Tests
+      run: sudo -E env "PATH=$PATH" make test-integration
+  tests-unit:
     strategy:
       matrix:
         go-version: [1.14.x]
@@ -24,7 +48,5 @@ jobs:
        sudo chmod a+x "/usr/bin/img"
     - name: Build
       run: sudo -E env "PATH=$PATH" make multiarch-build-small
-    - name: Tests with Img backend
-      run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
     - name: Tests
-      run: sudo -E env "PATH=$PATH" make test-integration test-coverage
+      run: sudo -E env "PATH=$PATH" make test-coverage

View File

@@ -74,6 +74,12 @@ For reference, inspect a "metadata.yaml" file generated while running "luet buil
         files := art.Files
+        // Check if the package is already present
+        if p, err := systemDB.FindPackage(art.CompileSpec.GetPackage()); err == nil && p.GetName() != "" {
+            Fatal("Package", art.CompileSpec.GetPackage().HumanReadableString(),
+                " already present.")
+        }
         if _, err := systemDB.CreatePackage(art.CompileSpec.GetPackage()); err != nil {
             Fatal("Failed to create ", a, ": ", err.Error())
         }
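
For illustration, the duplicate check added above changes what happens on repeated runs roughly as follows (hypothetical metadata path, assuming the same system database is targeted twice):

luet database create /path/to/foo.metadata.yaml   # first run: the package entry is created
luet database create /path/to/foo.metadata.yaml   # second run now aborts with "Package ... already present."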

View File

@@ -41,8 +41,14 @@ var Verbose bool
 var LockedCommands = []string{"install", "uninstall", "upgrade"}
 const (
-    LuetCLIVersion = "0.16.5"
+    LuetCLIVersion = "0.17.0"
     LuetEnvPrefix  = "LUET"
+    license        = `
+Luet  Copyright (C) 2019-2021 Ettore Di Giacinto
+This program comes with ABSOLUTELY NO WARRANTY.
+This is free software, and you are welcome to redistribute it
+under certain conditions.
+`
 )
 // Build time and commit information.
@@ -53,6 +59,47 @@
     BuildCommit string
 )
 
+func version() string {
+    return fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime)
+}
+
+var noBannerCommands = []string{"search", "exec", "tree", "database", "box"}
+
+func displayVersionBanner() {
+    display := true
+    if len(os.Args) > 1 {
+        for _, c := range noBannerCommands {
+            if os.Args[1] == c {
+                display = false
+            }
+        }
+    }
+    if display {
+        Info("Luet version", version())
+        Info(license)
+    }
+}
+
+func handleLock() {
+    if os.Getenv("LUET_NOLOCK") != "true" {
+        if len(os.Args) > 1 {
+            for _, lockedCmd := range LockedCommands {
+                if os.Args[1] == lockedCmd {
+                    s := single.New("luet")
+                    if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
+                        Fatal("another instance of the app is already running, exiting")
+                    } else if err != nil {
+                        // Another error occurred, might be worth handling it as well
+                        Fatal("failed to acquire exclusive app lock:", err.Error())
+                    }
+                    defer s.TryUnlock()
+                    break
+                }
+            }
+        }
+    }
+}
+
 // RootCmd represents the base command when called without any subcommands
 var RootCmd = &cobra.Command{
     Use:   "luet",
@@ -80,8 +127,9 @@ To build a package, from a tree definition:
     $ luet build --tree tree/path package
 `,
-    Version: fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime),
+    Version:          version(),
     PersistentPreRun: func(cmd *cobra.Command, args []string) {
         err := LoadConfig(config.LuetCfg)
         if err != nil {
             Fatal("failed to load configuration:", err.Error())
@@ -155,23 +203,8 @@ func LoadConfig(c *config.LuetConfig) error {
 // This is called by main.main(). It only needs to happen once to the rootCmd.
 func Execute() {
-    if os.Getenv("LUET_NOLOCK") != "true" {
-        if len(os.Args) > 1 {
-            for _, lockedCmd := range LockedCommands {
-                if os.Args[1] == lockedCmd {
-                    s := single.New("luet")
-                    if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
-                        Fatal("another instance of the app is already running, exiting")
-                    } else if err != nil {
-                        // Another error occurred, might be worth handling it as well
-                        Fatal("failed to acquire exclusive app lock:", err.Error())
-                    }
-                    defer s.TryUnlock()
-                    break
-                }
-            }
-        }
-    }
+    handleLock()
+    displayVersionBanner()
     if err := RootCmd.Execute(); err != nil {
         fmt.Println(err)
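
A rough sketch of the resulting CLI behaviour (package name is a placeholder, assuming a build of this branch):

luet search foo      # listed in noBannerCommands: no version banner, machine-parseable output
luet install foo     # prints "Luet version 0.17.0-g<commit> <buildtime>" plus the license notice,
                     # and takes the exclusive application lock (skipped when LUET_NOLOCK=true)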

View File

@@ -67,10 +67,12 @@ var uninstallCmd = &cobra.Command{
         dbpath := LuetCfg.Viper.GetString("system.database_path")
         rootfs := LuetCfg.Viper.GetString("system.rootfs")
         engine := LuetCfg.Viper.GetString("system.database_engine")
+        keepProtected, _ := cmd.Flags().GetBool("keep-protected-files")
         LuetCfg.System.DatabaseEngine = engine
         LuetCfg.System.DatabasePath = dbpath
         LuetCfg.System.Rootfs = rootfs
+        LuetCfg.ConfigProtectSkip = !keepProtected
         LuetCfg.GetSolverOptions().Type = stype
         LuetCfg.GetSolverOptions().LearnRate = float32(rate)
@@ -123,6 +125,7 @@ func init() {
     uninstallCmd.Flags().Bool("full-clean", false, "(experimental) Uninstall packages and all the other deps/revdeps of it.")
     uninstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
     uninstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
+    uninstallCmd.Flags().BoolP("keep-protected-files", "k", false, "Keep package protected files around")
     RootCmd.AddCommand(uninstallCmd)
 }
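
The flag added above is the escape hatch for the new uninstall default; an illustrative invocation (package name is a placeholder):

luet uninstall -y test/a                          # default: config-protected files are removed as well
luet uninstall -y --keep-protected-files test/a   # keep ._cfg* protected files around (short form: -k)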

go.mod (7 lines changed)
View File

@@ -11,9 +11,10 @@ require (
     github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
     github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
     github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765
-    github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24
+    github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
     github.com/docker/distribution v2.7.1+incompatible
     github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
+    github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
     github.com/docker/go-units v0.4.0
     github.com/ecooper/qlearning v0.0.0-20160612200101-3075011a69fd
     github.com/genuinetools/img v0.5.11
@@ -22,7 +23,7 @@ require (
     github.com/google/renameio v1.0.0
     github.com/hashicorp/go-multierror v1.0.0
     github.com/hashicorp/go-version v1.2.1
-    github.com/imdario/mergo v0.3.8
+    github.com/imdario/mergo v0.3.9
     github.com/jedib0t/go-pretty v4.3.0+incompatible
     github.com/jedib0t/go-pretty/v6 v6.0.5
     github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
@@ -55,8 +56,10 @@ require (
     go.uber.org/atomic v1.5.1 // indirect
     go.uber.org/multierr v1.4.0
     go.uber.org/zap v1.13.0
+    golang.org/x/mod v0.4.2
     google.golang.org/grpc v1.29.1
     gopkg.in/yaml.v2 v2.3.0
+    gotest.tools/v3 v3.0.2 // indirect
     helm.sh/helm/v3 v3.3.4
 )

go.sum (469 lines changed)

File diff suppressed because it is too large.

View File

@@ -774,22 +774,31 @@ func (cs *LuetCompiler) getSpecHash(pkgs pkg.DefaultPackages, salt string) (stri
     return fmt.Sprintf("%x", h.Sum(nil)), nil
 }
-func (cs *LuetCompiler) resolveJoinImages(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) error {
-    joinTag := ">:loop: join<"
-    if len(p.Join) != 0 {
-        Info(joinTag, "Generating a joint parent image from final packages")
+func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) error {
+    joinTag := ">:loop: final images<"
+    var fromPackages pkg.DefaultPackages
+
+    if len(p.Join) > 0 {
+        fromPackages = p.Join
+        Warning(joinTag, `
+Attention! the 'join' keyword is going to be deprecated in Luet >=0.18.x.
+Use 'requires_final_images: true' instead in the build.yaml file`)
+    } else if p.RequiresFinalImages {
+        Info(joinTag, "Generating a parent image from final packages")
+        fromPackages = p.Package.GetRequires()
     } else {
+        // No source image to resolve
         return nil
     }
     // First compute a hash and check if image is available. if it is, then directly consume that
-    overallFp, err := cs.getSpecHash(p.Join, "join")
+    overallFp, err := cs.getSpecHash(fromPackages, "join")
     if err != nil {
         return errors.Wrap(err, "could not generate image hash")
     }
-    Info(joinTag, "Searching existing image with hash ", overallFp)
+    Info(joinTag, "Searching existing image with hash", overallFp)
     image := cs.findImageHash(overallFp, p)
     if image != "" {
@@ -811,12 +820,12 @@ func (cs *LuetCompiler) resolveJoinImages(concurrency int, keepPermissions bool,
     }
     defer os.RemoveAll(joinDir) // clean up
-    for _, p := range p.Join { //highly dependent on the order
+    for _, p := range fromPackages {
         Info(joinTag, ":arrow_right_hook:", p.HumanReadableString(), ":leaves:")
     }
     current := 0
-    for _, c := range p.Join {
+    for _, c := range fromPackages {
         current++
         if c != nil && c.Name != "" && c.Version != "" {
             joinTag2 := fmt.Sprintf("%s %d/%d ⤑ :hammer: build %s", joinTag, current, len(p.Join), c.HumanReadableString())
@@ -924,7 +933,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
     //Before multistage : join - same as multistage, but keep artifacts, join them, create a new one and generate a final image.
     // When the image is there, use it as a source here, in place of GetImage().
-    if err := cs.resolveJoinImages(concurrency, keepPermissions, p); err != nil {
+    if err := cs.resolveFinalImages(concurrency, keepPermissions, p); err != nil {
         return nil, errors.Wrap(err, "while resolving join images")
     }
@@ -1028,7 +1037,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
         Assert: assertion,
     })
-    if err := cs.resolveJoinImages(concurrency, keepPermissions, compileSpec); err != nil {
+    if err := cs.resolveFinalImages(concurrency, keepPermissions, compileSpec); err != nil {
         return nil, errors.Wrap(err, "while resolving join images")
     }
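
Going by the deprecation warning shown above, a build.yaml that used `join` would migrate roughly like this (package entries are placeholders; a sketch, not taken from the repository):

# before: the same list is duplicated under 'join'
requires:
- category: test
  name: a
join:
- category: test
  name: a

# after: keep only 'requires' and flag it
requires:
- category: test
  name: a
requires_final_images: true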

View File

@@ -46,13 +46,13 @@ var _ = Describe("ImageHashTree", func() {
             packageHash, err := hashtree.Query(compiler, spec)
             Expect(err).ToNot(HaveOccurred())
-            Expect(packageHash.Target.Hash.BuildHash).To(Equal("4db24406e8db30a3310a1cf8c4d4e19597745e6d41b189dc51a73ac4a50cc9e6"))
-            Expect(packageHash.Target.Hash.PackageHash).To(Equal("4c867c9bab6c71d9420df75806e7a2f171dbc08487852ab4e2487bab04066cf2"))
-            Expect(packageHash.BuilderImageHash).To(Equal("builder-e6f9c5552a67c463215b0a9e4f7c7fc8"))
+            Expect(packageHash.Target.Hash.BuildHash).To(Equal("53993e5a02da4c21ad845371c872f5836fe45ff3a4e3c5ccb6296d0faee2b107"))
+            Expect(packageHash.Target.Hash.PackageHash).To(Equal("a786d3fd29d0b8bdfe5f304c8bf8be909d5c764cd7059c0e63294a8bff17f3ef"))
+            Expect(packageHash.BuilderImageHash).To(Equal("builder-0cd3c0d07fc9be568377b3bf1b699e06"))
         })
     })
-    expectedPackageHash := "15811d83d0f8360318c54d91dcae3714f8efb39bf872572294834880f00ee7a8"
+    expectedPackageHash := "0d568ac04c4ca528a4e5b67978f2ad3a75d31d443ab20f9d7683b9608cc0d494"
     Context("complex package definition", func() {
         BeforeEach(func() {
@@ -73,23 +73,26 @@ var _ = Describe("ImageHashTree", func() {
             Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(expectedPackageHash))
             Expect(packageHash.SourceHash).To(Equal(expectedPackageHash))
-            Expect(packageHash.BuilderImageHash).To(Equal("builder-3d739cab442aec15a6da238481df73c5"))
+            Expect(packageHash.BuilderImageHash).To(Equal("builder-0f45c345f59103e84fc8bebbf02f2e2b"))
             //Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
-            Expect(packageHash.Target.Hash.PackageHash).To(Equal("99c4ebb4bc4754985fcc28677badf90f525aa231b1db0fe75659f11b86dc20e8"))
+            Expect(packageHash.Target.Hash.PackageHash).To(Equal("2e8159583ac825acada763358290cfbea919a33873a926cab84f4f1a67ecf111"))
             a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}
             hash, err := packageHash.DependencyBuildImage(a)
             Expect(err).ToNot(HaveOccurred())
-            Expect(hash).To(Equal("f48f28ab62f1379a4247ec763681ccede68ea1e5c25aae8fb72459c0b2f8742e"))
+            Expect(hash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
             assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())
             Expect(assertionA.Hash.PackageHash).To(Equal(expectedPackageHash))
             b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
             assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())
-            Expect(assertionB.Hash.PackageHash).To(Equal("f48f28ab62f1379a4247ec763681ccede68ea1e5c25aae8fb72459c0b2f8742e"))
+            Expect(assertionB.Hash.PackageHash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
             hashB, err := packageHash.DependencyBuildImage(b)
             Expect(err).ToNot(HaveOccurred())
-            Expect(hashB).To(Equal("2668e418eab6861404834ad617713e39b8e58f68016a1fbfcc9384efdd037376"))
+            Expect(hashB).To(Equal("315075265aeb2e3c04c5428d31911f53c194ec9fa3db1421e8478f44b1e0def8"))
         })
     })
@@ -112,33 +115,35 @@ var _ = Describe("ImageHashTree", func() {
             Expect(err).ToNot(HaveOccurred())
             Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).ToNot(Equal(expectedPackageHash))
-            sourceHash := "1d91b13d0246fa000085a1071c63397d21546300b17f69493f22315a64b717d4"
+            sourceHash := "66ec001fe72052d0e605ca96f607ae39ea4f8b53f0b7f762e377622d9c654de3"
             Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(sourceHash))
             Expect(packageHash.SourceHash).To(Equal(sourceHash))
             Expect(packageHash.SourceHash).ToNot(Equal(expectedPackageHash))
-            Expect(packageHash.BuilderImageHash).To(Equal("builder-03ee108a7c56b17ee568ace0800dd16d"))
+            Expect(packageHash.BuilderImageHash).To(Equal("builder-ffc02fd8aaa916d0e17249885b3226b1"))
             //Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
-            Expect(packageHash.Target.Hash.PackageHash).To(Equal("7677da23b2cc866c2d07aa4a58fbf703340f2f78c0efbb1ba9faf8979f250c87"))
+            Expect(packageHash.Target.Hash.PackageHash).To(Equal("b9c0286ebf6d28be831926ec7da9cb3cda6b489722d656aefc363ebd7173f937"))
             a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}
             hash, err := packageHash.DependencyBuildImage(a)
             Expect(err).ToNot(HaveOccurred())
-            Expect(hash).To(Equal("f48f28ab62f1379a4247ec763681ccede68ea1e5c25aae8fb72459c0b2f8742e"))
+            Expect(hash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
             assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())
-            Expect(assertionA.Hash.PackageHash).To(Equal("1d91b13d0246fa000085a1071c63397d21546300b17f69493f22315a64b717d4"))
+            Expect(assertionA.Hash.PackageHash).To(Equal("66ec001fe72052d0e605ca96f607ae39ea4f8b53f0b7f762e377622d9c654de3"))
             Expect(assertionA.Hash.PackageHash).ToNot(Equal(expectedPackageHash))
             b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
             assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())
-            Expect(assertionB.Hash.PackageHash).To(Equal("f48f28ab62f1379a4247ec763681ccede68ea1e5c25aae8fb72459c0b2f8742e"))
+            Expect(assertionB.Hash.PackageHash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
             hashB, err := packageHash.DependencyBuildImage(b)
             Expect(err).ToNot(HaveOccurred())
-            Expect(hashB).To(Equal("2668e418eab6861404834ad617713e39b8e58f68016a1fbfcc9384efdd037376"))
+            Expect(hashB).To(Equal("315075265aeb2e3c04c5428d31911f53c194ec9fa3db1421e8478f44b1e0def8"))
         })
     })

View File

@@ -26,6 +26,7 @@ import (
     pkg "github.com/mudler/luet/pkg/package"
     "github.com/mudler/luet/pkg/solver"
     "github.com/otiai10/copy"
+    dirhash "golang.org/x/mod/sumdb/dirhash"
     yaml "gopkg.in/yaml.v2"
 )
@@ -115,39 +116,42 @@ type LuetCompilationSpec struct {
     Copy []CopyField          `json:"copy"`
     Join pkg.DefaultPackages  `json:"join"`
+    RequiresFinalImages bool  `json:"requires_final_images" yaml:"requires_final_images"`
 }
 // Signature is a portion of the spec that yields a signature for the hash
 type Signature struct {
     Image      string
     Steps      []string
     PackageDir string
     Prelude    []string
     Seed       string
     Env        []string
     Retrieve   []string
     Unpack     bool
     Includes   []string
     Excludes   []string
     Copy       []CopyField
     Join       pkg.DefaultPackages
+    RequiresFinalImages bool
 }
 func (cs *LuetCompilationSpec) signature() Signature {
     return Signature{
         Image:      cs.Image,
         Steps:      cs.Steps,
         PackageDir: cs.PackageDir,
         Prelude:    cs.Prelude,
         Seed:       cs.Seed,
         Env:        cs.Env,
         Retrieve:   cs.Retrieve,
         Unpack:     cs.Unpack,
         Includes:   cs.Includes,
         Excludes:   cs.Excludes,
         Copy:       cs.Copy,
         Join:       cs.Join,
+        RequiresFinalImages: cs.RequiresFinalImages,
     }
 }
@@ -242,7 +246,7 @@ func (cs *LuetCompilationSpec) SetSeedImage(s string) {
 }
 func (cs *LuetCompilationSpec) EmptyPackage() bool {
-    return len(cs.BuildSteps()) == 0 && len(cs.GetPreBuildSteps()) == 0 && !cs.UnpackedPackage()
+    return len(cs.BuildSteps()) == 0 && !cs.UnpackedPackage()
 }
 func (cs *LuetCompilationSpec) UnpackedPackage() bool {
@@ -266,7 +270,11 @@ func (cs *LuetCompilationSpec) Hash() (string, error) {
     // build a signature, we want to be part of the hash only the fields that are relevant for build purposes
     signature := cs.signature()
     h, err := hashstructure.Hash(signature, hashstructure.FormatV2, nil)
-    return fmt.Sprint(h), err
+    sum, err := dirhash.HashDir(cs.Package.Path, "", dirhash.DefaultHash)
+    if err != nil {
+        return "", err
+    }
+    return fmt.Sprint(h, sum), err
 }
 func (cs *LuetCompilationSpec) CopyRetrieves(dest string) error {
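
A minimal standalone sketch of the dirhash call wired into Hash() above (hypothetical spec path; golang.org/x/mod is the dependency added to go.mod earlier in this compare):

package main

import (
    "fmt"
    "log"

    "golang.org/x/mod/sumdb/dirhash"
)

func main() {
    // Hash every file under a spec folder; adding, removing or editing any
    // file there yields a different sum, which is what now feeds the package
    // hash alongside the hashstructure signature of the spec fields.
    sum, err := dirhash.HashDir("./tree/test/a", "", dirhash.DefaultHash)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(sum) // e.g. "h1:..."
}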

View File

@@ -18,16 +18,27 @@ package docker
 import (
     "context"
     "encoding/hex"
+    "fmt"
     "os"
     "strings"
+    "github.com/containerd/containerd/images"
     "github.com/mudler/luet/pkg/helpers/imgworker"
+    continerdarchive "github.com/containerd/containerd/archive"
     "github.com/docker/cli/cli/trust"
     "github.com/docker/distribution/reference"
     "github.com/docker/docker/api/types"
+    "github.com/docker/docker/pkg/archive"
     "github.com/docker/docker/registry"
+    "github.com/google/go-containerregistry/pkg/authn"
+    "github.com/google/go-containerregistry/pkg/name"
+    v1 "github.com/google/go-containerregistry/pkg/v1"
+    "github.com/google/go-containerregistry/pkg/v1/mutate"
+    "github.com/google/go-containerregistry/pkg/v1/remote"
+    "github.com/mudler/luet/pkg/bus"
     "github.com/opencontainers/go-digest"
+    specs "github.com/opencontainers/image-spec/specs-go/v1"
     "github.com/pkg/errors"
     "github.com/theupdateframework/notary/tuf/data"
 )
@@ -95,18 +106,31 @@ func trustedResolveDigest(ctx context.Context, ref reference.NamedTagged, authCo
     return reference.WithDigest(ref, dgst)
 }
-// DownloadAndExtractDockerImage is a re-adaption
-// from genuinetools/img https://github.com/genuinetools/img/blob/54d0ca981c1260546d43961a538550eef55c87cf/pull.go
-func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {
-    if verify {
-        img, err := verifyImage(image, auth)
-        if err != nil {
-            return nil, errors.Wrapf(err, "failed verifying image")
-        }
-        image = img
-    }
+type staticAuth struct {
+    auth *types.AuthConfig
+}
+
+func (s staticAuth) Authorization() (*authn.AuthConfig, error) {
+    if s.auth == nil {
+        return nil, nil
+    }
+    return &authn.AuthConfig{
+        Username:      s.auth.Username,
+        Password:      s.auth.Password,
+        Auth:          s.auth.Auth,
+        IdentityToken: s.auth.IdentityToken,
+        RegistryToken: s.auth.RegistryToken,
+    }, nil
+}
+
+// UnpackEventData is the data structure to pass for the bus events
+type UnpackEventData struct {
+    Image string
+    Dest  string
+}
+
+// privilegedExtractImage uses the imgworker (which requires privileges) to extract a container image
+func privilegedExtractImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {
     defer os.RemoveAll(temp)
     c, err := imgworker.New(temp, auth)
     if err != nil {
@@ -117,14 +141,121 @@ func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthCon
     listedImage, err := c.Pull(image)
     if err != nil {
         return nil, errors.Wrapf(err, "failed listing images")
     }
     os.RemoveAll(dest)
+    bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
     err = c.Unpack(image, dest)
+    bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
     return listedImage, err
 }
+
+// UnarchiveLayers extract layers with archive.Untar from docker instead of containerd
+func UnarchiveLayers(temp string, img v1.Image, image, dest string, auth *types.AuthConfig, verify bool) (int64, error) {
+    layers, err := img.Layers()
+    if err != nil {
+        return 0, fmt.Errorf("reading layers from '%s' image failed: %v", image, err)
+    }
+
+    bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
+
+    var size int64
+    for _, l := range layers {
+        s, err := l.Size()
+        if err != nil {
+            return 0, fmt.Errorf("reading layer size from '%s' image failed: %v", image, err)
+        }
+        size += s
+
+        layerReader, err := l.Uncompressed()
+        if err != nil {
+            return 0, fmt.Errorf("reading uncompressed layer from '%s' image failed: %v", image, err)
+        }
+        defer layerReader.Close()
+
+        // Unpack the tarfile to the rootfs path.
+        // FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
+        if err := archive.Untar(layerReader, dest, &archive.TarOptions{
+            NoLchown:        false,
+            ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
+        }); err != nil {
+            return 0, fmt.Errorf("extracting '%s' image to directory %s failed: %v", image, dest, err)
+        }
+    }
+
+    bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
+
+    return size, nil
+}
+
+// DownloadAndExtractDockerImage extracts a container image natively. It supports privileged/unprivileged mode
+func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {
+    if verify {
+        img, err := verifyImage(image, auth)
+        if err != nil {
+            return nil, errors.Wrapf(err, "failed verifying image")
+        }
+        image = img
+    }
+
+    if os.Getenv("LUET_PRIVILEGED_EXTRACT") == "true" {
+        return privilegedExtractImage(temp, image, dest, auth, verify)
+    }
+
+    ref, err := name.ParseReference(image)
+    if err != nil {
+        return nil, err
+    }
+
+    img, err := remote.Image(ref, remote.WithAuth(staticAuth{auth}))
+    if err != nil {
+        return nil, err
+    }
+
+    m, err := img.Manifest()
+    if err != nil {
+        return nil, err
+    }
+
+    mt, err := img.MediaType()
+    if err != nil {
+        return nil, err
+    }
+
+    d, err := img.Digest()
+    if err != nil {
+        return nil, err
+    }
+
+    reader := mutate.Extract(img)
+    defer reader.Close()
+    defer os.RemoveAll(temp)
+
+    bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
+    c, err := continerdarchive.Apply(context.TODO(), dest, reader)
+    if err != nil {
+        return nil, err
+    }
+    bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
+
+    return &imgworker.ListedImage{
+        Image: images.Image{
+            Name:   image,
+            Labels: m.Annotations,
+            Target: specs.Descriptor{
+                MediaType: string(mt),
+                Digest:    digest.Digest(d.String()),
+                Size:      c,
+            },
+        },
+        ContentSize: c,
+    }, nil
+}
+
 func StripInvalidStringsFromImage(s string) string {
     return strings.ReplaceAll(s, "+", "-")
 }
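
As wired above, the extraction path is chosen at runtime from the environment; roughly (repository and package names are placeholders):

luet install repo/package                               # default: unprivileged pull and extract via go-containerregistry
LUET_PRIVILEGED_EXTRACT=true luet install repo/package  # opt back in to the imgworker-based privileged extraction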

View File

@@ -30,7 +30,6 @@ import (
     "github.com/mudler/luet/pkg/config"
     "github.com/mudler/luet/pkg/helpers/docker"
     fileHelper "github.com/mudler/luet/pkg/helpers/file"
-    "github.com/mudler/luet/pkg/helpers/imgworker"
     . "github.com/mudler/luet/pkg/logger"
 )
@@ -141,7 +140,6 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
     var file *os.File = nil
     var err error
     var temp, contentstore string
-    var info *imgworker.ListedImage
     // Files should be in URI/repository:<file>
     ok := false
@@ -165,7 +163,7 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
         imageName := fmt.Sprintf("%s:%s", uri, docker.StripInvalidStringsFromImage(name))
         Info("Downloading", imageName)
-        info, err = docker.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
+        info, err := docker.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
         if err != nil {
             Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
             continue

View File

@@ -92,7 +92,7 @@ testUnInstall() {
     assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
     # TODO: we need remove it or not??
     assertTrue 'config protect created' "[ -e '$tmpdir/testrootfs/etc/a/._cfg0001_conf' ]"
-    assertTrue 'config protect maintains the protected files' "[ -e '$tmpdir/testrootfs/etc/a/conf' ]"
+    assertTrue 'config protect maintains the protected files' "[ ! -e '$tmpdir/testrootfs/etc/a/conf' ]"
 }

View File

@@ -87,7 +87,7 @@ testInstall() {
 testUnInstall() {
-    luet uninstall -y --full --config $tmpdir/luet.yaml test/a
+    luet uninstall -y --full --keep-protected-files --config $tmpdir/luet.yaml test/a
     installst=$?
     assertEquals 'uninstall test successfully' "$installst" "0"
     assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"

View File

@@ -86,7 +86,7 @@ testInstall() {
 testUnInstall() {
-    luet uninstall -y --full --config $tmpdir/luet.yaml test/a
+    luet uninstall -y --full --keep-protected-files --config $tmpdir/luet.yaml test/a
     installst=$?
     assertEquals 'uninstall test successfully' "$installst" "0"
     assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"

View File

@@ -51,9 +51,9 @@ testBuild() {
     assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
     assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
     assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
-    assertContains 'Does use the upstream cache without specifying it test/c' "$build_output" "Images available remotely for test/c-1.0 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:ac34beb3fc2752bab54549db095dd6994d7531b88e1ff7f902d01ae80fdd030d"
-    assertContains 'Does use the upstream cache without specifying it test/z' "$build_output" "Images available remotely for test/z-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:d8b8881d50ae83646728bb28ad678fc14e4e003e4b0d0a66f8e6167c6116e024"
-    assertContains 'Does use the upstream cache without specifying it test/interpolated' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:2aaccd929ebe5683f95c70c6ec8d68f240ceea8633f3350b94904ad73da5fd47"
+    assertContains 'Does use the upstream cache without specifying it test/c' "$build_output" "Images available remotely for test/c-1.0 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:7a46e5c8893b574d9f984eaade75b0e68cc891bb6d01860ba91a62f9e4de2b62"
+    assertContains 'Does use the upstream cache without specifying it test/z' "$build_output" "Images available remotely for test/z-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:4277934ece4de17856b87294301cdfcdf3c6f956e682ff64697be67178b8a4b1"
+    assertContains 'Does use the upstream cache without specifying it test/interpolated' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:132c097844b4e809efa8ae234452893b0214a1c860371e21564d89655ab10a56"
 }

 testRepo() {

View File

@@ -56,8 +56,8 @@ EOF
     assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
     assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
     assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
-    assertNotContains 'Does NOT use the upstream cache without specifying it' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:2aaccd929ebe5683f95c70c6ec8d68f240ceea8633f3350b94904ad73da5fd47"
-    assertContains 'Does generate a new hash as values changed build.yaml for test/interpolated-1.0+2 package image' "$build_output" "Building image luet/cache:c34b533cf76886c332fe9b2d3208f04265360a465a90c996cb4fcdaf959dee36 done"
+    assertNotContains 'Does NOT use the upstream cache without specifying it' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:132c097844b4e809efa8ae234452893b0214a1c860371e21564d89655ab10a56"
+    assertContains 'Does generate a new hash as values changed build.yaml for test/interpolated-1.0+2 package image' "$build_output" "Building image luet/cache:a7edf5c9ab219e406b64b2692ce08d56a8bcd212a43bb70df3737f13a008dfd4 done"
 }

 testRepo() {

View File

@@ -56,7 +56,7 @@ EOF
     assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
    assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
     assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
-    assertContains 'Does use the upstream cache without specifying it' "$build_output" "Downloading image quay.io/mocaccinoos/integration-test-cache:4db24406e8db30a3310a1cf8c4d4e19597745e6d41b189dc51a73ac4a50cc9e6"
+    assertContains 'Does use the upstream cache without specifying it (test/c-1.0)' "$build_output" "quay.io/mocaccinoos/integration-test-cache:a786d3fd29d0b8bdfe5f304c8bf8be909d5c764cd7059c0e63294a8bff17f3ef"
 }

 testRepo() {

vendor/github.com/docker/cli/AUTHORS (generated, vendored; 58 lines changed)
View File

@@ -8,6 +8,7 @@ Aaron.L.Xu <likexu@harmonycloud.cn>
Abdur Rehman <abdur_rehman@mentor.com> Abdur Rehman <abdur_rehman@mentor.com>
Abhinandan Prativadi <abhi@docker.com> Abhinandan Prativadi <abhi@docker.com>
Abin Shahab <ashahab@altiscale.com> Abin Shahab <ashahab@altiscale.com>
Abreto FU <public@abreto.email>
Ace Tang <aceapril@126.com> Ace Tang <aceapril@126.com>
Addam Hardy <addam.hardy@gmail.com> Addam Hardy <addam.hardy@gmail.com>
Adolfo Ochagavía <aochagavia92@gmail.com> Adolfo Ochagavía <aochagavia92@gmail.com>
@@ -17,12 +18,15 @@ Adrien Folie <folie.adrien@gmail.com>
Ahmet Alp Balkan <ahmetb@microsoft.com> Ahmet Alp Balkan <ahmetb@microsoft.com>
Aidan Feldman <aidan.feldman@gmail.com> Aidan Feldman <aidan.feldman@gmail.com>
Aidan Hobson Sayers <aidanhs@cantab.net> Aidan Hobson Sayers <aidanhs@cantab.net>
AJ Bowen <aj@gandi.net> AJ Bowen <aj@soulshake.net>
Akihiro Suda <suda.akihiro@lab.ntt.co.jp> Akhil Mohan <akhil.mohan@mayadata.io>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akim Demaille <akim.demaille@docker.com> Akim Demaille <akim.demaille@docker.com>
Alan Thompson <cloojure@gmail.com> Alan Thompson <cloojure@gmail.com>
Albert Callarisa <shark234@gmail.com> Albert Callarisa <shark234@gmail.com>
Albin Kerouanton <albin@akerouanton.name>
Aleksa Sarai <asarai@suse.de> Aleksa Sarai <asarai@suse.de>
Aleksander Piotrowski <apiotrowski312@gmail.com>
Alessandro Boch <aboch@tetrationanalytics.com> Alessandro Boch <aboch@tetrationanalytics.com>
Alex Mavrogiannis <alex.mavrogiannis@docker.com> Alex Mavrogiannis <alex.mavrogiannis@docker.com>
Alex Mayer <amayer5125@gmail.com> Alex Mayer <amayer5125@gmail.com>
@@ -40,6 +44,7 @@ Amir Goldstein <amir73il@aquasec.com>
Amit Krishnan <amit.krishnan@oracle.com> Amit Krishnan <amit.krishnan@oracle.com>
Amit Shukla <amit.shukla@docker.com> Amit Shukla <amit.shukla@docker.com>
Amy Lindburg <amy.lindburg@docker.com> Amy Lindburg <amy.lindburg@docker.com>
Anca Iordache <anca.iordache@docker.com>
Anda Xu <anda.xu@docker.com> Anda Xu <anda.xu@docker.com>
Andrea Luzzardi <aluzzardi@gmail.com> Andrea Luzzardi <aluzzardi@gmail.com>
Andreas Köhler <andi5.py@gmx.net> Andreas Köhler <andi5.py@gmx.net>
@@ -49,6 +54,7 @@ Andrew Macpherson <hopscotch23@gmail.com>
Andrew McDonnell <bugs@andrewmcdonnell.net> Andrew McDonnell <bugs@andrewmcdonnell.net>
Andrew Po <absourd.noise@gmail.com> Andrew Po <absourd.noise@gmail.com>
Andrey Petrov <andrey.petrov@shazow.net> Andrey Petrov <andrey.petrov@shazow.net>
Andrii Berehuliak <berkusandrew@gmail.com>
André Martins <aanm90@gmail.com> André Martins <aanm90@gmail.com>
Andy Goldstein <agoldste@redhat.com> Andy Goldstein <agoldste@redhat.com>
Andy Rothfusz <github@developersupport.net> Andy Rothfusz <github@developersupport.net>
@@ -61,7 +67,9 @@ Antonis Kalipetis <akalipetis@gmail.com>
Anusha Ragunathan <anusha.ragunathan@docker.com> Anusha Ragunathan <anusha.ragunathan@docker.com>
Ao Li <la9249@163.com> Ao Li <la9249@163.com>
Arash Deshmeh <adeshmeh@ca.ibm.com> Arash Deshmeh <adeshmeh@ca.ibm.com>
Arko Dasgupta <arko.dasgupta@docker.com>
Arnaud Porterie <arnaud.porterie@docker.com> Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Peka <arthur.peka@outlook.com>
Ashwini Oruganti <ashwini.oruganti@gmail.com> Ashwini Oruganti <ashwini.oruganti@gmail.com>
Azat Khuyiyakhmetov <shadow_uz@mail.ru> Azat Khuyiyakhmetov <shadow_uz@mail.ru>
Bardia Keyoumarsi <bkeyouma@ucsc.edu> Bardia Keyoumarsi <bkeyouma@ucsc.edu>
@@ -87,6 +95,7 @@ Brent Salisbury <brent.salisbury@docker.com>
Bret Fisher <bret@bretfisher.com> Bret Fisher <bret@bretfisher.com>
Brian (bex) Exelbierd <bexelbie@redhat.com> Brian (bex) Exelbierd <bexelbie@redhat.com>
Brian Goff <cpuguy83@gmail.com> Brian Goff <cpuguy83@gmail.com>
Brian Wieder <brian@4wieders.com>
Bryan Bess <squarejaw@bsbess.com> Bryan Bess <squarejaw@bsbess.com>
Bryan Boreham <bjboreham@gmail.com> Bryan Boreham <bjboreham@gmail.com>
Bryan Murphy <bmurphy1976@gmail.com> Bryan Murphy <bmurphy1976@gmail.com>
@@ -95,6 +104,7 @@ Cameron Spear <cameronspear@gmail.com>
Cao Weiwei <cao.weiwei30@zte.com.cn> Cao Weiwei <cao.weiwei30@zte.com.cn>
Carlo Mion <mion00@gmail.com> Carlo Mion <mion00@gmail.com>
Carlos Alexandro Becker <caarlos0@gmail.com> Carlos Alexandro Becker <caarlos0@gmail.com>
Carlos de Paula <me@carlosedp.com>
Ce Gao <ce.gao@outlook.com> Ce Gao <ce.gao@outlook.com>
Cedric Davies <cedricda@microsoft.com> Cedric Davies <cedricda@microsoft.com>
Cezar Sa Espinola <cezarsa@gmail.com> Cezar Sa Espinola <cezarsa@gmail.com>
@@ -128,26 +138,31 @@ Coenraad Loubser <coenraad@wish.org.za>
Colin Hebert <hebert.colin@gmail.com> Colin Hebert <hebert.colin@gmail.com>
Collin Guarino <collin.guarino@gmail.com> Collin Guarino <collin.guarino@gmail.com>
Colm Hally <colmhally@gmail.com> Colm Hally <colmhally@gmail.com>
Comical Derskeal <27731088+derskeal@users.noreply.github.com>
Corey Farrell <git@cfware.com> Corey Farrell <git@cfware.com>
Corey Quon <corey.quon@docker.com> Corey Quon <corey.quon@docker.com>
Craig Wilhite <crwilhit@microsoft.com> Craig Wilhite <crwilhit@microsoft.com>
Cristian Staretu <cristian.staretu@gmail.com> Cristian Staretu <cristian.staretu@gmail.com>
Daehyeok Mun <daehyeok@gmail.com> Daehyeok Mun <daehyeok@gmail.com>
Dafydd Crosby <dtcrsby@gmail.com> Dafydd Crosby <dtcrsby@gmail.com>
Daisuke Ito <itodaisuke00@gmail.com>
dalanlan <dalanlan925@gmail.com> dalanlan <dalanlan925@gmail.com>
Damien Nadé <github@livna.org> Damien Nadé <github@livna.org>
Dan Cotora <dan@bluevision.ro> Dan Cotora <dan@bluevision.ro>
Daniel Artine <daniel.artine@ufrj.br>
Daniel Cassidy <mail@danielcassidy.me.uk> Daniel Cassidy <mail@danielcassidy.me.uk>
Daniel Dao <dqminh@cloudflare.com> Daniel Dao <dqminh@cloudflare.com>
Daniel Farrell <dfarrell@redhat.com> Daniel Farrell <dfarrell@redhat.com>
Daniel Gasienica <daniel@gasienica.ch> Daniel Gasienica <daniel@gasienica.ch>
Daniel Goosen <daniel.goosen@surveysampling.com> Daniel Goosen <daniel.goosen@surveysampling.com>
Daniel Helfand <dhelfand@redhat.com>
Daniel Hiltgen <daniel.hiltgen@docker.com> Daniel Hiltgen <daniel.hiltgen@docker.com>
Daniel J Walsh <dwalsh@redhat.com> Daniel J Walsh <dwalsh@redhat.com>
Daniel Nephin <dnephin@docker.com> Daniel Nephin <dnephin@docker.com>
Daniel Norberg <dano@spotify.com> Daniel Norberg <dano@spotify.com>
Daniel Watkins <daniel@daniel-watkins.co.uk> Daniel Watkins <daniel@daniel-watkins.co.uk>
Daniel Zhang <jmzwcn@gmail.com> Daniel Zhang <jmzwcn@gmail.com>
Daniil Nikolenko <qoo2p5@gmail.com>
Danny Berger <dpb587@gmail.com> Danny Berger <dpb587@gmail.com>
Darren Shepherd <darren.s.shepherd@gmail.com> Darren Shepherd <darren.s.shepherd@gmail.com>
Darren Stahl <darst@microsoft.com> Darren Stahl <darst@microsoft.com>
@@ -180,13 +195,15 @@ Dima Stopel <dima@twistlock.com>
Dimitry Andric <d.andric@activevideo.com> Dimitry Andric <d.andric@activevideo.com>
Ding Fei <dingfei@stars.org.cn> Ding Fei <dingfei@stars.org.cn>
Diogo Monica <diogo@docker.com> Diogo Monica <diogo@docker.com>
Djordje Lukic <djordje.lukic@docker.com>
Dmitry Gusev <dmitry.gusev@gmail.com> Dmitry Gusev <dmitry.gusev@gmail.com>
Dmitry Smirnov <onlyjob@member.fsf.org> Dmitry Smirnov <onlyjob@member.fsf.org>
Dmitry V. Krivenok <krivenok.dmitry@gmail.com> Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
Dominik Braun <dominik.braun@nbsp.de>
Don Kjer <don.kjer@gmail.com> Don Kjer <don.kjer@gmail.com>
Dong Chen <dongluo.chen@docker.com> Dong Chen <dongluo.chen@docker.com>
Doug Davis <dug@us.ibm.com> Doug Davis <dug@us.ibm.com>
Drew Erny <drew.erny@docker.com> Drew Erny <derny@mirantis.com>
Ed Costello <epc@epcostello.com> Ed Costello <epc@epcostello.com>
Elango Sivanandam <elango.siva@docker.com> Elango Sivanandam <elango.siva@docker.com>
Eli Uriegas <eli.uriegas@docker.com> Eli Uriegas <eli.uriegas@docker.com>
@@ -249,6 +266,7 @@ Harald Albers <github@albersweb.de>
Harold Cooper <hrldcpr@gmail.com> Harold Cooper <hrldcpr@gmail.com>
Harry Zhang <harryz@hyper.sh> Harry Zhang <harryz@hyper.sh>
He Simei <hesimei@zju.edu.cn> He Simei <hesimei@zju.edu.cn>
Hector S <hfsam88@gmail.com>
Helen Xie <chenjg@harmonycloud.cn> Helen Xie <chenjg@harmonycloud.cn>
Henning Sprang <henning.sprang@gmail.com> Henning Sprang <henning.sprang@gmail.com>
Henry N <henrynmail-github@yahoo.de> Henry N <henrynmail-github@yahoo.de>
@@ -256,6 +274,7 @@ Hernan Garcia <hernandanielg@gmail.com>
Hongbin Lu <hongbin034@gmail.com> Hongbin Lu <hongbin034@gmail.com>
Hu Keping <hukeping@huawei.com> Hu Keping <hukeping@huawei.com>
Huayi Zhang <irachex@gmail.com> Huayi Zhang <irachex@gmail.com>
Hugo Gabriel Eyherabide <hugogabriel.eyherabide@gmail.com>
huqun <huqun@zju.edu.cn> huqun <huqun@zju.edu.cn>
Huu Nguyen <huu@prismskylabs.com> Huu Nguyen <huu@prismskylabs.com>
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com> Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
@@ -297,7 +316,7 @@ Jeremy Unruh <jeremybunruh@gmail.com>
Jeremy Yallop <yallop@docker.com> Jeremy Yallop <yallop@docker.com>
Jeroen Franse <jeroenfranse@gmail.com> Jeroen Franse <jeroenfranse@gmail.com>
Jesse Adametz <jesseadametz@gmail.com> Jesse Adametz <jesseadametz@gmail.com>
Jessica Frazelle <jessfraz@google.com> Jessica Frazelle <jess@oxide.computer>
Jezeniel Zapanta <jpzapanta22@gmail.com> Jezeniel Zapanta <jpzapanta22@gmail.com>
Jian Zhang <zhangjian.fnst@cn.fujitsu.com> Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
Jie Luo <luo612@zju.edu.cn> Jie Luo <luo612@zju.edu.cn>
@@ -308,6 +327,7 @@ Jimmy Song <rootsongjc@gmail.com>
jimmyxian <jimmyxian2004@yahoo.com.cn> jimmyxian <jimmyxian2004@yahoo.com.cn>
Jintao Zhang <zhangjintao9020@gmail.com> Jintao Zhang <zhangjintao9020@gmail.com>
Joao Fernandes <joao.fernandes@docker.com> Joao Fernandes <joao.fernandes@docker.com>
Joe Abbey <joe.abbey@gmail.com>
Joe Doliner <jdoliner@pachyderm.io> Joe Doliner <jdoliner@pachyderm.io>
Joe Gordon <joe.gordon0@gmail.com> Joe Gordon <joe.gordon0@gmail.com>
Joel Handwell <joelhandwell@gmail.com> Joel Handwell <joelhandwell@gmail.com>
@@ -317,7 +337,7 @@ Johan Euphrosine <proppy@google.com>
Johannes 'fish' Ziemke <github@freigeist.org> Johannes 'fish' Ziemke <github@freigeist.org>
John Feminella <jxf@jxf.me> John Feminella <jxf@jxf.me>
John Harris <john@johnharris.io> John Harris <john@johnharris.io>
John Howard (VM) <John.Howard@microsoft.com> John Howard <github@lowenna.com>
John Laswell <john.n.laswell@gmail.com> John Laswell <john.n.laswell@gmail.com>
John Maguire <jmaguire@duosecurity.com> John Maguire <jmaguire@duosecurity.com>
John Mulhausen <john@docker.com> John Mulhausen <john@docker.com>
@@ -326,12 +346,15 @@ John Stephens <johnstep@docker.com>
John Tims <john.k.tims@gmail.com> John Tims <john.k.tims@gmail.com>
John V. Martinez <jvmatl@gmail.com> John V. Martinez <jvmatl@gmail.com>
John Willis <john.willis@docker.com> John Willis <john.willis@docker.com>
Jon Johnson <jonjohnson@google.com>
Jonatas Baldin <jonatas.baldin@gmail.com>
Jonathan Boulle <jonathanboulle@gmail.com> Jonathan Boulle <jonathanboulle@gmail.com>
Jonathan Lee <jonjohn1232009@gmail.com> Jonathan Lee <jonjohn1232009@gmail.com>
Jonathan Lomas <jonathan@floatinglomas.ca> Jonathan Lomas <jonathan@floatinglomas.ca>
Jonathan McCrohan <jmccrohan@gmail.com> Jonathan McCrohan <jmccrohan@gmail.com>
Jonh Wendell <jonh.wendell@redhat.com> Jonh Wendell <jonh.wendell@redhat.com>
Jordan Jennings <jjn2009@gmail.com> Jordan Jennings <jjn2009@gmail.com>
Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com>
Joseph Kern <jkern@semafour.net> Joseph Kern <jkern@semafour.net>
Josh Bodah <jb3689@yahoo.com> Josh Bodah <jb3689@yahoo.com>
Josh Chorlton <jchorlton@gmail.com> Josh Chorlton <jchorlton@gmail.com>
@@ -369,6 +392,7 @@ Kevin Kern <kaiwentan@harmonycloud.cn>
Kevin Kirsche <Kev.Kirsche+GitHub@gmail.com> Kevin Kirsche <Kev.Kirsche+GitHub@gmail.com>
Kevin Meredith <kevin.m.meredith@gmail.com> Kevin Meredith <kevin.m.meredith@gmail.com>
Kevin Richardson <kevin@kevinrichardson.co> Kevin Richardson <kevin@kevinrichardson.co>
Kevin Woblick <mail@kovah.de>
khaled souf <khaled.souf@gmail.com> khaled souf <khaled.souf@gmail.com>
Kim Eik <kim@heldig.org> Kim Eik <kim@heldig.org>
Kir Kolyshkin <kolyshkin@gmail.com> Kir Kolyshkin <kolyshkin@gmail.com>
@@ -406,13 +430,16 @@ Luca Favatella <luca.favatella@erlang-solutions.com>
Luca Marturana <lucamarturana@gmail.com> Luca Marturana <lucamarturana@gmail.com>
Lucas Chan <lucas-github@lucaschan.com> Lucas Chan <lucas-github@lucaschan.com>
Luka Hartwig <mail@lukahartwig.de> Luka Hartwig <mail@lukahartwig.de>
Lukas Heeren <lukas-heeren@hotmail.com>
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com> Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
Lydell Manganti <LydellManganti@users.noreply.github.com> Lydell Manganti <LydellManganti@users.noreply.github.com>
Lénaïc Huard <lhuard@amadeus.com> Lénaïc Huard <lhuard@amadeus.com>
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com> Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
Mabin <bin.ma@huawei.com> Mabin <bin.ma@huawei.com>
Maciej Kalisz <maciej.d.kalisz@gmail.com>
Madhav Puri <madhav.puri@gmail.com> Madhav Puri <madhav.puri@gmail.com>
Madhu Venugopal <madhu@socketplane.io> Madhu Venugopal <madhu@socketplane.io>
Madhur Batra <madhurbatra097@gmail.com>
Malte Janduda <mail@janduda.net> Malte Janduda <mail@janduda.net>
Manjunath A Kumatagi <mkumatag@in.ibm.com> Manjunath A Kumatagi <mkumatag@in.ibm.com>
Mansi Nahar <mmn4185@rit.edu> Mansi Nahar <mmn4185@rit.edu>
@@ -422,6 +449,7 @@ Marco Mariani <marco.mariani@alterway.fr>
Marco Vedovati <mvedovati@suse.com> Marco Vedovati <mvedovati@suse.com>
Marcus Martins <marcus@docker.com> Marcus Martins <marcus@docker.com>
Marianna Tessel <mtesselh@gmail.com> Marianna Tessel <mtesselh@gmail.com>
Marius Ileana <marius.ileana@gmail.com>
Marius Sturm <marius@graylog.com> Marius Sturm <marius@graylog.com>
Mark Oates <fl0yd@me.com> Mark Oates <fl0yd@me.com>
Marsh Macy <marsma@microsoft.com> Marsh Macy <marsma@microsoft.com>
@@ -467,12 +495,14 @@ mikelinjie <294893458@qq.com>
Mikhail Vasin <vasin@cloud-tv.ru> Mikhail Vasin <vasin@cloud-tv.ru>
Milind Chawre <milindchawre@gmail.com> Milind Chawre <milindchawre@gmail.com>
Mindaugas Rukas <momomg@gmail.com> Mindaugas Rukas <momomg@gmail.com>
Miroslav Gula <miroslav.gula@naytrolabs.com>
Misty Stanley-Jones <misty@docker.com> Misty Stanley-Jones <misty@docker.com>
Mohammad Banikazemi <mb@us.ibm.com> Mohammad Banikazemi <mb@us.ibm.com>
Mohammed Aaqib Ansari <maaquib@gmail.com> Mohammed Aaqib Ansari <maaquib@gmail.com>
Mohini Anne Dsouza <mohini3917@gmail.com> Mohini Anne Dsouza <mohini3917@gmail.com>
Moorthy RS <rsmoorthy@gmail.com> Moorthy RS <rsmoorthy@gmail.com>
Morgan Bauer <mbauer@us.ibm.com> Morgan Bauer <mbauer@us.ibm.com>
Morten Hekkvang <morten.hekkvang@sbab.se>
Moysés Borges <moysesb@gmail.com> Moysés Borges <moysesb@gmail.com>
Mrunal Patel <mrunalp@gmail.com> Mrunal Patel <mrunalp@gmail.com>
muicoder <muicoder@gmail.com> muicoder <muicoder@gmail.com>
@@ -503,9 +533,11 @@ Nishant Totla <nishanttotla@gmail.com>
NIWA Hideyuki <niwa.niwa@nifty.ne.jp> NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
Noah Treuhaft <noah.treuhaft@docker.com> Noah Treuhaft <noah.treuhaft@docker.com>
O.S. Tezer <ostezer@gmail.com> O.S. Tezer <ostezer@gmail.com>
Odin Ugedal <odin@ugedal.com>
ohmystack <jun.jiang02@ele.me> ohmystack <jun.jiang02@ele.me>
Olle Jonsson <olle.jonsson@gmail.com> Olle Jonsson <olle.jonsson@gmail.com>
Olli Janatuinen <olli.janatuinen@gmail.com> Olli Janatuinen <olli.janatuinen@gmail.com>
Oscar Wieman <oscrx@icloud.com>
Otto Kekäläinen <otto@seravo.fi> Otto Kekäläinen <otto@seravo.fi>
Ovidio Mallo <ovidio.mallo@gmail.com> Ovidio Mallo <ovidio.mallo@gmail.com>
Pascal Borreli <pascal@borreli.com> Pascal Borreli <pascal@borreli.com>
@@ -515,6 +547,7 @@ Patrick Lang <plang@microsoft.com>
Paul <paul9869@gmail.com> Paul <paul9869@gmail.com>
Paul Kehrer <paul.l.kehrer@gmail.com> Paul Kehrer <paul.l.kehrer@gmail.com>
Paul Lietar <paul@lietar.net> Paul Lietar <paul@lietar.net>
Paul Mulders <justinkb@gmail.com>
Paul Weaver <pauweave@cisco.com> Paul Weaver <pauweave@cisco.com>
Pavel Pospisil <pospispa@gmail.com> Pavel Pospisil <pospispa@gmail.com>
Paweł Szczekutowicz <pszczekutowicz@gmail.com> Paweł Szczekutowicz <pszczekutowicz@gmail.com>
@@ -541,6 +574,7 @@ Qiang Huang <h.huangqiang@huawei.com>
Qinglan Peng <qinglanpeng@zju.edu.cn> Qinglan Peng <qinglanpeng@zju.edu.cn>
qudongfang <qudongfang@gmail.com> qudongfang <qudongfang@gmail.com>
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com> Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Rahul Zoldyck <rahulzoldyck@gmail.com>
Ravi Shekhar Jethani <rsjethani@gmail.com> Ravi Shekhar Jethani <rsjethani@gmail.com>
Ray Tsang <rayt@google.com> Ray Tsang <rayt@google.com>
Reficul <xuzhenglun@gmail.com> Reficul <xuzhenglun@gmail.com>
@@ -553,6 +587,7 @@ Richard Scothern <richard.scothern@gmail.com>
Rick Wieman <git@rickw.nl> Rick Wieman <git@rickw.nl>
Ritesh H Shukla <sritesh@vmware.com> Ritesh H Shukla <sritesh@vmware.com>
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com> Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
Rob Gulewich <rgulewich@netflix.com>
Robert Wallis <smilingrob@gmail.com> Robert Wallis <smilingrob@gmail.com>
Robin Naundorf <r.naundorf@fh-muenster.de> Robin Naundorf <r.naundorf@fh-muenster.de>
Robin Speekenbrink <robin@kingsquare.nl> Robin Speekenbrink <robin@kingsquare.nl>
@@ -574,10 +609,14 @@ Sainath Grandhi <sainath.grandhi@intel.com>
Sakeven Jiang <jc5930@sina.cn> Sakeven Jiang <jc5930@sina.cn>
Sally O'Malley <somalley@redhat.com> Sally O'Malley <somalley@redhat.com>
Sam Neirinck <sam@samneirinck.com> Sam Neirinck <sam@samneirinck.com>
Samarth Shah <samashah@microsoft.com>
Sambuddha Basu <sambuddhabasu1@gmail.com> Sambuddha Basu <sambuddhabasu1@gmail.com>
Sami Tabet <salph.tabet@gmail.com> Sami Tabet <salph.tabet@gmail.com>
Samuel Cochran <sj26@sj26.com>
Samuel Karp <skarp@amazon.com> Samuel Karp <skarp@amazon.com>
Santhosh Manohar <santhosh@docker.com> Santhosh Manohar <santhosh@docker.com>
Sargun Dhillon <sargun@netflix.com>
Saswat Bhattacharya <sas.saswat@gmail.com>
Scott Brenner <scott@scottbrenner.me> Scott Brenner <scott@scottbrenner.me>
Scott Collier <emailscottcollier@gmail.com> Scott Collier <emailscottcollier@gmail.com>
Sean Christopherson <sean.j.christopherson@intel.com> Sean Christopherson <sean.j.christopherson@intel.com>
@@ -598,6 +637,7 @@ sidharthamani <sid@rancher.com>
Silvin Lubecki <silvin.lubecki@docker.com> Silvin Lubecki <silvin.lubecki@docker.com>
Simei He <hesimei@zju.edu.cn> Simei He <hesimei@zju.edu.cn>
Simon Ferquel <simon.ferquel@docker.com> Simon Ferquel <simon.ferquel@docker.com>
Simon Heimberg <simon.heimberg@heimberg-ea.ch>
Sindhu S <sindhus@live.in> Sindhu S <sindhus@live.in>
Slava Semushin <semushin@redhat.com> Slava Semushin <semushin@redhat.com>
Solomon Hykes <solomon@docker.com> Solomon Hykes <solomon@docker.com>
@@ -627,7 +667,10 @@ TAGOMORI Satoshi <tagomoris@gmail.com>
taiji-tech <csuhqg@foxmail.com> taiji-tech <csuhqg@foxmail.com>
Taylor Jones <monitorjbl@gmail.com> Taylor Jones <monitorjbl@gmail.com>
Tejaswini Duggaraju <naduggar@microsoft.com> Tejaswini Duggaraju <naduggar@microsoft.com>
Tengfei Wang <tfwang@alauda.io>
Teppei Fukuda <knqyf263@gmail.com>
Thatcher Peskens <thatcher@docker.com> Thatcher Peskens <thatcher@docker.com>
Thibault Coupin <thibault.coupin@gmail.com>
Thomas Gazagnaire <thomas@gazagnaire.org> Thomas Gazagnaire <thomas@gazagnaire.org>
Thomas Krzero <thomas.kovatchitch@gmail.com> Thomas Krzero <thomas.kovatchitch@gmail.com>
Thomas Leonard <thomas.leonard@docker.com> Thomas Leonard <thomas.leonard@docker.com>
@@ -639,6 +682,7 @@ Tianyi Wang <capkurmagati@gmail.com>
Tibor Vass <teabee89@gmail.com> Tibor Vass <teabee89@gmail.com>
Tim Dettrick <t.dettrick@uq.edu.au> Tim Dettrick <t.dettrick@uq.edu.au>
Tim Hockin <thockin@google.com> Tim Hockin <thockin@google.com>
Tim Sampson <tim@sampson.fi>
Tim Smith <timbot@google.com> Tim Smith <timbot@google.com>
Tim Waugh <twaugh@redhat.com> Tim Waugh <twaugh@redhat.com>
Tim Wraight <tim.wraight@tangentlabs.co.uk> Tim Wraight <tim.wraight@tangentlabs.co.uk>
@@ -663,9 +707,11 @@ Tristan Carel <tristan@cogniteev.com>
Tycho Andersen <tycho@docker.com> Tycho Andersen <tycho@docker.com>
Tycho Andersen <tycho@tycho.ws> Tycho Andersen <tycho@tycho.ws>
uhayate <uhayate.gong@daocloud.io> uhayate <uhayate.gong@daocloud.io>
Ulrich Bareth <ulrich.bareth@gmail.com>
Ulysses Souza <ulysses.souza@docker.com> Ulysses Souza <ulysses.souza@docker.com>
Umesh Yadav <umesh4257@gmail.com> Umesh Yadav <umesh4257@gmail.com>
Valentin Lorentz <progval+git@progval.net> Valentin Lorentz <progval+git@progval.net>
Venkateswara Reddy Bukkasamudram <bukkasamudram@outlook.com>
Veres Lajos <vlajos@gmail.com> Veres Lajos <vlajos@gmail.com>
Victor Vieux <victor.vieux@docker.com> Victor Vieux <victor.vieux@docker.com>
Victoria Bialas <victoria.bialas@docker.com> Victoria Bialas <victoria.bialas@docker.com>
@@ -683,6 +729,7 @@ Wang Long <long.wanglong@huawei.com>
Wang Ping <present.wp@icloud.com> Wang Ping <present.wp@icloud.com>
Wang Xing <hzwangxing@corp.netease.com> Wang Xing <hzwangxing@corp.netease.com>
Wang Yuexiao <wang.yuexiao@zte.com.cn> Wang Yuexiao <wang.yuexiao@zte.com.cn>
Wang Yumu <37442693@qq.com>
Wataru Ishida <ishida.wataru@lab.ntt.co.jp> Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
Wayne Song <wsong@docker.com> Wayne Song <wsong@docker.com>
Wen Cheng Ma <wenchma@cn.ibm.com> Wen Cheng Ma <wenchma@cn.ibm.com>
@@ -691,6 +738,7 @@ Wes Morgan <cap10morgan@gmail.com>
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn> Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
William Henry <whenry@redhat.com> William Henry <whenry@redhat.com>
Xianglin Gao <xlgao@zju.edu.cn> Xianglin Gao <xlgao@zju.edu.cn>
Xiaodong Liu <liuxiaodong@loongson.cn>
Xiaodong Zhang <a4012017@sina.com> Xiaodong Zhang <a4012017@sina.com>
Xiaoxi He <xxhe@alauda.io> Xiaoxi He <xxhe@alauda.io>
Xinbo Weng <xihuanbo_0521@zju.edu.cn> Xinbo Weng <xihuanbo_0521@zju.edu.cn>


@@ -6,6 +6,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"sync"
"github.com/docker/cli/cli/config/configfile" "github.com/docker/cli/cli/config/configfile"
"github.com/docker/cli/cli/config/credentials" "github.com/docker/cli/cli/config/credentials"
@@ -23,10 +24,15 @@ const (
) )
var ( var (
configDir = os.Getenv("DOCKER_CONFIG") initConfigDir sync.Once
configDir string
) )
func init() { func setConfigDir() {
if configDir != "" {
return
}
configDir = os.Getenv("DOCKER_CONFIG")
if configDir == "" { if configDir == "" {
configDir = filepath.Join(homedir.Get(), configFileDir) configDir = filepath.Join(homedir.Get(), configFileDir)
} }
@@ -34,6 +40,7 @@ func init() {
// Dir returns the directory the configuration file is stored in // Dir returns the directory the configuration file is stored in
func Dir() string { func Dir() string {
initConfigDir.Do(setConfigDir)
return configDir return configDir
} }
@@ -88,11 +95,7 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
configFile := configfile.New(filename) configFile := configfile.New(filename)
// Try happy path first - latest config file // Try happy path first - latest config file
if _, err := os.Stat(filename); err == nil { if file, err := os.Open(filename); err == nil {
file, err := os.Open(filename)
if err != nil {
return configFile, errors.Wrap(err, filename)
}
defer file.Close() defer file.Close()
err = configFile.LoadFromReader(file) err = configFile.LoadFromReader(file)
if err != nil { if err != nil {
@@ -106,22 +109,16 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
} }
// Can't find latest config file so check for the old one // Can't find latest config file so check for the old one
homedir, err := os.UserHomeDir() home, err := os.UserHomeDir()
if err != nil { if err != nil {
return configFile, errors.Wrap(err, oldConfigfile) return configFile, errors.Wrap(err, oldConfigfile)
} }
confFile := filepath.Join(homedir, oldConfigfile) filename = filepath.Join(home, oldConfigfile)
if _, err := os.Stat(confFile); err != nil { if file, err := os.Open(filename); err == nil {
return configFile, nil // missing file is not an error defer file.Close()
} if err := configFile.LegacyLoadFromReader(file); err != nil {
file, err := os.Open(confFile) return configFile, errors.Wrap(err, filename)
if err != nil { }
return configFile, errors.Wrap(err, filename)
}
defer file.Close()
err = configFile.LegacyLoadFromReader(file)
if err != nil {
return configFile, errors.Wrap(err, filename)
} }
return configFile, nil return configFile, nil
} }
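For orientation, a minimal standalone sketch of the lazy-initialization pattern the hunk above introduces: sync.Once guards the DOCKER_CONFIG lookup so it happens on first use rather than in init(). The os.Getenv("HOME") call and the ".docker" fallback stand in for the package's homedir.Get() helper and configFileDir constant.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

var (
	initConfigDir sync.Once
	configDir     string
)

func setConfigDir() {
	if configDir != "" {
		return
	}
	configDir = os.Getenv("DOCKER_CONFIG")
	if configDir == "" {
		// ".docker" stands in for the package's configFileDir constant.
		configDir = filepath.Join(os.Getenv("HOME"), ".docker")
	}
}

// Dir resolves the configuration directory exactly once, on first call.
func Dir() string {
	initConfigDir.Do(setConfigDir)
	return configDir
}

func main() {
	fmt.Println(Dir())
}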


@@ -13,6 +13,7 @@ import (
"github.com/docker/cli/cli/config/credentials" "github.com/docker/cli/cli/config/credentials"
"github.com/docker/cli/cli/config/types" "github.com/docker/cli/cli/config/types"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus"
) )
const ( const (
@@ -118,7 +119,7 @@ func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
// LoadFromReader reads the configuration data given and sets up the auth config // LoadFromReader reads the configuration data given and sets up the auth config
// information with given directory and populates the receiver object // information with given directory and populates the receiver object
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
if err := json.NewDecoder(configData).Decode(&configFile); err != nil { if err := json.NewDecoder(configData).Decode(&configFile); err != nil && !errors.Is(err, io.EOF) {
return err return err
} }
var err error var err error
@@ -168,6 +169,13 @@ func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
configFile.AuthConfigs = tmpAuthConfigs configFile.AuthConfigs = tmpAuthConfigs
defer func() { configFile.AuthConfigs = saveAuthConfigs }() defer func() { configFile.AuthConfigs = saveAuthConfigs }()
// User-Agent header is automatically set, and should not be stored in the configuration
for v := range configFile.HTTPHeaders {
if strings.EqualFold(v, "User-Agent") {
delete(configFile.HTTPHeaders, v)
}
}
data, err := json.MarshalIndent(configFile, "", "\t") data, err := json.MarshalIndent(configFile, "", "\t")
if err != nil { if err != nil {
return err return err
@@ -177,7 +185,7 @@ func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
} }
// Save encodes and writes out all the authorization information // Save encodes and writes out all the authorization information
func (configFile *ConfigFile) Save() error { func (configFile *ConfigFile) Save() (retErr error) {
if configFile.Filename == "" { if configFile.Filename == "" {
return errors.Errorf("Can't save config with empty filename") return errors.Errorf("Can't save config with empty filename")
} }
@@ -190,16 +198,33 @@ func (configFile *ConfigFile) Save() error {
if err != nil { if err != nil {
return err return err
} }
defer func() {
temp.Close()
if retErr != nil {
if err := os.Remove(temp.Name()); err != nil {
logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file")
}
}
}()
err = configFile.SaveToWriter(temp) err = configFile.SaveToWriter(temp)
temp.Close()
if err != nil { if err != nil {
os.Remove(temp.Name())
return err return err
} }
// Try copying the current config file (if any) ownership and permissions
copyFilePermissions(configFile.Filename, temp.Name())
return os.Rename(temp.Name(), configFile.Filename) if err := temp.Close(); err != nil {
return errors.Wrap(err, "error closing temp file")
}
// Handle situation where the configfile is a symlink
cfgFile := configFile.Filename
if f, err := os.Readlink(cfgFile); err == nil {
cfgFile = f
}
// Try copying the current config file (if any) ownership and permissions
copyFilePermissions(cfgFile, temp.Name())
return os.Rename(temp.Name(), cfgFile)
} }
// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and // ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
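The Save changes above follow a write-to-temp, close, resolve-symlink, rename sequence so a failed write never clobbers the existing file. A rough standalone sketch of that sequence (saveAtomically is an invented name, os.CreateTemp stands in for whatever temp-file helper the upstream function uses, and the permission copying is omitted):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// saveAtomically writes data next to target and renames it into place,
// writing through a symlink if target is one, so the link itself survives.
func saveAtomically(target string, data []byte) (retErr error) {
	temp, err := os.CreateTemp(filepath.Dir(target), filepath.Base(target))
	if err != nil {
		return err
	}
	defer func() {
		if retErr != nil {
			os.Remove(temp.Name()) // best-effort cleanup on failure
		}
	}()
	if _, err := temp.Write(data); err != nil {
		temp.Close()
		return err
	}
	if err := temp.Close(); err != nil {
		return fmt.Errorf("error closing temp file: %w", err)
	}
	cfgFile := target
	if f, err := os.Readlink(cfgFile); err == nil {
		cfgFile = f // target is a symlink: rename over its destination
	}
	return os.Rename(temp.Name(), cfgFile)
}

func main() {
	if err := saveAtomically("config.json", []byte("{}\n")); err != nil {
		fmt.Println("save failed:", err)
	}
}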

12
vendor/github.com/imdario/mergo/.deepsource.toml generated vendored Normal file

@@ -0,0 +1,12 @@
version = 1
test_patterns = [
"*_test.go"
]
[[analyzers]]
name = "go"
enabled = true
[analyzers.meta]
import_path = "github.com/imdario/mergo"


@@ -99,11 +99,11 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
continue continue
} }
if srcKind == dstKind { if srcKind == dstKind {
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return return
} }
} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { if _, err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
return return
} }
} else if srcKind == reflect.Map { } else if srcKind == reflect.Map {
@@ -157,7 +157,8 @@ func _map(dst, src interface{}, opts ...func(*Config)) error {
// To be friction-less, we redirect equal-type arguments // To be friction-less, we redirect equal-type arguments
// to deepMerge. Only because arguments can be anything. // to deepMerge. Only because arguments can be anything.
if vSrc.Kind() == vDst.Kind() { if vSrc.Kind() == vDst.Kind() {
return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) _, err := deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
return err
} }
switch vSrc.Kind() { switch vSrc.Kind() {
case reflect.Struct: case reflect.Struct:
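For orientation, a small sketch of how mergo.Map is typically invoked; the Settings struct and the map contents are invented for illustration, and the lowercased-key matching is the behavior documented by the library rather than something shown in this hunk.

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Settings struct {
	Host string
	Port int
}

func main() {
	dst := Settings{Port: 8080}
	src := map[string]interface{}{"host": "localhost"}

	// Map merges a map into a struct (or vice versa); map keys are the
	// lowercased field names, and non-empty dst fields are kept by default.
	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	fmt.Printf("%+v\n", dst) // expected: {Host:localhost Port:8080}
}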


@@ -11,20 +11,32 @@ package mergo
import ( import (
"fmt" "fmt"
"reflect" "reflect"
"unsafe"
) )
func hasExportedField(dst reflect.Value) (exported bool) { func hasExportedField(dst reflect.Value) (exported bool) {
for i, n := 0, dst.NumField(); i < n; i++ { for i, n := 0, dst.NumField(); i < n; i++ {
field := dst.Type().Field(i) field := dst.Type().Field(i)
if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { if isExportedComponent(&field) {
exported = exported || hasExportedField(dst.Field(i)) return true
} else {
exported = exported || len(field.PkgPath) == 0
} }
} }
return return
} }
func isExportedComponent(field *reflect.StructField) bool {
name := field.Name
pkgPath := field.PkgPath
if len(pkgPath) > 0 {
return false
}
c := name[0]
if 'a' <= c && c <= 'z' || c == '_' {
return false
}
return true
}
type Config struct { type Config struct {
Overwrite bool Overwrite bool
AppendSlice bool AppendSlice bool
@@ -41,16 +53,17 @@ type Transformers interface {
// Traverses recursively both values, assigning src's fields values to dst. // Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows // The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types. // short circuiting on recursive types.
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { func deepMerge(dstIn, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (dst reflect.Value, err error) {
dst = dstIn
overwrite := config.Overwrite overwrite := config.Overwrite
typeCheck := config.TypeCheck typeCheck := config.TypeCheck
overwriteWithEmptySrc := config.overwriteWithEmptyValue overwriteWithEmptySrc := config.overwriteWithEmptyValue
overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
config.overwriteWithEmptyValue = false
if !src.IsValid() { if !src.IsValid() {
return return
} }
if dst.CanAddr() { if dst.CanAddr() {
addr := dst.UnsafeAddr() addr := dst.UnsafeAddr()
h := 17 * addr h := 17 * addr
@@ -58,7 +71,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
typ := dst.Type() typ := dst.Type()
for p := seen; p != nil; p = p.next { for p := seen; p != nil; p = p.next {
if p.ptr == addr && p.typ == typ { if p.ptr == addr && p.typ == typ {
return nil return dst, nil
} }
} }
// Remember, remember... // Remember, remember...
@@ -72,114 +85,124 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
} }
} }
if dst.IsValid() && src.IsValid() && src.Type() != dst.Type() {
err = fmt.Errorf("cannot append two different types (%s, %s)", src.Kind(), dst.Kind())
return
}
switch dst.Kind() { switch dst.Kind() {
case reflect.Struct: case reflect.Struct:
if hasExportedField(dst) { if hasExportedField(dst) {
dstCp := reflect.New(dst.Type()).Elem()
for i, n := 0, dst.NumField(); i < n; i++ { for i, n := 0, dst.NumField(); i < n; i++ {
if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { dstField := dst.Field(i)
structField := dst.Type().Field(i)
// copy un-exported struct fields
if !isExportedComponent(&structField) {
rf := dstCp.Field(i)
rf = reflect.NewAt(rf.Type(), unsafe.Pointer(rf.UnsafeAddr())).Elem() //nolint:gosec
dstRF := dst.Field(i)
if !dst.Field(i).CanAddr() {
continue
}
dstRF = reflect.NewAt(dstRF.Type(), unsafe.Pointer(dstRF.UnsafeAddr())).Elem() //nolint:gosec
rf.Set(dstRF)
continue
}
dstField, err = deepMerge(dstField, src.Field(i), visited, depth+1, config)
if err != nil {
return return
} }
dstCp.Field(i).Set(dstField)
} }
if dst.CanSet() {
dst.Set(dstCp)
} else {
dst = dstCp
}
return
} else { } else {
if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { if (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
dst.Set(src) dst = src
} }
} }
case reflect.Map: case reflect.Map:
if dst.IsNil() && !src.IsNil() { if dst.IsNil() && !src.IsNil() {
dst.Set(reflect.MakeMap(dst.Type())) if dst.CanSet() {
dst.Set(reflect.MakeMap(dst.Type()))
} else {
dst = src
return
}
} }
for _, key := range src.MapKeys() { for _, key := range src.MapKeys() {
srcElement := src.MapIndex(key) srcElement := src.MapIndex(key)
dstElement := dst.MapIndex(key)
if !srcElement.IsValid() { if !srcElement.IsValid() {
continue continue
} }
dstElement := dst.MapIndex(key) if dst.MapIndex(key).IsValid() {
switch srcElement.Kind() { k := dstElement.Interface()
case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: dstElement = reflect.ValueOf(k)
if srcElement.IsNil() {
continue
}
fallthrough
default:
if !srcElement.CanInterface() {
continue
}
switch reflect.TypeOf(srcElement.Interface()).Kind() {
case reflect.Struct:
fallthrough
case reflect.Ptr:
fallthrough
case reflect.Map:
srcMapElm := srcElement
dstMapElm := dstElement
if srcMapElm.CanInterface() {
srcMapElm = reflect.ValueOf(srcMapElm.Interface())
if dstMapElm.IsValid() {
dstMapElm = reflect.ValueOf(dstMapElm.Interface())
}
}
if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
return
}
case reflect.Slice:
srcSlice := reflect.ValueOf(srcElement.Interface())
var dstSlice reflect.Value
if !dstElement.IsValid() || dstElement.IsNil() {
dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
} else {
dstSlice = reflect.ValueOf(dstElement.Interface())
}
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
if typeCheck && srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
dstSlice = srcSlice
} else if config.AppendSlice {
if srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
}
dst.SetMapIndex(key, dstSlice)
}
} }
if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { if isReflectNil(srcElement) {
if overwrite || isReflectNil(dstElement) {
dst.SetMapIndex(key, srcElement)
}
continue
}
if !srcElement.CanInterface() {
continue continue
} }
if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { if srcElement.CanInterface() {
if dst.IsNil() { srcElement = reflect.ValueOf(srcElement.Interface())
dst.Set(reflect.MakeMap(dst.Type())) if dstElement.IsValid() {
dstElement = reflect.ValueOf(dstElement.Interface())
} }
dst.SetMapIndex(key, srcElement)
} }
dstElement, err = deepMerge(dstElement, srcElement, visited, depth+1, config)
if err != nil {
return
}
dst.SetMapIndex(key, dstElement)
} }
case reflect.Slice: case reflect.Slice:
if !dst.CanSet() { newSlice := dst
break
}
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
dst.Set(src) if typeCheck && src.Type() != dst.Type() {
} else if config.AppendSlice { return dst, fmt.Errorf("cannot override two slices with different type (%s, %s)", src.Type(), dst.Type())
if src.Type() != dst.Type() {
return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
} }
dst.Set(reflect.AppendSlice(dst, src)) newSlice = src
} else if config.AppendSlice {
if typeCheck && src.Type() != dst.Type() {
err = fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
return
}
newSlice = reflect.AppendSlice(dst, src)
} }
case reflect.Ptr: if dst.CanSet() {
fallthrough dst.Set(newSlice)
case reflect.Interface: } else {
if src.IsNil() { dst = newSlice
}
case reflect.Ptr, reflect.Interface:
if isReflectNil(src) {
break break
} }
if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) { if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) {
if dst.IsNil() || overwrite { if dst.IsNil() || overwrite {
if dst.CanSet() && (overwrite || isEmptyValue(dst)) { if overwrite || isEmptyValue(dst) {
dst.Set(src) if dst.CanSet() {
dst.Set(src)
} else {
dst = src
}
} }
} }
break break
@@ -191,28 +214,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dst.Set(src) dst.Set(src)
} }
} else if src.Kind() == reflect.Ptr { } else if src.Kind() == reflect.Ptr {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { if dst, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return return
} }
dst = dst.Addr()
} else if dst.Elem().Type() == src.Type() { } else if dst.Elem().Type() == src.Type() {
if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { if dst, err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
return return
} }
} else { } else {
return ErrDifferentArgumentsTypes return dst, ErrDifferentArgumentsTypes
} }
break break
} }
if dst.IsNil() || overwrite { if dst.IsNil() || overwrite {
if dst.CanSet() && (overwrite || isEmptyValue(dst)) { if (overwrite || isEmptyValue(dst)) && (overwriteWithEmptySrc || !isEmptyValue(src)) {
dst.Set(src) if dst.CanSet() {
dst.Set(src)
} else {
dst = src
}
} }
} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { } else if _, err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return return
} }
default: default:
if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { overwriteFull := (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst))
dst.Set(src) if overwriteFull {
if dst.CanSet() {
dst.Set(src)
} else {
dst = src
}
} }
} }
@@ -246,7 +279,12 @@ func WithOverride(config *Config) {
config.Overwrite = true config.Overwrite = true
} }
// WithOverride will make merge override empty dst slice with empty src slice. // WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
func WithOverwriteWithEmptyValue(config *Config) {
config.overwriteWithEmptyValue = true
}
// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
func WithOverrideEmptySlice(config *Config) { func WithOverrideEmptySlice(config *Config) {
config.overwriteSliceWithEmptyValue = true config.overwriteSliceWithEmptyValue = true
} }
@@ -276,8 +314,25 @@ func merge(dst, src interface{}, opts ...func(*Config)) error {
if vDst, vSrc, err = resolveValues(dst, src); err != nil { if vDst, vSrc, err = resolveValues(dst, src); err != nil {
return err return err
} }
if !vDst.CanSet() {
return fmt.Errorf("cannot set dst, needs reference")
}
if vDst.Type() != vSrc.Type() { if vDst.Type() != vSrc.Type() {
return ErrDifferentArgumentsTypes return ErrDifferentArgumentsTypes
} }
return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) _, err = deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
return err
}
// IsReflectNil is the reflect value provided nil
func isReflectNil(v reflect.Value) bool {
k := v.Kind()
switch k {
case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
// Both interface and slice are nil if first word is 0.
// Both are always bigger than a word; assume flagIndir.
return v.IsNil()
default:
return false
}
} }
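The newly exported WithOverwriteWithEmptyValue option can be exercised roughly as below; the Options struct and values are invented, and WithOverride is passed alongside it so that zero-valued src fields actually replace populated dst fields.

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Options struct {
	Name    string
	Retries int
}

func main() {
	dst := Options{Name: "default", Retries: 3}
	src := Options{} // all zero values

	// With both options set, empty src attributes overwrite non-empty dst ones.
	if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithOverwriteWithEmptyValue); err != nil {
		fmt.Println("merge failed:", err)
		return
	}
	fmt.Printf("%+v\n", dst) // expected: {Name: Retries:0}
}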


@@ -138,6 +138,9 @@ func Compare(v, w string) int {
// Max canonicalizes its arguments and then returns the version string // Max canonicalizes its arguments and then returns the version string
// that compares greater. // that compares greater.
//
// Deprecated: use Compare instead. In most cases, returning a canonicalized
// version is not expected or desired.
func Max(v, w string) string { func Max(v, w string) string {
v = Canonical(v) v = Canonical(v)
w = Canonical(w) w = Canonical(w)
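Since Max is deprecated in favour of Compare, callers that want the larger of two versions without canonicalizing the result can do something like this (the version strings are arbitrary examples):

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	v, w := "v1.2.3", "v1.10.0"

	// Compare keeps the original strings intact, unlike the deprecated Max,
	// which returns a canonicalized version.
	max := v
	if semver.Compare(w, v) > 0 {
		max = w
	}
	fmt.Println(max) // v1.10.0
}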

132
vendor/golang.org/x/mod/sumdb/dirhash/hash.go generated vendored Normal file

@@ -0,0 +1,132 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package dirhash defines hashes over directory trees.
// These hashes are recorded in go.sum files and in the Go checksum database,
// to allow verifying that a newly-downloaded module has the expected content.
package dirhash
import (
"archive/zip"
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strings"
)
// DefaultHash is the default hash function used in new go.sum entries.
var DefaultHash Hash = Hash1
// A Hash is a directory hash function.
// It accepts a list of files along with a function that opens the content of each file.
// It opens, reads, hashes, and closes each file and returns the overall directory hash.
type Hash func(files []string, open func(string) (io.ReadCloser, error)) (string, error)
// Hash1 is the "h1:" directory hash function, using SHA-256.
//
// Hash1 is "h1:" followed by the base64-encoded SHA-256 hash of a summary
// prepared as if by the Unix command:
//
// find . -type f | sort | sha256sum
//
// More precisely, the hashed summary contains a single line for each file in the list,
// ordered by sort.Strings applied to the file names, where each line consists of
// the hexadecimal SHA-256 hash of the file content,
// two spaces (U+0020), the file name, and a newline (U+000A).
//
// File names with newlines (U+000A) are disallowed.
func Hash1(files []string, open func(string) (io.ReadCloser, error)) (string, error) {
h := sha256.New()
files = append([]string(nil), files...)
sort.Strings(files)
for _, file := range files {
if strings.Contains(file, "\n") {
return "", errors.New("dirhash: filenames with newlines are not supported")
}
r, err := open(file)
if err != nil {
return "", err
}
hf := sha256.New()
_, err = io.Copy(hf, r)
r.Close()
if err != nil {
return "", err
}
fmt.Fprintf(h, "%x %s\n", hf.Sum(nil), file)
}
return "h1:" + base64.StdEncoding.EncodeToString(h.Sum(nil)), nil
}
// HashDir returns the hash of the local file system directory dir,
// replacing the directory name itself with prefix in the file names
// used in the hash function.
func HashDir(dir, prefix string, hash Hash) (string, error) {
files, err := DirFiles(dir, prefix)
if err != nil {
return "", err
}
osOpen := func(name string) (io.ReadCloser, error) {
return os.Open(filepath.Join(dir, strings.TrimPrefix(name, prefix)))
}
return hash(files, osOpen)
}
// DirFiles returns the list of files in the tree rooted at dir,
// replacing the directory name dir with prefix in each name.
// The resulting names always use forward slashes.
func DirFiles(dir, prefix string) ([]string, error) {
var files []string
dir = filepath.Clean(dir)
err := filepath.Walk(dir, func(file string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
rel := file
if dir != "." {
rel = file[len(dir)+1:]
}
f := filepath.Join(prefix, rel)
files = append(files, filepath.ToSlash(f))
return nil
})
if err != nil {
return nil, err
}
return files, nil
}
// HashZip returns the hash of the file content in the named zip file.
// Only the file names and their contents are included in the hash:
// the exact zip file format encoding, compression method,
// per-file modification times, and other metadata are ignored.
func HashZip(zipfile string, hash Hash) (string, error) {
z, err := zip.OpenReader(zipfile)
if err != nil {
return "", err
}
defer z.Close()
var files []string
zfiles := make(map[string]*zip.File)
for _, file := range z.File {
files = append(files, file.Name)
zfiles[file.Name] = file
}
zipOpen := func(name string) (io.ReadCloser, error) {
f := zfiles[name]
if f == nil {
return nil, fmt.Errorf("file %q not found in zip", name) // should never happen
}
return f.Open()
}
return hash(files, zipOpen)
}
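A brief usage sketch of the vendored dirhash package above: hash every file under a directory with the default "h1:" SHA-256 scheme. The "./spec" path and "spec" prefix are placeholders.

package main

import (
	"fmt"
	"log"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// HashDir walks ./spec, hashes each file, and names the files with the
	// "spec" prefix in the summary line, as described in the package docs.
	sum, err := dirhash.HashDir("./spec", "spec", dirhash.DefaultHash)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sum) // e.g. h1:base64-encoded digest
}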

11
vendor/modules.txt vendored

@@ -123,7 +123,7 @@ github.com/crillab/gophersat/solver
github.com/cyphar/filepath-securejoin github.com/cyphar/filepath-securejoin
# github.com/davecgh/go-spew v1.1.1 # github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew/spew github.com/davecgh/go-spew/spew
# github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24 # github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
## explicit ## explicit
github.com/docker/cli/cli/config github.com/docker/cli/cli/config
github.com/docker/cli/cli/config/configfile github.com/docker/cli/cli/config/configfile
@@ -191,6 +191,7 @@ github.com/docker/go/canonical/json
github.com/docker/go-connections/nat github.com/docker/go-connections/nat
github.com/docker/go-connections/tlsconfig github.com/docker/go-connections/tlsconfig
# github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c # github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c
## explicit
github.com/docker/go-events github.com/docker/go-events
# github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916 # github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916
github.com/docker/go-metrics github.com/docker/go-metrics
@@ -315,7 +316,7 @@ github.com/hashicorp/hcl/json/token
github.com/heroku/docker-registry-client/registry github.com/heroku/docker-registry-client/registry
# github.com/huandu/xstrings v1.3.1 # github.com/huandu/xstrings v1.3.1
github.com/huandu/xstrings github.com/huandu/xstrings
# github.com/imdario/mergo v0.3.8 # github.com/imdario/mergo v0.3.9
## explicit ## explicit
github.com/imdario/mergo github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.0 # github.com/inconshreveable/mousetrap v1.0.0
@@ -689,8 +690,10 @@ golang.org/x/crypto/ssh/terminal
# golang.org/x/lint v0.0.0-20200302205851-738671d3881b # golang.org/x/lint v0.0.0-20200302205851-738671d3881b
golang.org/x/lint golang.org/x/lint
golang.org/x/lint/golint golang.org/x/lint/golint
# golang.org/x/mod v0.3.0 # golang.org/x/mod v0.4.2
## explicit
golang.org/x/mod/semver golang.org/x/mod/semver
golang.org/x/mod/sumdb/dirhash
# golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 # golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0
golang.org/x/net/context golang.org/x/net/context
golang.org/x/net/context/ctxhttp golang.org/x/net/context/ctxhttp
@@ -858,6 +861,8 @@ gopkg.in/tomb.v1
# gopkg.in/yaml.v2 v2.3.0 # gopkg.in/yaml.v2 v2.3.0
## explicit ## explicit
gopkg.in/yaml.v2 gopkg.in/yaml.v2
# gotest.tools/v3 v3.0.2
## explicit
# helm.sh/helm/v3 v3.3.4 # helm.sh/helm/v3 v3.3.4
## explicit ## explicit
helm.sh/helm/v3/internal/ignore helm.sh/helm/v3/internal/ignore