Mirror of https://github.com/mudler/luet.git, synced 2025-09-03 00:06:36 +00:00

Compare commits (28 commits):
7128c88da6
74402fae81
9d1594c036
75906c4198
cb032dc714
2c7e495fa1
db8bf2b85e
5eb586ddb0
f9747cdf87
becac7d853
5aa5bffb48
9aa3159787
9cb6e65bb6
92b243d7aa
29ec19a8a1
440e07c418
acf74f5896
1ee1894ffa
c8573f9535
76b70ebeb4
2efb17a06c
64ab3711ca
eb5d7ba35b
b6b91cfd7a
4d8a9a544b
654b5b48cd
92e18d5782
8780e4f16f
.chglog/CHANGELOG.tpl.md (new file, 49 lines)
@@ -0,0 +1,49 @@
{{ if .Versions -}}
{{ if .Unreleased.CommitGroups -}}
{{ range .Unreleased.CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ else }}
{{ range .Unreleased.Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ end -}}

{{ range .Versions }}
<a name="{{ .Tag.Name }}"></a>

{{ if .CommitGroups -}}
{{ range .CommitGroups -}}
### {{ .Title }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }}
{{ end }}
{{ end -}}
{{ else }}
{{ range .Commits -}}
- {{ if .Scope }}**{{ .Scope }}:** {{ end }}{{ .Subject }} (https://github.com/mudler/luet/commit/{{.Hash.Short}})
{{ end }}
{{ end -}}

{{- if .NoteGroups -}}
{{ range .NoteGroups -}}
### {{ .Title }}
{{ range .Notes }}
{{ .Body }}
{{ end }}
{{ end -}}
{{ end -}}
{{ end -}}

{{- if .Versions }}
[Unreleased]: {{ .Info.RepositoryURL }}/compare/{{ $latest := index .Versions 0 }}{{ $latest.Tag.Name }}...HEAD
{{ range .Versions -}}
{{ if .Tag.Previous -}}
[{{ .Tag.Name }}]: {{ $.Info.RepositoryURL }}/compare/{{ .Tag.Previous.Name }}...{{ .Tag.Name }}
{{ end -}}
{{ end -}}
{{ end -}}
.chglog/config.yml (new executable file, 27 lines)
@@ -0,0 +1,27 @@
style: github
template: CHANGELOG.tpl.md
info:
  title: CHANGELOG
  repository_url: https://github.com/mudler/luet
options:
  commits:
    # filters:
    #   Type:
    #     - feat
    #     - fix
    #     - perf
    #     - refactor
  commit_groups:
    title_maps:
      feat: Features
      fix: Bug Fixes
      perf: Performance Improvements
      refactor: Code Refactoring
      ci: Continuous Integration
  header:
    pattern: "(.*)"
    pattern_maps:
      - Subject
  notes:
    keywords:
      - BREAKING CHANGE
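
These two files drive the git-chglog changelog generator: the config selects the GitHub style and points at the template above, and the commented-out filters would restrict the changelog to conventional-commit types once enabled. A minimal sketch of regenerating the changelog locally, assuming the git-chglog CLI is installed and run from the repository root (verify the flags against the installed version):

    git-chglog --config .chglog/config.yml --output CHANGELOG.md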
.github/workflows/release.yml (vendored, 72 changed lines)
@@ -1,8 +1,46 @@
 on: push
+concurrency:
+  group: ci-${{ github.head_ref || github.ref }}-${{ github.repository }}
+  cancel-in-progress: true
 name: Build and release on push
 jobs:
-  release:
-    name: Test and Release
+  tests-integration:
+    name: Integration tests
+    runs-on: ubuntu-latest
+    steps:
+    - name: Install Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.14.x
+    - name: Checkout code
+      uses: actions/checkout@v2
+    - name: setup-docker
+      uses: docker-practice/actions-setup-docker@0.0.1
+    - name: Login to quay
+      run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
+    - name: Install deps
+      run: |
+        sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
+        sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
+        sudo chmod a+x "/usr/bin/img"
+    - name: Login to quay with img
+      run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
+    - name: Tests with Img backend
+      run: |
+        sudo -E env "PATH=$PATH" \
+             env "LUET_BACKEND=img" \
+             make test-integration
+    - name: Tests
+      run: |
+        sudo -E \
+             env "PATH=$PATH" \
+             env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
+             env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
+             env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
+             make test-integration
+
+  tests-unit:
+    name: Unit tests
     runs-on: ubuntu-latest
     steps:
     - name: Install Go
@@ -22,13 +60,6 @@ jobs:
        sudo chmod a+x "/usr/bin/img"
     - name: Build test
       run: sudo -E env "PATH=$PATH" make multiarch-build-small
-    - name: Login to quay with img
-      run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo img login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
-    - name: Tests with Img backend
-      run: |
-        sudo -E env "PATH=$PATH" \
-             env "LUET_BACKEND=img" \
-             make test-integration
     - name: Tests
       run: |
         sudo -E \
@@ -36,7 +67,28 @@ jobs:
              env "TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
              env "UNIT_TEST_DOCKER_IMAGE=${{ secrets.DOCKER_TESTING_IMAGE }}" \
              env "UNIT_TEST_DOCKER_IMAGE_REPOSITORY=${{ secrets.DOCKER_TESTING_UNIT_TEST_IMAGE }}" \
-             make test-integration test-coverage
+             make test-coverage
+
+  release:
+    name: Test and Release
+    needs: ["tests-integration","tests-unit"]
+    runs-on: ubuntu-latest
+    steps:
+    - name: Install Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: 1.14.x
+    - name: Checkout code
+      uses: actions/checkout@v2
+    - name: setup-docker
+      uses: docker-practice/actions-setup-docker@0.0.1
+    - name: Login to quay
+      run: echo ${{ secrets.DOCKER_TESTING_PASSWORD }} | sudo -E docker login -u ${{ secrets.DOCKER_TESTING_USERNAME }} --password-stdin quay.io
+    - name: Install deps
+      run: |
+        sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
+        sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
+        sudo chmod a+x "/usr/bin/img"
     - name: Build
       run: sudo -E env "PATH=$PATH" make multiarch-build-small && sudo chmod -R 777 release/
     - name: Release
.github/workflows/test.yml (vendored, 30 changed lines)
@@ -2,7 +2,31 @@
 on: pull_request
 name: Build and Test
 jobs:
-  test:
+  tests-integration:
+    strategy:
+      matrix:
+        go-version: [1.14.x]
+        platform: [ubuntu-latest]
+    runs-on: ${{ matrix.platform }}
+    steps:
+    - name: Install Go
+      uses: actions/setup-go@v2
+      with:
+        go-version: ${{ matrix.go-version }}
+    - name: Checkout code
+      uses: actions/checkout@v2
+    - name: setup-docker
+      uses: docker-practice/actions-setup-docker@0.0.1
+    - name: Install deps
+      run: |
+        sudo apt-get install -y upx && sudo -E env "PATH=$PATH" make deps
+        sudo curl -fSL "https://github.com/genuinetools/img/releases/download/v0.5.11/img-linux-amd64" -o "/usr/bin/img"
+        sudo chmod a+x "/usr/bin/img"
+    - name: Tests with Img backend
+      run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
+    - name: Tests
+      run: sudo -E env "PATH=$PATH" make test-integration
+  tests-unit:
     strategy:
       matrix:
         go-version: [1.14.x]
@@ -24,7 +48,5 @@ jobs:
        sudo chmod a+x "/usr/bin/img"
     - name: Build
       run: sudo -E env "PATH=$PATH" make multiarch-build-small
-    - name: Tests with Img backend
-      run: sudo -E env "PATH=$PATH" env "LUET_BACKEND=img" make test-integration
     - name: Tests
-      run: sudo -E env "PATH=$PATH" make test-integration test-coverage
+      run: sudo -E env "PATH=$PATH" make test-coverage
cmd/build.go (19 changed lines)
@@ -21,6 +21,7 @@ import (

 	"github.com/ghodss/yaml"
 	helpers "github.com/mudler/luet/cmd/helpers"
+	"github.com/mudler/luet/cmd/util"
 	"github.com/mudler/luet/pkg/compiler"
 	"github.com/mudler/luet/pkg/compiler/types/artifact"
 	compilerspec "github.com/mudler/luet/pkg/compiler/types/spec"
@@ -83,10 +84,8 @@ Build packages specifying multiple definition trees:
 		viper.BindPFlag("wait", cmd.Flags().Lookup("wait"))
 		viper.BindPFlag("keep-images", cmd.Flags().Lookup("keep-images"))

-		LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
-		LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
-		LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
-		LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
+		util.BindSolverFlags(cmd)

 		LuetCfg.Viper.BindPFlag("general.show_build_output", cmd.Flags().Lookup("live-output"))
 		LuetCfg.Viper.BindPFlag("backend-args", cmd.Flags().Lookup("backend-args"))
@@ -148,21 +147,11 @@ Build packages specifying multiple definition trees:

 		Info("Building in", dst)

-		stype := LuetCfg.Viper.GetString("solver.type")
-		discount := LuetCfg.Viper.GetFloat64("solver.discount")
-		rate := LuetCfg.Viper.GetFloat64("solver.rate")
-		attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
+		opts := util.SetSolverConfig()
 		pullRepo, _ := cmd.Flags().GetStringArray("pull-repository")

 		LuetCfg.GetGeneral().ShowBuildOutput = LuetCfg.Viper.GetBool("general.show_build_output")

-		opts := &LuetSolverOptions{
-			Type:        stype,
-			LearnRate:   float32(rate),
-			Discount:    float32(discount),
-			MaxAttempts: attempts,
-		}
-
 		Debug("Solver", opts.CompactString())

 		if concurrent {
@@ -20,8 +20,8 @@ import (
 	"os"
 	"path/filepath"

+	"github.com/mudler/luet/cmd/util"
 	. "github.com/mudler/luet/pkg/config"
-	config "github.com/mudler/luet/pkg/config"
 	fileHelper "github.com/mudler/luet/pkg/helpers/file"
 	. "github.com/mudler/luet/pkg/logger"
@@ -33,19 +33,11 @@ var cleanupCmd = &cobra.Command{
 	Short: "Clean packages cache.",
 	Long:  `remove downloaded packages tarballs and clean cache directory`,
 	PreRun: func(cmd *cobra.Command, args []string) {
-		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
-		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
-		LuetCfg.Viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
+		util.BindSystemFlags(cmd)
 	},
 	Run: func(cmd *cobra.Command, args []string) {
 		var cleaned int = 0
-		dbpath := LuetCfg.Viper.GetString("system.database_path")
-		rootfs := config.LuetCfg.Viper.GetString("system.rootfs")
-		engine := config.LuetCfg.Viper.GetString("system.database_engine")
-
-		LuetCfg.System.DatabaseEngine = engine
-		LuetCfg.System.DatabasePath = dbpath
-		LuetCfg.System.Rootfs = rootfs
+		util.SetSystemConfig()
 		// Check if cache dir exists
 		if fileHelper.Exists(LuetCfg.GetSystem().GetSystemPkgsCacheDirPath()) {
@@ -18,6 +18,7 @@ package cmd_database
 import (
 	"io/ioutil"

+	"github.com/mudler/luet/cmd/util"
 	artifact "github.com/mudler/luet/pkg/compiler/types/artifact"

 	. "github.com/mudler/luet/pkg/logger"
@@ -45,21 +46,11 @@ The yaml must contain the package definition, and the file list at least.
 For reference, inspect a "metadata.yaml" file generated while running "luet build"`,
 	Args: cobra.OnlyValidArgs,
 	PreRun: func(cmd *cobra.Command, args []string) {
-		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
-		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
-		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
-
+		util.BindSystemFlags(cmd)
 	},
 	Run: func(cmd *cobra.Command, args []string) {

-		dbpath := LuetCfg.Viper.GetString("system.database_path")
-		rootfs := LuetCfg.Viper.GetString("system.rootfs")
-		engine := LuetCfg.Viper.GetString("system.database_engine")
-
-		LuetCfg.System.DatabaseEngine = engine
-		LuetCfg.System.DatabasePath = dbpath
-		LuetCfg.System.Rootfs = rootfs
-
+		util.SetSystemConfig()
 		systemDB := LuetCfg.GetSystemDB()

 		for _, a := range args {
@@ -74,6 +65,12 @@ For reference, inspect a "metadata.yaml" file generated while running "luet buil

 			files := art.Files

+			// Check if the package is already present
+			if p, err := systemDB.FindPackage(art.CompileSpec.GetPackage()); err == nil && p.GetName() != "" {
+				Fatal("Package", art.CompileSpec.GetPackage().HumanReadableString(),
+					" already present.")
+			}
+
 			if _, err := systemDB.CreatePackage(art.CompileSpec.GetPackage()); err != nil {
 				Fatal("Failed to create ", a, ": ", err.Error())
 			}
@@ -19,6 +19,7 @@ import (
 	"fmt"

 	helpers "github.com/mudler/luet/cmd/helpers"
+	"github.com/mudler/luet/cmd/util"
 	"gopkg.in/yaml.v2"

 	. "github.com/mudler/luet/pkg/config"
@@ -38,20 +39,11 @@ To return also files:
 	$ luet database get --files system/foo`,
 	Args: cobra.OnlyValidArgs,
 	PreRun: func(cmd *cobra.Command, args []string) {
-		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
-		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
-		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
-
+		util.BindSystemFlags(cmd)
 	},
 	Run: func(cmd *cobra.Command, args []string) {
 		showFiles, _ := cmd.Flags().GetBool("files")
-		dbpath := LuetCfg.Viper.GetString("system.database_path")
-		rootfs := LuetCfg.Viper.GetString("system.rootfs")
-		engine := LuetCfg.Viper.GetString("system.database_engine")
-
-		LuetCfg.System.DatabaseEngine = engine
-		LuetCfg.System.DatabasePath = dbpath
-		LuetCfg.System.Rootfs = rootfs
+		util.SetSystemConfig()

 		systemDB := LuetCfg.GetSystemDB()
@@ -19,6 +19,7 @@ import (
 	. "github.com/mudler/luet/pkg/logger"

 	helpers "github.com/mudler/luet/cmd/helpers"
+	"github.com/mudler/luet/cmd/util"
 	. "github.com/mudler/luet/pkg/config"

 	"github.com/spf13/cobra"
@@ -36,19 +37,10 @@ This commands takes multiple packages as arguments and prunes their entries from
 `,
 	Args: cobra.OnlyValidArgs,
 	PreRun: func(cmd *cobra.Command, args []string) {
-		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
-		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
-		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
-
+		util.BindSystemFlags(cmd)
 	},
 	Run: func(cmd *cobra.Command, args []string) {
-		dbpath := LuetCfg.Viper.GetString("system.database_path")
-		rootfs := LuetCfg.Viper.GetString("system.rootfs")
-		engine := LuetCfg.Viper.GetString("system.database_engine")
-
-		LuetCfg.System.DatabaseEngine = engine
-		LuetCfg.System.DatabasePath = dbpath
-		LuetCfg.System.Rootfs = rootfs
+		util.SetSystemConfig()

 		systemDB := LuetCfg.GetSystemDB()
@@ -19,6 +19,7 @@ import (
 	"github.com/mudler/luet/pkg/solver"

 	helpers "github.com/mudler/luet/cmd/helpers"
+	"github.com/mudler/luet/cmd/util"
 	. "github.com/mudler/luet/pkg/config"
 	. "github.com/mudler/luet/pkg/logger"
 	pkg "github.com/mudler/luet/pkg/package"
@@ -47,13 +48,8 @@ To force install a package:
 `,
 	Aliases: []string{"i"},
 	PreRun: func(cmd *cobra.Command, args []string) {
-		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
-		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
-		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
-		LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
-		LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
-		LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
-		LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
+		util.BindSystemFlags(cmd)
+		util.BindSolverFlags(cmd)
 		LuetCfg.Viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
 		LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
 		LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
@@ -70,10 +66,6 @@ To force install a package:
 			toInstall = append(toInstall, pack)
 		}

-		stype := LuetCfg.Viper.GetString("solver.type")
-		discount := LuetCfg.Viper.GetFloat64("solver.discount")
-		rate := LuetCfg.Viper.GetFloat64("solver.rate")
-		attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
 		force := LuetCfg.Viper.GetBool("force")
 		nodeps := LuetCfg.Viper.GetBool("nodeps")
 		onlydeps := LuetCfg.Viper.GetBool("onlydeps")
@@ -81,18 +73,8 @@ To force install a package:
 		yes := LuetCfg.Viper.GetBool("yes")
 		downloadOnly, _ := cmd.Flags().GetBool("download-only")

-		dbpath := LuetCfg.Viper.GetString("system.database_path")
-		rootfs := LuetCfg.Viper.GetString("system.rootfs")
-		engine := LuetCfg.Viper.GetString("system.database_engine")
-
-		LuetCfg.System.DatabaseEngine = engine
-		LuetCfg.System.DatabasePath = dbpath
-		LuetCfg.System.Rootfs = rootfs
-
-		LuetCfg.GetSolverOptions().Type = stype
-		LuetCfg.GetSolverOptions().LearnRate = float32(rate)
-		LuetCfg.GetSolverOptions().Discount = float32(discount)
-		LuetCfg.GetSolverOptions().MaxAttempts = attempts
+		util.SetSystemConfig()
+		util.SetSolverConfig()

 		if concurrent {
 			LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
@@ -15,6 +15,7 @@
 package cmd

 import (
+	"github.com/mudler/luet/cmd/util"
 	installer "github.com/mudler/luet/pkg/installer"

 	. "github.com/mudler/luet/pkg/config"
@@ -27,9 +28,7 @@ var reclaimCmd = &cobra.Command{
 	Use:   "reclaim",
 	Short: "Reclaim packages to Luet database from available repositories",
 	PreRun: func(cmd *cobra.Command, args []string) {
-		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
-		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
-		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
+		util.BindSystemFlags(cmd)
 		LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
 	},
 	Long: `Reclaim tries to find association between packages in the online repositories and the system one.
@@ -39,13 +38,7 @@ var reclaimCmd = &cobra.Command{
 It scans the target file system, and if finds a match with a package available in the repositories, it marks as installed in the system database.
 `,
 	Run: func(cmd *cobra.Command, args []string) {
-		dbpath := LuetCfg.Viper.GetString("system.database_path")
-		rootfs := LuetCfg.Viper.GetString("system.rootfs")
-		engine := LuetCfg.Viper.GetString("system.database_engine")
-
-		LuetCfg.System.DatabaseEngine = engine
-		LuetCfg.System.DatabasePath = dbpath
-		LuetCfg.System.Rootfs = rootfs
+		util.SetSystemConfig()

 		// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
 		repos := installer.Repositories{}
cmd/reinstall.go (new file, 128 lines)
@@ -0,0 +1,128 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.
package cmd

import (
	installer "github.com/mudler/luet/pkg/installer"
	"github.com/mudler/luet/pkg/solver"

	helpers "github.com/mudler/luet/cmd/helpers"
	"github.com/mudler/luet/cmd/util"
	. "github.com/mudler/luet/pkg/config"
	. "github.com/mudler/luet/pkg/logger"
	pkg "github.com/mudler/luet/pkg/package"

	"github.com/spf13/cobra"
)

var reinstallCmd = &cobra.Command{
	Use:   "reinstall <pkg1> <pkg2> <pkg3>",
	Short: "reinstall a set of packages",
	Long: `Reinstall a group of packages in the system:

	$ luet reinstall -y system/busybox shells/bash system/coreutils ...
`,
	PreRun: func(cmd *cobra.Command, args []string) {
		util.BindSystemFlags(cmd)
		util.BindSolverFlags(cmd)
		LuetCfg.Viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
		LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
		LuetCfg.Viper.BindPFlag("for", cmd.Flags().Lookup("for"))

		LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
	},
	Run: func(cmd *cobra.Command, args []string) {
		var toUninstall pkg.Packages
		var toAdd pkg.Packages

		force := LuetCfg.Viper.GetBool("force")
		onlydeps := LuetCfg.Viper.GetBool("onlydeps")
		concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
		yes := LuetCfg.Viper.GetBool("yes")

		downloadOnly, _ := cmd.Flags().GetBool("download-only")

		util.SetSystemConfig()

		for _, a := range args {
			pack, err := helpers.ParsePackageStr(a)
			if err != nil {
				Fatal("Invalid package string ", a, ": ", err.Error())
			}
			toUninstall = append(toUninstall, pack)
			toAdd = append(toAdd, pack)
		}

		// This shouldn't be necessary, but we need to unmarshal the repositories to a concrete struct, thus we need to port them back to the Repositories type
		repos := installer.Repositories{}
		for _, repo := range LuetCfg.SystemRepositories {
			if !repo.Enable {
				continue
			}
			r := installer.NewSystemRepository(repo)
			repos = append(repos, r)
		}

		util.SetSolverConfig()

		if concurrent {
			LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
		} else {
			LuetCfg.GetSolverOptions().Implementation = solver.SingleCoreSimple
		}

		Debug("Solver", LuetCfg.GetSolverOptions().CompactString())

		// Load config protect configs
		installer.LoadConfigProtectConfs(LuetCfg)

		inst := installer.NewLuetInstaller(installer.LuetInstallerOptions{
			Concurrency:                 LuetCfg.GetGeneral().Concurrency,
			SolverOptions:               *LuetCfg.GetSolverOptions(),
			NoDeps:                      true,
			Force:                       force,
			OnlyDeps:                    onlydeps,
			PreserveSystemEssentialData: true,
			Ask:                         !yes,
			DownloadOnly:                downloadOnly,
		})
		inst.Repositories(repos)

		system := &installer.System{Database: LuetCfg.GetSystemDB(), Target: LuetCfg.GetSystem().Rootfs}
		err := inst.Swap(toUninstall, toAdd, system)
		if err != nil {
			Fatal("Error: " + err.Error())
		}
	},
}

func init() {
	reinstallCmd.Flags().String("system-dbpath", "", "System db path")
	reinstallCmd.Flags().String("system-target", "", "System rootpath")
	reinstallCmd.Flags().String("system-engine", "", "System DB engine")

	reinstallCmd.Flags().String("solver-type", "", "Solver strategy ( Defaults none, available: "+AvailableResolvers+" )")
	reinstallCmd.Flags().Float32("solver-rate", 0.7, "Solver learning rate")
	reinstallCmd.Flags().Float32("solver-discount", 1.0, "Solver discount rate")
	reinstallCmd.Flags().Int("solver-attempts", 9000, "Solver maximum attempts")
	reinstallCmd.Flags().Bool("onlydeps", false, "Consider **only** package dependencies")
	reinstallCmd.Flags().Bool("force", false, "Skip errors and keep going (potentially harmful)")
	reinstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
	reinstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
	reinstallCmd.Flags().Bool("download-only", false, "Download only")

	RootCmd.AddCommand(reinstallCmd)
}
@@ -19,6 +19,7 @@ import (
 	"github.com/mudler/luet/pkg/solver"

 	helpers "github.com/mudler/luet/cmd/helpers"
+	"github.com/mudler/luet/cmd/util"
 	. "github.com/mudler/luet/pkg/config"
 	. "github.com/mudler/luet/pkg/logger"
 	pkg "github.com/mudler/luet/pkg/package"
@@ -35,13 +36,8 @@ var replaceCmd = &cobra.Command{
 	$ luet replace -y system/busybox ... --for shells/bash --for system/coreutils ...
 `,
 	PreRun: func(cmd *cobra.Command, args []string) {
-		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
-		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
-		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
-		LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
-		LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
-		LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
-		LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
+		util.BindSystemFlags(cmd)
+		util.BindSolverFlags(cmd)
 		LuetCfg.Viper.BindPFlag("onlydeps", cmd.Flags().Lookup("onlydeps"))
 		LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
 		LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
@@ -54,24 +50,15 @@ var replaceCmd = &cobra.Command{
 		var toAdd pkg.Packages

 		f := LuetCfg.Viper.GetStringSlice("for")
-		stype := LuetCfg.Viper.GetString("solver.type")
-		discount := LuetCfg.Viper.GetFloat64("solver.discount")
-		rate := LuetCfg.Viper.GetFloat64("solver.rate")
-		attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
 		force := LuetCfg.Viper.GetBool("force")
 		nodeps := LuetCfg.Viper.GetBool("nodeps")
 		onlydeps := LuetCfg.Viper.GetBool("onlydeps")
 		concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
 		yes := LuetCfg.Viper.GetBool("yes")
-		dbpath := LuetCfg.Viper.GetString("system.database_path")
-		rootfs := LuetCfg.Viper.GetString("system.rootfs")
-		engine := LuetCfg.Viper.GetString("system.database_engine")
 		downloadOnly, _ := cmd.Flags().GetBool("download-only")

-		LuetCfg.System.DatabaseEngine = engine
-		LuetCfg.System.DatabasePath = dbpath
-		LuetCfg.System.Rootfs = rootfs
-
+		util.SetSystemConfig()
+		util.SetSolverConfig()
 		for _, a := range args {
 			pack, err := helpers.ParsePackageStr(a)
 			if err != nil {
@@ -98,11 +85,6 @@ var replaceCmd = &cobra.Command{
 			repos = append(repos, r)
 		}

-		LuetCfg.GetSolverOptions().Type = stype
-		LuetCfg.GetSolverOptions().LearnRate = float32(rate)
-		LuetCfg.GetSolverOptions().Discount = float32(discount)
-		LuetCfg.GetSolverOptions().MaxAttempts = attempts
-
 		if concurrent {
 			LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
 		} else {
cmd/root.go (71 changed lines)
@@ -41,8 +41,14 @@ var Verbose bool
 var LockedCommands = []string{"install", "uninstall", "upgrade"}

 const (
-	LuetCLIVersion = "0.16.6"
+	LuetCLIVersion = "0.17.3"
 	LuetEnvPrefix  = "LUET"
+	license        = `
+Luet  Copyright (C) 2019-2021 Ettore Di Giacinto
+This program comes with ABSOLUTELY NO WARRANTY.
+This is free software, and you are welcome to redistribute it
+under certain conditions.
+`
 )

 // Build time and commit information.
@@ -53,6 +59,47 @@
 	BuildCommit string
 )

+func version() string {
+	return fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime)
+}
+
+var noBannerCommands = []string{"search", "exec", "tree", "database", "box", "cleanup"}
+
+func displayVersionBanner() {
+	display := true
+	if len(os.Args) > 1 {
+		for _, c := range noBannerCommands {
+			if os.Args[1] == c {
+				display = false
+			}
+		}
+	}
+	if display {
+		Info("Luet version", version())
+		Info(license)
+	}
+}
+
+func handleLock() {
+	if os.Getenv("LUET_NOLOCK") != "true" {
+		if len(os.Args) > 1 {
+			for _, lockedCmd := range LockedCommands {
+				if os.Args[1] == lockedCmd {
+					s := single.New("luet")
+					if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
+						Fatal("another instance of the app is already running, exiting")
+					} else if err != nil {
+						// Another error occurred, might be worth handling it as well
+						Fatal("failed to acquire exclusive app lock:", err.Error())
+					}
+					defer s.TryUnlock()
+					break
+				}
+			}
+		}
+	}
+}
+
 // RootCmd represents the base command when called without any subcommands
 var RootCmd = &cobra.Command{
 	Use:   "luet",
@@ -80,8 +127,9 @@ To build a package, from a tree definition:
 	$ luet build --tree tree/path package

 `,
-	Version: fmt.Sprintf("%s-g%s %s", LuetCLIVersion, BuildCommit, BuildTime),
+	Version: version(),
 	PersistentPreRun: func(cmd *cobra.Command, args []string) {

 		err := LoadConfig(config.LuetCfg)
 		if err != nil {
 			Fatal("failed to load configuration:", err.Error())
@@ -155,23 +203,8 @@ func LoadConfig(c *config.LuetConfig) error {
 // This is called by main.main(). It only needs to happen once to the rootCmd.
 func Execute() {

-	if os.Getenv("LUET_NOLOCK") != "true" {
-		if len(os.Args) > 1 {
-			for _, lockedCmd := range LockedCommands {
-				if os.Args[1] == lockedCmd {
-					s := single.New("luet")
-					if err := s.CheckLock(); err != nil && err == single.ErrAlreadyRunning {
-						Fatal("another instance of the app is already running, exiting")
-					} else if err != nil {
-						// Another error occurred, might be worth handling it as well
-						Fatal("failed to acquire exclusive app lock:", err.Error())
-					}
-					defer s.TryUnlock()
-					break
-				}
-			}
-		}
-	}
+	handleLock()
+	displayVersionBanner()

 	if err := RootCmd.Execute(); err != nil {
 		fmt.Println(err)
|
@@ -21,6 +21,7 @@ import (
|
||||
"github.com/ghodss/yaml"
|
||||
"github.com/jedib0t/go-pretty/table"
|
||||
"github.com/jedib0t/go-pretty/v6/list"
|
||||
"github.com/mudler/luet/cmd/util"
|
||||
. "github.com/mudler/luet/pkg/config"
|
||||
installer "github.com/mudler/luet/pkg/installer"
|
||||
. "github.com/mudler/luet/pkg/logger"
|
||||
@@ -306,14 +307,9 @@ Search can also return results in the terminal in different ways: as terminal ou
|
||||
`,
|
||||
Aliases: []string{"s"},
|
||||
PreRun: func(cmd *cobra.Command, args []string) {
|
||||
LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
|
||||
LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
|
||||
util.BindSystemFlags(cmd)
|
||||
util.BindSolverFlags(cmd)
|
||||
LuetCfg.Viper.BindPFlag("installed", cmd.Flags().Lookup("installed"))
|
||||
LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
|
||||
LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
|
||||
LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
|
||||
LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
|
||||
LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
|
||||
},
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
var results Results
|
||||
@@ -325,32 +321,20 @@ Search can also return results in the terminal in different ways: as terminal ou
|
||||
hidden, _ := cmd.Flags().GetBool("hidden")
|
||||
|
||||
installed := LuetCfg.Viper.GetBool("installed")
|
||||
stype := LuetCfg.Viper.GetString("solver.type")
|
||||
discount := LuetCfg.Viper.GetFloat64("solver.discount")
|
||||
rate := LuetCfg.Viper.GetFloat64("solver.rate")
|
||||
attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
|
||||
searchWithLabel, _ := cmd.Flags().GetBool("by-label")
|
||||
searchWithLabelMatch, _ := cmd.Flags().GetBool("by-label-regex")
|
||||
revdeps, _ := cmd.Flags().GetBool("revdeps")
|
||||
tableMode, _ := cmd.Flags().GetBool("table")
|
||||
files, _ := cmd.Flags().GetBool("files")
|
||||
dbpath := LuetCfg.Viper.GetString("system.database_path")
|
||||
rootfs := LuetCfg.Viper.GetString("system.rootfs")
|
||||
engine := LuetCfg.Viper.GetString("system.database_engine")
|
||||
|
||||
LuetCfg.System.DatabaseEngine = engine
|
||||
LuetCfg.System.DatabasePath = dbpath
|
||||
LuetCfg.System.Rootfs = rootfs
|
||||
util.SetSystemConfig()
|
||||
util.SetSolverConfig()
|
||||
|
||||
out, _ := cmd.Flags().GetString("output")
|
||||
if out != "terminal" {
|
||||
LuetCfg.GetLogging().SetLogLevel("error")
|
||||
}
|
||||
|
||||
LuetCfg.GetSolverOptions().Type = stype
|
||||
LuetCfg.GetSolverOptions().LearnRate = float32(rate)
|
||||
LuetCfg.GetSolverOptions().Discount = float32(discount)
|
||||
LuetCfg.GetSolverOptions().MaxAttempts = attempts
|
||||
|
||||
l := list.NewWriter()
|
||||
t := table.NewWriter()
|
||||
t.AppendHeader(rows)
|
||||
|
@@ -16,6 +16,7 @@ package cmd

 import (
 	helpers "github.com/mudler/luet/cmd/helpers"
+	"github.com/mudler/luet/cmd/util"
 	. "github.com/mudler/luet/pkg/config"
 	installer "github.com/mudler/luet/pkg/installer"
 	. "github.com/mudler/luet/pkg/logger"
@@ -31,16 +32,11 @@ var uninstallCmd = &cobra.Command{
 	Long:    `Uninstall packages`,
 	Aliases: []string{"rm", "un"},
 	PreRun: func(cmd *cobra.Command, args []string) {
-		LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
-		LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
-		LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
-		LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
-		LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
-		LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
+		util.BindSystemFlags(cmd)
+		util.BindSolverFlags(cmd)
 		LuetCfg.Viper.BindPFlag("nodeps", cmd.Flags().Lookup("nodeps"))
 		LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
 		LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
-		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
 	},
 	Run: func(cmd *cobra.Command, args []string) {
 		toRemove := []pkg.Package{}
@@ -53,10 +49,6 @@ var uninstallCmd = &cobra.Command{
 			toRemove = append(toRemove, pack)
 		}

-		stype := LuetCfg.Viper.GetString("solver.type")
-		discount := LuetCfg.Viper.GetFloat64("solver.discount")
-		rate := LuetCfg.Viper.GetFloat64("solver.rate")
-		attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
 		force := LuetCfg.Viper.GetBool("force")
 		nodeps, _ := cmd.Flags().GetBool("nodeps")
 		full, _ := cmd.Flags().GetBool("full")
@@ -64,18 +56,12 @@ var uninstallCmd = &cobra.Command{
 		fullClean, _ := cmd.Flags().GetBool("full-clean")
 		concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
 		yes := LuetCfg.Viper.GetBool("yes")
-		dbpath := LuetCfg.Viper.GetString("system.database_path")
-		rootfs := LuetCfg.Viper.GetString("system.rootfs")
-		engine := LuetCfg.Viper.GetString("system.database_engine")
+		keepProtected, _ := cmd.Flags().GetBool("keep-protected-files")

-		LuetCfg.System.DatabaseEngine = engine
-		LuetCfg.System.DatabasePath = dbpath
-		LuetCfg.System.Rootfs = rootfs
+		util.SetSystemConfig()
+		util.SetSolverConfig()

-		LuetCfg.GetSolverOptions().Type = stype
-		LuetCfg.GetSolverOptions().LearnRate = float32(rate)
-		LuetCfg.GetSolverOptions().Discount = float32(discount)
-		LuetCfg.GetSolverOptions().MaxAttempts = attempts
+		LuetCfg.ConfigProtectSkip = !keepProtected
 		if concurrent {
 			LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
 		} else {
@@ -123,6 +109,7 @@ func init() {
 	uninstallCmd.Flags().Bool("full-clean", false, "(experimental) Uninstall packages and all the other deps/revdeps of it.")
 	uninstallCmd.Flags().Bool("solver-concurrent", false, "Use concurrent solver (experimental)")
 	uninstallCmd.Flags().BoolP("yes", "y", false, "Don't ask questions")
+	uninstallCmd.Flags().BoolP("keep-protected-files", "k", false, "Keep package protected files around")

 	RootCmd.AddCommand(uninstallCmd)
 }
@@ -15,6 +15,7 @@
 package cmd

 import (
+	"github.com/mudler/luet/cmd/util"
 	. "github.com/mudler/luet/pkg/config"
 	installer "github.com/mudler/luet/pkg/installer"
 	. "github.com/mudler/luet/pkg/logger"
@@ -28,15 +29,10 @@ var upgradeCmd = &cobra.Command{
 	Short:   "Upgrades the system",
 	Aliases: []string{"u"},
 	PreRun: func(cmd *cobra.Command, args []string) {
-		LuetCfg.Viper.BindPFlag("system.database_path", installCmd.Flags().Lookup("system-dbpath"))
-		LuetCfg.Viper.BindPFlag("system.rootfs", installCmd.Flags().Lookup("system-target"))
-		LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
-		LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
-		LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
-		LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
+		util.BindSystemFlags(cmd)
+		util.BindSolverFlags(cmd)
 		LuetCfg.Viper.BindPFlag("force", cmd.Flags().Lookup("force"))
 		LuetCfg.Viper.BindPFlag("yes", cmd.Flags().Lookup("yes"))
-		LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
 	},
 	Long: `Upgrades packages in parallel`,
 	Run: func(cmd *cobra.Command, args []string) {
@@ -51,10 +47,6 @@ var upgradeCmd = &cobra.Command{
 			repos = append(repos, r)
 		}

-		stype := LuetCfg.Viper.GetString("solver.type")
-		discount := LuetCfg.Viper.GetFloat64("solver.discount")
-		rate := LuetCfg.Viper.GetFloat64("solver.rate")
-		attempts := LuetCfg.Viper.GetInt("solver.max_attempts")
 		force := LuetCfg.Viper.GetBool("force")
 		nodeps, _ := cmd.Flags().GetBool("nodeps")
 		full, _ := cmd.Flags().GetBool("full")
@@ -63,18 +55,11 @@ var upgradeCmd = &cobra.Command{
 		sync, _ := cmd.Flags().GetBool("sync")
 		concurrent, _ := cmd.Flags().GetBool("solver-concurrent")
 		yes := LuetCfg.Viper.GetBool("yes")
-		dbpath := LuetCfg.Viper.GetString("system.database_path")
-		rootfs := LuetCfg.Viper.GetString("system.rootfs")
-		engine := LuetCfg.Viper.GetString("system.database_engine")
 		downloadOnly, _ := cmd.Flags().GetBool("download-only")

-		LuetCfg.System.DatabaseEngine = engine
-		LuetCfg.System.DatabasePath = dbpath
-		LuetCfg.System.Rootfs = rootfs
-		LuetCfg.GetSolverOptions().Type = stype
-		LuetCfg.GetSolverOptions().LearnRate = float32(rate)
-		LuetCfg.GetSolverOptions().Discount = float32(discount)
-		LuetCfg.GetSolverOptions().MaxAttempts = attempts
+		util.SetSystemConfig()
+		util.SetSolverConfig()

 		if concurrent {
 			LuetCfg.GetSolverOptions().Implementation = solver.ParallelSimple
 		} else {
cmd/util/cli.go (new file, 65 lines)
@@ -0,0 +1,65 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package util

import (
	"github.com/spf13/cobra"

	"github.com/mudler/luet/pkg/config"
	. "github.com/mudler/luet/pkg/config"
)

func BindSystemFlags(cmd *cobra.Command) {
	LuetCfg.Viper.BindPFlag("system.database_path", cmd.Flags().Lookup("system-dbpath"))
	LuetCfg.Viper.BindPFlag("system.rootfs", cmd.Flags().Lookup("system-target"))
	LuetCfg.Viper.BindPFlag("system.database_engine", cmd.Flags().Lookup("system-engine"))
}

func BindSolverFlags(cmd *cobra.Command) {
	LuetCfg.Viper.BindPFlag("solver.type", cmd.Flags().Lookup("solver-type"))
	LuetCfg.Viper.BindPFlag("solver.discount", cmd.Flags().Lookup("solver-discount"))
	LuetCfg.Viper.BindPFlag("solver.rate", cmd.Flags().Lookup("solver-rate"))
	LuetCfg.Viper.BindPFlag("solver.max_attempts", cmd.Flags().Lookup("solver-attempts"))
}

func SetSystemConfig() {
	dbpath := LuetCfg.Viper.GetString("system.database_path")
	rootfs := LuetCfg.Viper.GetString("system.rootfs")
	engine := LuetCfg.Viper.GetString("system.database_engine")

	LuetCfg.System.DatabaseEngine = engine
	LuetCfg.System.DatabasePath = dbpath
	LuetCfg.System.SetRootFS(rootfs)
}

func SetSolverConfig() (c *config.LuetSolverOptions) {
	stype := LuetCfg.Viper.GetString("solver.type")
	discount := LuetCfg.Viper.GetFloat64("solver.discount")
	rate := LuetCfg.Viper.GetFloat64("solver.rate")
	attempts := LuetCfg.Viper.GetInt("solver.max_attempts")

	LuetCfg.GetSolverOptions().Type = stype
	LuetCfg.GetSolverOptions().LearnRate = float32(rate)
	LuetCfg.GetSolverOptions().Discount = float32(discount)
	LuetCfg.GetSolverOptions().MaxAttempts = attempts

	return &config.LuetSolverOptions{
		Type:        stype,
		LearnRate:   float32(rate),
		Discount:    float32(discount),
		MaxAttempts: attempts,
	}
}
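
These helpers centralize the flag wiring that each command previously duplicated: the Bind*Flags calls belong in a command's PreRun (so CLI flags take precedence in Viper), and the Set*Config calls belong at the top of Run, before the values are consumed. A minimal sketch of a hypothetical command wired through them, mirroring the pattern in the command diffs above (the command itself is illustrative; only the util functions come from this file):

    package cmd

    import (
    	"github.com/mudler/luet/cmd/util"
    	. "github.com/mudler/luet/pkg/logger"

    	"github.com/spf13/cobra"
    )

    var exampleCmd = &cobra.Command{
    	Use: "example",
    	PreRun: func(cmd *cobra.Command, args []string) {
    		// Bind --system-dbpath/--system-target/--system-engine and the --solver-* flags.
    		util.BindSystemFlags(cmd)
    		util.BindSolverFlags(cmd)
    	},
    	Run: func(cmd *cobra.Command, args []string) {
    		// Copy the bound values into the global LuetCfg before using them.
    		util.SetSystemConfig()
    		opts := util.SetSolverConfig()
    		Debug("Solver", opts.CompactString())
    	},
    }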
go.mod (18 changed lines)
@@ -9,23 +9,21 @@ require (
 	github.com/asdine/storm v0.0.0-20190418133842-e0f77eada154
 	github.com/briandowns/spinner v1.12.1-0.20201220203425-e201aaea0a31
 	github.com/cavaliercoder/grab v1.0.1-0.20201108051000-98a5bfe305ec
 	github.com/containerd/cgroups v0.0.0-20200217135630-d732e370d46d // indirect
 	github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
 	github.com/containerd/ttrpc v0.0.0-20200121165050-0be804eadb15 // indirect
 	github.com/containerd/typeurl v0.0.0-20200205145503-b45ef1f1f737 // indirect
 	github.com/crillab/gophersat v1.3.2-0.20201023142334-3fc2ac466765
-	github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24
+	github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
 	github.com/docker/distribution v2.7.1+incompatible
 	github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
 	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
 	github.com/docker/go-units v0.4.0
 	github.com/ecooper/qlearning v0.0.0-20160612200101-3075011a69fd
 	github.com/genuinetools/img v0.5.11
 	github.com/ghodss/yaml v1.0.0
 	github.com/google/go-containerregistry v0.2.1
 	github.com/google/renameio v1.0.0
 	github.com/hashicorp/go-multierror v1.0.0
 	github.com/hashicorp/go-version v1.2.1
-	github.com/imdario/mergo v0.3.8
+	github.com/imdario/mergo v0.3.9
 	github.com/jedib0t/go-pretty v4.3.0+incompatible
 	github.com/jedib0t/go-pretty/v6 v6.0.5
 	github.com/jinzhu/copier v0.0.0-20180308034124-7e38e58719c3
@@ -36,6 +34,7 @@ require (
 	github.com/logrusorgru/aurora v0.0.0-20190417123914-21d75270181e
 	github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0
 	github.com/mitchellh/hashstructure/v2 v2.0.1
+	github.com/moby/buildkit v0.7.2
 	github.com/moby/sys/mount v0.2.0 // indirect
 	github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d
 	github.com/mudler/docker-companion v0.4.6-0.20200418093252-41846f112d87
@@ -45,7 +44,6 @@ require (
 	github.com/onsi/gomega v1.10.3
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.1
-	github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4 // indirect
 	github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578
 	github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
 	github.com/pkg/errors v0.9.1
@@ -58,6 +56,8 @@ require (
 	go.uber.org/atomic v1.5.1 // indirect
 	go.uber.org/multierr v1.4.0
 	go.uber.org/zap v1.13.0
+	golang.org/x/mod v0.4.2
+	google.golang.org/grpc v1.29.1
 	gopkg.in/yaml.v2 v2.3.0
 	gotest.tools/v3 v3.0.2 // indirect
 	helm.sh/helm/v3 v3.3.4
@@ -67,3 +67,9 @@
 replace github.com/docker/docker => github.com/Luet-lab/moby v17.12.0-ce-rc1.0.20200605210607-749178b8f80d+incompatible

+replace github.com/containerd/containerd => github.com/containerd/containerd v1.3.1-0.20200227195959-4d242818bf55
+
+replace github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
+
+replace github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
+
 replace github.com/opencontainers/runc => github.com/opencontainers/runc v1.0.0-rc9.0.20200221051241-688cf6d43cc4
@@ -774,22 +774,31 @@ func (cs *LuetCompiler) getSpecHash(pkgs pkg.DefaultPackages, salt string) (stri
 	return fmt.Sprintf("%x", h.Sum(nil)), nil
 }

-func (cs *LuetCompiler) resolveJoinImages(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) error {
+func (cs *LuetCompiler) resolveFinalImages(concurrency int, keepPermissions bool, p *compilerspec.LuetCompilationSpec) error {

-	joinTag := ">:loop: join<"
-	if len(p.Join) != 0 {
-		Info(joinTag, "Generating a joint parent image from final packages")
+	joinTag := ">:loop: final images<"
+	var fromPackages pkg.DefaultPackages
+
+	if len(p.Join) > 0 {
+		fromPackages = p.Join
+		Warning(joinTag, `
+Attention! the 'join' keyword is going to be deprecated in Luet >=0.18.x.
+Use 'requires_final_images: true' instead in the build.yaml file`)
+	} else if p.RequiresFinalImages {
+		Info(joinTag, "Generating a parent image from final packages")
+		fromPackages = p.Package.GetRequires()
+	} else {
+		// No source image to resolve
+		return nil
+	}

 	// First compute a hash and check if image is available. if it is, then directly consume that
-	overallFp, err := cs.getSpecHash(p.Join, "join")
+	overallFp, err := cs.getSpecHash(fromPackages, "join")
 	if err != nil {
 		return errors.Wrap(err, "could not generate image hash")
 	}

-	Info(joinTag, "Searching existing image with hash ", overallFp)
+	Info(joinTag, "Searching existing image with hash", overallFp)

 	image := cs.findImageHash(overallFp, p)
 	if image != "" {
@@ -811,12 +820,12 @@ func (cs *LuetCompiler) resolveJoinImages(concurrency int, keepPermissions bool,
 	}
 	defer os.RemoveAll(joinDir) // clean up

-	for _, p := range p.Join { //highly dependent on the order
+	for _, p := range fromPackages {
 		Info(joinTag, ":arrow_right_hook:", p.HumanReadableString(), ":leaves:")
 	}

 	current := 0
-	for _, c := range p.Join {
+	for _, c := range fromPackages {
 		current++
 		if c != nil && c.Name != "" && c.Version != "" {
 			joinTag2 := fmt.Sprintf("%s %d/%d ⤑ :hammer: build %s", joinTag, current, len(p.Join), c.HumanReadableString())
@@ -924,7 +933,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF

 	//Before multistage : join - same as multistage, but keep artifacts, join them, create a new one and generate a final image.
 	// When the image is there, use it as a source here, in place of GetImage().
-	if err := cs.resolveJoinImages(concurrency, keepPermissions, p); err != nil {
+	if err := cs.resolveFinalImages(concurrency, keepPermissions, p); err != nil {
 		return nil, errors.Wrap(err, "while resolving join images")
 	}
@@ -1028,7 +1037,7 @@ func (cs *LuetCompiler) compile(concurrency int, keepPermissions bool, generateF
 		Assert: assertion,
 	})

-	if err := cs.resolveJoinImages(concurrency, keepPermissions, compileSpec); err != nil {
+	if err := cs.resolveFinalImages(concurrency, keepPermissions, compileSpec); err != nil {
 		return nil, errors.Wrap(err, "while resolving join images")
 	}
@@ -1247,6 +1256,11 @@ func (cs *LuetCompiler) FromPackage(p pkg.Package) (*compilerspec.LuetCompilatio

 	cs.inheritSpecBuildOptions(newSpec)

+	// Update the package in the compiler database to catch updates from NewLuetCompilationSpec
+	if err := cs.Database.UpdatePackage(newSpec.Package); err != nil {
+		return nil, errors.Wrap(err, "failed updating new package entry in compiler database")
+	}
+
 	return newSpec, err
 }
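
As the deprecation warning above spells out, build definitions should migrate from the join keyword to the new flag: with requires_final_images the parent image is derived from the package's own requires list instead of a separate join list. A minimal before/after sketch of a hypothetical build.yaml (package names illustrative):

    # Before (deprecated in Luet >= 0.18.x): explicit join list
    join:
    - name: "a"
      category: "test"
      version: "1.1"

    # After: reuse the final images of the build-time requirements
    requires:
    - name: "a"
      category: "test"
      version: "1.1"
    requires_final_images: true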
@@ -46,13 +46,13 @@ var _ = Describe("ImageHashTree", func() {
 			packageHash, err := hashtree.Query(compiler, spec)
 			Expect(err).ToNot(HaveOccurred())
-			Expect(packageHash.Target.Hash.BuildHash).To(Equal("4db24406e8db30a3310a1cf8c4d4e19597745e6d41b189dc51a73ac4a50cc9e6"))
-			Expect(packageHash.Target.Hash.PackageHash).To(Equal("4c867c9bab6c71d9420df75806e7a2f171dbc08487852ab4e2487bab04066cf2"))
-			Expect(packageHash.BuilderImageHash).To(Equal("builder-e6f9c5552a67c463215b0a9e4f7c7fc8"))
+			Expect(packageHash.Target.Hash.BuildHash).To(Equal("53993e5a02da4c21ad845371c872f5836fe45ff3a4e3c5ccb6296d0faee2b107"))
+			Expect(packageHash.Target.Hash.PackageHash).To(Equal("a786d3fd29d0b8bdfe5f304c8bf8be909d5c764cd7059c0e63294a8bff17f3ef"))
+			Expect(packageHash.BuilderImageHash).To(Equal("builder-0cd3c0d07fc9be568377b3bf1b699e06"))
 		})
 	})

-	expectedPackageHash := "15811d83d0f8360318c54d91dcae3714f8efb39bf872572294834880f00ee7a8"
+	expectedPackageHash := "0d568ac04c4ca528a4e5b67978f2ad3a75d31d443ab20f9d7683b9608cc0d494"

 	Context("complex package definition", func() {
 		BeforeEach(func() {
@@ -73,23 +73,26 @@ var _ = Describe("ImageHashTree", func() {
 			Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(expectedPackageHash))
 			Expect(packageHash.SourceHash).To(Equal(expectedPackageHash))
-			Expect(packageHash.BuilderImageHash).To(Equal("builder-3d739cab442aec15a6da238481df73c5"))
+			Expect(packageHash.BuilderImageHash).To(Equal("builder-0f45c345f59103e84fc8bebbf02f2e2b"))

 			//Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
-			Expect(packageHash.Target.Hash.PackageHash).To(Equal("99c4ebb4bc4754985fcc28677badf90f525aa231b1db0fe75659f11b86dc20e8"))
+			Expect(packageHash.Target.Hash.PackageHash).To(Equal("2e8159583ac825acada763358290cfbea919a33873a926cab84f4f1a67ecf111"))
 			a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}
 			hash, err := packageHash.DependencyBuildImage(a)
 			Expect(err).ToNot(HaveOccurred())
-			Expect(hash).To(Equal("f48f28ab62f1379a4247ec763681ccede68ea1e5c25aae8fb72459c0b2f8742e"))
+			Expect(hash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))

 			assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())
 			Expect(assertionA.Hash.PackageHash).To(Equal(expectedPackageHash))
 			b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
 			assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())
-			Expect(assertionB.Hash.PackageHash).To(Equal("f48f28ab62f1379a4247ec763681ccede68ea1e5c25aae8fb72459c0b2f8742e"))
+			Expect(assertionB.Hash.PackageHash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
 			hashB, err := packageHash.DependencyBuildImage(b)
 			Expect(err).ToNot(HaveOccurred())
-			Expect(hashB).To(Equal("2668e418eab6861404834ad617713e39b8e58f68016a1fbfcc9384efdd037376"))
+			Expect(hashB).To(Equal("315075265aeb2e3c04c5428d31911f53c194ec9fa3db1421e8478f44b1e0def8"))
 		})
 	})
@@ -112,33 +115,35 @@ var _ = Describe("ImageHashTree", func() {
 			Expect(err).ToNot(HaveOccurred())

 			Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).ToNot(Equal(expectedPackageHash))
-			sourceHash := "1d91b13d0246fa000085a1071c63397d21546300b17f69493f22315a64b717d4"
+			sourceHash := "66ec001fe72052d0e605ca96f607ae39ea4f8b53f0b7f762e377622d9c654de3"
 			Expect(packageHash.Dependencies[len(packageHash.Dependencies)-1].Hash.PackageHash).To(Equal(sourceHash))
 			Expect(packageHash.SourceHash).To(Equal(sourceHash))
 			Expect(packageHash.SourceHash).ToNot(Equal(expectedPackageHash))

-			Expect(packageHash.BuilderImageHash).To(Equal("builder-03ee108a7c56b17ee568ace0800dd16d"))
+			Expect(packageHash.BuilderImageHash).To(Equal("builder-ffc02fd8aaa916d0e17249885b3226b1"))

 			//Expect(packageHash.Target.Hash.BuildHash).To(Equal("79d7107d13d578b362e6a7bf10ec850efce26316405b8d732ce8f9e004d64281"))
-			Expect(packageHash.Target.Hash.PackageHash).To(Equal("7677da23b2cc866c2d07aa4a58fbf703340f2f78c0efbb1ba9faf8979f250c87"))
+			Expect(packageHash.Target.Hash.PackageHash).To(Equal("b9c0286ebf6d28be831926ec7da9cb3cda6b489722d656aefc363ebd7173f937"))
 			a := &pkg.DefaultPackage{Name: "a", Category: "test", Version: "1.1"}
 			hash, err := packageHash.DependencyBuildImage(a)
 			Expect(err).ToNot(HaveOccurred())
-			Expect(hash).To(Equal("f48f28ab62f1379a4247ec763681ccede68ea1e5c25aae8fb72459c0b2f8742e"))
+			Expect(hash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))

 			assertionA := packageHash.Dependencies.Search(a.GetFingerPrint())

-			Expect(assertionA.Hash.PackageHash).To(Equal("1d91b13d0246fa000085a1071c63397d21546300b17f69493f22315a64b717d4"))
+			Expect(assertionA.Hash.PackageHash).To(Equal("66ec001fe72052d0e605ca96f607ae39ea4f8b53f0b7f762e377622d9c654de3"))
 			Expect(assertionA.Hash.PackageHash).ToNot(Equal(expectedPackageHash))

 			b := &pkg.DefaultPackage{Name: "b", Category: "test", Version: "1.0"}
 			assertionB := packageHash.Dependencies.Search(b.GetFingerPrint())

-			Expect(assertionB.Hash.PackageHash).To(Equal("f48f28ab62f1379a4247ec763681ccede68ea1e5c25aae8fb72459c0b2f8742e"))
+			Expect(assertionB.Hash.PackageHash).To(Equal("74c6c833730e9ebd1d9fc669278152b5b58ec7ecb28fdae56658665616076adf"))
 			hashB, err := packageHash.DependencyBuildImage(b)
 			Expect(err).ToNot(HaveOccurred())

-			Expect(hashB).To(Equal("2668e418eab6861404834ad617713e39b8e58f68016a1fbfcc9384efdd037376"))
+			Expect(hashB).To(Equal("315075265aeb2e3c04c5428d31911f53c194ec9fa3db1421e8478f44b1e0def8"))
 		})
 	})
@@ -453,6 +453,9 @@ func (a *PackageArtifact) GetProtectFiles() []string {

// Unpack untars and decompresses (TODO) the artifact to the given path
func (a *PackageArtifact) Unpack(dst string, keepPerms bool) error {
+	if !strings.HasPrefix(dst, "/") {
+		return errors.New("destination must be an absolute path")
+	}

	// Create
	protectedFiles := a.GetProtectFiles()
@@ -23,10 +23,11 @@ import (
	"github.com/mitchellh/hashstructure/v2"
	options "github.com/mudler/luet/pkg/compiler/types/options"

-	"github.com/ghodss/yaml"
	pkg "github.com/mudler/luet/pkg/package"
	"github.com/mudler/luet/pkg/solver"
	"github.com/otiai10/copy"
+	yaml "gopkg.in/yaml.v2"
+	dirhash "golang.org/x/mod/sumdb/dirhash"
)

type LuetCompilationspecs []LuetCompilationSpec
@@ -115,49 +116,72 @@ type LuetCompilationSpec struct {
	Copy []CopyField `json:"copy"`

-	Join pkg.DefaultPackages `json:"join"`
+	Join                pkg.DefaultPackages `json:"join"`
+	RequiresFinalImages bool                `json:"requires_final_images" yaml:"requires_final_images"`
}

// Signature is a portion of the spec that yields a signature for the hash
type Signature struct {
-	Image      string
-	Steps      []string
-	PackageDir string
-	Prelude    []string
-	Seed       string
-	Env        []string
-	Retrieve   []string
-	Unpack     bool
-	Includes   []string
-	Excludes   []string
-	Copy       []CopyField
-	Join       pkg.DefaultPackages
+	Image               string
+	Steps               []string
+	PackageDir          string
+	Prelude             []string
+	Seed                string
+	Env                 []string
+	Retrieve            []string
+	Unpack              bool
+	Includes            []string
+	Excludes            []string
+	Copy                []CopyField
+	Join                pkg.DefaultPackages
+	RequiresFinalImages bool
}

func (cs *LuetCompilationSpec) signature() Signature {
	return Signature{
-		Image:      cs.Image,
-		Steps:      cs.Steps,
-		PackageDir: cs.PackageDir,
-		Prelude:    cs.Prelude,
-		Seed:       cs.Seed,
-		Env:        cs.Env,
-		Retrieve:   cs.Retrieve,
-		Unpack:     cs.Unpack,
-		Includes:   cs.Includes,
-		Excludes:   cs.Excludes,
-		Copy:       cs.Copy,
-		Join:       cs.Join,
+		Image:               cs.Image,
+		Steps:               cs.Steps,
+		PackageDir:          cs.PackageDir,
+		Prelude:             cs.Prelude,
+		Seed:                cs.Seed,
+		Env:                 cs.Env,
+		Retrieve:            cs.Retrieve,
+		Unpack:              cs.Unpack,
+		Includes:            cs.Includes,
+		Excludes:            cs.Excludes,
+		Copy:                cs.Copy,
+		Join:                cs.Join,
+		RequiresFinalImages: cs.RequiresFinalImages,
	}
}

func NewLuetCompilationSpec(b []byte, p pkg.Package) (*LuetCompilationSpec, error) {
	var spec LuetCompilationSpec
	var packageDefinition pkg.DefaultPackage
	err := yaml.Unmarshal(b, &spec)
	if err != nil {
		return &spec, err
	}
-	spec.Package = p.(*pkg.DefaultPackage)
	err = yaml.Unmarshal(b, &packageDefinition)
	if err != nil {
		return &spec, err
	}

-	// Update requires/conflict/provides
+	// When we have been passed a byte slice, parse it as a package
+	// and update requires/conflicts/provides.
+	// This is required in order to allow manipulation of such fields with templating
	copy := *p.(*pkg.DefaultPackage)
	spec.Package = &copy
	if len(packageDefinition.GetRequires()) != 0 {
		spec.Package.Requires(packageDefinition.GetRequires())
	}
	if len(packageDefinition.GetConflicts()) != 0 {
		spec.Package.Conflicts(packageDefinition.GetConflicts())
	}
	if len(packageDefinition.GetProvides()) != 0 {
		spec.Package.SetProvides(packageDefinition.GetProvides())
	}
	return &spec, nil
}
func (cs *LuetCompilationSpec) GetSourceAssertion() solver.PackagesAssertions {
@@ -242,7 +266,7 @@ func (cs *LuetCompilationSpec) SetSeedImage(s string) {
}

func (cs *LuetCompilationSpec) EmptyPackage() bool {
-	return len(cs.BuildSteps()) == 0 && len(cs.GetPreBuildSteps()) == 0 && !cs.UnpackedPackage()
+	return len(cs.BuildSteps()) == 0 && !cs.UnpackedPackage()
}

func (cs *LuetCompilationSpec) UnpackedPackage() bool {
@@ -266,7 +290,14 @@ func (cs *LuetCompilationSpec) Hash() (string, error) {
	// build a signature; only the fields that are relevant for build purposes are part of the hash
	signature := cs.signature()
	h, err := hashstructure.Hash(signature, hashstructure.FormatV2, nil)
-	return fmt.Sprint(h), err
+	if err != nil {
+		return "", err
+	}
+	sum, err := dirhash.HashDir(cs.Package.Path, "", dirhash.DefaultHash)
+	if err != nil {
+		return fmt.Sprint(h), err
+	}
+	return fmt.Sprint(h, sum), err
}
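
The rewritten Hash() concatenates two independent digests: a structure hash of the build-relevant Signature fields, and a dirhash of the files shipped next to the spec, so changing either the spec or its sibling files busts the cache. A standalone sketch of the same composition; the package name, the Sig type and the directory are illustrative stand-ins, not luet's types:

package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure/v2"
	dirhash "golang.org/x/mod/sumdb/dirhash"
)

// Sig is a stand-in for the Signature struct above.
type Sig struct {
	Image string
	Steps []string
}

func specHash(sig Sig, dir string) (string, error) {
	// Hash only the build-relevant fields.
	h, err := hashstructure.Hash(sig, hashstructure.FormatV2, nil)
	if err != nil {
		return "", err
	}
	// Hash the directory contents next to the spec.
	sum, err := dirhash.HashDir(dir, "", dirhash.DefaultHash)
	if err != nil {
		return "", err
	}
	// Concatenate: a change to either part yields a new key.
	return fmt.Sprint(h, sum), nil
}

func main() {
	k, err := specHash(Sig{Image: "alpine", Steps: []string{"make"}}, ".")
	if err != nil {
		panic(err)
	}
	fmt.Println(k)
}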

func (cs *LuetCompilationSpec) CopyRetrieves(dest string) error {
@@ -107,6 +107,19 @@ type LuetSystemConfig struct {
	TmpDirBase string `yaml:"tmpdir_base" mapstructure:"tmpdir_base"`
}

+func (s *LuetSystemConfig) SetRootFS(path string) error {
+	pathToSet := path
+	if !filepath.IsAbs(path) {
+		abs, err := filepath.Abs(path)
+		if err != nil {
+			return err
+		}
+		pathToSet = abs
+	}
+
+	s.Rootfs = pathToSet
+	return nil
+}
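
SetRootFS stores only absolute paths, so every later filepath.Join against Rootfs is anchored to a fixed location regardless of the process working directory. A minimal standalone sketch of the same normalization; the function name is illustrative:

package main

import (
	"fmt"
	"path/filepath"
)

// normalizeRootFS mirrors SetRootFS above: relative paths are resolved
// against the current working directory before being stored.
func normalizeRootFS(path string) (string, error) {
	if filepath.IsAbs(path) {
		return path, nil
	}
	return filepath.Abs(path)
}

func main() {
	p, _ := normalizeRootFS("rootfs")
	fmt.Println(p) // e.g. /home/user/rootfs when run from /home/user
}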

func (sc *LuetSystemConfig) GetRepoDatabaseDirPath(name string) string {
	dbpath := filepath.Join(sc.Rootfs, sc.DatabasePath)
	dbpath = filepath.Join(dbpath, "repos/"+name)
@@ -18,17 +18,22 @@ package docker

import (
	"context"
	"encoding/hex"
	"fmt"
	"os"
	"strings"
	"time"

-	"github.com/containerd/containerd/archive"
+	"github.com/containerd/containerd/images"
+	"github.com/mudler/luet/pkg/helpers/imgworker"

+	continerdarchive "github.com/containerd/containerd/archive"
	"github.com/docker/cli/cli/trust"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/registry"
	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/mutate"
	"github.com/google/go-containerregistry/pkg/v1/remote"
	"github.com/mudler/luet/pkg/bus"
@@ -118,39 +123,75 @@ func (s staticAuth) Authorization() (*authn.AuthConfig, error) {
	}, nil
}

-type Image struct {
-	// Name of the image.
-	//
-	// To be pulled, it must be a reference compatible with resolvers.
-	//
-	// This field is required.
-	Name string
-
-	// Labels provide runtime decoration for the image record.
-	//
-	// There is no default behavior for how these labels are propagated. They
-	// only decorate the static metadata object.
-	//
-	// This field is optional.
-	Labels map[string]string
-
-	// Target describes the root content for this image. Typically, this is
-	// a manifest, index or manifest list.
-	Target specs.Descriptor
-
-	CreatedAt, UpdatedAt time.Time
-
-	ContentSize int64
-}

// UnpackEventData is the data structure to pass for the bus events
type UnpackEventData struct {
	Image string
	Dest  string
}

-// DownloadAndExtractDockerImage is a re-adaption
-func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*Image, error) {
+// privilegedExtractImage uses the imgworker (which requires privileges) to extract a container image
+func privilegedExtractImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {
+	defer os.RemoveAll(temp)
+	c, err := imgworker.New(temp, auth)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed creating client")
+	}
+	defer c.Close()
+
+	listedImage, err := c.Pull(image)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed listing images")
+	}
+
+	os.RemoveAll(dest)
+
+	bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
+
+	err = c.Unpack(image, dest)
+
+	bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
+
+	return listedImage, err
+}
+
+// UnarchiveLayers extracts layers with archive.Untar from docker instead of containerd
+func UnarchiveLayers(temp string, img v1.Image, image, dest string, auth *types.AuthConfig, verify bool) (int64, error) {
+	layers, err := img.Layers()
+	if err != nil {
+		return 0, fmt.Errorf("reading layers from '%s' image failed: %v", image, err)
+	}
+	bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})
+
+	var size int64
+	for _, l := range layers {
+		s, err := l.Size()
+		if err != nil {
+			return 0, fmt.Errorf("reading layer size from '%s' image failed: %v", image, err)
+		}
+		size += s
+
+		layerReader, err := l.Uncompressed()
+		if err != nil {
+			return 0, fmt.Errorf("reading uncompressed layer from '%s' image failed: %v", image, err)
+		}
+		defer layerReader.Close()
+
+		// Unpack the tarfile to the rootfs path.
+		// FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
+		if err := archive.Untar(layerReader, dest, &archive.TarOptions{
+			NoLchown:        false,
+			ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
+		}); err != nil {
+			return 0, fmt.Errorf("extracting '%s' image to directory %s failed: %v", image, dest, err)
+		}
+	}
+	bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})
+
+	return size, nil
+}
+
+// DownloadAndExtractDockerImage extracts a container image natively. It supports privileged/unprivileged mode
+func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthConfig, verify bool) (*imgworker.ListedImage, error) {
	if verify {
		img, err := verifyImage(image, auth)
		if err != nil {
@@ -159,6 +200,10 @@ func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthCon
		image = img
	}

+	if os.Getenv("LUET_PRIVILEGED_EXTRACT") == "true" {
+		return privilegedExtractImage(temp, image, dest, auth, verify)
+	}

	ref, err := name.ParseReference(image)
	if err != nil {
		return nil, err
@@ -186,32 +231,27 @@ func DownloadAndExtractDockerImage(temp, image, dest string, auth *types.AuthCon

	reader := mutate.Extract(img)
	defer reader.Close()

	os.RemoveAll(temp)
	os.RemoveAll(dest)
	if err := os.MkdirAll(dest, 0700); err != nil {
		return nil, err
	}
	defer os.RemoveAll(temp)

	bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})

-	c, err := archive.Apply(context.TODO(), dest, reader)
+	c, err := continerdarchive.Apply(context.TODO(), dest, reader)
	if err != nil {
		return nil, err
	}

	bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})

-	return &Image{
-		Name:   image,
-		Labels: m.Annotations,
-		Target: specs.Descriptor{
-			MediaType: string(mt),
-			Digest:    digest.Digest(d.String()),
-			Size:      c,
+	return &imgworker.ListedImage{
+		Image: images.Image{
+			Name:   image,
+			Labels: m.Annotations,
+			Target: specs.Descriptor{
+				MediaType: string(mt),
+				Digest:    digest.Digest(d.String()),
+				Size:      c,
+			},
		},
		ContentSize: c,
	}, nil
}
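
A hedged sketch of how a caller might drive the two extraction paths. The temp directory, destination, image name and empty AuthConfig are illustrative examples, and the import path is assumed from the Docker download client shown later in this diff:

package main

import (
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/mudler/luet/pkg/helpers/docker"
)

func main() {
	// Opt in to the buildkit-based (imgworker) path; leave the variable
	// unset to use the default unprivileged go-containerregistry path.
	os.Setenv("LUET_PRIVILEGED_EXTRACT", "true")

	img, err := docker.DownloadAndExtractDockerImage(
		"/tmp/luet-temp", "alpine:latest", "/tmp/rootfs",
		&types.AuthConfig{}, false)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("unpacked %s (%d bytes)", img.Name, img.ContentSize)
}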
36
pkg/helpers/imgworker/auth.go
Normal file
@@ -0,0 +1,36 @@
package imgworker

import (
	"context"

	"github.com/docker/docker/api/types"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/auth"
	"google.golang.org/grpc"
)

func NewDockerAuthProvider(auth *types.AuthConfig) session.Attachable {
	return &authProvider{
		config: auth,
	}
}

type authProvider struct {
	config *types.AuthConfig
}

func (ap *authProvider) Register(server *grpc.Server) {
	// no-op
}

func (ap *authProvider) Credentials(ctx context.Context, req *auth.CredentialsRequest) (*auth.CredentialsResponse, error) {
	res := &auth.CredentialsResponse{}
	if ap.config.IdentityToken != "" {
		res.Secret = ap.config.IdentityToken
	} else {
		res.Username = ap.config.Username
		res.Secret = ap.config.Password
	}
	return res, nil
}
81
pkg/helpers/imgworker/client.go
Normal file
@@ -0,0 +1,81 @@
package imgworker

// Slightly adapted from the genuinetools/img worker

import (
	"context"
	"os"
	"path/filepath"

	"github.com/containerd/containerd/namespaces"
	dockertypes "github.com/docker/docker/api/types"
	"github.com/genuinetools/img/types"
	"github.com/moby/buildkit/control"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/moby/buildkit/worker/base"
	"github.com/pkg/errors"
)

// Client holds the information for the client we will use for communicating
// with the buildkit controller.
type Client struct {
	backend   string
	localDirs map[string]string
	root      string

	sessionManager *session.Manager
	controller     *control.Controller
	opts           *base.WorkerOpt

	sess *session.Session
	ctx  context.Context
	auth *dockertypes.AuthConfig
}

// New returns a new client for communicating with the buildkit controller.
func New(root string, auth *dockertypes.AuthConfig) (*Client, error) {
	// The native backend is fine, our images have just one layer. No need to depend on anything
	backend := types.NativeBackend

	// Create the root directory
	root = filepath.Join(root, "runc", backend)
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	c := &Client{
		backend:   types.NativeBackend,
		root:      root,
		localDirs: nil,
		auth:      auth,
	}

	if err := c.prepare(); err != nil {
		return nil, errors.Wrapf(err, "failed preparing client")
	}

	// Return the prepared client.
	return c, nil
}

func (c *Client) Close() {
	c.sess.Close()
}

func (c *Client) prepare() error {
	ctx := appcontext.Context()
	sess, sessDialer, err := c.Session(ctx)
	if err != nil {
		return errors.Wrapf(err, "failed creating Session")
	}
	ctx = session.NewContext(ctx, sess.ID())
	ctx = namespaces.WithNamespace(ctx, "buildkit")

	c.ctx = ctx
	c.sess = sess

	go func() {
		sess.Run(ctx, sessDialer)
	}()
	return nil
}
129
pkg/helpers/imgworker/pull.go
Normal file
@@ -0,0 +1,129 @@
package imgworker

// Slightly adapted from the genuinetools/img worker

import (
	"fmt"

	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	"github.com/docker/distribution/reference"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/exporter"
	imageexporter "github.com/moby/buildkit/exporter/containerimage"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/source/containerimage"
)

// ListedImage represents an image structure returned from ListImages.
// It extends containerd/images.Image with extra fields.
type ListedImage struct {
	images.Image
	ContentSize int64
}

// Pull retrieves an image from a remote registry.
func (c *Client) Pull(image string) (*ListedImage, error) {
	ctx := c.ctx

	sm, err := c.getSessionManager()
	if err != nil {
		return nil, err
	}

	// Parse the image name and tag.
	named, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return nil, fmt.Errorf("parsing image name %q failed: %v", image, err)
	}
	// Add the latest tag if they did not provide one.
	named = reference.TagNameOnly(named)
	image = named.String()

	// Get the identifier for the image.
	identifier, err := source.NewImageIdentifier(image)
	if err != nil {
		return nil, err
	}

	// Create the worker opts.
	opt, err := c.createWorkerOpt()
	if err != nil {
		return nil, fmt.Errorf("creating worker opt failed: %v", err)
	}

	cm, err := cache.NewManager(cache.ManagerOpt{
		Snapshotter:    opt.Snapshotter,
		MetadataStore:  opt.MetadataStore,
		ContentStore:   opt.ContentStore,
		LeaseManager:   opt.LeaseManager,
		GarbageCollect: opt.GarbageCollect,
		Applier:        opt.Applier,
	})
	if err != nil {
		return nil, err
	}

	// Create the source for the pull.
	srcOpt := containerimage.SourceOpt{
		Snapshotter:   opt.Snapshotter,
		ContentStore:  opt.ContentStore,
		Applier:       opt.Applier,
		CacheAccessor: cm,
		ImageStore:    opt.ImageStore,
		RegistryHosts: opt.RegistryHosts,
		LeaseManager:  opt.LeaseManager,
	}
	src, err := containerimage.NewSource(srcOpt)
	if err != nil {
		return nil, err
	}
	s, err := src.Resolve(ctx, identifier, sm)
	if err != nil {
		return nil, err
	}
	ref, err := s.Snapshot(ctx)
	if err != nil {
		return nil, err
	}

	// Create the exporter for the pull.
	iw, err := imageexporter.NewImageWriter(imageexporter.WriterOpt{
		Snapshotter:  opt.Snapshotter,
		ContentStore: opt.ContentStore,
		Differ:       opt.Differ,
	})
	if err != nil {
		return nil, err
	}
	expOpt := imageexporter.Opt{
		SessionManager: sm,
		ImageWriter:    iw,
		Images:         opt.ImageStore,
		RegistryHosts:  opt.RegistryHosts,
		LeaseManager:   opt.LeaseManager,
	}
	exp, err := imageexporter.New(expOpt)
	if err != nil {
		return nil, err
	}
	e, err := exp.Resolve(ctx, map[string]string{"name": image})
	if err != nil {
		return nil, err
	}
	if _, err := e.Export(ctx, exporter.Source{Ref: ref}); err != nil {
		return nil, err
	}
	// Get the image.
	img, err := opt.ImageStore.Get(ctx, image)
	if err != nil {
		return nil, fmt.Errorf("getting image %s from image store failed: %v", image, err)
	}
	size, err := img.Size(ctx, opt.ContentStore, platforms.Default())
	if err != nil {
		return nil, fmt.Errorf("calculating size of image %s failed: %v", img.Name, err)
	}

	return &ListedImage{Image: img, ContentSize: size}, nil
}
49
pkg/helpers/imgworker/session.go
Normal file
@@ -0,0 +1,49 @@
package imgworker

// Slightly adapted from the genuinetools/img worker

import (
	"context"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
	"github.com/moby/buildkit/session/testutil"
	"github.com/pkg/errors"
)

func (c *Client) getSessionManager() (*session.Manager, error) {
	if c.sessionManager == nil {
		var err error
		c.sessionManager, err = session.NewManager()
		if err != nil {
			return nil, err
		}
	}
	return c.sessionManager, nil
}

// Session creates the session manager and returns the session and its
// dialer.
func (c *Client) Session(ctx context.Context) (*session.Session, session.Dialer, error) {
	m, err := c.getSessionManager()
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create session manager")
	}
	sessionName := "luet"
	s, err := session.NewSession(ctx, sessionName, "")
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create session")
	}
	syncedDirs := make([]filesync.SyncedDir, 0, len(c.localDirs))
	for name, d := range c.localDirs {
		syncedDirs = append(syncedDirs, filesync.SyncedDir{Name: name, Dir: d})
	}
	s.Allow(filesync.NewFSSyncProvider(syncedDirs))
	s.Allow(NewDockerAuthProvider(c.auth))
	return s, sessionDialer(s, m), err
}

func sessionDialer(s *session.Session, m *session.Manager) session.Dialer {
	// FIXME: rename testutil
	return session.Dialer(testutil.TestStream(testutil.Handler(m.HandleConn)))
}
93
pkg/helpers/imgworker/unpack.go
Normal file
@@ -0,0 +1,93 @@
package imgworker

// Slightly adapted from the genuinetools/img worker

import (
	"errors"
	"fmt"
	"os"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/pkg/archive"
	"github.com/mudler/luet/pkg/bus"
	"github.com/sirupsen/logrus"
)

// TODO: this requires root permissions to mount/unmount layers, although it shouldn't be required.
// See how backends are unpacking images without asking for root permissions.

// UnpackEventData is the data structure to pass for the bus events
type UnpackEventData struct {
	Image string
	Dest  string
}

// Unpack exports an image to a rootfs destination directory.
func (c *Client) Unpack(image, dest string) error {
	ctx := c.ctx
	if len(dest) < 1 {
		return errors.New("destination directory for rootfs cannot be empty")
	}

	if _, err := os.Stat(dest); err == nil {
		return fmt.Errorf("destination directory already exists: %s", dest)
	}

	// Parse the image name and tag.
	named, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return fmt.Errorf("parsing image name %q failed: %v", image, err)
	}
	// Add the latest tag if they did not provide one.
	named = reference.TagNameOnly(named)
	image = named.String()

	// Create the worker opts.
	opt, err := c.createWorkerOpt()
	if err != nil {
		return fmt.Errorf("creating worker opt failed: %v", err)
	}

	if opt.ImageStore == nil {
		return errors.New("image store is nil")
	}

	img, err := opt.ImageStore.Get(ctx, image)
	if err != nil {
		return fmt.Errorf("getting image %s from image store failed: %v", image, err)
	}

	manifest, err := images.Manifest(ctx, opt.ContentStore, img.Target, platforms.Default())
	if err != nil {
		return fmt.Errorf("getting image manifest failed: %v", err)
	}

	_, _ = bus.Manager.Publish(bus.EventImagePreUnPack, UnpackEventData{Image: image, Dest: dest})

	for _, desc := range manifest.Layers {
		logrus.Debugf("Unpacking layer %s", desc.Digest.String())

		// Read the blob from the content store.
		layer, err := opt.ContentStore.ReaderAt(ctx, desc)
		if err != nil {
			return fmt.Errorf("getting reader for digest %s failed: %v", desc.Digest.String(), err)
		}

		// Unpack the tarfile to the rootfs path.
		// FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
		if err := archive.Untar(content.NewReader(layer), dest, &archive.TarOptions{
			NoLchown:        false,
			ExcludePatterns: []string{"dev/"}, // prevent 'operation not permitted'
		}); err != nil {
			return fmt.Errorf("extracting tar for %s to directory %s failed: %v", desc.Digest.String(), dest, err)
		}
	}

	_, _ = bus.Manager.Publish(bus.EventImagePostUnPack, UnpackEventData{Image: image, Dest: dest})

	return nil
}
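
Taken together, the new imgworker files compose into a small pull-then-unpack API. A hedged usage sketch follows; the state directory, destination and image name are examples, and, as the TODO above notes, Unpack currently needs root privileges:

package main

import (
	"log"

	"github.com/docker/docker/api/types"
	"github.com/mudler/luet/pkg/helpers/imgworker"
)

func main() {
	// New creates the buildkit-style stores under the given root.
	c, err := imgworker.New("/tmp/imgworker-state", &types.AuthConfig{})
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Pull fetches the image into the local content/image stores.
	img, err := c.Pull("alpine:latest")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("pulled %s (%d bytes)", img.Name, img.ContentSize)

	// Unpack untars each layer into a destination that must not exist yet.
	if err := c.Unpack("alpine:latest", "/tmp/rootfs"); err != nil {
		log.Fatal(err)
	}
}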
106
pkg/helpers/imgworker/workeropts.go
Normal file
@@ -0,0 +1,106 @@
package imgworker

// Slightly adapted from the genuinetools/img worker

import (
	"context"
	"fmt"
	"path/filepath"

	"github.com/containerd/containerd/content/local"
	"github.com/containerd/containerd/diff/apply"
	"github.com/containerd/containerd/diff/walking"
	ctdmetadata "github.com/containerd/containerd/metadata"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/remotes/docker"
	ctdsnapshot "github.com/containerd/containerd/snapshots"
	"github.com/containerd/containerd/snapshots/native"
	"github.com/moby/buildkit/cache/metadata"
	containerdsnapshot "github.com/moby/buildkit/snapshot/containerd"
	"github.com/moby/buildkit/util/binfmt_misc"
	"github.com/moby/buildkit/util/leaseutil"
	"github.com/moby/buildkit/worker/base"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	bolt "go.etcd.io/bbolt"
)

// createWorkerOpt creates a base.WorkerOpt to be used for a new worker.
func (c *Client) createWorkerOpt() (opt base.WorkerOpt, err error) {
	if c.opts != nil {
		return *c.opts, nil
	}

	// Create the metadata store.
	md, err := metadata.NewStore(filepath.Join(c.root, "metadata.db"))
	if err != nil {
		return opt, err
	}

	snapshotRoot := filepath.Join(c.root, "snapshots")

	s, err := native.NewSnapshotter(snapshotRoot)
	if err != nil {
		return opt, fmt.Errorf("creating %s snapshotter failed: %v", c.backend, err)
	}

	// Create the content store locally.
	contentStore, err := local.NewStore(filepath.Join(c.root, "content"))
	if err != nil {
		return opt, err
	}

	// Open the bolt database for metadata.
	db, err := bolt.Open(filepath.Join(c.root, "containerdmeta.db"), 0644, nil)
	if err != nil {
		return opt, err
	}

	// Create the new database for metadata.
	mdb := ctdmetadata.NewDB(db, contentStore, map[string]ctdsnapshot.Snapshotter{
		c.backend: s,
	})
	if err := mdb.Init(context.TODO()); err != nil {
		return opt, err
	}

	// Create the image store.
	imageStore := ctdmetadata.NewImageStore(mdb)

	contentStore = containerdsnapshot.NewContentStore(mdb.ContentStore(), "buildkit")

	id, err := base.ID(c.root)
	if err != nil {
		return opt, err
	}

	xlabels := base.Labels("oci", c.backend)

	var supportedPlatforms []specs.Platform
	for _, p := range binfmt_misc.SupportedPlatforms(false) {
		parsed, err := platforms.Parse(p)
		if err != nil {
			return opt, err
		}
		supportedPlatforms = append(supportedPlatforms, platforms.Normalize(parsed))
	}

	opt = base.WorkerOpt{
		ID:             id,
		Labels:         xlabels,
		MetadataStore:  md,
		Snapshotter:    containerdsnapshot.NewSnapshotter(c.backend, mdb.Snapshotter(c.backend), "buildkit", nil),
		ContentStore:   contentStore,
		Applier:        apply.NewFileSystemApplier(contentStore),
		Differ:         walking.NewWalkingDiff(contentStore),
		ImageStore:     imageStore,
		Platforms:      supportedPlatforms,
		RegistryHosts:  docker.ConfigureDefaultRegistries(),
		LeaseManager:   leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), "buildkit"),
		GarbageCollect: mdb.GarbageCollect,
	}

	c.opts = &opt

	return opt, err
}
10
pkg/helpers/slice.go
Normal file
@@ -0,0 +1,10 @@
package helpers

func Contains(s []string, e string) bool {
	for _, a := range s {
		if a == e {
			return true
		}
	}
	return false
}
@@ -140,7 +140,6 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
	var file *os.File = nil
	var err error
	var temp, contentstore string
-	var info *docker.Image
	// Files should be in URI/repository:<file>
	ok := false

@@ -164,7 +163,7 @@ func (c *DockerClient) DownloadFile(name string) (string, error) {
		imageName := fmt.Sprintf("%s:%s", uri, docker.StripInvalidStringsFromImage(name))
		Info("Downloading", imageName)

-		info, err = docker.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
+		info, err := docker.DownloadAndExtractDockerImage(contentstore, imageName, temp, c.auth, c.RepoData.Verify)
		if err != nil {
			Warning(fmt.Sprintf(errImageDownloadMsg, imageName, err.Error()))
			continue
@@ -18,10 +18,12 @@ package client

import (
	"fmt"
	"math"
+	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
+	"strconv"
	"time"

	"github.com/mudler/luet/pkg/compiler/types/artifact"
@@ -42,6 +44,27 @@ func NewHttpClient(r RepoData) *HttpClient {
	return &HttpClient{RepoData: r}
}

+func NewGrabClient() *grab.Client {
+	httpTimeout := 30
+	timeout := os.Getenv("HTTP_TIMEOUT")
+	if timeout != "" {
+		timeoutI, err := strconv.Atoi(timeout)
+		if err == nil {
+			httpTimeout = timeoutI
+		}
+	}
+
+	return &grab.Client{
+		UserAgent: "grab",
+		HTTPClient: &http.Client{
+			Timeout: time.Duration(httpTimeout) * time.Second,
+			Transport: &http.Transport{
+				Proxy: http.ProxyFromEnvironment,
+			},
+		},
+	}
+}
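
Note that the timeout is resolved once, when the grab client is built: a 30-second default, overridable through the HTTP_TIMEOUT environment variable (in seconds); malformed values are silently ignored. A runnable sketch of just that parsing logic:

package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

// timeoutFromEnv mirrors NewGrabClient's parsing: a 30s default,
// overridden by HTTP_TIMEOUT when it parses as an integer.
func timeoutFromEnv() time.Duration {
	t := 30
	if v := os.Getenv("HTTP_TIMEOUT"); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			t = n
		}
	}
	return time.Duration(t) * time.Second
}

func main() {
	os.Setenv("HTTP_TIMEOUT", "120")
	fmt.Println(timeoutFromEnv()) // 2m0s
}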

func (c *HttpClient) PrepareReq(dst, url string) (*grab.Request, error) {

	req, err := grab.NewRequest(dst, url)
@@ -86,7 +109,7 @@ func (c *HttpClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.Pa
	}
	defer os.RemoveAll(temp)

-	client := grab.NewClient()
+	client := NewGrabClient()

	for _, uri := range c.RepoData.Urls {
		Debug("Downloading artifact", artifactName, "from", uri)
@@ -186,7 +209,7 @@ func (c *HttpClient) DownloadFile(name string) (string, error) {
		return "", err
	}

-	client := grab.NewClient()
+	client := NewGrabClient()

	for _, uri := range c.RepoData.Urls {
@@ -27,6 +27,7 @@ import (
	"github.com/mudler/luet/pkg/bus"
	artifact "github.com/mudler/luet/pkg/compiler/types/artifact"
	"github.com/mudler/luet/pkg/config"
+	"github.com/mudler/luet/pkg/helpers"
	fileHelper "github.com/mudler/luet/pkg/helpers/file"
	"github.com/mudler/luet/pkg/helpers/match"
	. "github.com/mudler/luet/pkg/logger"
@@ -755,12 +756,63 @@ func (l *LuetInstaller) getFinalizers(allRepos pkg.PackageDatabase, solution sol
	return toFinalize, nil
}

+func (l *LuetInstaller) checkFileconflicts(toInstall map[string]ArtifactMatch, s *System) error {
+	Info("Checking for file conflicts..")
+	defer s.Clean() // Release memory
+
+	filesToInstall := []string{}
+	for _, m := range toInstall {
+		a, err := l.downloadPackage(m)
+		if err != nil && !l.Options.Force {
+			return errors.Wrap(err, "Failed downloading package")
+		}
+		files, err := a.FileList()
+		if err != nil && !l.Options.Force {
+			return errors.Wrapf(err, "Could not get filelist for %s", a.CompileSpec.Package.HumanReadableString())
+		}
+
+		for _, f := range files {
+			if helpers.Contains(filesToInstall, f) {
+				return fmt.Errorf(
+					"file conflict between packages to be installed",
+				)
+			}
+
+			exists, p, err := s.ExistsPackageFile(f)
+			if err != nil {
+				return errors.Wrap(err, "failed checking into system db")
+			}
+			if exists {
+				return fmt.Errorf(
+					"file conflict between '%s' and '%s' ( file: %s )",
+					p.HumanReadableString(),
+					m.Package.HumanReadableString(),
+					f,
+				)
+			}
+		}
+		filesToInstall = append(filesToInstall, files...)
+	}
+
+	return nil
+}
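
checkFileconflicts performs two checks per file: no duplicate within the transaction (via helpers.Contains, which is linear per lookup) and no clash with a file already owned by an installed package. The same logic can be expressed with a map-backed set for constant-time lookups; a self-contained sketch, where every name is a hypothetical stand-in for ArtifactMatch and ExistsPackageFile:

package main

import "fmt"

// conflictCheck sketches the two checks above: a file may neither appear
// twice in the transaction nor already be owned by an installed package.
// lookup stands in for System.ExistsPackageFile.
func conflictCheck(packages map[string][]string, lookup func(string) (bool, string)) error {
	seen := map[string]string{} // file -> package within this transaction
	for name, files := range packages {
		for _, f := range files {
			if other, ok := seen[f]; ok {
				return fmt.Errorf("file conflict between '%s' and '%s' ( file: %s )", other, name, f)
			}
			if exists, owner := lookup(f); exists {
				return fmt.Errorf("file conflict between '%s' and '%s' ( file: %s )", owner, name, f)
			}
			seen[f] = name
		}
	}
	return nil
}

func main() {
	installed := map[string]string{"f": "t/test-1"}
	err := conflictCheck(
		map[string][]string{"t/test2-1": {"barz", "f"}},
		func(f string) (bool, string) { o, ok := installed[f]; return ok, o },
	)
	fmt.Println(err) // file conflict between 't/test-1' and 't/test2-1' ( file: f )
}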

func (l *LuetInstaller) install(o Option, syncedRepos Repositories, toInstall map[string]ArtifactMatch, p pkg.Packages, solution solver.PackagesAssertions, allRepos pkg.PackageDatabase, s *System) error {
	// Install packages into rootfs in parallel.

	// Download packages in parallel first
	if err := l.download(syncedRepos, toInstall); err != nil {
		return errors.Wrap(err, "Downloading packages")
	}

+	// Check file conflicts
+	if err := l.checkFileconflicts(toInstall, s); err != nil {
+		if !l.Options.Force {
+			return errors.Wrap(err, "file conflict found")
+		} else {
+			Warning("file conflict found", err.Error())
+		}
+	}
+
	if l.Options.DownloadOnly {
		return nil
	}
@@ -1,6 +1,8 @@
package installer

import (
+	"sync"
+
	"github.com/hashicorp/go-multierror"
	"github.com/mudler/luet/pkg/helpers"
	fileHelper "github.com/mudler/luet/pkg/helpers/file"
@@ -10,8 +12,10 @@ import (
)

type System struct {
-	Database pkg.PackageDatabase
-	Target   string
+	Database  pkg.PackageDatabase
+	Target    string
+	fileIndex map[string]pkg.Package
+	sync.Mutex
}

func (s *System) World() (pkg.Packages, error) {
@@ -52,3 +56,38 @@ func (s *System) ExecuteFinalizers(packs []pkg.Package) error {
	}
	return errs
}

+func (s *System) buildFileIndex() {
+	s.Lock()
+	defer s.Unlock()
+	// Check if the cache is empty or if it got modified
+	if s.fileIndex == nil { //|| len(s.Database.GetPackages()) != len(s.fileIndex) {
+		s.fileIndex = make(map[string]pkg.Package)
+		for _, p := range s.Database.World() {
+			files, _ := s.Database.GetPackageFiles(p)
+			for _, f := range files {
+				s.fileIndex[f] = p
+			}
+		}
+	}
+}
+
+func (s *System) Clean() {
+	s.Lock()
+	defer s.Unlock()
+	s.fileIndex = nil
+}
+
+func (s *System) ExistsPackageFile(file string) (bool, pkg.Package, error) {
+	Debug("Checking if file ", file, " belongs to any package")
+	s.buildFileIndex()
+	s.Lock()
+	defer s.Unlock()
+	if p, exists := s.fileIndex[file]; exists {
+		Debug(file, "already belongs to", p.HumanReadableString())
+
+		return exists, p, nil
+	}
+	Debug(file, "doesn't belong to any package")
+	return false, nil, nil
+}
70
pkg/installer/system_test.go
Normal file
@@ -0,0 +1,70 @@
// Copyright © 2021 Ettore Di Giacinto <mudler@mocaccino.org>
//
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with this program; if not, see <http://www.gnu.org/licenses/>.

package installer_test

import (
	. "github.com/mudler/luet/pkg/installer"
	pkg "github.com/mudler/luet/pkg/package"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("System", func() {
	Context("Files", func() {
		var s *System
		var db pkg.PackageDatabase
		var a, b *pkg.DefaultPackage

		BeforeEach(func() {
			db = pkg.NewInMemoryDatabase(false)
			s = &System{Database: db}

			a = &pkg.DefaultPackage{Name: "test", Version: "1", Category: "t"}

			db.CreatePackage(a)
			db.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: a.GetFingerPrint(), Files: []string{"foo", "f"}})

			b = &pkg.DefaultPackage{Name: "test2", Version: "1", Category: "t"}

			db.CreatePackage(b)
			db.SetPackageFiles(&pkg.PackageFile{PackageFingerprint: b.GetFingerPrint(), Files: []string{"barz", "f"}})
		})

		It("detects when files are already shipped by other packages", func() {
			r, p, err := s.ExistsPackageFile("foo")
			Expect(r).To(BeTrue())
			Expect(err).ToNot(HaveOccurred())
			Expect(p).To(Equal(a))
			r, p, err = s.ExistsPackageFile("baz")
			Expect(r).To(BeFalse())
			Expect(err).ToNot(HaveOccurred())
			Expect(p).To(BeNil())

			r, p, err = s.ExistsPackageFile("f")
			Expect(r).To(BeTrue())
			Expect(err).ToNot(HaveOccurred())
			Expect(p).To(Equal(b))
			r, p, err = s.ExistsPackageFile("barz")
			Expect(r).To(BeTrue())
			Expect(err).ToNot(HaveOccurred())
			Expect(p).To(Equal(b))
		})
	})
})
6
tests/fixtures/fileconflicts/conflict1/build.yaml
vendored
Normal file
@@ -0,0 +1,6 @@
image: "alpine"
prelude:
- mkdir /foo
steps:
- echo conflict > /foo/test1
package_dir: /foo
7
tests/fixtures/fileconflicts/conflict1/collection.yaml
vendored
Normal file
@@ -0,0 +1,7 @@
packages:
- category: "test1"
  name: "conflict"
  version: "1.0"
- category: "test2"
  name: "conflict"
  version: "1.0"
@@ -92,7 +92,7 @@ testUnInstall() {
    assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"
    # TODO: we need remove it or not??
    assertTrue 'config protect created' "[ -e '$tmpdir/testrootfs/etc/a/._cfg0001_conf' ]"
-    assertTrue 'config protect maintains the protected files' "[ -e '$tmpdir/testrootfs/etc/a/conf' ]"
+    assertTrue 'config protect maintains the protected files' "[ ! -e '$tmpdir/testrootfs/etc/a/conf' ]"
}


@@ -87,7 +87,7 @@ testInstall() {


testUnInstall() {
-    luet uninstall -y --full --config $tmpdir/luet.yaml test/a
+    luet uninstall -y --full --keep-protected-files --config $tmpdir/luet.yaml test/a
    installst=$?
    assertEquals 'uninstall test successfully' "$installst" "0"
    assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"

@@ -86,7 +86,7 @@ testInstall() {


testUnInstall() {
-    luet uninstall -y --full --config $tmpdir/luet.yaml test/a
+    luet uninstall -y --full --keep-protected-files --config $tmpdir/luet.yaml test/a
    installst=$?
    assertEquals 'uninstall test successfully' "$installst" "0"
    assertTrue 'package uninstalled' "[ ! -e '$tmpdir/testrootfs/c' ]"

@@ -51,9 +51,9 @@ testBuild() {
    assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
    assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
    assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
-    assertContains 'Does use the upstream cache without specifying it test/c' "$build_output" "Images available remotely for test/c-1.0 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:ac34beb3fc2752bab54549db095dd6994d7531b88e1ff7f902d01ae80fdd030d"
-    assertContains 'Does use the upstream cache without specifying it test/z' "$build_output" "Images available remotely for test/z-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:d8b8881d50ae83646728bb28ad678fc14e4e003e4b0d0a66f8e6167c6116e024"
-    assertContains 'Does use the upstream cache without specifying it test/interpolated' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:2aaccd929ebe5683f95c70c6ec8d68f240ceea8633f3350b94904ad73da5fd47"
+    assertContains 'Does use the upstream cache without specifying it test/c' "$build_output" "Images available remotely for test/c-1.0 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:7a46e5c8893b574d9f984eaade75b0e68cc891bb6d01860ba91a62f9e4de2b62"
+    assertContains 'Does use the upstream cache without specifying it test/z' "$build_output" "Images available remotely for test/z-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:4277934ece4de17856b87294301cdfcdf3c6f956e682ff64697be67178b8a4b1"
+    assertContains 'Does use the upstream cache without specifying it test/interpolated' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:132c097844b4e809efa8ae234452893b0214a1c860371e21564d89655ab10a56"
}

testRepo() {

@@ -56,8 +56,8 @@ EOF
    assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
    assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
    assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
-    assertNotContains 'Does NOT use the upstream cache without specifying it' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:2aaccd929ebe5683f95c70c6ec8d68f240ceea8633f3350b94904ad73da5fd47"
-    assertContains 'Does generate a new hash as values changed build.yaml for test/interpolated-1.0+2 package image' "$build_output" "Building image luet/cache:c34b533cf76886c332fe9b2d3208f04265360a465a90c996cb4fcdaf959dee36 done"
+    assertNotContains 'Does NOT use the upstream cache without specifying it' "$build_output" "Images available remotely for test/interpolated-1.0+2 generating artifact from remote images: quay.io/mocaccinoos/integration-test-cache:132c097844b4e809efa8ae234452893b0214a1c860371e21564d89655ab10a56"
+    assertContains 'Does generate a new hash as values changed build.yaml for test/interpolated-1.0+2 package image' "$build_output" "Building image luet/cache:a7edf5c9ab219e406b64b2692ce08d56a8bcd212a43bb70df3737f13a008dfd4 done"
}

testRepo() {

@@ -56,7 +56,7 @@ EOF
    assertTrue 'create package' "[ -e '$tmpdir/testbuild/c-test-1.0.package.tar.zst' ]"
    assertTrue 'create package Z' "[ -e '$tmpdir/testbuild/z-test-1.0+2.package.tar.zst' ]"
    assertTrue 'create package interpolated' "[ -e '$tmpdir/testbuild/interpolated-test-1.0+2.package.tar.zst' ]"
-    assertContains 'Does use the upstream cache without specifying it' "$build_output" "Downloading image quay.io/mocaccinoos/integration-test-cache:4db24406e8db30a3310a1cf8c4d4e19597745e6d41b189dc51a73ac4a50cc9e6"
+    assertContains 'Does use the upstream cache without specifying it (test/c-1.0)' "$build_output" "quay.io/mocaccinoos/integration-test-cache:a786d3fd29d0b8bdfe5f304c8bf8be909d5c764cd7059c0e63294a8bff17f3ef"
}

testRepo() {
84
tests/integration/32_fileconflicts.sh
Executable file
@@ -0,0 +1,84 @@
#!/bin/bash

export LUET_NOLOCK=true

oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
rm -rf "$tmpdir"
}

testBuild() {
    mkdir $tmpdir/testbuild
    luet build --tree "$ROOT_DIR/tests/fixtures/fileconflicts" --destination $tmpdir/testbuild --compression gzip --all
    buildst=$?
    assertEquals 'builds successfully' "$buildst" "0"
    assertTrue 'create packages' "[ -e '$tmpdir/testbuild/conflict-test1-1.0.package.tar.gz' ]"
    assertTrue 'create packages' "[ -e '$tmpdir/testbuild/conflict-test2-1.0.package.tar.gz' ]"
}

testRepo() {
    assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
    luet create-repo --tree "$ROOT_DIR/tests/fixtures/fileconflicts" \
    --output $tmpdir/testbuild \
    --packages $tmpdir/testbuild \
    --name "test" \
    --descr "Test Repo" \
    --urls $tmpdir/testrootfs \
    --type disk > /dev/null

    createst=$?
    assertEquals 'create repo successfully' "$createst" "0"
    assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}

testConfig() {
    mkdir $tmpdir/testrootfs
    cat <<EOF > $tmpdir/luet.yaml
general:
  debug: true
system:
  rootfs: $tmpdir/testrootfs
  database_path: "/"
  database_engine: "boltdb"
config_from_host: true
repositories:
   - name: "main"
     type: "disk"
     enable: true
     urls:
       - "$tmpdir/testbuild"
EOF
    luet config --config $tmpdir/luet.yaml
    res=$?
    assertEquals 'config test successfully' "$res" "0"
}

testInstall() {
    # Both packages ship the same file, so installing them together must fail
    luet install -y --config $tmpdir/luet.yaml test1/conflict test2/conflict
    installst=$?
    assertEquals 'install test failed' "$installst" "1"
}

testReInstall() {
    luet install -y --config $tmpdir/luet.yaml test1/conflict
    installst=$?
    assertEquals 'install test succeeded' "$installst" "0"
    # The second package conflicts with a file owned by the first
    luet install -y --config $tmpdir/luet.yaml test2/conflict
    installst=$?
    assertEquals 'conflicting install fails' "$installst" "1"
    # --force downgrades the conflict to a warning
    luet install -y --force --config $tmpdir/luet.yaml test2/conflict
    installst=$?
    assertEquals 'forced install succeeds' "$installst" "0"
}

# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
70
tests/integration/33_reinstall.sh
Executable file
@@ -0,0 +1,70 @@
#!/bin/bash

export LUET_NOLOCK=true

oneTimeSetUp() {
export tmpdir="$(mktemp -d)"
}

oneTimeTearDown() {
rm -rf "$tmpdir"
}

testBuild() {
    mkdir $tmpdir/testbuild
    luet build --tree "$ROOT_DIR/tests/fixtures/fileconflicts" --destination $tmpdir/testbuild --compression gzip --all
    buildst=$?
    assertEquals 'builds successfully' "$buildst" "0"
    assertTrue 'create packages' "[ -e '$tmpdir/testbuild/conflict-test1-1.0.package.tar.gz' ]"
    assertTrue 'create packages' "[ -e '$tmpdir/testbuild/conflict-test2-1.0.package.tar.gz' ]"
}

testRepo() {
    assertTrue 'no repository' "[ ! -e '$tmpdir/testbuild/repository.yaml' ]"
    luet create-repo --tree "$ROOT_DIR/tests/fixtures/fileconflicts" \
    --output $tmpdir/testbuild \
    --packages $tmpdir/testbuild \
    --name "test" \
    --descr "Test Repo" \
    --urls $tmpdir/testrootfs \
    --type disk > /dev/null

    createst=$?
    assertEquals 'create repo successfully' "$createst" "0"
    assertTrue 'create repository' "[ -e '$tmpdir/testbuild/repository.yaml' ]"
}

testConfig() {
    mkdir $tmpdir/testrootfs
    cat <<EOF > $tmpdir/luet.yaml
general:
  debug: true
system:
  rootfs: $tmpdir/testrootfs
  database_path: "/"
  database_engine: "boltdb"
config_from_host: true
repositories:
   - name: "main"
     type: "disk"
     enable: true
     urls:
       - "$tmpdir/testbuild"
EOF
    luet config --config $tmpdir/luet.yaml
    res=$?
    assertEquals 'config test successfully' "$res" "0"
}

testReInstall() {
    luet install -y --config $tmpdir/luet.yaml test1/conflict
    installst=$?
    assertEquals 'install test succeeded' "$installst" "0"
    luet reinstall -y --config $tmpdir/luet.yaml test1/conflict
    installst=$?
    assertEquals 'reinstall test succeeded' "$installst" "0"
}

# Load shUnit2.
. "$ROOT_DIR/tests/integration/shunit2"/shunit2
5479
vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
318
vendor/github.com/containerd/containerd/api/services/content/v1/content.proto
generated
vendored
Normal file
@@ -0,0 +1,318 @@
syntax = "proto3";

package containerd.services.content.v1;

import weak "gogoproto/gogo.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/empty.proto";

option go_package = "github.com/containerd/containerd/api/services/content/v1;content";

// Content provides access to a content addressable storage system.
service Content {
	// Info returns information about a committed object.
	//
	// This call can be used for getting the size of content and checking for
	// existence.
	rpc Info(InfoRequest) returns (InfoResponse);

	// Update updates content metadata.
	//
	// This call can be used to manage the mutable content labels. The
	// immutable metadata such as digest, size, and committed at cannot
	// be updated.
	rpc Update(UpdateRequest) returns (UpdateResponse);

	// List streams the entire set of content as Info objects and closes the
	// stream.
	//
	// Typically, this will yield a large response, chunked into messages.
	// Clients should make provisions to ensure they can handle the entire data
	// set.
	rpc List(ListContentRequest) returns (stream ListContentResponse);

	// Delete will delete the referenced object.
	rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);

	// Read allows one to read an object based on the offset into the content.
	//
	// The requested data may be returned in one or more messages.
	rpc Read(ReadContentRequest) returns (stream ReadContentResponse);

	// Status returns the status for a single reference.
	rpc Status(StatusRequest) returns (StatusResponse);

	// ListStatuses returns the status of ongoing object ingestions, started via
	// Write.
	//
	// Only those matching the regular expression will be provided in the
	// response. If the provided regular expression is empty, all ingestions
	// will be provided.
	rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);

	// Write begins or resumes writes to a resource identified by a unique ref.
	// Only one active stream may exist at a time for each ref.
	//
	// Once a write stream has started, it may only write to a single ref, thus
	// once a stream is started, the ref may be omitted on subsequent writes.
	//
	// For any write transaction represented by a ref, only a single write may
	// be made to a given offset. If overlapping writes occur, it is an error.
	// Writes should be sequential and implementations may throw an error if
	// this is required.
	//
	// If expected_digest is set and already part of the content store, the
	// write will fail.
	//
	// When completed, the commit flag should be set to true. If expected size
	// or digest is set, the content will be validated against those values.
	rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);

	// Abort cancels the ongoing write named in the request. Any resources
	// associated with the write will be collected.
	rpc Abort(AbortRequest) returns (google.protobuf.Empty);
}

message Info {
	// Digest is the hash identity of the blob.
	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];

	// Size is the total number of bytes in the blob.
	int64 size = 2;

	// CreatedAt provides the time at which the blob was committed.
	google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];

	// UpdatedAt provides the time the info was last updated.
	google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];

	// Labels are arbitrary data on snapshots.
	//
	// The combined size of a key/value pair cannot exceed 4096 bytes.
	map<string, string> labels = 5;
}

message InfoRequest {
	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}

message InfoResponse {
	Info info = 1 [(gogoproto.nullable) = false];
}

message UpdateRequest {
	Info info = 1 [(gogoproto.nullable) = false];

	// UpdateMask specifies which fields to perform the update on. If empty,
	// the operation applies to all fields.
	//
	// In info, Digest, Size, and CreatedAt are immutable,
	// other field may be updated using this mask.
	// If no mask is provided, all mutable field are updated.
	google.protobuf.FieldMask update_mask = 2;
}

message UpdateResponse {
	Info info = 1 [(gogoproto.nullable) = false];
}

message ListContentRequest {
	// Filters contains one or more filters using the syntax defined in the
	// containerd filter package.
	//
	// The returned result will be those that match any of the provided
	// filters. Expanded, containers that match the following will be
	// returned:
	//
	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
	//
	// If filters is zero-length or nil, all items will be returned.
	repeated string filters = 1;
}

message ListContentResponse {
	repeated Info info = 1 [(gogoproto.nullable) = false];
}

message DeleteContentRequest {
	// Digest specifies which content to delete.
	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}

// ReadContentRequest defines the fields that make up a request to read a portion of
// data from a stored object.
message ReadContentRequest {
	// Digest is the hash identity to read.
	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];

	// Offset specifies the number of bytes from the start at which to begin
	// the read. If zero or less, the read will be from the start. This uses
	// standard zero-indexed semantics.
	int64 offset = 2;

	// size is the total size of the read. If zero, the entire blob will be
	// returned by the service.
	int64 size = 3;
}

// ReadContentResponse carries byte data for a read request.
message ReadContentResponse {
	int64 offset = 1; // offset of the returned data
	bytes data = 2;   // actual data
}

message Status {
	google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	string ref = 3;
	int64 offset = 4;
	int64 total = 5;
	string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}

message StatusRequest {
	string ref = 1;
}

message StatusResponse {
	Status status = 1;
}

message ListStatusesRequest {
	repeated string filters = 1;
}

message ListStatusesResponse {
	repeated Status statuses = 1 [(gogoproto.nullable) = false];
}

// WriteAction defines the behavior of a WriteRequest.
enum WriteAction {
	option (gogoproto.goproto_enum_prefix) = false;
	option (gogoproto.enum_customname) = "WriteAction";
|
||||
|
||||
// WriteActionStat instructs the writer to return the current status while
|
||||
// holding the lock on the write.
|
||||
STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"];
|
||||
|
||||
// WriteActionWrite sets the action for the write request to write data.
|
||||
//
|
||||
// Any data included will be written at the provided offset. The
|
||||
// transaction will be left open for further writes.
|
||||
//
|
||||
// This is the default.
|
||||
WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"];
|
||||
|
||||
// WriteActionCommit will write any outstanding data in the message and
|
||||
// commit the write, storing it under the digest.
|
||||
//
|
||||
// This can be used in a single message to send the data, verify it and
|
||||
// commit it.
|
||||
//
|
||||
// This action will always terminate the write.
|
||||
COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
|
||||
}
|
||||
|
||||
// WriteContentRequest writes data to the request ref at offset.
|
||||
message WriteContentRequest {
|
||||
// Action sets the behavior of the write.
|
||||
//
|
||||
// When this is a write and the ref is not yet allocated, the ref will be
|
||||
// allocated and the data will be written at offset.
|
||||
//
|
||||
// If the action is write and the ref is allocated, it will accept data to
|
||||
// an offset that has not yet been written.
|
||||
//
|
||||
// If the action is write and there is no data, the current write status
|
||||
// will be returned. This works differently from status because the stream
|
||||
// holds a lock.
|
||||
WriteAction action = 1;
|
||||
|
||||
// Ref identifies the pre-commit object to write to.
|
||||
string ref = 2;
|
||||
|
||||
// Total can be set to have the service validate the total size of the
|
||||
// committed content.
|
||||
//
|
||||
// The latest value before or with the commit action message will be use to
|
||||
// validate the content. If the offset overflows total, the service may
|
||||
// report an error. It is only required on one message for the write.
|
||||
//
|
||||
// If the value is zero or less, no validation of the final content will be
|
||||
// performed.
|
||||
int64 total = 3;
|
||||
|
||||
// Expected can be set to have the service validate the final content against
|
||||
// the provided digest.
|
||||
//
|
||||
// If the digest is already present in the object store, an AlreadyExists
|
||||
// error will be returned.
|
||||
//
|
||||
// Only the latest version will be used to check the content against the
|
||||
// digest. It is only required to include it on a single message, before or
|
||||
// with the commit action message.
|
||||
string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||
|
||||
// Offset specifies the number of bytes from the start at which to begin
|
||||
// the write. For most implementations, this means from the start of the
|
||||
// file. This uses standard, zero-indexed semantics.
|
||||
//
|
||||
// If the action is write, the remote may remove all previously written
|
||||
// data after the offset. Implementations may support arbitrary offsets but
|
||||
// MUST support reseting this value to zero with a write. If an
|
||||
// implementation does not support a write at a particular offset, an
|
||||
// OutOfRange error must be returned.
|
||||
int64 offset = 5;
|
||||
|
||||
// Data is the actual bytes to be written.
|
||||
//
|
||||
// If this is empty and the message is not a commit, a response will be
|
||||
// returned with the current write state.
|
||||
bytes data = 6;
|
||||
|
||||
// Labels are arbitrary data on snapshots.
|
||||
//
|
||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
||||
map<string, string> labels = 7;
|
||||
}
|
||||
|
||||
// WriteContentResponse is returned on the culmination of a write call.
|
||||
message WriteContentResponse {
|
||||
// Action contains the action for the final message of the stream. A writer
|
||||
// should confirm that they match the intended result.
|
||||
WriteAction action = 1;
|
||||
|
||||
// StartedAt provides the time at which the write began.
|
||||
//
|
||||
// This must be set for stat and commit write actions. All other write
|
||||
// actions may omit this.
|
||||
google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
|
||||
// UpdatedAt provides the last time of a successful write.
|
||||
//
|
||||
// This must be set for stat and commit write actions. All other write
|
||||
// actions may omit this.
|
||||
google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
|
||||
// Offset is the current committed size for the write.
|
||||
int64 offset = 4;
|
||||
|
||||
// Total provides the current, expected total size of the write.
|
||||
//
|
||||
// We include this to provide consistency with the Status structure on the
|
||||
// client writer.
|
||||
//
|
||||
// This is only valid on the Stat and Commit response.
|
||||
int64 total = 5;
|
||||
|
||||
// Digest, if present, includes the digest up to the currently committed
|
||||
// bytes. If action is commit, this field will be set. It is implementation
|
||||
// defined if this is set for other actions.
|
||||
string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message AbortRequest {
|
||||
string ref = 1;
|
||||
}
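The comments on Write above allow the whole protocol to collapse into a single message: send the data, set the validation fields, and commit at once with WriteActionCommit. A minimal sketch in Go, assuming the generated bindings live at github.com/containerd/containerd/api/services/content/v1 and use the names produced by the gogoproto options above:

package main

import (
	contentapi "github.com/containerd/containerd/api/services/content/v1" // assumed import path
	digest "github.com/opencontainers/go-digest"
)

// commitInOne builds a WriteContentRequest that writes data from offset 0,
// sets Total and Expected for validation, and commits in one message, as the
// WriteActionCommit comment above permits.
func commitInOne(ref string, data []byte) *contentapi.WriteContentRequest {
	return &contentapi.WriteContentRequest{
		Action:   contentapi.WriteActionCommit, // always terminates the write
		Ref:      ref,                          // pre-commit transaction key
		Offset:   0,                            // write from the start
		Total:    int64(len(data)),             // validated at commit
		Expected: digest.FromBytes(data),       // validated at commit
		Data:     data,
	}
}

The message would be sent on the bidirectional Write stream; since only one active stream may exist per ref, a second stream for the same ref must wait for this one to finish.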
266 vendor/github.com/containerd/containerd/archive/compression/compression.go generated vendored Normal file
@@ -0,0 +1,266 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package compression

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"os"
	"os/exec"
	"strconv"
	"sync"

	"github.com/containerd/containerd/log"
)

type (
	// Compression is the state that represents whether content is compressed.
	Compression int
)

const (
	// Uncompressed represents uncompressed data.
	Uncompressed Compression = iota
	// Gzip is the gzip compression algorithm.
	Gzip
)

const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ"

var (
	initPigz   sync.Once
	unpigzPath string
)

var (
	bufioReader32KPool = &sync.Pool{
		New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
	}
)

// DecompressReadCloser includes the stream after decompression and the compression method detected.
type DecompressReadCloser interface {
	io.ReadCloser
	// GetCompression returns the compression method used before decompression
	GetCompression() Compression
}

type readCloserWrapper struct {
	io.Reader
	compression Compression
	closer      func() error
}

func (r *readCloserWrapper) Close() error {
	if r.closer != nil {
		return r.closer()
	}
	return nil
}

func (r *readCloserWrapper) GetCompression() Compression {
	return r.compression
}

type writeCloserWrapper struct {
	io.Writer
	closer func() error
}

func (w *writeCloserWrapper) Close() error {
	if w.closer != nil {
		w.closer()
	}
	return nil
}

type bufferedReader struct {
	buf *bufio.Reader
}

func newBufferedReader(r io.Reader) *bufferedReader {
	buf := bufioReader32KPool.Get().(*bufio.Reader)
	buf.Reset(r)
	return &bufferedReader{buf}
}

func (r *bufferedReader) Read(p []byte) (n int, err error) {
	if r.buf == nil {
		return 0, io.EOF
	}
	n, err = r.buf.Read(p)
	if err == io.EOF {
		r.buf.Reset(nil)
		bufioReader32KPool.Put(r.buf)
		r.buf = nil
	}
	return
}

func (r *bufferedReader) Peek(n int) ([]byte, error) {
	if r.buf == nil {
		return nil, io.EOF
	}
	return r.buf.Peek(n)
}

// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
	for compression, m := range map[Compression][]byte{
		Gzip: {0x1F, 0x8B, 0x08},
	} {
		if len(source) < len(m) {
			// Len too short
			continue
		}
		if bytes.Equal(m, source[:len(m)]) {
			return compression
		}
	}
	return Uncompressed
}

// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (DecompressReadCloser, error) {
	buf := newBufferedReader(archive)
	bs, err := buf.Peek(10)
	if err != nil && err != io.EOF {
		// Note: we'll ignore any io.EOF error because there are some odd
		// cases where the layer.tar file will be empty (zero bytes) and
		// that results in an io.EOF from the Peek() call. So, in those
		// cases we'll just treat it as a non-compressed stream and
		// that means just create an empty layer.
		// See Issue docker/docker#18170
		return nil, err
	}

	switch compression := DetectCompression(bs); compression {
	case Uncompressed:
		return &readCloserWrapper{
			Reader:      buf,
			compression: compression,
		}, nil
	case Gzip:
		ctx, cancel := context.WithCancel(context.Background())
		gzReader, err := gzipDecompress(ctx, buf)
		if err != nil {
			cancel()
			return nil, err
		}

		return &readCloserWrapper{
			Reader:      gzReader,
			compression: compression,
			closer: func() error {
				cancel()
				return gzReader.Close()
			},
		}, nil

	default:
		return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
	}
}

// CompressStream compresses to dest with the specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
	switch compression {
	case Uncompressed:
		return &writeCloserWrapper{dest, nil}, nil
	case Gzip:
		return gzip.NewWriter(dest), nil
	default:
		return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
	}
}

// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string {
	switch *compression {
	case Gzip:
		return "gz"
	}
	return ""
}

func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
	initPigz.Do(func() {
		if unpigzPath = detectPigz(); unpigzPath != "" {
			log.L.Debug("using pigz for decompression")
		}
	})

	if unpigzPath == "" {
		return gzip.NewReader(buf)
	}

	return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
}

func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) {
	reader, writer := io.Pipe()

	cmd.Stdin = in
	cmd.Stdout = writer

	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf

	if err := cmd.Start(); err != nil {
		return nil, err
	}

	go func() {
		if err := cmd.Wait(); err != nil {
			writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
		} else {
			writer.Close()
		}
	}()

	return reader, nil
}

func detectPigz() string {
	path, err := exec.LookPath("unpigz")
	if err != nil {
		log.L.WithError(err).Debug("unpigz not found, falling back to go gzip")
		return ""
	}

	// Check if pigz is disabled via the CONTAINERD_DISABLE_PIGZ env variable
	value := os.Getenv(disablePigzEnv)
	if value == "" {
		return path
	}

	disable, err := strconv.ParseBool(value)
	if err != nil {
		log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value)
		return path
	}

	if disable {
		return ""
	}

	return path
}
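A quick round trip through the helpers above shows how detection works in practice. This sketch uses only APIs defined in this file plus the standard library:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/containerd/containerd/archive/compression"
)

func main() {
	// Compress a payload with CompressStream...
	var buf bytes.Buffer
	w, err := compression.CompressStream(&buf, compression.Gzip)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello"))
	w.Close()

	// ...then let DecompressStream sniff the gzip magic bytes
	// ({0x1F, 0x8B, 0x08}) and unwrap the stream.
	rc, err := compression.DecompressStream(&buf)
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	data, _ := ioutil.ReadAll(rc)
	fmt.Println(rc.GetCompression() == compression.Gzip, string(data)) // true hello
}

Note that decompression transparently shells out to unpigz when it is on PATH and not disabled via CONTAINERD_DISABLE_PIGZ; the caller sees the same DecompressReadCloser either way.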
112 vendor/github.com/containerd/containerd/containers/containers.go generated vendored Normal file
@@ -0,0 +1,112 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package containers

import (
	"context"
	"time"

	"github.com/gogo/protobuf/types"
)

// Container represents the set of data pinned by a container. Unless otherwise
// noted, the resources here are considered in use by the container.
//
// The resources specified in this object are used to create tasks from the container.
type Container struct {
	// ID uniquely identifies the container in a namespace.
	//
	// This property is required and cannot be changed after creation.
	ID string

	// Labels provide metadata extension for a container.
	//
	// These are optional and fully mutable.
	Labels map[string]string

	// Image specifies the image reference used for a container.
	//
	// This property is optional and mutable.
	Image string

	// Runtime specifies which runtime should be used when launching container
	// tasks.
	//
	// This property is required and immutable.
	Runtime RuntimeInfo

	// Spec should carry the runtime specification used to implement the
	// container.
	//
	// This field is required but mutable.
	Spec *types.Any

	// SnapshotKey specifies the snapshot key to use for the container's root
	// filesystem. When starting a task from this container, a caller should
	// look up the mounts from the snapshot service and include those on the
	// task create request.
	//
	// This field is not required but mutable.
	SnapshotKey string

	// Snapshotter specifies the snapshotter name used for the rootfs.
	//
	// This field is not required but immutable.
	Snapshotter string

	// CreatedAt is the time at which the container was created.
	CreatedAt time.Time

	// UpdatedAt is the time at which the container was updated.
	UpdatedAt time.Time

	// Extensions stores client-specified metadata.
	Extensions map[string]types.Any
}

// RuntimeInfo holds runtime-specific information.
type RuntimeInfo struct {
	Name    string
	Options *types.Any
}

// Store interacts with the underlying container storage.
type Store interface {
	// Get a container using the id.
	//
	// A Container object is returned on success. If the id is not known to the
	// store, an error will be returned.
	Get(ctx context.Context, id string) (Container, error)

	// List returns containers that match one or more of the provided filters.
	List(ctx context.Context, filters ...string) ([]Container, error)

	// Create a container in the store from the provided container.
	Create(ctx context.Context, container Container) (Container, error)

	// Update the container with the provided container object. ID must be set.
	//
	// If one or more fieldpaths are provided, only the fields corresponding to
	// the fieldpaths will be mutated.
	Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error)

	// Delete a container using the id.
	//
	// nil will be returned on success. If the container is not known to the
	// store, ErrNotFound will be returned.
	Delete(ctx context.Context, id string) error
}
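The fieldpath argument on Update is what lets a caller mutate a single label without clobbering the rest of the record. A small sketch against the Store interface above; the "labels.<key>" fieldpath convention is an assumption here, mirrored from the masking logic the content store's Update implements further down in this diff:

package main

import (
	"context"

	"github.com/containerd/containerd/containers"
)

// setLabel updates one label through Store.Update, passing a fieldpath so
// only that label is touched by implementations that honor "labels.<key>".
func setLabel(ctx context.Context, cs containers.Store, id, key, value string) (containers.Container, error) {
	c, err := cs.Get(ctx, id)
	if err != nil {
		return containers.Container{}, err
	}
	if c.Labels == nil {
		c.Labels = map[string]string{}
	}
	c.Labels[key] = value
	return cs.Update(ctx, c, "labels."+key)
}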
182 vendor/github.com/containerd/containerd/content/content.go generated vendored Normal file
@@ -0,0 +1,182 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package content

import (
	"context"
	"io"
	"time"

	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer
type ReaderAt interface {
	io.ReaderAt
	io.Closer
	Size() int64
}

// Provider provides a reader interface for specific content
type Provider interface {
	// ReaderAt only requires desc.Digest to be set.
	// Other fields in the descriptor may be used internally for resolving
	// the location of the actual data.
	ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error)
}

// Ingester writes content
type Ingester interface {
	// Some implementations require WithRef to be included in opts.
	Writer(ctx context.Context, opts ...WriterOpt) (Writer, error)
}

// Info holds content specific information
//
// TODO(stevvooe): Consider a very different name for this struct. Info is way
// too general. It also reads very weirdly in certain contexts, like pluralization.
type Info struct {
	Digest    digest.Digest
	Size      int64
	CreatedAt time.Time
	UpdatedAt time.Time
	Labels    map[string]string
}

// Status of a content operation
type Status struct {
	Ref       string
	Offset    int64
	Total     int64
	Expected  digest.Digest
	StartedAt time.Time
	UpdatedAt time.Time
}

// WalkFunc defines the callback for a blob walk.
type WalkFunc func(Info) error

// Manager provides methods for inspecting, listing and removing content.
type Manager interface {
	// Info will return metadata about content available in the content store.
	//
	// If the content is not present, ErrNotFound will be returned.
	Info(ctx context.Context, dgst digest.Digest) (Info, error)

	// Update updates mutable information related to content.
	// If one or more fieldpaths are provided, only those
	// fields will be updated.
	// Mutable fields:
	//  labels.*
	Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)

	// Walk will call fn for each item in the content store which
	// matches the provided filters. If no filters are given, all
	// items will be walked.
	Walk(ctx context.Context, fn WalkFunc, filters ...string) error

	// Delete removes the content from the store.
	Delete(ctx context.Context, dgst digest.Digest) error
}

// IngestManager provides methods for managing ingests.
type IngestManager interface {
	// Status returns the status of the provided ref.
	Status(ctx context.Context, ref string) (Status, error)

	// ListStatuses returns the status of any active ingestions whose ref match the
	// provided regular expression. If empty, all active ingestions will be
	// returned.
	ListStatuses(ctx context.Context, filters ...string) ([]Status, error)

	// Abort completely cancels the ingest operation targeted by ref.
	Abort(ctx context.Context, ref string) error
}

// Writer handles the write of content into a content store
type Writer interface {
	// Close closes the writer. If the writer has not been
	// committed, this allows resuming or aborting.
	// Calling Close on a closed writer will not error.
	io.WriteCloser

	// Digest may return an empty digest or panic until committed.
	Digest() digest.Digest

	// Commit commits the blob (but no roll-back is guaranteed on an error).
	// size and expected can be zero-value when unknown.
	// Commit always closes the writer, even on error.
	// ErrAlreadyExists aborts the writer.
	Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error

	// Status returns the current state of write
	Status() (Status, error)

	// Truncate updates the size of the target blob
	Truncate(size int64) error
}

// Store combines the methods of content-oriented interfaces into a set that
// are commonly provided by complete implementations.
type Store interface {
	Manager
	Provider
	IngestManager
	Ingester
}

// Opt is used to alter the mutable properties of content
type Opt func(*Info) error

// WithLabels allows labels to be set on content
func WithLabels(labels map[string]string) Opt {
	return func(info *Info) error {
		info.Labels = labels
		return nil
	}
}

// WriterOpts is internally used by WriterOpt.
type WriterOpts struct {
	Ref  string
	Desc ocispec.Descriptor
}

// WriterOpt is used for passing options to Ingester.Writer.
type WriterOpt func(*WriterOpts) error

// WithDescriptor specifies an OCI descriptor.
// Writer may optionally use the descriptor internally for resolving
// the location of the actual data.
// Write does not require any field of desc to be set.
// If the data size is unknown, desc.Size should be set to 0.
// Some implementations may also accept negative values as "unknown".
func WithDescriptor(desc ocispec.Descriptor) WriterOpt {
	return func(opts *WriterOpts) error {
		opts.Desc = desc
		return nil
	}
}

// WithRef specifies a ref string.
func WithRef(ref string) WriterOpt {
	return func(opts *WriterOpts) error {
		opts.Ref = ref
		return nil
	}
}
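Putting the Ingester and Writer contracts above together, a minimal ingest helper might look like the following sketch. It relies only on interfaces declared in this file plus go-digest; the deferred Close is safe because Commit always closes the writer and closing an already-closed writer does not error:

package main

import (
	"context"

	"github.com/containerd/containerd/content"
	digest "github.com/opencontainers/go-digest"
)

// ingest writes blob under ref and commits it against its computed digest
// and length, so the store validates what was actually received.
func ingest(ctx context.Context, ing content.Ingester, ref string, blob []byte) error {
	w, err := ing.Writer(ctx, content.WithRef(ref))
	if err != nil {
		return err
	}
	defer w.Close() // no-op after a successful Commit

	if _, err := w.Write(blob); err != nil {
		return err
	}
	return w.Commit(ctx, int64(len(blob)), digest.FromBytes(blob))
}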
237 vendor/github.com/containerd/containerd/content/helpers.go generated vendored Normal file
@@ -0,0 +1,237 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package content

import (
	"context"
	"io"
	"io/ioutil"
	"math/rand"
	"sync"
	"time"

	"github.com/containerd/containerd/errdefs"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 1<<20)
		return &buffer
	},
}

// NewReader returns an io.Reader from a ReaderAt
func NewReader(ra ReaderAt) io.Reader {
	rd := io.NewSectionReader(ra, 0, ra.Size())
	return rd
}

// ReadBlob retrieves the entire contents of the blob from the provider.
//
// Avoid using this for large blobs, such as layers.
func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) {
	ra, err := provider.ReaderAt(ctx, desc)
	if err != nil {
		return nil, err
	}
	defer ra.Close()

	p := make([]byte, ra.Size())

	n, err := ra.ReadAt(p, 0)
	if err == io.EOF {
		if int64(n) != ra.Size() {
			err = io.ErrUnexpectedEOF
		} else {
			err = nil
		}
	}
	return p, err
}

// WriteBlob writes data with the expected digest into the content store. If
// expected already exists, the method returns immediately and the reader will
// not be consumed.
//
// This is useful when the digest and size are known beforehand.
//
// Copy is buffered, so there is no need to wrap the reader in buffered io.
func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error {
	cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc))
	if err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return errors.Wrap(err, "failed to open writer")
		}

		return nil // already present
	}
	defer cw.Close()

	return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...)
}

// OpenWriter opens a new writer for the given reference, retrying while the
// writer is locked, until the reference is available or an error is returned.
func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) {
	var (
		cw    Writer
		err   error
		retry = 16
	)
	for {
		cw, err = cs.Writer(ctx, opts...)
		if err != nil {
			if !errdefs.IsUnavailable(err) {
				return nil, err
			}

			// TODO: Check status to determine if the writer is active,
			// continue waiting while active, otherwise return lock
			// error or abort. Requires asserting for an ingest manager

			select {
			case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))):
				if retry < 2048 {
					retry = retry << 1
				}
				continue
			case <-ctx.Done():
				// Propagate lock error
				return nil, err
			}

		}
		break
	}

	return cw, err
}

// Copy copies data with the expected digest from the reader into the
// provided content store writer. This copy commits the writer.
//
// This is useful when the digest and size are known beforehand. When
// the size or digest is unknown, these values may be empty.
//
// Copy is buffered, so there is no need to wrap the reader in buffered io.
func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
	ws, err := cw.Status()
	if err != nil {
		return errors.Wrap(err, "failed to get status")
	}

	if ws.Offset > 0 {
		r, err = seekReader(r, ws.Offset, size)
		if err != nil {
			return errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
		}
	}

	if _, err := copyWithBuffer(cw, r); err != nil {
		return errors.Wrap(err, "failed to copy")
	}

	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
		}
	}

	return nil
}

// CopyReaderAt copies to a writer from a given reader at for the given
// number of bytes. This copy does not commit the writer.
func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
	ws, err := cw.Status()
	if err != nil {
		return err
	}

	_, err = copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
	return err
}

// CopyReader copies to a writer from a given reader, returning
// the number of bytes copied.
// Note: if the writer has a non-zero offset, the total number
// of bytes read may be greater than those copied if the reader
// is not an io.Seeker.
// This copy does not commit the writer.
func CopyReader(cw Writer, r io.Reader) (int64, error) {
	ws, err := cw.Status()
	if err != nil {
		return 0, errors.Wrap(err, "failed to get status")
	}

	if ws.Offset > 0 {
		r, err = seekReader(r, ws.Offset, 0)
		if err != nil {
			return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
		}
	}

	return copyWithBuffer(cw, r)
}

// seekReader attempts to seek the reader to the given offset, either by
// resolving `io.Seeker`, by detecting `io.ReaderAt`, or by discarding
// up to the given offset.
func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
	// attempt to resolve r as a seeker and setup the offset.
	seeker, ok := r.(io.Seeker)
	if ok {
		nn, err := seeker.Seek(offset, io.SeekStart)
		if nn != offset {
			return nil, errors.Wrapf(err, "failed to seek to offset %v", offset)
		}

		if err != nil {
			return nil, err
		}

		return r, nil
	}

	// ok, let's try io.ReaderAt!
	readerAt, ok := r.(io.ReaderAt)
	if ok && size > offset {
		sr := io.NewSectionReader(readerAt, offset, size)
		return sr, nil
	}

	// well then, let's just discard up to the offset
	n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset))
	if err != nil {
		return nil, errors.Wrap(err, "failed to discard to offset")
	}
	if n != offset {
		return nil, errors.Errorf("unable to discard to offset")
	}

	return r, nil
}

func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
	buf := bufPool.Get().(*[]byte)
	written, err = io.CopyBuffer(dst, src, *buf)
	bufPool.Put(buf)
	return
}
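These helpers compose with the local store added later in this diff. A sketch of a full write/read round trip; the directory path is illustrative:

package main

import (
	"bytes"
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/local"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	ctx := context.Background()
	cs, err := local.NewStore("/tmp/content-example") // hypothetical path
	if err != nil {
		panic(err)
	}

	blob := []byte("hello blob")
	desc := ocispec.Descriptor{
		Digest: digest.FromBytes(blob),
		Size:   int64(len(blob)),
	}

	// WriteBlob opens a writer (retrying via OpenWriter if the ref is locked)
	// and commits against desc.Size and desc.Digest; a second call with the
	// same descriptor returns nil immediately because the blob already exists.
	if err := content.WriteBlob(ctx, cs, "example-ref", bytes.NewReader(blob), desc); err != nil {
		panic(err)
	}

	// ReadBlob fetches the whole blob back through the Provider side.
	out, err := content.ReadBlob(ctx, cs, desc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}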
51 vendor/github.com/containerd/containerd/content/local/locks.go generated vendored Normal file
@@ -0,0 +1,51 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package local

import (
	"sync"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// Handles locking references

var (
	// locks lets us lock in process
	locks   = map[string]struct{}{}
	locksMu sync.Mutex
)

func tryLock(ref string) error {
	locksMu.Lock()
	defer locksMu.Unlock()

	if _, ok := locks[ref]; ok {
		return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", ref)
	}

	locks[ref] = struct{}{}
	return nil
}

func unlock(ref string) {
	locksMu.Lock()
	defer locksMu.Unlock()

	delete(locks, ref)
}
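These package-level locks are what make a second concurrent Writer call for the same ref fail. A sketch of the caller-visible contract (the lock functions themselves are unexported, so this goes through the store's public API, shown in store.go below):

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
)

// demoLockContention shows the effect of tryLock/unlock: while one writer
// holds a ref, a second Writer call for the same ref is retried internally
// by the store and then surfaces an Unavailable error.
func demoLockContention(ctx context.Context, cs content.Store) {
	w1, err := cs.Writer(ctx, content.WithRef("shared-ref"))
	if err != nil {
		panic(err)
	}
	defer w1.Close() // Close releases the ref via unlock

	_, err = cs.Writer(ctx, content.WithRef("shared-ref"))
	fmt.Println(errdefs.IsUnavailable(err)) // true while w1 remains open
}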
40 vendor/github.com/containerd/containerd/content/local/readerat.go generated vendored Normal file
@@ -0,0 +1,40 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package local

import (
	"os"
)

// sizeReaderAt implements content.ReaderAt by pairing an open file with its
// known size, so callers can read at arbitrary offsets and query Size without
// an extra stat.
type sizeReaderAt struct {
	size int64
	fp   *os.File
}

func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) {
	return ra.fp.ReadAt(p, offset)
}

func (ra sizeReaderAt) Size() int64 {
	return ra.size
}

func (ra sizeReaderAt) Close() error {
	return ra.fp.Close()
}
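sizeReaderAt is what the local store hands back from ReaderAt; combined with content.NewReader from helpers.go above, it yields a plain io.Reader. A small sketch:

package main

import (
	"context"
	"io"
	"os"

	"github.com/containerd/containerd/content"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// streamBlob copies a blob to stdout by wrapping the store's ReaderAt in a
// section reader via content.NewReader (defined in helpers.go above).
func streamBlob(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) error {
	ra, err := provider.ReaderAt(ctx, desc) // only desc.Digest needs to be set
	if err != nil {
		return err
	}
	defer ra.Close()

	_, err = io.Copy(os.Stdout, content.NewReader(ra))
	return err
}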
678 vendor/github.com/containerd/containerd/content/local/store.go generated vendored Normal file
@@ -0,0 +1,678 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package local

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/filters"
	"github.com/containerd/containerd/log"
	"github.com/sirupsen/logrus"

	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 1<<20)
		return &buffer
	},
}

// LabelStore is used to store mutable labels for digests
type LabelStore interface {
	// Get returns all the labels for the given digest
	Get(digest.Digest) (map[string]string, error)

	// Set sets all the labels for a given digest
	Set(digest.Digest, map[string]string) error

	// Update replaces the given labels for a digest;
	// a key with an empty value removes a label.
	Update(digest.Digest, map[string]string) (map[string]string, error)
}

// Store is a digest-keyed store for content. All data written into the store
// is stored under a verifiable digest.
//
// Store can generally support multi-reader, single-writer ingest of data,
// including resumable ingest.
type store struct {
	root string
	ls   LabelStore
}

// NewStore returns a local content store
func NewStore(root string) (content.Store, error) {
	return NewLabeledStore(root, nil)
}

// NewLabeledStore returns a new content store using the provided label store
//
// Note: content stores which are used underneath a metadata store may not
// require labels and should use `NewStore`. `NewLabeledStore` is primarily
// useful for tests or standalone implementations.
func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
	if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil {
		return nil, err
	}

	return &store{
		root: root,
		ls:   ls,
	}, nil
}

func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
	p := s.blobPath(dgst)
	fi, err := os.Stat(p)
	if err != nil {
		if os.IsNotExist(err) {
			err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
		}

		return content.Info{}, err
	}
	var labels map[string]string
	if s.ls != nil {
		labels, err = s.ls.Get(dgst)
		if err != nil {
			return content.Info{}, err
		}
	}
	return s.info(dgst, fi, labels), nil
}

func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info {
	return content.Info{
		Digest:    dgst,
		Size:      fi.Size(),
		CreatedAt: fi.ModTime(),
		UpdatedAt: getATime(fi),
		Labels:    labels,
	}
}

// ReaderAt returns an io.ReaderAt for the blob.
func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
	p := s.blobPath(desc.Digest)
	fi, err := os.Stat(p)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}

		return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", desc.Digest, p)
	}

	fp, err := os.Open(p)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}

		return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", desc.Digest, p)
	}

	return sizeReaderAt{size: fi.Size(), fp: fp}, nil
}

// Delete removes a blob by its digest.
//
// While this is safe to do concurrently, safe exist-removal logic must hold
// some global lock on the store.
func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
	if err := os.RemoveAll(s.blobPath(dgst)); err != nil {
		if !os.IsNotExist(err) {
			return err
		}

		return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
	}

	return nil
}

func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
	if s.ls == nil {
		return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store")
	}

	p := s.blobPath(info.Digest)
	fi, err := os.Stat(p)
	if err != nil {
		if os.IsNotExist(err) {
			err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest)
		}

		return content.Info{}, err
	}

	var (
		all    bool
		labels map[string]string
	)
	if len(fieldpaths) > 0 {
		for _, path := range fieldpaths {
			if strings.HasPrefix(path, "labels.") {
				if labels == nil {
					labels = map[string]string{}
				}

				key := strings.TrimPrefix(path, "labels.")
				labels[key] = info.Labels[key]
				continue
			}

			switch path {
			case "labels":
				all = true
				labels = info.Labels
			default:
				return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest)
			}
		}
	} else {
		all = true
		labels = info.Labels
	}

	if all {
		err = s.ls.Set(info.Digest, labels)
	} else {
		labels, err = s.ls.Update(info.Digest, labels)
	}
	if err != nil {
		return content.Info{}, err
	}

	info = s.info(info.Digest, fi, labels)
	info.UpdatedAt = time.Now()

	if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil {
		log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest)
	}

	return info, nil
}

func (s *store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
	// TODO: Support filters
	root := filepath.Join(s.root, "blobs")
	var alg digest.Algorithm
	return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !fi.IsDir() && !alg.Available() {
			return nil
		}

		// TODO(stevvooe): There are a few more cases with subdirs that should be
		// handled in case the layout gets corrupted. This isn't strict enough
		// and may spew bad data.

		if path == root {
			return nil
		}
		if filepath.Dir(path) == root {
			alg = digest.Algorithm(filepath.Base(path))

			if !alg.Available() {
				alg = ""
				return filepath.SkipDir
			}

			// descending into a hash directory
			return nil
		}

		dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
		if err := dgst.Validate(); err != nil {
			// log error but don't report
			log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
			// if we see this, it could mean some sort of corruption of the
			// store or extra paths not expected previously.
		}

		var labels map[string]string
		if s.ls != nil {
			labels, err = s.ls.Get(dgst)
			if err != nil {
				return err
			}
		}
		return fn(s.info(dgst, fi, labels))
	})
}

func (s *store) Status(ctx context.Context, ref string) (content.Status, error) {
	return s.status(s.ingestRoot(ref))
}

func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
	fp, err := os.Open(filepath.Join(s.root, "ingest"))
	if err != nil {
		return nil, err
	}

	defer fp.Close()

	fis, err := fp.Readdir(-1)
	if err != nil {
		return nil, err
	}

	filter, err := filters.ParseAll(fs...)
	if err != nil {
		return nil, err
	}

	var active []content.Status
	for _, fi := range fis {
		p := filepath.Join(s.root, "ingest", fi.Name())
		stat, err := s.status(p)
		if err != nil {
			if !os.IsNotExist(err) {
				return nil, err
			}

			// TODO(stevvooe): This is a common error if uploads are being
			// completed while making this listing. Need to consider taking a
			// lock on the whole store to coordinate this aspect.
			//
			// Another option is to cleanup downloads asynchronously and
			// coordinate this method with the cleanup process.
			//
			// For now, we just skip them, as they really don't exist.
			continue
		}

		if filter.Match(adaptStatus(stat)) {
			active = append(active, stat)
		}
	}

	return active, nil
}

// WalkStatusRefs is used to walk all status references.
// Failed status reads will be logged and ignored; if
// this function is called while references are being altered,
// these error messages may be produced.
func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error {
	fp, err := os.Open(filepath.Join(s.root, "ingest"))
	if err != nil {
		return err
	}

	defer fp.Close()

	fis, err := fp.Readdir(-1)
	if err != nil {
		return err
	}

	for _, fi := range fis {
		rf := filepath.Join(s.root, "ingest", fi.Name(), "ref")

		ref, err := readFileString(rf)
		if err != nil {
			log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref")
			continue
		}

		if err := fn(ref); err != nil {
			return err
		}
	}

	return nil
}

// status works like stat above, except it uses the path to the ingest.
func (s *store) status(ingestPath string) (content.Status, error) {
	dp := filepath.Join(ingestPath, "data")
	fi, err := os.Stat(dp)
	if err != nil {
		if os.IsNotExist(err) {
			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
		}
		return content.Status{}, err
	}

	ref, err := readFileString(filepath.Join(ingestPath, "ref"))
	if err != nil {
		if os.IsNotExist(err) {
			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
		}
		return content.Status{}, err
	}

	startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
	if err != nil {
		return content.Status{}, errors.Wrapf(err, "could not read startedat")
	}

	updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
	if err != nil {
		return content.Status{}, errors.Wrapf(err, "could not read updatedat")
	}

	// because we don't write updatedat on every write, the mod time may
	// actually be more up to date.
	if fi.ModTime().After(updatedAt) {
		updatedAt = fi.ModTime()
	}

	return content.Status{
		Ref:       ref,
		Offset:    fi.Size(),
		Total:     s.total(ingestPath),
		UpdatedAt: updatedAt,
		StartedAt: startedAt,
	}, nil
}

func adaptStatus(status content.Status) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}
		switch fieldpath[0] {
		case "ref":
			return status.Ref, true
		}

		return "", false
	})
}

// total attempts to resolve the total expected size for the write.
func (s *store) total(ingestPath string) int64 {
	totalS, err := readFileString(filepath.Join(ingestPath, "total"))
	if err != nil {
		return 0
	}

	total, err := strconv.ParseInt(totalS, 10, 64)
	if err != nil {
		// represents a corrupted file, should probably remove.
		return 0
	}

	return total
}

// Writer begins or resumes the active writer identified by ref. If the writer
// is already in use, an error is returned. Only one writer may be in use per
// ref at a time.
//
// The argument `ref` is used to uniquely identify a long-lived writer transaction.
func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
	var wOpts content.WriterOpts
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil, err
		}
	}
	// TODO(AkihiroSuda): we could create a random string or one calculated based on the context
	// https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
	if wOpts.Ref == "" {
		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
	}
	var lockErr error
	for count := uint64(0); count < 10; count++ {
		time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count)))
		if err := tryLock(wOpts.Ref); err != nil {
			if !errdefs.IsUnavailable(err) {
				return nil, err
			}

			lockErr = err
		} else {
			lockErr = nil
			break
		}
	}

	if lockErr != nil {
		return nil, lockErr
	}

	w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
	if err != nil {
		unlock(wOpts.Ref)
		return nil, err
	}

	return w, nil // lock is now held by w.
}

func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) {
	path, _, data := s.ingestPaths(ref)
	status, err := s.status(path)
	if err != nil {
		return status, errors.Wrap(err, "failed reading status of resume write")
	}
	if ref != status.Ref {
		// NOTE(stevvooe): This is fairly catastrophic. Either we have some
		// layout corruption or a hash collision for the ref key.
		return status, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
	}

	if total > 0 && status.Total > 0 && total != status.Total {
		return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
	}

	// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
	fp, err := os.Open(data)
	if err != nil {
		return status, err
	}

	p := bufPool.Get().(*[]byte)
	status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
	bufPool.Put(p)
	fp.Close()
	return status, err
}

// writer provides the main implementation of the Writer method. The caller
// must hold the lock correctly and release on error if there is a problem.
func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
	// TODO(stevvooe): Need to actually store expected here. We have
	// code in the service that shouldn't be dealing with this.
	if expected != "" {
		p := s.blobPath(expected)
		if _, err := os.Stat(p); err == nil {
			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
		}
	}

	path, refp, data := s.ingestPaths(ref)

	var (
		digester  = digest.Canonical.Digester()
		offset    int64
		startedAt time.Time
		updatedAt time.Time
	)

	foundValidIngest := false
	// ensure that the ingest path has been created.
	if err := os.Mkdir(path, 0755); err != nil {
		if !os.IsExist(err) {
			return nil, err
		}
		status, err := s.resumeStatus(ref, total, digester)
		if err == nil {
			foundValidIngest = true
			updatedAt = status.UpdatedAt
			startedAt = status.StartedAt
			total = status.Total
			offset = status.Offset
		} else {
			logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error())
		}
	}

	if !foundValidIngest {
		startedAt = time.Now()
		updatedAt = startedAt

		// the ingest is new, we need to setup the target location.
		// write the ref to a file for later use
		if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
			return nil, err
		}

		if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
			return nil, err
		}

		if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
			return nil, err
		}

		if total > 0 {
			if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
				return nil, err
			}
		}
	}

	fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
	if err != nil {
		return nil, errors.Wrap(err, "failed to open data file")
	}

	if _, err := fp.Seek(offset, io.SeekStart); err != nil {
		return nil, errors.Wrap(err, "could not seek to current write offset")
	}

	return &writer{
		s:         s,
		fp:        fp,
		ref:       ref,
		path:      path,
		offset:    offset,
		total:     total,
		digester:  digester,
		startedAt: startedAt,
		updatedAt: updatedAt,
	}, nil
}

// Abort an active transaction keyed by ref. If the ingest is active, it will
// be cancelled. Any resources associated with the ingest will be cleaned.
func (s *store) Abort(ctx context.Context, ref string) error {
	root := s.ingestRoot(ref)
	if err := os.RemoveAll(root); err != nil {
		if os.IsNotExist(err) {
			return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref)
		}

		return err
	}

	return nil
}

func (s *store) blobPath(dgst digest.Digest) string {
	return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
}

func (s *store) ingestRoot(ref string) string {
	dgst := digest.FromString(ref)
	return filepath.Join(s.root, "ingest", dgst.Hex())
}

// ingestPaths returns the paths for an ingest keyed by ref:
//
// - root: entire ingest directory
// - ref: name of the starting ref, must be unique
// - data: file where data is written
//
func (s *store) ingestPaths(ref string) (string, string, string) {
	var (
		fp = s.ingestRoot(ref)
		rp = filepath.Join(fp, "ref")
		dp = filepath.Join(fp, "data")
	)

	return fp, rp, dp
}

func readFileString(path string) (string, error) {
	p, err := ioutil.ReadFile(path)
	return string(p), err
}

// readFileTimestamp reads a file with just a timestamp present.
func readFileTimestamp(p string) (time.Time, error) {
	b, err := ioutil.ReadFile(p)
	if err != nil {
		if os.IsNotExist(err) {
			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
		}
		return time.Time{}, err
	}

	var t time.Time
	if err := t.UnmarshalText(b); err != nil {
		return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p)
	}

	return t, nil
}

func writeTimestampFile(p string, t time.Time) error {
	b, err := t.MarshalText()
	if err != nil {
		return err
	}
	return atomicWrite(p, b, 0666)
}

func atomicWrite(path string, data []byte, mode os.FileMode) error {
	tmp := fmt.Sprintf("%s.tmp", path)
	f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode)
	if err != nil {
		return errors.Wrap(err, "create tmp file")
}
|
||||
_, err = f.Write(data)
|
||||
f.Close()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "write atomic data")
|
||||
}
|
||||
return os.Rename(tmp, path)
|
||||
}
|
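Writer above acquires the per-ref lock with bounded, randomized exponential backoff: up to ten attempts, each preceded by a sleep of at most 2^attempt milliseconds, giving up only on a non-"unavailable" error. A minimal standalone sketch of the same pattern, assuming a hypothetical tryAcquire in place of the package's unexported tryLock (the original also distinguishes error kinds, which is omitted here):

package main

import (
    "errors"
    "fmt"
    "math/rand"
    "time"
)

var errUnavailable = errors.New("lock unavailable")

// tryAcquire is an illustrative stand-in for tryLock: it fails randomly
// to simulate contention on the ref.
func tryAcquire(ref string) error {
    if rand.Intn(3) > 0 {
        return errUnavailable
    }
    return nil
}

// acquireWithBackoff retries with randomized exponential backoff, exactly
// the shape of the loop in Writer above.
func acquireWithBackoff(ref string) error {
    var lockErr error
    for count := uint64(0); count < 10; count++ {
        time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count)))
        if err := tryAcquire(ref); err != nil {
            lockErr = err
        } else {
            return nil
        }
    }
    return lockErr
}

func main() {
    fmt.Println(acquireWithBackoff("example-ref"))
}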
35 vendor/github.com/containerd/containerd/content/local/store_unix.go generated vendored Normal file
@@ -0,0 +1,35 @@
// +build linux solaris darwin freebsd

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package local

import (
    "os"
    "syscall"
    "time"

    "github.com/containerd/containerd/sys"
)

func getATime(fi os.FileInfo) time.Time {
    if st, ok := fi.Sys().(*syscall.Stat_t); ok {
        return sys.StatATimeAsTime(st)
    }

    return fi.ModTime()
}
26 vendor/github.com/containerd/containerd/content/local/store_windows.go generated vendored Normal file
@@ -0,0 +1,26 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package local

import (
    "os"
    "time"
)

func getATime(fi os.FileInfo) time.Time {
    return fi.ModTime()
}
207 vendor/github.com/containerd/containerd/content/local/writer.go generated vendored Normal file
@@ -0,0 +1,207 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package local

import (
    "context"
    "io"
    "os"
    "path/filepath"
    "runtime"
    "time"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/log"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

// writer represents a write transaction against the blob store.
type writer struct {
    s         *store
    fp        *os.File // opened data file
    path      string   // path to writer dir
    ref       string   // ref key
    offset    int64
    total     int64
    digester  digest.Digester
    startedAt time.Time
    updatedAt time.Time
}

func (w *writer) Status() (content.Status, error) {
    return content.Status{
        Ref:       w.ref,
        Offset:    w.offset,
        Total:     w.total,
        StartedAt: w.startedAt,
        UpdatedAt: w.updatedAt,
    }, nil
}

// Digest returns the current digest of the content, up to the current write.
//
// Cannot be called concurrently with `Write`.
func (w *writer) Digest() digest.Digest {
    return w.digester.Digest()
}

// Write p to the transaction.
//
// Note that writes are unbuffered to the backing file. When writing, it is
// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
func (w *writer) Write(p []byte) (n int, err error) {
    n, err = w.fp.Write(p)
    w.digester.Hash().Write(p[:n])
    w.offset += int64(len(p))
    w.updatedAt = time.Now()
    return n, err
}

func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
    // Ensure even on error the writer is fully closed
    defer unlock(w.ref)

    var base content.Info
    for _, opt := range opts {
        if err := opt(&base); err != nil {
            return err
        }
    }

    fp := w.fp
    w.fp = nil

    if fp == nil {
        return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
    }

    if err := fp.Sync(); err != nil {
        fp.Close()
        return errors.Wrap(err, "sync failed")
    }

    fi, err := fp.Stat()
    closeErr := fp.Close()
    if err != nil {
        return errors.Wrap(err, "stat on ingest file failed")
    }
    if closeErr != nil {
        return errors.Wrap(closeErr, "failed to close ingest file")
    }

    if size > 0 && size != fi.Size() {
        return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size)
    }

    dgst := w.digester.Digest()
    if expected != "" && expected != dgst {
        return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
    }

    var (
        ingest = filepath.Join(w.path, "data")
        target = w.s.blobPath(dgst)
    )

    // make sure parent directories of blob exist
    if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
        return err
    }

    if _, err := os.Stat(target); err == nil {
        // collision with the target file!
        if err := os.RemoveAll(w.path); err != nil {
            log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
        }
        return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
    }

    if err := os.Rename(ingest, target); err != nil {
        return err
    }

    // Ingest has now been made available in the content store, attempt to complete
    // setting metadata but errors should only be logged and not returned since
    // the content store cannot be cleanly rolled back.

    commitTime := time.Now()
    if err := os.Chtimes(target, commitTime, commitTime); err != nil {
        log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time")
    }

    // clean up!!
    if err := os.RemoveAll(w.path); err != nil {
        log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
    }

    if w.s.ls != nil && base.Labels != nil {
        if err := w.s.ls.Set(dgst, base.Labels); err != nil {
            log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels")
        }
    }

    // change to readonly, more important for read, but provides _some_
    // protection from this point on. We use the existing perms with a mask
    // only allowing reads honoring the umask on creation.
    //
    // This removes write and exec, only allowing read per the creation umask.
    //
    // NOTE: Windows does not support this operation
    if runtime.GOOS != "windows" {
        if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil {
            log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly")
        }
    }

    return nil
}

// Close the writer, flushing any unwritten data and leaving the progress
// intact.
//
// If one needs to resume the transaction, a new writer can be obtained from
// `Ingester.Writer` using the same key. The write can then be continued
// from where it was left off.
//
// To abandon a transaction completely, first call close then `IngestManager.Abort` to
// clean up the associated resources.
func (w *writer) Close() (err error) {
    if w.fp != nil {
        w.fp.Sync()
        err = w.fp.Close()
        writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt)
        w.fp = nil
        unlock(w.ref)
        return
    }

    return nil
}

func (w *writer) Truncate(size int64) error {
    if size != 0 {
        return errors.New("Truncate: unsupported size")
    }
    w.offset = 0
    w.digester.Hash().Reset()
    if _, err := w.fp.Seek(0, io.SeekStart); err != nil {
        return err
    }
    return w.fp.Truncate(0)
}
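The Close comment above describes the resume flow: closing a writer keeps its on-disk progress, and a new writer opened with the same ref picks up at the recorded offset. A hedged end-to-end sketch against a local store (the root path and ref are illustrative; error handling trimmed for brevity):

package main

import (
    "context"
    "fmt"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/content/local"
)

func main() {
    cs, err := local.NewStore("/tmp/content-example") // illustrative root
    if err != nil {
        panic(err)
    }
    ctx := context.Background()

    // First session: write part of the data, then close. Close leaves the
    // ingest's progress intact on disk.
    w, err := cs.Writer(ctx, content.WithRef("example-ref"))
    if err != nil {
        panic(err)
    }
    w.Write([]byte("hello, "))
    w.Close()

    // Second session: opening the same ref resumes at the previous offset
    // (the store re-hashes the existing data on resume).
    w, err = cs.Writer(ctx, content.WithRef("example-ref"))
    if err != nil {
        panic(err)
    }
    st, _ := w.Status()
    fmt.Println("resumed at offset", st.Offset) // 7

    w.Write([]byte("world"))
    if err := w.Commit(ctx, 0, w.Digest()); err != nil {
        panic(err)
    }
}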
65 vendor/github.com/containerd/containerd/content/proxy/content_reader.go generated vendored Normal file
@@ -0,0 +1,65 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package proxy

import (
    "context"

    contentapi "github.com/containerd/containerd/api/services/content/v1"
    digest "github.com/opencontainers/go-digest"
)

type remoteReaderAt struct {
    ctx    context.Context
    digest digest.Digest
    size   int64
    client contentapi.ContentClient
}

func (ra *remoteReaderAt) Size() int64 {
    return ra.size
}

func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
    rr := &contentapi.ReadContentRequest{
        Digest: ra.digest,
        Offset: off,
        Size_:  int64(len(p)),
    }
    rc, err := ra.client.Read(ra.ctx, rr)
    if err != nil {
        return 0, err
    }

    for len(p) > 0 {
        var resp *contentapi.ReadContentResponse
        // fill our buffer up until we can fill p.
        resp, err = rc.Recv()
        if err != nil {
            return n, err
        }

        copied := copy(p, resp.Data)
        n += copied
        p = p[copied:]
    }
    return n, nil
}

func (ra *remoteReaderAt) Close() error {
    return nil
}
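remoteReaderAt only exposes ReadAt; sequential consumers wrap it in a plain io.Reader, either with containerd's content.NewReader helper (used by the applier later in this change) or the standard library's io.NewSectionReader. A two-line sketch, assuming ra is any content.ReaderAt:

    r := content.NewReader(ra)                   // io.Reader over the whole blob
    sr := io.NewSectionReader(ra, 0, ra.Size())  // equivalent stdlib form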
234 vendor/github.com/containerd/containerd/content/proxy/content_store.go generated vendored Normal file
@@ -0,0 +1,234 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package proxy

import (
    "context"
    "io"

    contentapi "github.com/containerd/containerd/api/services/content/v1"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    protobuftypes "github.com/gogo/protobuf/types"
    digest "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

type proxyContentStore struct {
    client contentapi.ContentClient
}

// NewContentStore returns a new content store which communicates over a GRPC
// connection using the containerd content GRPC API.
func NewContentStore(client contentapi.ContentClient) content.Store {
    return &proxyContentStore{
        client: client,
    }
}

func (pcs *proxyContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
    resp, err := pcs.client.Info(ctx, &contentapi.InfoRequest{
        Digest: dgst,
    })
    if err != nil {
        return content.Info{}, errdefs.FromGRPC(err)
    }

    return infoFromGRPC(resp.Info), nil
}

func (pcs *proxyContentStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
    session, err := pcs.client.List(ctx, &contentapi.ListContentRequest{
        Filters: filters,
    })
    if err != nil {
        return errdefs.FromGRPC(err)
    }

    for {
        msg, err := session.Recv()
        if err != nil {
            if err != io.EOF {
                return errdefs.FromGRPC(err)
            }

            break
        }

        for _, info := range msg.Info {
            if err := fn(infoFromGRPC(info)); err != nil {
                return err
            }
        }
    }

    return nil
}

func (pcs *proxyContentStore) Delete(ctx context.Context, dgst digest.Digest) error {
    if _, err := pcs.client.Delete(ctx, &contentapi.DeleteContentRequest{
        Digest: dgst,
    }); err != nil {
        return errdefs.FromGRPC(err)
    }

    return nil
}

// ReaderAt ignores MediaType.
func (pcs *proxyContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
    i, err := pcs.Info(ctx, desc.Digest)
    if err != nil {
        return nil, err
    }

    return &remoteReaderAt{
        ctx:    ctx,
        digest: desc.Digest,
        size:   i.Size,
        client: pcs.client,
    }, nil
}

func (pcs *proxyContentStore) Status(ctx context.Context, ref string) (content.Status, error) {
    resp, err := pcs.client.Status(ctx, &contentapi.StatusRequest{
        Ref: ref,
    })
    if err != nil {
        return content.Status{}, errdefs.FromGRPC(err)
    }

    status := resp.Status
    return content.Status{
        Ref:       status.Ref,
        StartedAt: status.StartedAt,
        UpdatedAt: status.UpdatedAt,
        Offset:    status.Offset,
        Total:     status.Total,
        Expected:  status.Expected,
    }, nil
}

func (pcs *proxyContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
    resp, err := pcs.client.Update(ctx, &contentapi.UpdateRequest{
        Info: infoToGRPC(info),
        UpdateMask: &protobuftypes.FieldMask{
            Paths: fieldpaths,
        },
    })
    if err != nil {
        return content.Info{}, errdefs.FromGRPC(err)
    }
    return infoFromGRPC(resp.Info), nil
}

func (pcs *proxyContentStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
    resp, err := pcs.client.ListStatuses(ctx, &contentapi.ListStatusesRequest{
        Filters: filters,
    })
    if err != nil {
        return nil, errdefs.FromGRPC(err)
    }

    var statuses []content.Status
    for _, status := range resp.Statuses {
        statuses = append(statuses, content.Status{
            Ref:       status.Ref,
            StartedAt: status.StartedAt,
            UpdatedAt: status.UpdatedAt,
            Offset:    status.Offset,
            Total:     status.Total,
            Expected:  status.Expected,
        })
    }

    return statuses, nil
}

// Writer ignores MediaType.
func (pcs *proxyContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
    var wOpts content.WriterOpts
    for _, opt := range opts {
        if err := opt(&wOpts); err != nil {
            return nil, err
        }
    }
    wrclient, offset, err := pcs.negotiate(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
    if err != nil {
        return nil, errdefs.FromGRPC(err)
    }

    return &remoteWriter{
        ref:    wOpts.Ref,
        client: wrclient,
        offset: offset,
    }, nil
}

// Abort implements asynchronous abort. It starts a new write session on the ref.
func (pcs *proxyContentStore) Abort(ctx context.Context, ref string) error {
    if _, err := pcs.client.Abort(ctx, &contentapi.AbortRequest{
        Ref: ref,
    }); err != nil {
        return errdefs.FromGRPC(err)
    }

    return nil
}

func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
    wrclient, err := pcs.client.Write(ctx)
    if err != nil {
        return nil, 0, err
    }

    if err := wrclient.Send(&contentapi.WriteContentRequest{
        Action:   contentapi.WriteActionStat,
        Ref:      ref,
        Total:    size,
        Expected: expected,
    }); err != nil {
        return nil, 0, err
    }

    resp, err := wrclient.Recv()
    if err != nil {
        return nil, 0, err
    }

    return wrclient, resp.Offset, nil
}

func infoToGRPC(info content.Info) contentapi.Info {
    return contentapi.Info{
        Digest:    info.Digest,
        Size_:     info.Size,
        CreatedAt: info.CreatedAt,
        UpdatedAt: info.UpdatedAt,
        Labels:    info.Labels,
    }
}

func infoFromGRPC(info contentapi.Info) content.Info {
    return content.Info{
        Digest:    info.Digest,
        Size:      info.Size_,
        CreatedAt: info.CreatedAt,
        UpdatedAt: info.UpdatedAt,
        Labels:    info.Labels,
    }
}
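NewContentStore only needs a generated ContentClient, so wiring it to a running containerd daemon is a few lines. A hedged sketch; the socket path and namespace are illustrative, and grpc.WithInsecure matches the gRPC vintage vendored here:

package main

import (
    "context"
    "fmt"

    contentapi "github.com/containerd/containerd/api/services/content/v1"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/content/proxy"
    "github.com/containerd/containerd/namespaces"
    "google.golang.org/grpc"
)

func main() {
    // Dial target is illustrative; containerd's default socket on Linux.
    conn, err := grpc.Dial("unix:///run/containerd/containerd.sock", grpc.WithInsecure())
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    cs := proxy.NewContentStore(contentapi.NewContentClient(conn))

    // containerd requires a namespace on the request context.
    ctx := namespaces.WithNamespace(context.Background(), "default")

    // Walk every blob known to the daemon's content store.
    if err := cs.Walk(ctx, func(info content.Info) error {
        fmt.Println(info.Digest, info.Size)
        return nil
    }); err != nil {
        panic(err)
    }
}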
139 vendor/github.com/containerd/containerd/content/proxy/content_writer.go generated vendored Normal file
@@ -0,0 +1,139 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package proxy

import (
    "context"
    "io"

    contentapi "github.com/containerd/containerd/api/services/content/v1"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    digest "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

type remoteWriter struct {
    ref    string
    client contentapi.Content_WriteClient
    offset int64
    digest digest.Digest
}

// send performs a synchronous req-resp cycle on the client.
func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.WriteContentResponse, error) {
    if err := rw.client.Send(req); err != nil {
        return nil, err
    }

    resp, err := rw.client.Recv()

    if err == nil {
        // try to keep these in sync
        if resp.Digest != "" {
            rw.digest = resp.Digest
        }
    }

    return resp, err
}

func (rw *remoteWriter) Status() (content.Status, error) {
    resp, err := rw.send(&contentapi.WriteContentRequest{
        Action: contentapi.WriteActionStat,
    })
    if err != nil {
        return content.Status{}, errors.Wrap(errdefs.FromGRPC(err), "error getting writer status")
    }

    return content.Status{
        Ref:       rw.ref,
        Offset:    resp.Offset,
        Total:     resp.Total,
        StartedAt: resp.StartedAt,
        UpdatedAt: resp.UpdatedAt,
    }, nil
}

func (rw *remoteWriter) Digest() digest.Digest {
    return rw.digest
}

func (rw *remoteWriter) Write(p []byte) (n int, err error) {
    offset := rw.offset

    resp, err := rw.send(&contentapi.WriteContentRequest{
        Action: contentapi.WriteActionWrite,
        Offset: offset,
        Data:   p,
    })
    if err != nil {
        return 0, errors.Wrap(errdefs.FromGRPC(err), "failed to send write")
    }

    n = int(resp.Offset - offset)
    if n < len(p) {
        err = io.ErrShortWrite
    }

    rw.offset += int64(n)
    if resp.Digest != "" {
        rw.digest = resp.Digest
    }
    return
}

func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
    var base content.Info
    for _, opt := range opts {
        if err := opt(&base); err != nil {
            return err
        }
    }
    resp, err := rw.send(&contentapi.WriteContentRequest{
        Action:   contentapi.WriteActionCommit,
        Total:    size,
        Offset:   rw.offset,
        Expected: expected,
        Labels:   base.Labels,
    })
    if err != nil {
        return errors.Wrap(errdefs.FromGRPC(err), "commit failed")
    }

    if size != 0 && resp.Offset != size {
        return errors.Errorf("unexpected size: %v != %v", resp.Offset, size)
    }

    if expected != "" && resp.Digest != expected {
        return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
    }

    rw.digest = resp.Digest
    rw.offset = resp.Offset
    return nil
}

func (rw *remoteWriter) Truncate(size int64) error {
    // This truncation won't actually be validated until a write is issued.
    rw.offset = size
    return nil
}

func (rw *remoteWriter) Close() error {
    return rw.client.CloseSend()
}
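In practice remoteWriter is usually driven through containerd's content.Copy helper rather than raw Write calls: Copy consults Status to resume from the writer's current offset and lets Commit validate the final size and digest. A hedged sketch, with src an io.Reader and desc a descriptor for the expected blob:

    if err := content.Copy(ctx, w, src, desc.Size, desc.Digest); err != nil {
        // transfer failed or the committed size/digest did not match
        return err
    }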
131 vendor/github.com/containerd/containerd/diff/apply/apply.go generated vendored Normal file
@@ -0,0 +1,131 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package apply

import (
    "context"
    "io"
    "io/ioutil"
    "time"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/diff"
    "github.com/containerd/containerd/log"
    "github.com/containerd/containerd/mount"
    digest "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
)

// NewFileSystemApplier returns an applier which simply mounts
// and applies diff onto the mounted filesystem.
func NewFileSystemApplier(cs content.Provider) diff.Applier {
    return &fsApplier{
        store: cs,
    }
}

type fsApplier struct {
    store content.Provider
}

var emptyDesc = ocispec.Descriptor{}

// Apply applies the content associated with the provided digests onto the
// provided mounts. Archive content will be extracted and decompressed if
// necessary.
func (s *fsApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) {
    t1 := time.Now()
    defer func() {
        if err == nil {
            log.G(ctx).WithFields(logrus.Fields{
                "d":     time.Since(t1),
                "dgst":  desc.Digest,
                "size":  desc.Size,
                "media": desc.MediaType,
            }).Debugf("diff applied")
        }
    }()

    var config diff.ApplyConfig
    for _, o := range opts {
        if err := o(ctx, desc, &config); err != nil {
            return emptyDesc, errors.Wrap(err, "failed to apply config opt")
        }
    }

    ra, err := s.store.ReaderAt(ctx, desc)
    if err != nil {
        return emptyDesc, errors.Wrap(err, "failed to get reader from content store")
    }
    defer ra.Close()

    var processors []diff.StreamProcessor
    processor := diff.NewProcessorChain(desc.MediaType, content.NewReader(ra))
    processors = append(processors, processor)
    for {
        if processor, err = diff.GetProcessor(ctx, processor, config.ProcessorPayloads); err != nil {
            return emptyDesc, errors.Wrapf(err, "failed to get stream processor for %s", desc.MediaType)
        }
        processors = append(processors, processor)
        if processor.MediaType() == ocispec.MediaTypeImageLayer {
            break
        }
    }
    defer processor.Close()

    digester := digest.Canonical.Digester()
    rc := &readCounter{
        r: io.TeeReader(processor, digester.Hash()),
    }

    if err := apply(ctx, mounts, rc); err != nil {
        return emptyDesc, err
    }

    // Read any trailing data
    if _, err := io.Copy(ioutil.Discard, rc); err != nil {
        return emptyDesc, err
    }

    for _, p := range processors {
        if ep, ok := p.(interface {
            Err() error
        }); ok {
            if err := ep.Err(); err != nil {
                return emptyDesc, err
            }
        }
    }
    return ocispec.Descriptor{
        MediaType: ocispec.MediaTypeImageLayer,
        Size:      rc.c,
        Digest:    digester.Digest(),
    }, nil
}

type readCounter struct {
    r io.Reader
    c int64
}

func (rc *readCounter) Read(p []byte) (n int, err error) {
    n, err = rc.r.Read(p)
    rc.c += int64(n)
    return
}
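The applier hashes and counts the stream as a side effect of the copy: io.TeeReader feeds every byte into the digester while readCounter tallies the total. The same pattern works standalone; a minimal runnable sketch:

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"

    digest "github.com/opencontainers/go-digest"
)

// countingReader mirrors readCounter above: it counts bytes as they pass.
type countingReader struct {
    r io.Reader
    n int64
}

func (c *countingReader) Read(p []byte) (int, error) {
    n, err := c.r.Read(p)
    c.n += int64(n)
    return n, err
}

func main() {
    digester := digest.Canonical.Digester()
    rc := &countingReader{
        // Tee the source into the hash while the consumer drains it.
        r: io.TeeReader(strings.NewReader("layer bytes"), digester.Hash()),
    }
    io.Copy(ioutil.Discard, rc)
    fmt.Println(rc.n, digester.Digest()) // byte count and canonical digest
}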
134 vendor/github.com/containerd/containerd/diff/apply/apply_linux.go generated vendored Normal file
@@ -0,0 +1,134 @@
// +build linux

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package apply

import (
    "context"
    "io"
    "strings"

    "github.com/containerd/containerd/archive"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/mount"
    "github.com/opencontainers/runc/libcontainer/system"
    "github.com/pkg/errors"
)

func apply(ctx context.Context, mounts []mount.Mount, r io.Reader) error {
    switch {
    case len(mounts) == 1 && mounts[0].Type == "overlay":
        // OverlayConvertWhiteout (mknod c 0 0) doesn't work in userns.
        // https://github.com/containerd/containerd/issues/3762
        if system.RunningInUserNS() {
            break
        }
        path, parents, err := getOverlayPath(mounts[0].Options)
        if err != nil {
            if errdefs.IsInvalidArgument(err) {
                break
            }
            return err
        }
        opts := []archive.ApplyOpt{
            archive.WithConvertWhiteout(archive.OverlayConvertWhiteout),
        }
        if len(parents) > 0 {
            opts = append(opts, archive.WithParents(parents))
        }
        _, err = archive.Apply(ctx, path, r, opts...)
        return err
    case len(mounts) == 1 && mounts[0].Type == "aufs":
        path, parents, err := getAufsPath(mounts[0].Options)
        if err != nil {
            if errdefs.IsInvalidArgument(err) {
                break
            }
            return err
        }
        opts := []archive.ApplyOpt{
            archive.WithConvertWhiteout(archive.AufsConvertWhiteout),
        }
        if len(parents) > 0 {
            opts = append(opts, archive.WithParents(parents))
        }
        _, err = archive.Apply(ctx, path, r, opts...)
        return err
    }
    return mount.WithTempMount(ctx, mounts, func(root string) error {
        _, err := archive.Apply(ctx, root, r)
        return err
    })
}

func getOverlayPath(options []string) (upper string, lower []string, err error) {
    const upperdirPrefix = "upperdir="
    const lowerdirPrefix = "lowerdir="

    for _, o := range options {
        if strings.HasPrefix(o, upperdirPrefix) {
            upper = strings.TrimPrefix(o, upperdirPrefix)
        } else if strings.HasPrefix(o, lowerdirPrefix) {
            lower = strings.Split(strings.TrimPrefix(o, lowerdirPrefix), ":")
        }
    }
    if upper == "" {
        return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "upperdir not found")
    }

    return
}

// getAufsPath handles options as given by the containerd aufs package only,
// formatted as "br:<upper>=rw[:<lower>=ro+wh]*"
func getAufsPath(options []string) (upper string, lower []string, err error) {
    const (
        sep      = ":"
        brPrefix = "br:"
        rwSuffix = "=rw"
        roSuffix = "=ro+wh"
    )
    for _, o := range options {
        if strings.HasPrefix(o, brPrefix) {
            o = strings.TrimPrefix(o, brPrefix)
        } else {
            continue
        }

        for _, b := range strings.Split(o, sep) {
            if strings.HasSuffix(b, rwSuffix) {
                if upper != "" {
                    return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "multiple rw branch found")
                }
                upper = strings.TrimSuffix(b, rwSuffix)
            } else if strings.HasSuffix(b, roSuffix) {
                if upper == "" {
                    return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "rw branch be first")
                }
                lower = append(lower, strings.TrimSuffix(b, roSuffix))
            } else {
                return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "unhandled aufs suffix")
            }
        }
    }
    if upper == "" {
        return "", nil, errors.Wrap(errdefs.ErrInvalidArgument, "rw branch not found")
    }
    return
}
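getOverlayPath above is plain option-string parsing: it pulls the upperdir out of the mount options and splits the lowerdir list on ':'. A standalone sketch of the same idea, with illustrative paths (names here are not the vendored identifiers):

package main

import (
    "fmt"
    "strings"
)

// parseOverlay extracts the writable upper dir and the ordered lower dirs
// from overlay mount options, mirroring getOverlayPath's parsing.
func parseOverlay(options []string) (upper string, lower []string) {
    for _, o := range options {
        if strings.HasPrefix(o, "upperdir=") {
            upper = strings.TrimPrefix(o, "upperdir=")
        } else if strings.HasPrefix(o, "lowerdir=") {
            lower = strings.Split(strings.TrimPrefix(o, "lowerdir="), ":")
        }
    }
    return upper, lower
}

func main() {
    up, low := parseOverlay([]string{
        "index=off",
        "upperdir=/var/lib/overlay/1/fs",
        "lowerdir=/var/lib/overlay/0/fs:/var/lib/overlay/base/fs",
    })
    fmt.Println(up, low)
}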
34 vendor/github.com/containerd/containerd/diff/apply/apply_other.go generated vendored Normal file
@@ -0,0 +1,34 @@
// +build !linux

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package apply

import (
    "context"
    "io"

    "github.com/containerd/containerd/archive"
    "github.com/containerd/containerd/mount"
)

func apply(ctx context.Context, mounts []mount.Mount, r io.Reader) error {
    return mount.WithTempMount(ctx, mounts, func(root string) error {
        _, err := archive.Apply(ctx, root, r)
        return err
    })
}
107 vendor/github.com/containerd/containerd/diff/diff.go generated vendored Normal file
@@ -0,0 +1,107 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package diff

import (
    "context"

    "github.com/containerd/containerd/mount"
    "github.com/gogo/protobuf/types"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// Config is used to hold parameters needed for a diff operation
type Config struct {
    // MediaType is the type of diff to generate
    // Default depends on the differ,
    // i.e. application/vnd.oci.image.layer.v1.tar+gzip
    MediaType string

    // Reference is the content upload reference
    // Default will use a random reference string
    Reference string

    // Labels are the labels to apply to the generated content
    Labels map[string]string
}

// Opt is used to configure a diff operation
type Opt func(*Config) error

// Comparer allows creation of filesystem diffs between mounts
type Comparer interface {
    // Compare computes the difference between two mounts and returns a
    // descriptor for the computed diff. The options can provide
    // a ref which can be used to track the content creation of the diff.
    // The media type which is used to determine the format of the created
    // content can also be provided as an option.
    Compare(ctx context.Context, lower, upper []mount.Mount, opts ...Opt) (ocispec.Descriptor, error)
}

// ApplyConfig is used to hold parameters needed for an apply operation
type ApplyConfig struct {
    // ProcessorPayloads specifies the payload sent to various processors
    ProcessorPayloads map[string]*types.Any
}

// ApplyOpt is used to configure an Apply operation
type ApplyOpt func(context.Context, ocispec.Descriptor, *ApplyConfig) error

// Applier allows applying diffs between mounts
type Applier interface {
    // Apply applies the content referred to by the given descriptor to
    // the provided mount. The method of applying is based on the
    // implementation and content descriptor. For example, in the common
    // case the descriptor is a file system difference in tar format,
    // that tar would be applied on top of the mounts.
    Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount, opts ...ApplyOpt) (ocispec.Descriptor, error)
}

// WithMediaType sets the media type to use for creating the diff; when not
// specified, the differ will choose a default.
func WithMediaType(m string) Opt {
    return func(c *Config) error {
        c.MediaType = m
        return nil
    }
}

// WithReference is used to set the content upload reference used by
// the diff operation. This allows the caller to track the upload through
// the content store.
func WithReference(ref string) Opt {
    return func(c *Config) error {
        c.Reference = ref
        return nil
    }
}

// WithLabels is used to set content labels on the created diff content.
func WithLabels(labels map[string]string) Opt {
    return func(c *Config) error {
        c.Labels = labels
        return nil
    }
}

// WithPayloads sets the apply processor payloads to the config
func WithPayloads(payloads map[string]*types.Any) ApplyOpt {
    return func(_ context.Context, _ ocispec.Descriptor, c *ApplyConfig) error {
        c.ProcessorPayloads = payloads
        return nil
    }
}
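Opt and ApplyOpt follow Go's functional-options pattern, so callers compose configuration at the call site. A hedged example of passing options to a Comparer (ctx and the mount slices are placeholders):

    desc, err := comparer.Compare(ctx, lower, upper,
        diff.WithMediaType(ocispec.MediaTypeImageLayerGzip),
        diff.WithReference("custom-ref"),
        diff.WithLabels(map[string]string{"example": "true"}),
    )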
187 vendor/github.com/containerd/containerd/diff/stream.go generated vendored Normal file
@@ -0,0 +1,187 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package diff

import (
    "context"
    "io"
    "os"

    "github.com/containerd/containerd/archive/compression"
    "github.com/containerd/containerd/images"
    "github.com/gogo/protobuf/types"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

var (
    handlers []Handler

    // ErrNoProcessor is returned when no stream processor is available for a media-type
    ErrNoProcessor = errors.New("no processor for media-type")
)

func init() {
    // register the default compression handler
    RegisterProcessor(compressedHandler)
}

// RegisterProcessor registers a stream processor for media-types
func RegisterProcessor(handler Handler) {
    handlers = append(handlers, handler)
}

// GetProcessor returns the processor for a media-type
func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
    // reverse this list so that user configured handlers come up first
    for i := len(handlers) - 1; i >= 0; i-- {
        processor, ok := handlers[i](ctx, stream.MediaType())
        if ok {
            return processor(ctx, stream, payloads)
        }
    }
    return nil, ErrNoProcessor
}

// Handler checks a media-type and initializes the processor
type Handler func(ctx context.Context, mediaType string) (StreamProcessorInit, bool)

// StaticHandler returns the processor init func for a static media-type
func StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler {
    return func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) {
        if mediaType == expectedMediaType {
            return fn, true
        }
        return nil, false
    }
}

// StreamProcessorInit returns the initialized stream processor
type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error)

// RawProcessor provides access to direct fd for processing
type RawProcessor interface {
    // File returns the fd for the read stream of the underlying processor
    File() *os.File
}

// StreamProcessor handles processing a content stream and transforming it into a different media-type
type StreamProcessor interface {
    io.ReadCloser

    // MediaType is the resulting media-type that the processor processes the stream into
    MediaType() string
}

func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorInit, bool) {
    compressed, err := images.DiffCompression(ctx, mediaType)
    if err != nil {
        return nil, false
    }
    if compressed != "" {
        return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
            ds, err := compression.DecompressStream(stream)
            if err != nil {
                return nil, err
            }

            return &compressedProcessor{
                rc: ds,
            }, nil
        }, true
    }
    return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
        return &stdProcessor{
            rc: stream,
        }, nil
    }, true
}

// NewProcessorChain initializes the root StreamProcessor
func NewProcessorChain(mt string, r io.Reader) StreamProcessor {
    return &processorChain{
        mt: mt,
        rc: r,
    }
}

type processorChain struct {
    mt string
    rc io.Reader
}

func (c *processorChain) MediaType() string {
    return c.mt
}

func (c *processorChain) Read(p []byte) (int, error) {
    return c.rc.Read(p)
}

func (c *processorChain) Close() error {
    return nil
}

type stdProcessor struct {
    rc StreamProcessor
}

func (c *stdProcessor) MediaType() string {
    return ocispec.MediaTypeImageLayer
}

func (c *stdProcessor) Read(p []byte) (int, error) {
    return c.rc.Read(p)
}

func (c *stdProcessor) Close() error {
    return nil
}

type compressedProcessor struct {
    rc io.ReadCloser
}

func (c *compressedProcessor) MediaType() string {
    return ocispec.MediaTypeImageLayer
}

func (c *compressedProcessor) Read(p []byte) (int, error) {
    return c.rc.Read(p)
}

func (c *compressedProcessor) Close() error {
    return c.rc.Close()
}

// BinaryHandler returns a Handler that matches the given media types and
// initializes a binary processor running the executable at path with args.
func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string, args []string) Handler {
    set := make(map[string]struct{}, len(mediaTypes))
    for _, m := range mediaTypes {
        set[m] = struct{}{}
    }
    return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) {
        if _, ok := set[mediaType]; ok {
            return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
                payload := payloads[id]
                return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, payload)
            }, true
        }
        return nil, false
    }
}

const mediaTypeEnvVar = "STREAM_PROCESSOR_MEDIATYPE"
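Third-party processors hook in through RegisterProcessor; StaticHandler matches a single media type, and GetProcessor prefers the most recently registered handler. A hedged sketch that registers a pass-through processor for a hypothetical media type (the package name and media type are illustrative):

package customdiff

import (
    "context"
    "io"

    "github.com/containerd/containerd/diff"
    "github.com/gogo/protobuf/types"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// passthrough relabels the stream as an uncompressed layer without
// touching the bytes.
type passthrough struct{ io.Reader }

func (p passthrough) Close() error      { return nil }
func (p passthrough) MediaType() string { return ocispec.MediaTypeImageLayer }

func init() {
    diff.RegisterProcessor(diff.StaticHandler(
        "application/vnd.example.layer", // hypothetical media type
        func(ctx context.Context, stream diff.StreamProcessor, payloads map[string]*types.Any) (diff.StreamProcessor, error) {
            return passthrough{stream}, nil
        },
    ))
}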
146 vendor/github.com/containerd/containerd/diff/stream_unix.go generated vendored Normal file
@@ -0,0 +1,146 @@
// +build !windows

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package diff

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "os"
    "os/exec"
    "sync"

    "github.com/gogo/protobuf/proto"
    "github.com/gogo/protobuf/types"
    "github.com/pkg/errors"
)

// NewBinaryProcessor returns a binary processor for use with processing content streams
func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) {
    cmd := exec.CommandContext(ctx, name, args...)
    cmd.Env = os.Environ()

    var payloadC io.Closer
    if payload != nil {
        data, err := proto.Marshal(payload)
        if err != nil {
            return nil, err
        }
        r, w, err := os.Pipe()
        if err != nil {
            return nil, err
        }
        go func() {
            io.Copy(w, bytes.NewReader(data))
            w.Close()
        }()

        cmd.ExtraFiles = append(cmd.ExtraFiles, r)
        payloadC = r
    }
    cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt))
    var (
        stdin  io.Reader
        closer func() error
        err    error
    )
    if f, ok := stream.(RawProcessor); ok {
        stdin = f.File()
        closer = f.File().Close
    } else {
        stdin = stream
    }
    cmd.Stdin = stdin
    r, w, err := os.Pipe()
    if err != nil {
        return nil, err
    }
    cmd.Stdout = w

    stderr := bytes.NewBuffer(nil)
    cmd.Stderr = stderr

    if err := cmd.Start(); err != nil {
        return nil, err
    }
    p := &binaryProcessor{
        cmd:    cmd,
        r:      r,
        mt:     rmt,
        stderr: stderr,
    }
    go p.wait()

    // close after start and dup
    w.Close()
    if closer != nil {
        closer()
    }
    if payloadC != nil {
        payloadC.Close()
    }
    return p, nil
}

type binaryProcessor struct {
    cmd    *exec.Cmd
    r      *os.File
    mt     string
    stderr *bytes.Buffer

    mu  sync.Mutex
    err error
}

func (c *binaryProcessor) Err() error {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.err
}

func (c *binaryProcessor) wait() {
    if err := c.cmd.Wait(); err != nil {
        if _, ok := err.(*exec.ExitError); ok {
            c.mu.Lock()
            c.err = errors.New(c.stderr.String())
            c.mu.Unlock()
        }
    }
}

func (c *binaryProcessor) File() *os.File {
    return c.r
}

func (c *binaryProcessor) MediaType() string {
    return c.mt
}

func (c *binaryProcessor) Read(p []byte) (int, error) {
    return c.r.Read(p)
}

func (c *binaryProcessor) Close() error {
    err := c.r.Close()
    if kerr := c.cmd.Process.Kill(); err == nil {
        err = kerr
    }
    return err
}
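BinaryHandler from stream.go pairs with NewBinaryProcessor above: the registered binary receives the input media type in STREAM_PROCESSOR_MEDIATYPE, reads the stream on stdin, and writes the converted stream to stdout. A hedged sketch using gunzip as the external binary; the payload id and the choice of command are illustrative:

    diff.RegisterProcessor(diff.BinaryHandler(
        "gzip-decompress",                         // payload id, illustrative
        ocispec.MediaTypeImageLayer,               // media type produced
        []string{ocispec.MediaTypeImageLayerGzip}, // media types accepted
        "/bin/gunzip", []string{"-c"},             // reads stdin, writes stdout
    ))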
165 vendor/github.com/containerd/containerd/diff/stream_windows.go generated vendored Normal file
@@ -0,0 +1,165 @@
|
||||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
winio "github.com/Microsoft/go-winio"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const processorPipe = "STREAM_PROCESSOR_PIPE"
|
||||
|
||||
// NewBinaryProcessor returns a binary processor for use with processing content streams
|
||||
func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) {
|
||||
cmd := exec.CommandContext(ctx, name, args...)
|
||||
cmd.Env = os.Environ()
|
||||
|
||||
if payload != nil {
|
||||
data, err := proto.Marshal(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
up, err := getUiqPath()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
path := fmt.Sprintf("\\\\.\\pipe\\containerd-processor-%s-pipe", up)
|
||||
l, err := winio.ListenPipe(path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go func() {
|
||||
defer l.Close()
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("accept npipe connection")
|
||||
return
|
||||
}
|
||||
io.Copy(conn, bytes.NewReader(data))
|
||||
conn.Close()
|
||||
}()
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", processorPipe, path))
|
||||
}
|
||||
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt))
|
||||
var (
|
||||
stdin io.Reader
|
||||
closer func() error
|
||||
err error
|
||||
)
|
||||
if f, ok := stream.(RawProcessor); ok {
|
||||
stdin = f.File()
|
||||
closer = f.File().Close
|
||||
} else {
|
||||
stdin = stream
|
||||
}
|
||||
cmd.Stdin = stdin
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cmd.Stdout = w
|
||||
stderr := bytes.NewBuffer(nil)
|
||||
cmd.Stderr = stderr
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p := &binaryProcessor{
|
||||
cmd: cmd,
|
||||
r: r,
|
||||
mt: rmt,
|
||||
stderr: stderr,
|
||||
}
|
||||
go p.wait()
|
||||
|
||||
// close after start and dup
|
||||
w.Close()
|
||||
if closer != nil {
|
||||
closer()
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
type binaryProcessor struct {
|
||||
cmd *exec.Cmd
|
||||
r *os.File
|
||||
mt string
|
||||
stderr *bytes.Buffer
|
||||
|
||||
mu sync.Mutex
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *binaryProcessor) Err() error {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
return c.err
|
||||
}
|
||||
|
||||
func (c *binaryProcessor) wait() {
|
||||
if err := c.cmd.Wait(); err != nil {
|
||||
if _, ok := err.(*exec.ExitError); ok {
|
||||
c.mu.Lock()
|
||||
c.err = errors.New(c.stderr.String())
|
||||
c.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *binaryProcessor) File() *os.File {
|
||||
return c.r
|
||||
}
|
||||
|
||||
func (c *binaryProcessor) MediaType() string {
|
||||
return c.mt
|
||||
}
|
||||
|
||||
func (c *binaryProcessor) Read(p []byte) (int, error) {
|
||||
return c.r.Read(p)
|
||||
}
|
||||
|
||||
func (c *binaryProcessor) Close() error {
|
||||
err := c.r.Close()
|
||||
if kerr := c.cmd.Process.Kill(); err == nil {
|
||||
err = kerr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func getUiqPath() (string, error) {
|
||||
dir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
os.Remove(dir)
|
||||
return filepath.Base(dir), nil
|
||||
}
|
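For context, the binary launched by NewBinaryProcessor above can recover its payload by dialing the named pipe advertised through the STREAM_PROCESSOR_PIPE environment variable. A minimal sketch of that child-binary side, assuming a Windows build; copying the raw protobuf bytes to stderr is purely illustrative:

// +build windows

package main

import (
	"io"
	"os"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// The parent only sets this variable when a payload was configured.
	path := os.Getenv("STREAM_PROCESSOR_PIPE")
	if path == "" {
		return // no payload to read
	}
	conn, err := winio.DialPipe(path, nil)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// The payload arrives as a protobuf-encoded types.Any; a real processor
	// would proto.Unmarshal it rather than dump it.
	io.Copy(os.Stderr, conn)
}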
184
vendor/github.com/containerd/containerd/diff/walking/differ.go
generated
vendored
Normal file
@@ -0,0 +1,184 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package walking

import (
	"context"
	"encoding/base64"
	"fmt"
	"io"
	"math/rand"
	"time"

	"github.com/containerd/containerd/archive"
	"github.com/containerd/containerd/archive/compression"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/mount"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

type walkingDiff struct {
	store content.Store
}

var emptyDesc = ocispec.Descriptor{}
var uncompressed = "containerd.io/uncompressed"

// NewWalkingDiff is a generic implementation of diff.Comparer. The diff is
// calculated by mounting both the upper and lower mount sets and walking the
// mounted directories concurrently. Changes are calculated by comparing files
// against each other or by comparing file existence between directories.
// NewWalkingDiff uses no special characteristics of the mount sets and is
// expected to work with any filesystem.
func NewWalkingDiff(store content.Store) diff.Comparer {
	return &walkingDiff{
		store: store,
	}
}

// Compare creates a diff between the given mounts and uploads the result
// to the content store.
func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) {
	var config diff.Config
	for _, opt := range opts {
		if err := opt(&config); err != nil {
			return emptyDesc, err
		}
	}

	if config.MediaType == "" {
		config.MediaType = ocispec.MediaTypeImageLayerGzip
	}

	var isCompressed bool
	switch config.MediaType {
	case ocispec.MediaTypeImageLayer:
	case ocispec.MediaTypeImageLayerGzip:
		isCompressed = true
	default:
		return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", config.MediaType)
	}

	var ocidesc ocispec.Descriptor
	if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error {
		return mount.WithTempMount(ctx, upper, func(upperRoot string) error {
			var newReference bool
			if config.Reference == "" {
				newReference = true
				config.Reference = uniqueRef()
			}

			cw, err := s.store.Writer(ctx,
				content.WithRef(config.Reference),
				content.WithDescriptor(ocispec.Descriptor{
					MediaType: config.MediaType, // most contentstore implementations just ignore this
				}))
			if err != nil {
				return errors.Wrap(err, "failed to open writer")
			}
			defer func() {
				if err != nil {
					cw.Close()
					if newReference {
						if err := s.store.Abort(ctx, config.Reference); err != nil {
							log.G(ctx).WithField("ref", config.Reference).Warnf("failed to delete diff upload")
						}
					}
				}
			}()
			if !newReference {
				if err = cw.Truncate(0); err != nil {
					return err
				}
			}

			if isCompressed {
				dgstr := digest.SHA256.Digester()
				var compressed io.WriteCloser
				compressed, err = compression.CompressStream(cw, compression.Gzip)
				if err != nil {
					return errors.Wrap(err, "failed to get compressed stream")
				}
				err = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot)
				compressed.Close()
				if err != nil {
					return errors.Wrap(err, "failed to write compressed diff")
				}

				if config.Labels == nil {
					config.Labels = map[string]string{}
				}
				config.Labels[uncompressed] = dgstr.Digest().String()
			} else {
				if err = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); err != nil {
					return errors.Wrap(err, "failed to write diff")
				}
			}

			var commitopts []content.Opt
			if config.Labels != nil {
				commitopts = append(commitopts, content.WithLabels(config.Labels))
			}

			dgst := cw.Digest()
			if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil {
				if !errdefs.IsAlreadyExists(err) {
					return errors.Wrap(err, "failed to commit")
				}
			}

			info, err := s.store.Info(ctx, dgst)
			if err != nil {
				return errors.Wrap(err, "failed to get info from content store")
			}
			if info.Labels == nil {
				info.Labels = make(map[string]string)
			}
			// Set uncompressed label if digest already existed without label
			if _, ok := info.Labels[uncompressed]; !ok {
				info.Labels[uncompressed] = config.Labels[uncompressed]
				if _, err := s.store.Update(ctx, info, "labels."+uncompressed); err != nil {
					return errors.Wrap(err, "error setting uncompressed label")
				}
			}

			ocidesc = ocispec.Descriptor{
				MediaType: config.MediaType,
				Size:      info.Size,
				Digest:    info.Digest,
			}
			return nil
		})
	}); err != nil {
		return emptyDesc, err
	}

	return ocidesc, nil
}

func uniqueRef() string {
	t := time.Now()
	var b [3]byte
	// Ignore read failures, just decreases uniqueness
	rand.Read(b[:])
	return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:]))
}
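A minimal usage sketch for the walking differ above, assuming a local content store at /tmp/content and bind-mountable lower/upper directories (all paths here are hypothetical, and bind mounts typically require root on Linux):

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content/local"
	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/diff/walking"
	"github.com/containerd/containerd/mount"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	cs, err := local.NewStore("/tmp/content")
	if err != nil {
		panic(err)
	}
	comparer := walking.NewWalkingDiff(cs)

	lower := []mount.Mount{{Type: "bind", Source: "/tmp/lower", Options: []string{"rbind", "ro"}}}
	upper := []mount.Mount{{Type: "bind", Source: "/tmp/upper", Options: []string{"rbind", "ro"}}}

	// The resulting layer tarball is written to the content store and
	// described by the returned OCI descriptor.
	desc, err := comparer.Compare(context.Background(), lower, upper,
		diff.WithMediaType(ocispec.MediaTypeImageLayerGzip))
	if err != nil {
		panic(err)
	}
	fmt.Println(desc.Digest, desc.Size)
}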
81
vendor/github.com/containerd/containerd/events/events.go
generated
vendored
Normal file
@@ -0,0 +1,81 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package events

import (
	"context"
	"time"

	"github.com/containerd/typeurl"
	"github.com/gogo/protobuf/types"
)

// Envelope provides the packaging for an event.
type Envelope struct {
	Timestamp time.Time
	Namespace string
	Topic     string
	Event     *types.Any
}

// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (e *Envelope) Field(fieldpath []string) (string, bool) {
	if len(fieldpath) == 0 {
		return "", false
	}

	switch fieldpath[0] {
	// unhandled: timestamp
	case "namespace":
		return e.Namespace, len(e.Namespace) > 0
	case "topic":
		return e.Topic, len(e.Topic) > 0
	case "event":
		decoded, err := typeurl.UnmarshalAny(e.Event)
		if err != nil {
			return "", false
		}

		adaptor, ok := decoded.(interface {
			Field([]string) (string, bool)
		})
		if !ok {
			return "", false
		}
		return adaptor.Field(fieldpath[1:])
	}
	return "", false
}

// Event is a generic interface for any type of event
type Event interface{}

// Publisher posts the event.
type Publisher interface {
	Publish(ctx context.Context, topic string, event Event) error
}

// Forwarder forwards an event to the underlying event bus
type Forwarder interface {
	Forward(ctx context.Context, envelope *Envelope) error
}

// Subscriber allows callers to subscribe to events
type Subscriber interface {
	Subscribe(ctx context.Context, filters ...string) (ch <-chan *Envelope, errs <-chan error)
}
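Field is what lets envelopes participate in the filters package vendored further down; a minimal sketch of the fieldpath lookup:

package main

import (
	"fmt"

	"github.com/containerd/containerd/events"
)

func main() {
	env := events.Envelope{Namespace: "default", Topic: "/tasks/start"}

	v, ok := env.Field([]string{"topic"})
	fmt.Println(v, ok) // "/tasks/start" true

	_, ok = env.Field([]string{"timestamp"})
	fmt.Println(ok) // false: timestamp is explicitly unhandled
}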
251
vendor/github.com/containerd/containerd/events/exchange/exchange.go
generated
vendored
Normal file
@@ -0,0 +1,251 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package exchange

import (
	"context"
	"strings"
	"time"

	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/filters"
	"github.com/containerd/containerd/identifiers"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/typeurl"
	goevents "github.com/docker/go-events"
	"github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// Exchange broadcasts events
type Exchange struct {
	broadcaster *goevents.Broadcaster
}

// NewExchange returns a new event Exchange
func NewExchange() *Exchange {
	return &Exchange{
		broadcaster: goevents.NewBroadcaster(),
	}
}

var _ events.Publisher = &Exchange{}
var _ events.Forwarder = &Exchange{}
var _ events.Subscriber = &Exchange{}

// Forward accepts an envelope to be directly distributed on the exchange.
//
// This is useful when an event is forwarded on behalf of another namespace or
// when the event is propagated on behalf of another publisher.
func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) {
	if err := validateEnvelope(envelope); err != nil {
		return err
	}

	defer func() {
		logger := log.G(ctx).WithFields(logrus.Fields{
			"topic": envelope.Topic,
			"ns":    envelope.Namespace,
			"type":  envelope.Event.TypeUrl,
		})

		if err != nil {
			logger.WithError(err).Error("error forwarding event")
		} else {
			logger.Debug("event forwarded")
		}
	}()

	return e.broadcaster.Write(envelope)
}

// Publish packages and sends an event. The caller will be considered the
// initial publisher of the event. This means the timestamp will be calculated
// at this point and this method may read from the calling context.
func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event) (err error) {
	var (
		namespace string
		encoded   *types.Any
		envelope  events.Envelope
	)

	namespace, err = namespaces.NamespaceRequired(ctx)
	if err != nil {
		return errors.Wrapf(err, "failed publishing event")
	}
	if err := validateTopic(topic); err != nil {
		return errors.Wrapf(err, "envelope topic %q", topic)
	}

	encoded, err = typeurl.MarshalAny(event)
	if err != nil {
		return err
	}

	envelope.Timestamp = time.Now().UTC()
	envelope.Namespace = namespace
	envelope.Topic = topic
	envelope.Event = encoded

	defer func() {
		logger := log.G(ctx).WithFields(logrus.Fields{
			"topic": envelope.Topic,
			"ns":    envelope.Namespace,
			"type":  envelope.Event.TypeUrl,
		})

		if err != nil {
			logger.WithError(err).Error("error publishing event")
		} else {
			logger.Debug("event published")
		}
	}()

	return e.broadcaster.Write(&envelope)
}

// Subscribe to events on the exchange. Events are sent through the returned
// channel ch. If an error is encountered, it will be sent on channel errs and
// errs will be closed. To end the subscription, cancel the provided context.
//
// Zero or more filters may be provided as strings. Only events that match
// *any* of the provided filters will be sent on the channel. The filters use
// the standard containerd filters package syntax.
func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) {
	var (
		evch                  = make(chan *events.Envelope)
		errq                  = make(chan error, 1)
		channel               = goevents.NewChannel(0)
		queue                 = goevents.NewQueue(channel)
		dst     goevents.Sink = queue
	)

	closeAll := func() {
		channel.Close()
		queue.Close()
		e.broadcaster.Remove(dst)
		close(errq)
	}

	ch = evch
	errs = errq

	if len(fs) > 0 {
		filter, err := filters.ParseAll(fs...)
		if err != nil {
			errq <- errors.Wrapf(err, "failed parsing subscription filters")
			closeAll()
			return
		}

		dst = goevents.NewFilter(queue, goevents.MatcherFunc(func(gev goevents.Event) bool {
			return filter.Match(adapt(gev))
		}))
	}

	e.broadcaster.Add(dst)

	go func() {
		defer closeAll()

		var err error
	loop:
		for {
			select {
			case ev := <-channel.C:
				env, ok := ev.(*events.Envelope)
				if !ok {
					// TODO(stevvooe): For the most part, we are well protected
					// from this condition. Both Forward and Publish protect
					// from this.
					err = errors.Errorf("invalid envelope encountered %#v; please file a bug", ev)
					break
				}

				select {
				case evch <- env:
				case <-ctx.Done():
					break loop
				}
			case <-ctx.Done():
				break loop
			}
		}

		if err == nil {
			if cerr := ctx.Err(); cerr != context.Canceled {
				err = cerr
			}
		}

		errq <- err
	}()

	return
}

func validateTopic(topic string) error {
	if topic == "" {
		return errors.Wrap(errdefs.ErrInvalidArgument, "must not be empty")
	}

	if topic[0] != '/' {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'")
	}

	if len(topic) == 1 {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component")
	}

	components := strings.Split(topic[1:], "/")
	for _, component := range components {
		if err := identifiers.Validate(component); err != nil {
			return errors.Wrapf(err, "failed validation on component %q", component)
		}
	}

	return nil
}

func validateEnvelope(envelope *events.Envelope) error {
	if err := identifiers.Validate(envelope.Namespace); err != nil {
		return errors.Wrapf(err, "event envelope has invalid namespace")
	}

	if err := validateTopic(envelope.Topic); err != nil {
		return errors.Wrapf(err, "envelope topic %q", envelope.Topic)
	}

	if envelope.Timestamp.IsZero() {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "timestamp must be set on forwarded event")
	}

	return nil
}

func adapt(ev interface{}) filters.Adaptor {
	if adaptor, ok := ev.(filters.Adaptor); ok {
		return adaptor
	}

	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		return "", false
	})
}
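A minimal end-to-end sketch of the exchange above. It uses Forward rather than Publish so the example needs neither a namespace on the context nor a typeurl-registered event type; the "demo" namespace, topic, and TypeUrl are illustrative values only:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/events/exchange"
	"github.com/gogo/protobuf/types"
)

func main() {
	ex := exchange.NewExchange()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Subscribe before forwarding; the filter string uses the syntax from
	// the filters package vendored below.
	ch, errs := ex.Subscribe(ctx, `topic=="/demo/started"`)

	go func() {
		// Forward validates the namespace, topic, and timestamp before
		// handing the envelope to the broadcaster.
		_ = ex.Forward(ctx, &events.Envelope{
			Timestamp: time.Now().UTC(),
			Namespace: "demo",
			Topic:     "/demo/started",
			Event:     &types.Any{TypeUrl: "demo/event", Value: []byte("{}")},
		})
	}()

	select {
	case env := <-ch:
		fmt.Println(env.Namespace, env.Topic)
	case err := <-errs:
		fmt.Println("subscribe error:", err)
	}
}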
33
vendor/github.com/containerd/containerd/filters/adaptor.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package filters

// Adaptor specifies the mapping of fieldpaths to a type. For the given field
// path, the value and whether it is present should be returned. The mapping of
// the fieldpath to a field is deferred to the adaptor implementation, but
// should generally follow protobuf field path/mask semantics.
type Adaptor interface {
	Field(fieldpath []string) (value string, present bool)
}

// AdapterFunc allows implementation specific matching of fieldpaths
type AdapterFunc func(fieldpath []string) (string, bool)

// Field returns the field name and true if it exists
func (fn AdapterFunc) Field(fieldpath []string) (string, bool) {
	return fn(fieldpath)
}
179
vendor/github.com/containerd/containerd/filters/filter.go
generated
vendored
Normal file
@@ -0,0 +1,179 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package filters defines a syntax and parser that can be used for the
// filtration of items across the containerd API. The core is built on the
// concept of protobuf field paths, with quoting. Several operators allow the
// user to flexibly select items based on field presence, equality, inequality
// and regular expressions. Flexible adaptors support working with any type.
//
// The syntax is fairly familiar, if you've used container ecosystem
// projects. At the core, we base it on the concept of protobuf field
// paths, augmenting with the ability to quote portions of the field path
// to match arbitrary labels. These "selectors" come in the following
// syntax:
//
// ```
// <fieldpath>[<operator><value>]
// ```
//
// A basic example is as follows:
//
// ```
// name==foo
// ```
//
// This would match all objects that have a field `name` with the value
// `foo`. If we only want to test if the field is present, we can omit the
// operator. This is most useful for matching labels in containerd. The
// following will match objects that have the field "labels" and have the
// label "foo" defined:
//
// ```
// labels.foo
// ```
//
// We also allow for quoting of parts of the field path to allow matching
// of arbitrary items:
//
// ```
// labels."very complex label"==something
// ```
//
// We also define `!=` and `~=` as operators. The `!=` will match all
// objects that don't match the value for a field and `~=` will compile the
// target value as a regular expression and match the field value against that.
//
// Selectors can be combined using a comma, such that the resulting
// selector will require all selectors are matched for the object to match.
// The following example will match objects that are named `foo` and have
// the label `bar`:
//
// ```
// name==foo,labels.bar
// ```
//
package filters

import (
	"regexp"

	"github.com/containerd/containerd/log"
)

// Filter matches specific resources based on the provided filter
type Filter interface {
	Match(adaptor Adaptor) bool
}

// FilterFunc is a function that handles matching with an adaptor
type FilterFunc func(Adaptor) bool

// Match matches the FilterFunc returning true if the object matches the filter
func (fn FilterFunc) Match(adaptor Adaptor) bool {
	return fn(adaptor)
}

// Always is a filter that always returns true for any type of object
var Always FilterFunc = func(adaptor Adaptor) bool {
	return true
}

// Any allows multiple filters to be matched against the object
type Any []Filter

// Match returns true if any of the provided filters are true
func (m Any) Match(adaptor Adaptor) bool {
	for _, m := range m {
		if m.Match(adaptor) {
			return true
		}
	}

	return false
}

// All allows multiple filters to be matched against the object
type All []Filter

// Match only returns true if all filters match the object
func (m All) Match(adaptor Adaptor) bool {
	for _, m := range m {
		if !m.Match(adaptor) {
			return false
		}
	}

	return true
}

type operator int

const (
	operatorPresent = iota
	operatorEqual
	operatorNotEqual
	operatorMatches
)

func (op operator) String() string {
	switch op {
	case operatorPresent:
		return "?"
	case operatorEqual:
		return "=="
	case operatorNotEqual:
		return "!="
	case operatorMatches:
		return "~="
	}

	return "unknown"
}

type selector struct {
	fieldpath []string
	operator  operator
	value     string
	re        *regexp.Regexp
}

func (m selector) Match(adaptor Adaptor) bool {
	value, present := adaptor.Field(m.fieldpath)

	switch m.operator {
	case operatorPresent:
		return present
	case operatorEqual:
		return present && value == m.value
	case operatorNotEqual:
		return value != m.value
	case operatorMatches:
		if m.re == nil {
			r, err := regexp.Compile(m.value)
			if err != nil {
				log.L.Errorf("error compiling regexp %q", m.value)
				return false
			}

			m.re = r
		}

		return m.re.MatchString(value)
	default:
		return false
	}
}
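Besides parsing filter strings, the combinators above can be composed directly: All is conjunction and Any is disjunction. A minimal sketch:

package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// A filter that only checks that a "name" field is present.
	hasName := filters.FilterFunc(func(a filters.Adaptor) bool {
		_, present := a.Field([]string{"name"})
		return present
	})

	// Adapt an arbitrary object by answering fieldpath lookups.
	obj := filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 1 && fieldpath[0] == "name" {
			return "foo", true
		}
		return "", false
	})

	fmt.Println(filters.All{hasName, filters.Always}.Match(obj)) // true
	fmt.Println(filters.Any{hasName}.Match(obj))                 // true
}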
292
vendor/github.com/containerd/containerd/filters/parser.go
generated
vendored
Normal file
@@ -0,0 +1,292 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package filters

import (
	"fmt"
	"io"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

/*
Parse the strings into a filter that may be used with an adaptor.

The filter is made up of zero or more selectors.

The format is a comma separated list of expressions, in the form of
`<fieldpath><op><value>`, known as selectors. All selectors must match the
target object for the filter to be true.

We define the operators "==" for equality, "!=" for not equal and "~=" for a
regular expression. If the operator and value are not present, the matcher will
test for the presence of a value, as defined by the target object.

The formal grammar is as follows:

	selectors := selector ("," selector)*
	selector  := fieldpath (operator value)
	fieldpath := field ('.' field)*
	field     := quoted | [A-Za-z] [A-Za-z0-9_]+
	operator  := "==" | "!=" | "~="
	value     := quoted | [^\s,]+
	quoted    := <go string syntax>

*/
func Parse(s string) (Filter, error) {
	// special case empty to match all
	if s == "" {
		return Always, nil
	}

	p := parser{input: s}
	return p.parse()
}

// ParseAll parses each filter in ss and returns a filter that will return true
// if any filter matches the expression.
//
// If no filters are provided, the filter will match anything.
func ParseAll(ss ...string) (Filter, error) {
	if len(ss) == 0 {
		return Always, nil
	}

	var fs []Filter
	for _, s := range ss {
		f, err := Parse(s)
		if err != nil {
			return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
		}

		fs = append(fs, f)
	}

	return Any(fs), nil
}

type parser struct {
	input   string
	scanner scanner
}

func (p *parser) parse() (Filter, error) {
	p.scanner.init(p.input)

	ss, err := p.selectors()
	if err != nil {
		return nil, errors.Wrap(err, "filters")
	}

	return ss, nil
}

func (p *parser) selectors() (Filter, error) {
	s, err := p.selector()
	if err != nil {
		return nil, err
	}

	ss := All{s}

loop:
	for {
		tok := p.scanner.peek()
		switch tok {
		case ',':
			pos, tok, _ := p.scanner.scan()
			if tok != tokenSeparator {
				return nil, p.mkerr(pos, "expected a separator")
			}

			s, err := p.selector()
			if err != nil {
				return nil, err
			}

			ss = append(ss, s)
		case tokenEOF:
			break loop
		default:
			return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
		}
	}

	return ss, nil
}

func (p *parser) selector() (selector, error) {
	fieldpath, err := p.fieldpath()
	if err != nil {
		return selector{}, err
	}

	switch p.scanner.peek() {
	case ',', tokenSeparator, tokenEOF:
		return selector{
			fieldpath: fieldpath,
			operator:  operatorPresent,
		}, nil
	}

	op, err := p.operator()
	if err != nil {
		return selector{}, err
	}

	var allowAltQuotes bool
	if op == operatorMatches {
		allowAltQuotes = true
	}

	value, err := p.value(allowAltQuotes)
	if err != nil {
		if err == io.EOF {
			return selector{}, io.ErrUnexpectedEOF
		}
		return selector{}, err
	}

	return selector{
		fieldpath: fieldpath,
		value:     value,
		operator:  op,
	}, nil
}

func (p *parser) fieldpath() ([]string, error) {
	f, err := p.field()
	if err != nil {
		return nil, err
	}

	fs := []string{f}
loop:
	for {
		tok := p.scanner.peek() // lookahead to consume field separator

		switch tok {
		case '.':
			pos, tok, _ := p.scanner.scan() // consume separator
			if tok != tokenSeparator {
				return nil, p.mkerr(pos, "expected a field separator (`.`)")
			}

			f, err := p.field()
			if err != nil {
				return nil, err
			}

			fs = append(fs, f)
		default:
			// let the layer above handle the other bad cases.
			break loop
		}
	}

	return fs, nil
}

func (p *parser) field() (string, error) {
	pos, tok, s := p.scanner.scan()
	switch tok {
	case tokenField:
		return s, nil
	case tokenQuoted:
		return p.unquote(pos, s, false)
	case tokenIllegal:
		return "", p.mkerr(pos, p.scanner.err)
	}

	return "", p.mkerr(pos, "expected field or quoted")
}

func (p *parser) operator() (operator, error) {
	pos, tok, s := p.scanner.scan()
	switch tok {
	case tokenOperator:
		switch s {
		case "==":
			return operatorEqual, nil
		case "!=":
			return operatorNotEqual, nil
		case "~=":
			return operatorMatches, nil
		default:
			return 0, p.mkerr(pos, "unsupported operator %q", s)
		}
	case tokenIllegal:
		return 0, p.mkerr(pos, p.scanner.err)
	}

	return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
}

func (p *parser) value(allowAltQuotes bool) (string, error) {
	pos, tok, s := p.scanner.scan()

	switch tok {
	case tokenValue, tokenField:
		return s, nil
	case tokenQuoted:
		return p.unquote(pos, s, allowAltQuotes)
	case tokenIllegal:
		return "", p.mkerr(pos, p.scanner.err)
	}

	return "", p.mkerr(pos, "expected value or quoted")
}

func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) {
	if !allowAlts && s[0] != '\'' && s[0] != '"' {
		return "", p.mkerr(pos, "invalid quote encountered")
	}

	uq, err := unquote(s)
	if err != nil {
		return "", p.mkerr(pos, "unquoting failed: %v", err)
	}

	return uq, nil
}

type parseError struct {
	input string
	pos   int
	msg   string
}

func (pe parseError) Error() string {
	if pe.pos < len(pe.input) {
		before := pe.input[:pe.pos]
		location := pe.input[pe.pos : pe.pos+1] // need to handle end
		after := pe.input[pe.pos+1:]

		return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg)
	}

	return fmt.Sprintf("[%s]: %v", pe.input, pe.msg)
}

func (p *parser) mkerr(pos int, format string, args ...interface{}) error {
	return errors.Wrap(parseError{
		input: p.input,
		pos:   pos,
		msg:   fmt.Sprintf(format, args...),
	}, "parse error")
}
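A minimal sketch of the grammar above in action: one equality selector and one quoted presence selector, comma-joined (conjunction). The flat-map adaptor is a hypothetical stand-in for a real resource type:

package main

import (
	"fmt"
	"strings"

	"github.com/containerd/containerd/filters"
)

func main() {
	filter, err := filters.Parse(`name==foo,labels."com.example/tier"`)
	if err != nil {
		panic(err)
	}

	// Adapt a flat map by joining the fieldpath with dots.
	obj := map[string]string{
		"name":                    "foo",
		"labels.com.example/tier": "web",
	}
	adaptor := filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		v, ok := obj[strings.Join(fieldpath, ".")]
		return v, ok
	})

	fmt.Println(filter.Match(adaptor)) // true
}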
253
vendor/github.com/containerd/containerd/filters/quote.go
generated
vendored
Normal file
@@ -0,0 +1,253 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package filters

import (
	"unicode/utf8"

	"github.com/pkg/errors"
)

// NOTE(stevvooe): Most of the code in this file is copied from the stdlib
// strconv package and modified to be able to handle quoting with `/` and `|`
// as delimiters. The copyright is held by the Go authors.

var errQuoteSyntax = errors.New("quote syntax error")

// UnquoteChar decodes the first character or byte in the escaped string
// or character literal represented by the string s.
// It returns four values:
//
// 1) value, the decoded Unicode code point or byte value;
// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
// 3) tail, the remainder of the string after the character; and
// 4) an error that will be nil if the character is syntactically valid.
//
// The second argument, quote, specifies the type of literal being parsed
// and therefore which escaped quote character is permitted.
// If set to a single quote, it permits the sequence \' and disallows unescaped '.
// If set to a double quote, it permits \" and disallows unescaped ".
// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
//
// This is from Go strconv package, modified to support `|` and `/` as double
// quotes for use with regular expressions.
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
	// easy cases
	switch c := s[0]; {
	case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'):
		err = errQuoteSyntax
		return
	case c >= utf8.RuneSelf:
		r, size := utf8.DecodeRuneInString(s)
		return r, true, s[size:], nil
	case c != '\\':
		return rune(s[0]), false, s[1:], nil
	}

	// hard case: c is backslash
	if len(s) <= 1 {
		err = errQuoteSyntax
		return
	}
	c := s[1]
	s = s[2:]

	switch c {
	case 'a':
		value = '\a'
	case 'b':
		value = '\b'
	case 'f':
		value = '\f'
	case 'n':
		value = '\n'
	case 'r':
		value = '\r'
	case 't':
		value = '\t'
	case 'v':
		value = '\v'
	case 'x', 'u', 'U':
		n := 0
		switch c {
		case 'x':
			n = 2
		case 'u':
			n = 4
		case 'U':
			n = 8
		}
		var v rune
		if len(s) < n {
			err = errQuoteSyntax
			return
		}
		for j := 0; j < n; j++ {
			x, ok := unhex(s[j])
			if !ok {
				err = errQuoteSyntax
				return
			}
			v = v<<4 | x
		}
		s = s[n:]
		if c == 'x' {
			// single-byte string, possibly not UTF-8
			value = v
			break
		}
		if v > utf8.MaxRune {
			err = errQuoteSyntax
			return
		}
		value = v
		multibyte = true
	case '0', '1', '2', '3', '4', '5', '6', '7':
		v := rune(c) - '0'
		if len(s) < 2 {
			err = errQuoteSyntax
			return
		}
		for j := 0; j < 2; j++ { // one digit already; two more
			x := rune(s[j]) - '0'
			if x < 0 || x > 7 {
				err = errQuoteSyntax
				return
			}
			v = (v << 3) | x
		}
		s = s[2:]
		if v > 255 {
			err = errQuoteSyntax
			return
		}
		value = v
	case '\\':
		value = '\\'
	case '\'', '"', '|', '/':
		if c != quote {
			err = errQuoteSyntax
			return
		}
		value = rune(c)
	default:
		err = errQuoteSyntax
		return
	}
	tail = s
	return
}

// unquote interprets s as a single-quoted, double-quoted,
// or backquoted Go string literal, returning the string value
// that s quotes. (If s is single-quoted, it would be a Go
// character literal; Unquote returns the corresponding
// one-character string.)
//
// This is modified from the standard library to support `|` and `/` as quote
// characters for use with regular expressions.
func unquote(s string) (string, error) {
	n := len(s)
	if n < 2 {
		return "", errQuoteSyntax
	}
	quote := s[0]
	if quote != s[n-1] {
		return "", errQuoteSyntax
	}
	s = s[1 : n-1]

	if quote == '`' {
		if contains(s, '`') {
			return "", errQuoteSyntax
		}
		if contains(s, '\r') {
			// -1 because we know there is at least one \r to remove.
			buf := make([]byte, 0, len(s)-1)
			for i := 0; i < len(s); i++ {
				if s[i] != '\r' {
					buf = append(buf, s[i])
				}
			}
			return string(buf), nil
		}
		return s, nil
	}
	if quote != '"' && quote != '\'' && quote != '|' && quote != '/' {
		return "", errQuoteSyntax
	}
	if contains(s, '\n') {
		return "", errQuoteSyntax
	}

	// Is it trivial? Avoid allocation.
	if !contains(s, '\\') && !contains(s, quote) {
		switch quote {
		case '"', '/', '|': // pipe and slash are treated like double quote
			return s, nil
		case '\'':
			r, size := utf8.DecodeRuneInString(s)
			if size == len(s) && (r != utf8.RuneError || size != 1) {
				return s, nil
			}
		}
	}

	var runeTmp [utf8.UTFMax]byte
	buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
	for len(s) > 0 {
		c, multibyte, ss, err := unquoteChar(s, quote)
		if err != nil {
			return "", err
		}
		s = ss
		if c < utf8.RuneSelf || !multibyte {
			buf = append(buf, byte(c))
		} else {
			n := utf8.EncodeRune(runeTmp[:], c)
			buf = append(buf, runeTmp[:n]...)
		}
		if quote == '\'' && len(s) != 0 {
			// single-quoted must be single character
			return "", errQuoteSyntax
		}
	}
	return string(buf), nil
}

// contains reports whether the string contains the byte c.
func contains(s string, c byte) bool {
	for i := 0; i < len(s); i++ {
		if s[i] == c {
			return true
		}
	}
	return false
}

func unhex(b byte) (v rune, ok bool) {
	c := rune(b)
	switch {
	case '0' <= c && c <= '9':
		return c - '0', true
	case 'a' <= c && c <= 'f':
		return c - 'a' + 10, true
	case 'A' <= c && c <= 'F':
		return c - 'A' + 10, true
	}
	return
}
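The `/` and `|` delimiters above exist so regular expressions can be quoted without escaping every special character; the parser only accepts them for the value of the `~=` operator (see allowAltQuotes in parser.go). A minimal sketch:

package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// Slash-quoting keeps the regexp readable; a double-quoted form would
	// work too but needs escaping.
	filter, err := filters.Parse(`name~=/^web-[0-9]+$/`)
	if err != nil {
		panic(err)
	}

	// A trivial adaptor that only knows the "name" field.
	obj := filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 1 && fieldpath[0] == "name" {
			return "web-42", true
		}
		return "", false
	})
	fmt.Println(filter.Match(obj)) // true
}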
297
vendor/github.com/containerd/containerd/filters/scanner.go
generated
vendored
Normal file
@@ -0,0 +1,297 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package filters

import (
	"unicode"
	"unicode/utf8"
)

const (
	tokenEOF = -(iota + 1)
	tokenQuoted
	tokenValue
	tokenField
	tokenSeparator
	tokenOperator
	tokenIllegal
)

type token rune

func (t token) String() string {
	switch t {
	case tokenEOF:
		return "EOF"
	case tokenQuoted:
		return "Quoted"
	case tokenValue:
		return "Value"
	case tokenField:
		return "Field"
	case tokenSeparator:
		return "Separator"
	case tokenOperator:
		return "Operator"
	case tokenIllegal:
		return "Illegal"
	}

	return string(t)
}

func (t token) GoString() string {
	return "token" + t.String()
}

type scanner struct {
	input string
	pos   int
	ppos  int // bounds the current rune in the string
	value bool
	err   string
}

func (s *scanner) init(input string) {
	s.input = input
	s.pos = 0
	s.ppos = 0
}

func (s *scanner) next() rune {
	if s.pos >= len(s.input) {
		return tokenEOF
	}
	s.pos = s.ppos

	r, w := utf8.DecodeRuneInString(s.input[s.ppos:])
	s.ppos += w
	if r == utf8.RuneError {
		if w > 0 {
			s.error("rune error")
			return tokenIllegal
		}
		return tokenEOF
	}

	if r == 0 {
		s.error("unexpected null")
		return tokenIllegal
	}

	return r
}

func (s *scanner) peek() rune {
	pos := s.pos
	ppos := s.ppos
	ch := s.next()
	s.pos = pos
	s.ppos = ppos
	return ch
}

func (s *scanner) scan() (nextp int, tk token, text string) {
	var (
		ch  = s.next()
		pos = s.pos
	)

chomp:
	switch {
	case ch == tokenEOF:
	case ch == tokenIllegal:
	case isQuoteRune(ch):
		if !s.scanQuoted(ch) {
			return pos, tokenIllegal, s.input[pos:s.ppos]
		}
		return pos, tokenQuoted, s.input[pos:s.ppos]
	case isSeparatorRune(ch):
		s.value = false
		return pos, tokenSeparator, s.input[pos:s.ppos]
	case isOperatorRune(ch):
		s.scanOperator()
		s.value = true
		return pos, tokenOperator, s.input[pos:s.ppos]
	case unicode.IsSpace(ch):
		// chomp
		ch = s.next()
		pos = s.pos
		goto chomp
	case s.value:
		s.scanValue()
		s.value = false
		return pos, tokenValue, s.input[pos:s.ppos]
	case isFieldRune(ch):
		s.scanField()
		return pos, tokenField, s.input[pos:s.ppos]
	}

	return s.pos, token(ch), ""
}

func (s *scanner) scanField() {
	for {
		ch := s.peek()
		if !isFieldRune(ch) {
			break
		}
		s.next()
	}
}

func (s *scanner) scanOperator() {
	for {
		ch := s.peek()
		switch ch {
		case '=', '!', '~':
			s.next()
		default:
			return
		}
	}
}

func (s *scanner) scanValue() {
	for {
		ch := s.peek()
		if !isValueRune(ch) {
			break
		}
		s.next()
	}
}

func (s *scanner) scanQuoted(quote rune) bool {
	var illegal bool
	ch := s.next() // read character after quote
	for ch != quote {
		if ch == '\n' || ch < 0 {
			s.error("quoted literal not terminated")
			return false
		}
		if ch == '\\' {
			var legal bool
			ch, legal = s.scanEscape(quote)
			if !legal {
				illegal = true
			}
		} else {
			ch = s.next()
		}
	}
	return !illegal
}

func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) {
	ch = s.next() // read character after '/'
	switch ch {
	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
		// nothing to do
		ch = s.next()
		legal = true
	case '0', '1', '2', '3', '4', '5', '6', '7':
		ch, legal = s.scanDigits(ch, 8, 3)
	case 'x':
		ch, legal = s.scanDigits(s.next(), 16, 2)
	case 'u':
		ch, legal = s.scanDigits(s.next(), 16, 4)
	case 'U':
		ch, legal = s.scanDigits(s.next(), 16, 8)
	default:
		s.error("illegal escape sequence")
	}
	return
}

func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) {
	for n > 0 && digitVal(ch) < base {
		ch = s.next()
		n--
	}
	if n > 0 {
		s.error("illegal numeric escape sequence")
		return ch, false
	}
	return ch, true
}

func (s *scanner) error(msg string) {
	if s.err == "" {
		s.err = msg
	}
}

func digitVal(ch rune) int {
	switch {
	case '0' <= ch && ch <= '9':
		return int(ch - '0')
	case 'a' <= ch && ch <= 'f':
		return int(ch - 'a' + 10)
	case 'A' <= ch && ch <= 'F':
		return int(ch - 'A' + 10)
	}
	return 16 // larger than any legal digit val
}

func isFieldRune(r rune) bool {
	return (r == '_' || isAlphaRune(r) || isDigitRune(r))
}

func isAlphaRune(r rune) bool {
	return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z'
}

func isDigitRune(r rune) bool {
	return r >= '0' && r <= '9'
}

func isOperatorRune(r rune) bool {
	switch r {
	case '=', '!', '~':
		return true
	}

	return false
}

func isQuoteRune(r rune) bool {
	switch r {
	case '/', '|', '"': // maybe add single quoting?
		return true
	}

	return false
}

func isSeparatorRune(r rune) bool {
	switch r {
	case ',', '.':
		return true
	}

	return false
}

func isValueRune(r rune) bool {
	return r != ',' && !unicode.IsSpace(r) &&
		(unicode.IsLetter(r) ||
			unicode.IsDigit(r) ||
			unicode.IsNumber(r) ||
			unicode.IsGraphic(r) ||
			unicode.IsPunct(r))
}
189
vendor/github.com/containerd/containerd/gc/gc.go
generated
vendored
Normal file
@@ -0,0 +1,189 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package gc experiments with providing central gc tooling to ensure
// deterministic resource removal within containerd.
//
// For now, we just have a single exported implementation that can be used
// under certain use cases.
package gc

import (
	"context"
	"sync"
	"time"
)

// ResourceType represents type of resource at a node
type ResourceType uint8

// ResourceMax represents the max resource.
// Upper bits are stripped out during the mark phase, allowing the upper 3 bits
// to be used by the caller reference function.
const ResourceMax = ResourceType(0x1F)

// Node presents a resource which has a type and key,
// this node can be used to lookup other nodes.
type Node struct {
	Type      ResourceType
	Namespace string
	Key       string
}

// Stats about a garbage collection run
type Stats interface {
	Elapsed() time.Duration
}

// Tricolor implements basic, single-thread tri-color GC. Given the roots, the
// complete set and a refs function, this function returns a map of all
// reachable objects.
//
// Correct usage requires that the caller not allow the arguments to change
// until the result is used to delete objects in the system.
//
// It will allocate memory proportional to the size of the reachable set.
//
// We can probably use this to inform a design for incremental GC by injecting
// callbacks to the set modification algorithms.
func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struct{}, error) {
	var (
		grays     []Node                // maintain a gray "stack"
		seen      = map[Node]struct{}{} // or not "white", basically "seen"
		reachable = map[Node]struct{}{} // or "black", in tri-color parlance
	)

	grays = append(grays, roots...)

	for len(grays) > 0 {
		// Pick any gray object
		id := grays[len(grays)-1] // effectively "depth first" because first element
		grays = grays[:len(grays)-1]
		seen[id] = struct{}{} // post-mark this as not-white
		rs, err := refs(id)
		if err != nil {
			return nil, err
		}

		// mark all the referenced objects as gray
		for _, target := range rs {
			if _, ok := seen[target]; !ok {
				grays = append(grays, target)
			}
		}

		// strip bits above max resource type
		id.Type = id.Type & ResourceMax
		// mark as black when done
		reachable[id] = struct{}{}
	}

	return reachable, nil
}

// ConcurrentMark implements simple, concurrent GC. All the roots are scanned
// and the complete set of references is formed by calling the refs function
// for each seen object. This function returns a map of all objects reachable
// from a root.
//
// Correct usage requires that the caller not allow the arguments to change
// until the result is used to delete objects in the system.
//
// It will allocate memory proportional to the size of the reachable set.
func ConcurrentMark(ctx context.Context, root <-chan Node, refs func(context.Context, Node, func(Node)) error) (map[Node]struct{}, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	var (
		grays = make(chan Node)
		seen  = map[Node]struct{}{} // or not "white", basically "seen"
		wg    sync.WaitGroup

		errOnce sync.Once
		refErr  error
	)

	go func() {
		for gray := range grays {
			if _, ok := seen[gray]; ok {
				wg.Done()
				continue
			}
			seen[gray] = struct{}{} // post-mark this as non-white

			go func(gray Node) {
				defer wg.Done()

				send := func(n Node) {
					wg.Add(1)
					select {
					case grays <- n:
					case <-ctx.Done():
						wg.Done()
					}
				}

				if err := refs(ctx, gray, send); err != nil {
					errOnce.Do(func() {
						refErr = err
						cancel()
					})
				}

			}(gray)
		}
	}()

	for r := range root {
		wg.Add(1)
		select {
		case grays <- r:
		case <-ctx.Done():
			wg.Done()
		}

	}

	// Wait for outstanding grays to be processed
	wg.Wait()

	close(grays)

	if refErr != nil {
		return nil, refErr
	}
	if cErr := ctx.Err(); cErr != nil {
		return nil, cErr
	}

	return seen, nil
}

// Sweep removes all nodes returned through the slice which are not in
// the reachable set by calling the provided remove function.
func Sweep(reachable map[Node]struct{}, all []Node, remove func(Node) error) error {
	// All black objects are now reachable, and all white objects are
	// unreachable. Free those that are white!
	for _, node := range all {
		if _, ok := reachable[node]; !ok {
			if err := remove(node); err != nil {
				return err
			}
		}
	}

	return nil
}
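A minimal mark-and-sweep sketch over a tiny in-memory object graph, showing how Tricolor and Sweep compose; the namespace and keys are illustrative only:

package main

import (
	"fmt"

	"github.com/containerd/containerd/gc"
)

func main() {
	// A tiny object graph: a -> b -> c, with d unreferenced.
	edges := map[gc.Node][]gc.Node{
		{Type: 1, Namespace: "ns", Key: "a"}: {{Type: 1, Namespace: "ns", Key: "b"}},
		{Type: 1, Namespace: "ns", Key: "b"}: {{Type: 1, Namespace: "ns", Key: "c"}},
	}
	refs := func(n gc.Node) ([]gc.Node, error) { return edges[n], nil }

	roots := []gc.Node{{Type: 1, Namespace: "ns", Key: "a"}}
	reachable, err := gc.Tricolor(roots, refs)
	if err != nil {
		panic(err)
	}

	all := []gc.Node{
		{Type: 1, Namespace: "ns", Key: "a"},
		{Type: 1, Namespace: "ns", Key: "b"},
		{Type: 1, Namespace: "ns", Key: "c"},
		{Type: 1, Namespace: "ns", Key: "d"},
	}
	// Sweep removes everything outside the reachable set: here only "d".
	_ = gc.Sweep(reachable, all, func(n gc.Node) error {
		fmt.Println("removing", n.Key)
		return nil
	})
}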
73
vendor/github.com/containerd/containerd/identifiers/validate.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package identifiers provides common validation for identifiers and keys
// across containerd.
//
// Identifiers in containerd must be alphanumeric, allowing limited
// underscores, dashes and dots.
//
// While the character set may be expanded in the future, identifiers
// are guaranteed to be safely used as filesystem path components.
package identifiers

import (
	"regexp"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

const (
	maxLength  = 76
	alphanum   = `[A-Za-z0-9]+`
	separators = `[._-]`
)

var (
	// identifierRe defines the pattern for valid identifiers.
	identifierRe = regexp.MustCompile(reAnchor(alphanum + reGroup(separators+reGroup(alphanum)) + "*"))
)

// Validate returns nil if the string s is a valid identifier.
//
// identifiers are similar to the domain name rules according to RFC 1035, section 2.3.1. However
// rules in this package are relaxed to allow numerals to follow period (".") and mixed case is
// allowed.
//
// In general identifiers that pass this validation should be safe for use as filesystem path components.
func Validate(s string) error {
	if len(s) == 0 {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty")
	}

	if len(s) > maxLength {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength)
	}

	if !identifierRe.MatchString(s) {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe)
	}
	return nil
}

func reGroup(s string) string {
	return `(?:` + s + `)`
}

func reAnchor(s string) string {
	return `^` + s + `$`
}
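A minimal sketch of the validation rules above: identifiers must start and end with an alphanumeric run, with single `.`, `_`, or `-` separators between runs:

package main

import (
	"fmt"

	"github.com/containerd/containerd/identifiers"
)

func main() {
	for _, s := range []string{"default", "demo-1.web_backend", "-bad", ""} {
		if err := identifiers.Validate(s); err != nil {
			fmt.Printf("%q rejected: %v\n", s, err)
		} else {
			fmt.Printf("%q ok\n", s)
		}
	}
}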
23
vendor/github.com/containerd/containerd/images/annotations.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package images

const (
	// AnnotationImageName is an annotation on a Descriptor in an index.json
	// containing the `Name` value as used by an `Image` struct
	AnnotationImageName = "io.containerd.image.name"
)
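Usage sketch (editorial, not part of the vendored file): recovering the stored image name from an index entry, assuming the ocispec import used elsewhere in this package.

// Hedged sketch: read the name annotation off a descriptor from index.json.
func imageNameOf(desc ocispec.Descriptor) (string, bool) {
	name, ok := desc.Annotations[AnnotationImageName]
	return name, ok
}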
468
vendor/github.com/containerd/containerd/images/archive/exporter.go
generated
vendored
Normal file
@@ -0,0 +1,468 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package archive

import (
	"archive/tar"
	"context"
	"encoding/json"
	"io"
	"path"
	"sort"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	digest "github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

type exportOptions struct {
	manifests          []ocispec.Descriptor
	platform           platforms.MatchComparer
	allPlatforms       bool
	skipDockerManifest bool
}

// ExportOpt defines options for configuring exported descriptors
type ExportOpt func(context.Context, *exportOptions) error

// WithPlatform defines the platform that manifest lists must include
// when not exporting all platforms.
// Additionally, platform is used to resolve image configs for
// Docker v1.1, v1.2 format compatibility.
func WithPlatform(p platforms.MatchComparer) ExportOpt {
	return func(ctx context.Context, o *exportOptions) error {
		o.platform = p
		return nil
	}
}

// WithAllPlatforms exports all manifests from a manifest list.
// Missing content will fail the export.
func WithAllPlatforms() ExportOpt {
	return func(ctx context.Context, o *exportOptions) error {
		o.allPlatforms = true
		return nil
	}
}

// WithSkipDockerManifest skips creation of the Docker compatible
// manifest.json file.
func WithSkipDockerManifest() ExportOpt {
	return func(ctx context.Context, o *exportOptions) error {
		o.skipDockerManifest = true
		return nil
	}
}

// WithImage adds the provided image to the exported archive.
func WithImage(is images.Store, name string) ExportOpt {
	return func(ctx context.Context, o *exportOptions) error {
		img, err := is.Get(ctx, name)
		if err != nil {
			return err
		}

		img.Target.Annotations = addNameAnnotation(name, img.Target.Annotations)
		o.manifests = append(o.manifests, img.Target)

		return nil
	}
}

// WithManifest adds a manifest to the exported archive.
// When names are given they will be set on the manifest in the
// exported archive, creating an index record for each name.
// When no names are provided, it is up to the caller to put a name
// annotation on the manifest descriptor if needed.
func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt {
	return func(ctx context.Context, o *exportOptions) error {
		if len(names) == 0 {
			o.manifests = append(o.manifests, manifest)
		}
		for _, name := range names {
			mc := manifest
			mc.Annotations = addNameAnnotation(name, manifest.Annotations)
			o.manifests = append(o.manifests, mc)
		}

		return nil
	}
}

func addNameAnnotation(name string, base map[string]string) map[string]string {
	annotations := map[string]string{}
	for k, v := range base {
		annotations[k] = v
	}

	annotations[images.AnnotationImageName] = name
	annotations[ocispec.AnnotationRefName] = ociReferenceName(name)

	return annotations
}

// Export implements Exporter.
func Export(ctx context.Context, store content.Provider, writer io.Writer, opts ...ExportOpt) error {
	var eo exportOptions
	for _, opt := range opts {
		if err := opt(ctx, &eo); err != nil {
			return err
		}
	}

	records := []tarRecord{
		ociLayoutFile(""),
		ociIndexRecord(eo.manifests),
	}

	algorithms := map[string]struct{}{}
	dManifests := map[digest.Digest]*exportManifest{}
	resolvedIndex := map[digest.Digest]digest.Digest{}
	for _, desc := range eo.manifests {
		switch desc.MediaType {
		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
			mt, ok := dManifests[desc.Digest]
			if !ok {
				// TODO(containerd): Skip if already added
				r, err := getRecords(ctx, store, desc, algorithms)
				if err != nil {
					return err
				}
				records = append(records, r...)

				mt = &exportManifest{
					manifest: desc,
				}
				dManifests[desc.Digest] = mt
			}

			name := desc.Annotations[images.AnnotationImageName]
			if name != "" && !eo.skipDockerManifest {
				mt.names = append(mt.names, name)
			}
		case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
			d, ok := resolvedIndex[desc.Digest]
			if !ok {
				records = append(records, blobRecord(store, desc))

				p, err := content.ReadBlob(ctx, store, desc)
				if err != nil {
					return err
				}

				var index ocispec.Index
				if err := json.Unmarshal(p, &index); err != nil {
					return err
				}

				var manifests []ocispec.Descriptor
				for _, m := range index.Manifests {
					if eo.platform != nil {
						if m.Platform == nil || eo.platform.Match(*m.Platform) {
							manifests = append(manifests, m)
						} else if !eo.allPlatforms {
							continue
						}
					}

					r, err := getRecords(ctx, store, m, algorithms)
					if err != nil {
						return err
					}

					records = append(records, r...)
				}

				if !eo.skipDockerManifest {
					if len(manifests) >= 1 {
						if len(manifests) > 1 {
							sort.SliceStable(manifests, func(i, j int) bool {
								if manifests[i].Platform == nil {
									return false
								}
								if manifests[j].Platform == nil {
									return true
								}
								return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform)
							})
						}
						d = manifests[0].Digest
						dManifests[d] = &exportManifest{
							manifest: manifests[0],
						}
					} else if eo.platform != nil {
						return errors.Wrap(errdefs.ErrNotFound, "no manifest found for platform")
					}
				}
				resolvedIndex[desc.Digest] = d
			}
			if d != "" {
				if name := desc.Annotations[images.AnnotationImageName]; name != "" {
					mt := dManifests[d]
					mt.names = append(mt.names, name)
				}
			}
		default:
			return errors.Wrap(errdefs.ErrInvalidArgument, "only manifests may be exported")
		}
	}

	if len(dManifests) > 0 {
		tr, err := manifestsRecord(ctx, store, dManifests)
		if err != nil {
			return errors.Wrap(err, "unable to create manifests file")
		}

		records = append(records, tr)
	}

	if len(algorithms) > 0 {
		records = append(records, directoryRecord("blobs/", 0755))
		for alg := range algorithms {
			records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
		}
	}

	tw := tar.NewWriter(writer)
	defer tw.Close()
	return writeTar(ctx, tw, records)
}

func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}) ([]tarRecord, error) {
	var records []tarRecord
	exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		records = append(records, blobRecord(store, desc))
		algorithms[desc.Digest.Algorithm().String()] = struct{}{}
		return nil, nil
	}

	childrenHandler := images.ChildrenHandler(store)

	handlers := images.Handlers(
		childrenHandler,
		images.HandlerFunc(exportHandler),
	)

	// Walk sequentially since the number of fetches is likely one and doing in
	// parallel requires locking the export handler
	if err := images.Walk(ctx, handlers, desc); err != nil {
		return nil, err
	}

	return records, nil
}

type tarRecord struct {
	Header *tar.Header
	CopyTo func(context.Context, io.Writer) (int64, error)
}

func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord {
	path := path.Join("blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
	return tarRecord{
		Header: &tar.Header{
			Name:     path,
			Mode:     0444,
			Size:     desc.Size,
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			r, err := cs.ReaderAt(ctx, desc)
			if err != nil {
				return 0, errors.Wrap(err, "failed to get reader")
			}
			defer r.Close()

			// Verify digest
			dgstr := desc.Digest.Algorithm().Digester()

			n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
			if err != nil {
				return 0, errors.Wrap(err, "failed to copy to tar")
			}
			if dgstr.Digest() != desc.Digest {
				return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
			}
			return n, nil
		},
	}
}

func directoryRecord(name string, mode int64) tarRecord {
	return tarRecord{
		Header: &tar.Header{
			Name:     name,
			Mode:     mode,
			Typeflag: tar.TypeDir,
		},
	}
}

func ociLayoutFile(version string) tarRecord {
	if version == "" {
		version = ocispec.ImageLayoutVersion
	}
	layout := ocispec.ImageLayout{
		Version: version,
	}

	b, err := json.Marshal(layout)
	if err != nil {
		panic(err)
	}

	return tarRecord{
		Header: &tar.Header{
			Name:     ocispec.ImageLayoutFile,
			Mode:     0444,
			Size:     int64(len(b)),
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			n, err := w.Write(b)
			return int64(n), err
		},
	}
}

func ociIndexRecord(manifests []ocispec.Descriptor) tarRecord {
	index := ocispec.Index{
		Versioned: ocispecs.Versioned{
			SchemaVersion: 2,
		},
		Manifests: manifests,
	}

	b, err := json.Marshal(index)
	if err != nil {
		panic(err)
	}

	return tarRecord{
		Header: &tar.Header{
			Name:     "index.json",
			Mode:     0644,
			Size:     int64(len(b)),
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			n, err := w.Write(b)
			return int64(n), err
		},
	}
}

type exportManifest struct {
	manifest ocispec.Descriptor
	names    []string
}

func manifestsRecord(ctx context.Context, store content.Provider, manifests map[digest.Digest]*exportManifest) (tarRecord, error) {
	mfsts := make([]struct {
		Config   string
		RepoTags []string
		Layers   []string
	}, len(manifests))

	var i int
	for _, m := range manifests {
		p, err := content.ReadBlob(ctx, store, m.manifest)
		if err != nil {
			return tarRecord{}, err
		}

		var manifest ocispec.Manifest
		if err := json.Unmarshal(p, &manifest); err != nil {
			return tarRecord{}, err
		}
		if err := manifest.Config.Digest.Validate(); err != nil {
			return tarRecord{}, errors.Wrapf(err, "invalid manifest %q", m.manifest.Digest)
		}

		dgst := manifest.Config.Digest
		mfsts[i].Config = path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded())
		for _, l := range manifest.Layers {
			path := path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Encoded())
			mfsts[i].Layers = append(mfsts[i].Layers, path)
		}

		for _, name := range m.names {
			nname, err := familiarizeReference(name)
			if err != nil {
				return tarRecord{}, err
			}

			mfsts[i].RepoTags = append(mfsts[i].RepoTags, nname)
		}

		i++
	}

	b, err := json.Marshal(mfsts)
	if err != nil {
		return tarRecord{}, err
	}

	return tarRecord{
		Header: &tar.Header{
			Name:     "manifest.json",
			Mode:     0644,
			Size:     int64(len(b)),
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			n, err := w.Write(b)
			return int64(n), err
		},
	}, nil
}

func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
	sort.Slice(records, func(i, j int) bool {
		return records[i].Header.Name < records[j].Header.Name
	})

	var last string
	for _, record := range records {
		if record.Header.Name == last {
			continue
		}
		last = record.Header.Name
		if err := tw.WriteHeader(record.Header); err != nil {
			return err
		}
		if record.CopyTo != nil {
			n, err := record.CopyTo(ctx, tw)
			if err != nil {
				return err
			}
			if n != record.Header.Size {
				return errors.Errorf("unexpected copy size for %s", record.Header.Name)
			}
		} else if record.Header.Size > 0 {
			return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
		}
	}
	return nil
}
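Usage sketch (editorial, not part of the vendored file): exporting one named image as an OCI layout tarball, assuming this file's imports; the image name is a hypothetical example.

// Hedged sketch for Export.
func exportImage(ctx context.Context, store content.Provider, is images.Store, w io.Writer) error {
	return Export(ctx, store, w,
		WithPlatform(platforms.Default()),
		WithImage(is, "docker.io/library/alpine:latest"), // hypothetical name
	)
}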
371
vendor/github.com/containerd/containerd/images/archive/importer.go
generated
vendored
Normal file
@@ -0,0 +1,371 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package archive provides a Docker and OCI compatible importer
package archive

import (
	"archive/tar"
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"path"

	"github.com/containerd/containerd/archive/compression"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/log"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

type importOpts struct {
	compress bool
}

// ImportOpt is an option for importing an OCI index
type ImportOpt func(*importOpts) error

// WithImportCompression compresses uncompressed layers on import.
// This is used for import formats which do not include the manifest.
func WithImportCompression() ImportOpt {
	return func(io *importOpts) error {
		io.compress = true
		return nil
	}
}

// ImportIndex imports an index from a tar archive image bundle
// - implements Docker v1.1, v1.2 and OCI v1.
// - prefers OCI v1 when provided
// - creates OCI index for Docker formats
// - normalizes Docker references and adds as OCI ref name
//   e.g. alpine:latest -> docker.io/library/alpine:latest
// - existing OCI reference names are untouched
func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) {
	var (
		tr = tar.NewReader(reader)

		ociLayout ocispec.ImageLayout
		mfsts     []struct {
			Config   string
			RepoTags []string
			Layers   []string
		}
		symlinks = make(map[string]string)
		blobs    = make(map[string]ocispec.Descriptor)
		iopts    importOpts
	)

	for _, o := range opts {
		if err := o(&iopts); err != nil {
			return ocispec.Descriptor{}, err
		}
	}

	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return ocispec.Descriptor{}, err
		}
		if hdr.Typeflag == tar.TypeSymlink {
			symlinks[hdr.Name] = path.Join(path.Dir(hdr.Name), hdr.Linkname)
		}

		if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
			if hdr.Typeflag != tar.TypeDir {
				log.G(ctx).WithField("file", hdr.Name).Debug("file type ignored")
			}
			continue
		}

		hdrName := path.Clean(hdr.Name)
		if hdrName == ocispec.ImageLayoutFile {
			if err = onUntarJSON(tr, &ociLayout); err != nil {
				return ocispec.Descriptor{}, errors.Wrapf(err, "untar oci layout %q", hdr.Name)
			}
		} else if hdrName == "manifest.json" {
			if err = onUntarJSON(tr, &mfsts); err != nil {
				return ocispec.Descriptor{}, errors.Wrapf(err, "untar manifest %q", hdr.Name)
			}
		} else {
			dgst, err := onUntarBlob(ctx, tr, store, hdr.Size, "tar-"+hdrName)
			if err != nil {
				return ocispec.Descriptor{}, errors.Wrapf(err, "failed to ingest %q", hdr.Name)
			}

			blobs[hdrName] = ocispec.Descriptor{
				Digest: dgst,
				Size:   hdr.Size,
			}
		}
	}

	// If OCI layout was given, interpret the tar as an OCI layout.
	// When not provided, the layout of the tar will be interpreted
	// as Docker v1.1 or v1.2.
	if ociLayout.Version != "" {
		if ociLayout.Version != ocispec.ImageLayoutVersion {
			return ocispec.Descriptor{}, errors.Errorf("unsupported OCI version %s", ociLayout.Version)
		}

		idx, ok := blobs["index.json"]
		if !ok {
			return ocispec.Descriptor{}, errors.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion)
		}

		idx.MediaType = ocispec.MediaTypeImageIndex
		return idx, nil
	}

	if mfsts == nil {
		return ocispec.Descriptor{}, errors.Errorf("unrecognized image format")
	}

	for name, linkname := range symlinks {
		desc, ok := blobs[linkname]
		if !ok {
			return ocispec.Descriptor{}, errors.Errorf("no target for symlink layer from %q to %q", name, linkname)
		}
		blobs[name] = desc
	}

	idx := ocispec.Index{
		Versioned: specs.Versioned{
			SchemaVersion: 2,
		},
	}
	for _, mfst := range mfsts {
		config, ok := blobs[mfst.Config]
		if !ok {
			return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config)
		}
		config.MediaType = images.MediaTypeDockerSchema2Config

		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress)
		if err != nil {
			return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers")
		}

		manifest := struct {
			SchemaVersion int                  `json:"schemaVersion"`
			MediaType     string               `json:"mediaType"`
			Config        ocispec.Descriptor   `json:"config"`
			Layers        []ocispec.Descriptor `json:"layers"`
		}{
			SchemaVersion: 2,
			MediaType:     images.MediaTypeDockerSchema2Manifest,
			Config:        config,
			Layers:        layers,
		}

		desc, err := writeManifest(ctx, store, manifest, manifest.MediaType)
		if err != nil {
			return ocispec.Descriptor{}, errors.Wrap(err, "write docker manifest")
		}

		platforms, err := images.Platforms(ctx, store, desc)
		if err != nil {
			return ocispec.Descriptor{}, errors.Wrap(err, "unable to resolve platform")
		}
		if len(platforms) > 0 {
			// Only one platform can be resolved from a non-index manifest.
			// The platform can only come from the config included above;
			// if the config has no platform it can be safely omitted.
			desc.Platform = &platforms[0]
		}

		if len(mfst.RepoTags) == 0 {
			idx.Manifests = append(idx.Manifests, desc)
		} else {
			// Add descriptor per tag
			for _, ref := range mfst.RepoTags {
				mfstdesc := desc

				normalized, err := normalizeReference(ref)
				if err != nil {
					return ocispec.Descriptor{}, err
				}

				mfstdesc.Annotations = map[string]string{
					images.AnnotationImageName: normalized,
					ocispec.AnnotationRefName:  ociReferenceName(normalized),
				}

				idx.Manifests = append(idx.Manifests, mfstdesc)
			}
		}
	}

	return writeManifest(ctx, store, idx, ocispec.MediaTypeImageIndex)
}

func onUntarJSON(r io.Reader, j interface{}) error {
	b, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	return json.Unmarshal(b, j)
}

func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size int64, ref string) (digest.Digest, error) {
	dgstr := digest.Canonical.Digester()

	if err := content.WriteBlob(ctx, store, ref, io.TeeReader(r, dgstr.Hash()), ocispec.Descriptor{Size: size}); err != nil {
		return "", err
	}

	return dgstr.Digest(), nil
}

func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) {
	layers := make([]ocispec.Descriptor, len(layerFiles))
	descs := map[digest.Digest]*ocispec.Descriptor{}
	filters := []string{}
	for i, f := range layerFiles {
		desc, ok := blobs[f]
		if !ok {
			return nil, errors.Errorf("layer %q not found", f)
		}
		layers[i] = desc
		descs[desc.Digest] = &layers[i]
		filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String())
	}

	err := store.Walk(ctx, func(info content.Info) error {
		dgst, ok := info.Labels["containerd.io/uncompressed"]
		if ok {
			desc := descs[digest.Digest(dgst)]
			if desc != nil {
				desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
				desc.Digest = info.Digest
				desc.Size = info.Size
			}
		}
		return nil
	}, filters...)
	if err != nil {
		return nil, errors.Wrap(err, "failure checking for compressed blobs")
	}

	for i, desc := range layers {
		if desc.MediaType != "" {
			continue
		}
		// Open blob, resolve media type
		ra, err := store.ReaderAt(ctx, desc)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest)
		}
		s, err := compression.DecompressStream(content.NewReader(ra))
		if err != nil {
			return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i])
		}
		if s.GetCompression() == compression.Uncompressed {
			if compress {
				ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
				labels := map[string]string{
					"containerd.io/uncompressed": desc.Digest.String(),
				}
				layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels))
				if err != nil {
					s.Close()
					return nil, err
				}
				layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
			} else {
				layers[i].MediaType = images.MediaTypeDockerSchema2Layer
			}
		} else {
			layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
		}
		s.Close()
	}
	return layers, nil
}

func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) {
	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
	if err != nil {
		return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer")
	}

	defer func() {
		w.Close()
		if err != nil {
			cs.Abort(ctx, ref)
		}
	}()
	if err := w.Truncate(0); err != nil {
		return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer")
	}

	cw, err := compression.CompressStream(w, compression.Gzip)
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	if _, err := io.Copy(cw, r); err != nil {
		return ocispec.Descriptor{}, err
	}
	if err := cw.Close(); err != nil {
		return ocispec.Descriptor{}, err
	}

	cst, err := w.Status()
	if err != nil {
		return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status")
	}

	desc.Digest = w.Digest()
	desc.Size = cst.Offset

	if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit")
		}
	}

	return desc, nil
}

func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) {
	manifestBytes, err := json.Marshal(manifest)
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	desc := ocispec.Descriptor{
		MediaType: mediaType,
		Digest:    digest.FromBytes(manifestBytes),
		Size:      int64(len(manifestBytes)),
	}
	if err := content.WriteBlob(ctx, cs, "manifest-"+desc.Digest.String(), bytes.NewReader(manifestBytes), desc); err != nil {
		return ocispec.Descriptor{}, err
	}

	return desc, nil
}
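Usage sketch (editorial, not part of the vendored file): importing a `docker save` style tarball into a content store, assuming this file's imports.

// Hedged sketch for ImportIndex.
func importTarball(ctx context.Context, store content.Store, r io.Reader) (ocispec.Descriptor, error) {
	// WithImportCompression gzips uncompressed layers so the resulting
	// manifests can be pushed to registries that expect compressed layers.
	return ImportIndex(ctx, store, r, WithImportCompression())
}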
112
vendor/github.com/containerd/containerd/images/archive/reference.go
generated
vendored
Normal file
@@ -0,0 +1,112 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package archive

import (
	"strings"

	"github.com/containerd/containerd/reference"
	distref "github.com/containerd/containerd/reference/docker"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// FilterRefPrefix restricts references to having the given image
// prefix. Tag-only references will have the prefix prepended.
func FilterRefPrefix(image string) func(string) string {
	return refTranslator(image, true)
}

// AddRefPrefix prepends the given image prefix to tag-only references,
// while returning full references unmodified.
func AddRefPrefix(image string) func(string) string {
	return refTranslator(image, false)
}

// refTranslator creates a reference which only has a tag or verifies
// a full reference.
func refTranslator(image string, checkPrefix bool) func(string) string {
	return func(ref string) string {
		// Check if ref is full reference
		if strings.ContainsAny(ref, "/:@") {
			// If not prefixed, don't include image
			if checkPrefix && !isImagePrefix(ref, image) {
				return ""
			}
			return ref
		}
		return image + ":" + ref
	}
}

func isImagePrefix(s, prefix string) bool {
	if !strings.HasPrefix(s, prefix) {
		return false
	}
	if len(s) > len(prefix) {
		switch s[len(prefix)] {
		case '/', ':', '@':
			// Prevent matching partial namespaces
		default:
			return false
		}
	}
	return true
}

func normalizeReference(ref string) (string, error) {
	// TODO: Replace this function to not depend on reference package
	normalized, err := distref.ParseDockerRef(ref)
	if err != nil {
		return "", errors.Wrapf(err, "normalize image ref %q", ref)
	}

	return normalized.String(), nil
}

func familiarizeReference(ref string) (string, error) {
	named, err := distref.ParseNormalizedNamed(ref)
	if err != nil {
		return "", errors.Wrapf(err, "failed to parse %q", ref)
	}
	named = distref.TagNameOnly(named)

	return distref.FamiliarString(named), nil
}

func ociReferenceName(name string) string {
	// OCI defines the reference name as only a tag excluding the
	// repository. The containerd annotation contains the full image name
	// since the tag is insufficient for correctly naming and referring to an
	// image.
	var ociRef string
	if spec, err := reference.Parse(name); err == nil {
		ociRef = spec.Object
	} else {
		ociRef = name
	}

	return ociRef
}

// DigestTranslator creates a digest reference by adding the
// digest to an image name.
func DigestTranslator(prefix string) func(digest.Digest) string {
	return func(dgst digest.Digest) string {
		return prefix + "@" + dgst.String()
	}
}
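Usage sketch (editorial, not part of the vendored file): what the two translators return for a few inputs, following the logic of refTranslator above; the image names are hypothetical examples.

// Hedged sketch of the reference translators.
func translatorExamples() {
	filter := FilterRefPrefix("docker.io/library/alpine")
	_ = filter("latest")                           // "docker.io/library/alpine:latest"
	_ = filter("docker.io/library/alpine:3.12")    // unchanged, prefix matches
	_ = filter("docker.io/library/busybox:latest") // "" (filtered out)

	add := AddRefPrefix("docker.io/library/alpine")
	_ = add("docker.io/library/busybox:latest") // full reference returned as-is
}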
263
vendor/github.com/containerd/containerd/images/handlers.go
generated
vendored
Normal file
@@ -0,0 +1,263 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package images

import (
	"context"
	"fmt"
	"sort"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
	"golang.org/x/sync/semaphore"
)

var (
	// ErrSkipDesc is used to skip processing of a descriptor and
	// its descendants.
	ErrSkipDesc = fmt.Errorf("skip descriptor")

	// ErrStopHandler is used to signify that the descriptor
	// has been handled and should not be handled further.
	// This applies only to a single descriptor in a handler
	// chain and does not apply to descendant descriptors.
	ErrStopHandler = fmt.Errorf("stop handler")
)

// Handler handles image manifests
type Handler interface {
	Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
}

// HandlerFunc function implementing the Handler interface
type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)

// Handle image manifests
func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
	return fn(ctx, desc)
}

// Handlers returns a handler that will run the handlers in sequence.
//
// A handler may return `ErrStopHandler` to stop calling additional handlers
func Handlers(handlers ...Handler) HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
		var children []ocispec.Descriptor
		for _, handler := range handlers {
			ch, err := handler.Handle(ctx, desc)
			if err != nil {
				if errors.Cause(err) == ErrStopHandler {
					break
				}
				return nil, err
			}

			children = append(children, ch...)
		}

		return children, nil
	}
}

// Walk the resources of an image and call the handler for each. If the handler
// decodes the sub-resources for each image, they will be walked as well.
//
// This differs from dispatch in that each sibling resource is considered
// synchronously.
func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
	for _, desc := range descs {

		children, err := handler.Handle(ctx, desc)
		if err != nil {
			if errors.Cause(err) == ErrSkipDesc {
				continue // don't traverse the children.
			}
			return err
		}

		if len(children) > 0 {
			if err := Walk(ctx, handler, children...); err != nil {
				return err
			}
		}
	}

	return nil
}

// Dispatch runs the provided handler for content specified by the descriptors.
// If the handler decodes subresources, they will be visited as well.
//
// Handlers for siblings are run in parallel on the provided descriptors. A
// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse
// any children.
//
// A concurrency limiter can be passed in to limit the number of concurrent
// handlers running. When limiter is nil, there is no limit.
//
// Typically, this function will be used with `FetchHandler`, often composed
// with other handlers.
//
// If any handler returns an error, the dispatch session will be canceled.
func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error {
	eg, ctx2 := errgroup.WithContext(ctx)
	for _, desc := range descs {
		desc := desc

		if limiter != nil {
			if err := limiter.Acquire(ctx, 1); err != nil {
				return err
			}
		}

		eg.Go(func() error {
			desc := desc

			children, err := handler.Handle(ctx2, desc)
			if limiter != nil {
				limiter.Release(1)
			}
			if err != nil {
				if errors.Cause(err) == ErrSkipDesc {
					return nil // don't traverse the children.
				}
				return err
			}

			if len(children) > 0 {
				return Dispatch(ctx2, handler, limiter, children...)
			}

			return nil
		})
	}

	return eg.Wait()
}

// ChildrenHandler decodes well-known manifest types and returns their children.
//
// This is useful for supporting recursive fetch and other use cases where you
// want to do a full walk of resources.
//
// One can also replace this with another implementation to allow descending of
// arbitrary types.
func ChildrenHandler(provider content.Provider) HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		return Children(ctx, provider, desc)
	}
}

// SetChildrenLabels is a handler wrapper which sets labels for the content on
// the children returned by the handler and passes through the children.
// Must follow a handler that returns the children to be labeled.
func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		children, err := f(ctx, desc)
		if err != nil {
			return children, err
		}

		if len(children) > 0 {
			info := content.Info{
				Digest: desc.Digest,
				Labels: map[string]string{},
			}
			fields := []string{}
			for i, ch := range children {
				info.Labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = ch.Digest.String()
				fields = append(fields, fmt.Sprintf("labels.containerd.io/gc.ref.content.%d", i))
			}

			_, err := manager.Update(ctx, info, fields...)
			if err != nil {
				return nil, err
			}
		}

		return children, err
	}
}

// FilterPlatforms is a handler wrapper which limits the descriptors returned
// based on matching the specified platform matcher.
func FilterPlatforms(f HandlerFunc, m platforms.Matcher) HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		children, err := f(ctx, desc)
		if err != nil {
			return children, err
		}

		var descs []ocispec.Descriptor

		if m == nil {
			descs = children
		} else {
			for _, d := range children {
				if d.Platform == nil || m.Match(*d.Platform) {
					descs = append(descs, d)
				}
			}
		}

		return descs, nil
	}
}

// LimitManifests is a handler wrapper which filters the manifest descriptors
// returned using the provided platform.
// The results will be ordered according to the comparison operator and
// use the ordering in the manifests for equal matches.
// A limit of 0 or less is considered no limit.
// A not found error is returned if no manifest is matched.
func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		children, err := f(ctx, desc)
		if err != nil {
			return children, err
		}

		switch desc.MediaType {
		case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList:
			sort.SliceStable(children, func(i, j int) bool {
				if children[i].Platform == nil {
					return false
				}
				if children[j].Platform == nil {
					return true
				}
				return m.Less(*children[i].Platform, *children[j].Platform)
			})

			if n > 0 {
				if len(children) == 0 {
					return children, errors.Wrap(errdefs.ErrNotFound, "no match for platform in manifest")
				}
				if len(children) > n {
					children = children[:n]
				}
			}
		default:
			// only limit manifests from an index
		}
		return children, nil
	}
}
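Usage sketch (editorial, not part of the vendored file): composing ChildrenHandler with a visitor and walking an image graph with bounded parallelism via Dispatch, assuming this file's imports.

// Hedged sketch: at most three handlers run concurrently.
func walkConcurrently(ctx context.Context, provider content.Provider, root ocispec.Descriptor) error {
	limiter := semaphore.NewWeighted(3)
	visit := HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		fmt.Println("visited", desc.MediaType, desc.Digest)
		return nil, nil
	})
	// ChildrenHandler yields the children; Handlers chains it with the visitor.
	return Dispatch(ctx, Handlers(ChildrenHandler(provider), visit), limiter, root)
}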
386
vendor/github.com/containerd/containerd/images/image.go
generated
vendored
Normal file
@@ -0,0 +1,386 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package images

import (
	"context"
	"encoding/json"
	"sort"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/platforms"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// Image provides the model for how containerd views container images.
type Image struct {
	// Name of the image.
	//
	// To be pulled, it must be a reference compatible with resolvers.
	//
	// This field is required.
	Name string

	// Labels provide runtime decoration for the image record.
	//
	// There is no default behavior for how these labels are propagated. They
	// only decorate the static metadata object.
	//
	// This field is optional.
	Labels map[string]string

	// Target describes the root content for this image. Typically, this is
	// a manifest, index or manifest list.
	Target ocispec.Descriptor

	CreatedAt, UpdatedAt time.Time
}

// DeleteOptions provide options on image delete
type DeleteOptions struct {
	Synchronous bool
}

// DeleteOpt allows configuring a delete operation
type DeleteOpt func(context.Context, *DeleteOptions) error

// SynchronousDelete is used to indicate that an image deletion and removal of
// the image resources should occur synchronously before returning a result.
func SynchronousDelete() DeleteOpt {
	return func(ctx context.Context, o *DeleteOptions) error {
		o.Synchronous = true
		return nil
	}
}

// Store and interact with images
type Store interface {
	Get(ctx context.Context, name string) (Image, error)
	List(ctx context.Context, filters ...string) ([]Image, error)
	Create(ctx context.Context, image Image) (Image, error)

	// Update will replace the data in the store with the provided image. If
	// one or more fieldpaths are provided, only those fields will be updated.
	Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error)

	Delete(ctx context.Context, name string, opts ...DeleteOpt) error
}

// TODO(stevvooe): Many of these functions make strong platform assumptions,
// which are untrue in a lot of cases. More refactoring must be done here to
// make this work in all cases.

// Config resolves the image configuration descriptor.
//
// The caller can then use the descriptor to resolve and process the
// configuration of the image.
func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
	return Config(ctx, provider, image.Target, platform)
}

// RootFS returns the unpacked diffids that make up an image's rootfs.
//
// These are used to verify that a set of layers unpacked to the expected
// values.
func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) {
	desc, err := image.Config(ctx, provider, platform)
	if err != nil {
		return nil, err
	}
	return RootFS(ctx, provider, desc)
}

// Size returns the total size of an image's packed resources.
func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) {
	var size int64
	return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if desc.Size < 0 {
			return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
		}
		size += desc.Size
		return nil, nil
	}), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target)
}

type platformManifest struct {
	p *ocispec.Platform
	m *ocispec.Manifest
}

// Manifest resolves a manifest from the image for the given platform.
//
// When a manifest descriptor inside of a manifest index does not have
// a platform defined, the platform from the image config is considered.
//
// If the descriptor points to a non-index manifest, then the manifest is
// unmarshalled and returned without considering the platform inside of the
// config.
//
// TODO(stevvooe): This violates the current platform agnostic approach to this
// package by returning a specific manifest type. We'll need to refactor this
// to return a manifest descriptor or decide that we want to bring the API in
// this direction because this abstraction is not needed.
func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) {
	var (
		limit    = 1
		m        []platformManifest
		wasIndex bool
	)

	if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		switch desc.MediaType {
		case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
			p, err := content.ReadBlob(ctx, provider, desc)
			if err != nil {
				return nil, err
			}

			var manifest ocispec.Manifest
			if err := json.Unmarshal(p, &manifest); err != nil {
				return nil, err
			}

			if desc.Digest != image.Digest && platform != nil {
				if desc.Platform != nil && !platform.Match(*desc.Platform) {
					return nil, nil
				}

				if desc.Platform == nil {
					p, err := content.ReadBlob(ctx, provider, manifest.Config)
					if err != nil {
						return nil, err
					}

					var image ocispec.Image
					if err := json.Unmarshal(p, &image); err != nil {
						return nil, err
					}

					if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) {
						return nil, nil
					}
				}
			}

			m = append(m, platformManifest{
				p: desc.Platform,
				m: &manifest,
			})

			return nil, nil
		case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
			p, err := content.ReadBlob(ctx, provider, desc)
			if err != nil {
				return nil, err
			}

			var idx ocispec.Index
			if err := json.Unmarshal(p, &idx); err != nil {
				return nil, err
			}

			if platform == nil {
				return idx.Manifests, nil
			}

			var descs []ocispec.Descriptor
			for _, d := range idx.Manifests {
				if d.Platform == nil || platform.Match(*d.Platform) {
					descs = append(descs, d)
				}
			}

			sort.SliceStable(descs, func(i, j int) bool {
				if descs[i].Platform == nil {
					return false
				}
				if descs[j].Platform == nil {
					return true
				}
				return platform.Less(*descs[i].Platform, *descs[j].Platform)
			})

			wasIndex = true

			if len(descs) > limit {
				return descs[:limit], nil
			}
			return descs, nil
		}
		return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
	}), image); err != nil {
		return ocispec.Manifest{}, err
	}

	if len(m) == 0 {
		err := errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest)
		if wasIndex {
			err = errors.Wrapf(errdefs.ErrNotFound, "no match for platform in manifest %v", image.Digest)
		}
		return ocispec.Manifest{}, err
	}
	return *m[0].m, nil
}

// Config resolves the image configuration descriptor using a content provider
// to resolve child resources of the image.
//
// The caller can then use the descriptor to resolve and process the
// configuration of the image.
func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) {
	manifest, err := Manifest(ctx, provider, image, platform)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	return manifest.Config, err
}

// Platforms returns one or more platforms supported by the image.
func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) {
	var platformSpecs []ocispec.Platform
	return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		if desc.Platform != nil {
			platformSpecs = append(platformSpecs, *desc.Platform)
			return nil, ErrSkipDesc
		}

		switch desc.MediaType {
		case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
			p, err := content.ReadBlob(ctx, provider, desc)
			if err != nil {
				return nil, err
			}

			var image ocispec.Image
			if err := json.Unmarshal(p, &image); err != nil {
				return nil, err
			}

			platformSpecs = append(platformSpecs,
				platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture}))
		}
		return nil, nil
	}), ChildrenHandler(provider)), image)
}

// Check returns nil if all components of an image are available in the
// provider for the specified platform.
//
// If available is true, the caller can assume that required represents the
// complete set of content required for the image.
//
// missing will have the components that are part of required but not available
// in the provider.
//
// If there is a problem resolving content, an error will be returned.
func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) {
	mfst, err := Manifest(ctx, provider, image, platform)
	if err != nil {
		if errdefs.IsNotFound(err) {
			return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
		}

		return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest)
	}

	// TODO(stevvooe): It is possible that referenced components could have
	// children, but this is rare. For now, we ignore this and only verify
	// that manifest components are present.
	required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...)

	for _, desc := range required {
		ra, err := provider.ReaderAt(ctx, desc)
		if err != nil {
			if errdefs.IsNotFound(err) {
				missing = append(missing, desc)
				continue
			} else {
				return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest)
			}
		}
		ra.Close()
		present = append(present, desc)
	}

	return true, required, present, missing, nil
}

// Children returns the immediate children of content described by the descriptor.
func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
	var descs []ocispec.Descriptor
	switch desc.MediaType {
	case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
		p, err := content.ReadBlob(ctx, provider, desc)
		if err != nil {
			return nil, err
		}

		// TODO(stevvooe): We just assume oci manifest, for now. There may be
		// subtle differences from the docker version.
		var manifest ocispec.Manifest
		if err := json.Unmarshal(p, &manifest); err != nil {
			return nil, err
		}

		descs = append(descs, manifest.Config)
		descs = append(descs, manifest.Layers...)
	case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
		p, err := content.ReadBlob(ctx, provider, desc)
		if err != nil {
			return nil, err
		}

		var index ocispec.Index
		if err := json.Unmarshal(p, &index); err != nil {
			return nil, err
		}

		descs = append(descs, index.Manifests...)
	default:
		if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) {
			// childless data types.
			return nil, nil
		}
		log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
	}

	return descs, nil
}

// RootFS returns the unpacked diffids that make up an image's rootfs.
//
// These are used to verify that a set of layers unpacked to the expected
// values.
func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) {
	p, err := content.ReadBlob(ctx, provider, configDesc)
	if err != nil {
		return nil, err
	}

	var config ocispec.Image
	if err := json.Unmarshal(p, &config); err != nil {
		return nil, err
	}
	return config.RootFS.DiffIDs, nil
}
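Usage sketch (editorial, not part of the vendored file): verifying that an image's content is fully present locally before unpacking, assuming this file's imports.

// Hedged sketch for Check.
func verifyImage(ctx context.Context, provider content.Provider, target ocispec.Descriptor) error {
	available, required, _, missing, err := Check(ctx, provider, target, platforms.Default())
	if err != nil {
		return err
	}
	if !available {
		return errors.Errorf("missing %d of %d descriptors", len(missing), len(required))
	}
	return nil
}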
37
vendor/github.com/containerd/containerd/images/importexport.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package images

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// Importer is the interface for image importer.
type Importer interface {
	// Import imports an image from a tar stream.
	Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error)
}

// Exporter is the interface for image exporter.
type Exporter interface {
	// Export exports an image to a tar stream.
	Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error
}
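Editor's sketch: the shape an Exporter implementation takes. The nopExporter type is invented for illustration; a real exporter would write an OCI layout or Docker save tar to w.

package example

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

type nopExporter struct{}

func (nopExporter) Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, w io.Writer) error {
	_, err := io.WriteString(w, desc.Digest.String()+"\n") // placeholder payload
	return err
}

// compile-time check that the interface is satisfied
var _ images.Exporter = nopExporter{}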
126
vendor/github.com/containerd/containerd/images/mediatypes.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package images

import (
	"context"
	"sort"
	"strings"

	"github.com/containerd/containerd/errdefs"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// mediatype definitions for image components handled in containerd.
//
// oci components are generally referenced directly, although we may centralize
// here for clarity.
const (
	MediaTypeDockerSchema2Layer            = "application/vnd.docker.image.rootfs.diff.tar"
	MediaTypeDockerSchema2LayerForeign     = "application/vnd.docker.image.rootfs.foreign.diff.tar"
	MediaTypeDockerSchema2LayerGzip        = "application/vnd.docker.image.rootfs.diff.tar.gzip"
	MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
	MediaTypeDockerSchema2Config           = "application/vnd.docker.container.image.v1+json"
	MediaTypeDockerSchema2Manifest         = "application/vnd.docker.distribution.manifest.v2+json"
	MediaTypeDockerSchema2ManifestList     = "application/vnd.docker.distribution.manifest.list.v2+json"
	// Checkpoint/Restore Media Types
	MediaTypeContainerd1Checkpoint               = "application/vnd.containerd.container.criu.checkpoint.criu.tar"
	MediaTypeContainerd1CheckpointPreDump        = "application/vnd.containerd.container.criu.checkpoint.predump.tar"
	MediaTypeContainerd1Resource                 = "application/vnd.containerd.container.resource.tar"
	MediaTypeContainerd1RW                       = "application/vnd.containerd.container.rw.tar"
	MediaTypeContainerd1CheckpointConfig         = "application/vnd.containerd.container.checkpoint.config.v1+proto"
	MediaTypeContainerd1CheckpointOptions        = "application/vnd.containerd.container.checkpoint.options.v1+proto"
	MediaTypeContainerd1CheckpointRuntimeName    = "application/vnd.containerd.container.checkpoint.runtime.name"
	MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto"
	// Legacy Docker schema1 manifest
	MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
)

// DiffCompression returns the compression as defined by the layer diff media
// type. For Docker media types without compression, "unknown" is returned to
// indicate that the media type may be compressed. If the media type is not
// recognized as a layer diff, then it returns errdefs.ErrNotImplemented
func DiffCompression(ctx context.Context, mediaType string) (string, error) {
	base, ext := parseMediaTypes(mediaType)
	switch base {
	case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign:
		if len(ext) > 0 {
			// Type is wrapped
			return "", nil
		}
		// These media types may have been compressed but failed to
		// use the correct media type. The decompression function
		// should detect and handle this case.
		return "unknown", nil
	case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip:
		if len(ext) > 0 {
			// Type is wrapped
			return "", nil
		}
		return "gzip", nil
	case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable:
		if len(ext) > 0 {
			switch ext[len(ext)-1] {
			case "gzip":
				return "gzip", nil
			}
		}
		return "", nil
	default:
		return "", errdefs.ErrNotImplemented
	}
}

// parseMediaTypes splits the media type into the base type and
// an array of sorted extensions
func parseMediaTypes(mt string) (string, []string) {
	if mt == "" {
		return "", []string{}
	}

	s := strings.Split(mt, "+")
	ext := s[1:]
	sort.Strings(ext)

	return s[0], ext
}

// IsLayerType returns true if the media type is a layer
func IsLayerType(mt string) bool {
	if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") {
		return true
	}

	// Parse Docker media types, strip off any + suffixes first
	base, _ := parseMediaTypes(mt)
	switch base {
	case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
		MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip:
		return true
	}
	return false
}

// IsKnownConfig returns true if the media type is a known config type
func IsKnownConfig(mt string) bool {
	switch mt {
	case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
		MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
		return true
	}
	return false
}
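Editor's sketch: how the classification helpers above behave on a few media types. The constants are real identifiers from this file; the wrapping in main is illustrative only.

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/images"
)

func main() {
	ctx := context.Background()
	for _, mt := range []string{
		images.MediaTypeDockerSchema2LayerGzip,        // "gzip"
		images.MediaTypeDockerSchema2Layer,            // "unknown": may still be compressed
		"application/vnd.oci.image.layer.v1.tar+gzip", // "gzip" via the +gzip extension
	} {
		c, err := images.DiffCompression(ctx, mt)
		fmt.Printf("%-55s compression=%q err=%v layer=%v\n", mt, c, err, images.IsLayerType(mt))
	}
}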
37
vendor/github.com/containerd/containerd/labels/validate.go
generated
vendored
Normal file
@@ -0,0 +1,37 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package labels

import (
	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

const (
	maxSize = 4096
)

// Validate a label's key and value are under 4096 bytes
func Validate(k, v string) error {
	if (len(k) + len(v)) > maxSize {
		if len(k) > 10 {
			k = k[:10]
		}
		return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k)
	}
	return nil
}
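Editor's sketch: Validate in action. Note the limit applies to the combined key and value length, and the key is truncated to ten bytes in the error message.

package main

import (
	"fmt"
	"strings"

	"github.com/containerd/containerd/labels"
)

func main() {
	fmt.Println(labels.Validate("app", "luet"))                    // <nil>
	fmt.Println(labels.Validate("app", strings.Repeat("x", 5000))) // invalid argument: key+value over 4096 bytes
}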
40
vendor/github.com/containerd/containerd/leases/context.go
generated
vendored
Normal file
@@ -0,0 +1,40 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package leases

import "context"

type leaseKey struct{}

// WithLease sets a given lease on the context
func WithLease(ctx context.Context, lid string) context.Context {
	ctx = context.WithValue(ctx, leaseKey{}, lid)

	// also store on the grpc headers so it gets picked up by any clients that
	// are using this.
	return withGRPCLeaseHeader(ctx, lid)
}

// FromContext returns the lease from the context.
func FromContext(ctx context.Context) (string, bool) {
	lid, ok := ctx.Value(leaseKey{}).(string)
	if !ok {
		return fromGRPCHeader(ctx)
	}

	return lid, ok
}
58
vendor/github.com/containerd/containerd/leases/grpc.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package leases

import (
	"context"

	"google.golang.org/grpc/metadata"
)

const (
	// GRPCHeader defines the header name for specifying a containerd lease.
	GRPCHeader = "containerd-lease"
)

func withGRPCLeaseHeader(ctx context.Context, lid string) context.Context {
	// also store on the grpc headers so it gets picked up by any clients
	// that are using this.
	txheader := metadata.Pairs(GRPCHeader, lid)
	md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context.
	if !ok {
		md = txheader
	} else {
		// order ensures the latest is first in this list.
		md = metadata.Join(txheader, md)
	}

	return metadata.NewOutgoingContext(ctx, md)
}

func fromGRPCHeader(ctx context.Context) (string, bool) {
	// try to extract for use in grpc servers.
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return "", false
	}

	values := md[GRPCHeader]
	if len(values) == 0 {
		return "", false
	}

	return values[0], true
}
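Editor's sketch: attaching a lease ID to a context and reading it back. In a real client the outgoing gRPC metadata set by WithLease is what carries the lease to the daemon; this only exercises the context-value path, and the lease ID is made up.

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/leases"
)

func main() {
	// WithLease stores the ID both as a context value and as outgoing
	// gRPC metadata under the "containerd-lease" header.
	ctx := leases.WithLease(context.Background(), "build-cache")

	if lid, ok := leases.FromContext(ctx); ok {
		fmt.Println("lease on context:", lid) // build-cache
	}
}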
43
vendor/github.com/containerd/containerd/leases/id.go
generated
vendored
Normal file
@@ -0,0 +1,43 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package leases

import (
	"encoding/base64"
	"fmt"
	"math/rand"
	"time"
)

// WithRandomID sets the lease ID to a random unique value
func WithRandomID() Opt {
	return func(l *Lease) error {
		t := time.Now()
		var b [3]byte
		rand.Read(b[:])
		l.ID = fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:]))
		return nil
	}
}

// WithID sets the ID for the lease
func WithID(id string) Opt {
	return func(l *Lease) error {
		l.ID = id
		return nil
	}
}
86
vendor/github.com/containerd/containerd/leases/lease.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package leases

import (
	"context"
	"time"
)

// Opt is used to set options on a lease
type Opt func(*Lease) error

// DeleteOpt allows configuring a delete operation
type DeleteOpt func(context.Context, *DeleteOptions) error

// Manager is used to create, list, and remove leases
type Manager interface {
	Create(context.Context, ...Opt) (Lease, error)
	Delete(context.Context, Lease, ...DeleteOpt) error
	List(context.Context, ...string) ([]Lease, error)
	AddResource(context.Context, Lease, Resource) error
	DeleteResource(context.Context, Lease, Resource) error
	ListResources(context.Context, Lease) ([]Resource, error)
}

// Lease retains resources to prevent cleanup before
// the resources can be fully referenced.
type Lease struct {
	ID        string
	CreatedAt time.Time
	Labels    map[string]string
}

// Resource represents low level resource of image, like content, ingest and
// snapshotter.
type Resource struct {
	ID   string
	Type string
}

// DeleteOptions provide options on image delete
type DeleteOptions struct {
	Synchronous bool
}

// SynchronousDelete is used to indicate that a lease deletion and removal of
// any unreferenced resources should occur synchronously before returning the
// result.
func SynchronousDelete(ctx context.Context, o *DeleteOptions) error {
	o.Synchronous = true
	return nil
}

// WithLabels sets labels on a lease
func WithLabels(labels map[string]string) Opt {
	return func(l *Lease) error {
		l.Labels = labels
		return nil
	}
}

// WithExpiration sets an expiration on the lease
func WithExpiration(d time.Duration) Opt {
	return func(l *Lease) error {
		if l.Labels == nil {
			l.Labels = map[string]string{}
		}
		l.Labels["containerd.io/gc.expire"] = time.Now().Add(d).Format(time.RFC3339)

		return nil
	}
}
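Editor's sketch: how the Opt functions compose. A Manager implementation would normally apply these inside Create; applying them by hand here just shows the resulting Lease value. The label values are invented.

package main

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/leases"
)

func main() {
	var l leases.Lease
	// Note the ordering: WithLabels replaces the whole map, so it must run
	// before WithExpiration, which adds the containerd.io/gc.expire label.
	for _, opt := range []leases.Opt{
		leases.WithRandomID(),
		leases.WithLabels(map[string]string{"owner": "luet"}),
		leases.WithExpiration(24 * time.Hour),
	} {
		if err := opt(&l); err != nil {
			panic(err)
		}
	}
	fmt.Printf("%+v\n", l)
}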
178
vendor/github.com/containerd/containerd/metadata/adaptors.go
generated
vendored
Normal file
@@ -0,0 +1,178 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package metadata

import (
	"strings"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/filters"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/snapshots"
)

func adaptImage(o interface{}) filters.Adaptor {
	obj := o.(images.Image)
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}

		switch fieldpath[0] {
		case "name":
			return obj.Name, len(obj.Name) > 0
		case "target":
			if len(fieldpath) < 2 {
				return "", false
			}

			switch fieldpath[1] {
			case "digest":
				return obj.Target.Digest.String(), len(obj.Target.Digest) > 0
			case "mediatype":
				return obj.Target.MediaType, len(obj.Target.MediaType) > 0
			}
		case "labels":
			return checkMap(fieldpath[1:], obj.Labels)
			// TODO(stevvooe): Greater/Less than filters would be awesome for
			// size. Let's do it!
		case "annotations":
			return checkMap(fieldpath[1:], obj.Target.Annotations)
		}

		return "", false
	})
}

func adaptContainer(o interface{}) filters.Adaptor {
	obj := o.(containers.Container)
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}

		switch fieldpath[0] {
		case "id":
			return obj.ID, len(obj.ID) > 0
		case "runtime":
			if len(fieldpath) <= 1 {
				return "", false
			}

			switch fieldpath[1] {
			case "name":
				return obj.Runtime.Name, len(obj.Runtime.Name) > 0
			default:
				return "", false
			}
		case "image":
			return obj.Image, len(obj.Image) > 0
		case "labels":
			return checkMap(fieldpath[1:], obj.Labels)
		}

		return "", false
	})
}

func adaptContentInfo(info content.Info) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}

		switch fieldpath[0] {
		case "digest":
			return info.Digest.String(), true
		case "size":
			// TODO: support size based filtering
		case "labels":
			return checkMap(fieldpath[1:], info.Labels)
		}

		return "", false
	})
}

func adaptContentStatus(status content.Status) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}
		switch fieldpath[0] {
		case "ref":
			return status.Ref, true
		}

		return "", false
	})
}

func adaptLease(lease leases.Lease) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}

		switch fieldpath[0] {
		case "id":
			return lease.ID, len(lease.ID) > 0
		case "labels":
			return checkMap(fieldpath[1:], lease.Labels)
		}

		return "", false
	})
}

func adaptSnapshot(info snapshots.Info) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}

		switch fieldpath[0] {
		case "kind":
			switch info.Kind {
			case snapshots.KindActive:
				return "active", true
			case snapshots.KindView:
				return "view", true
			case snapshots.KindCommitted:
				return "committed", true
			}
		case "name":
			return info.Name, true
		case "parent":
			return info.Parent, true
		case "labels":
			return checkMap(fieldpath[1:], info.Labels)
		}

		return "", false
	})
}

func checkMap(fieldpath []string, m map[string]string) (string, bool) {
	if len(m) == 0 {
		return "", false
	}

	value, ok := m[strings.Join(fieldpath, ".")]
	return value, ok
}
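Editor's sketch: the adaptor pattern used above, applied to an arbitrary struct. filters.ParseAll, filters.AdapterFunc, and Filter.Match are the real exported APIs; the Pkg type and its field values are invented.

package main

import (
	"fmt"
	"strings"

	"github.com/containerd/containerd/filters"
)

// Pkg is a stand-in for any filterable object; adaptPkg mirrors adaptImage
// and checkMap above.
type Pkg struct {
	Name   string
	Labels map[string]string
}

func adaptPkg(p Pkg) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}
		switch fieldpath[0] {
		case "name":
			return p.Name, len(p.Name) > 0
		case "labels":
			if len(fieldpath) < 2 {
				return "", false
			}
			v, ok := p.Labels[strings.Join(fieldpath[1:], ".")]
			return v, ok
		}
		return "", false
	})
}

func main() {
	f, err := filters.ParseAll("name==luet,labels.tier==base")
	if err != nil {
		panic(err)
	}
	p := Pkg{Name: "luet", Labels: map[string]string{"tier": "base"}}
	fmt.Println(f.Match(adaptPkg(p))) // true
}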
61
vendor/github.com/containerd/containerd/metadata/bolt.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package metadata

import (
	"context"

	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)

type transactionKey struct{}

// WithTransactionContext returns a new context holding the provided
// bolt transaction. Functions which require a bolt transaction will
// first check to see if a transaction is already created on the
// context before creating their own.
func WithTransactionContext(ctx context.Context, tx *bolt.Tx) context.Context {
	return context.WithValue(ctx, transactionKey{}, tx)
}

type transactor interface {
	View(fn func(*bolt.Tx) error) error
	Update(fn func(*bolt.Tx) error) error
}

// view gets a bolt db transaction either from the context
// or starts a new one with the provided bolt database.
func view(ctx context.Context, db transactor, fn func(*bolt.Tx) error) error {
	tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
	if !ok {
		return db.View(fn)
	}
	return fn(tx)
}

// update gets a writable bolt db transaction either from the context
// or starts a new one with the provided bolt database.
func update(ctx context.Context, db transactor, fn func(*bolt.Tx) error) error {
	tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
	if !ok {
		return db.Update(fn)
	} else if !tx.Writable() {
		return errors.Wrap(bolt.ErrTxNotWritable, "unable to use transaction from context")
	}
	return fn(tx)
}
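Editor's sketch: the caller side of WithTransactionContext. Any function built on the unexported view/update helpers above reuses the transaction carried on the context instead of opening its own; readConsistently is an invented wrapper showing that.

package example

import (
	"context"

	"github.com/containerd/containerd/metadata"
	bolt "go.etcd.io/bbolt"
)

// readConsistently runs fn with a context carrying one read transaction,
// so every metadata call inside fn observes the same database snapshot.
func readConsistently(ctx context.Context, db *bolt.DB, fn func(context.Context) error) error {
	return db.View(func(tx *bolt.Tx) error {
		return fn(metadata.WithTransactionContext(ctx, tx))
	})
}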
147
vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go
generated
vendored
Normal file
@@ -0,0 +1,147 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package boltutil

import (
	"time"

	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)

var (
	bucketKeyAnnotations = []byte("annotations")
	bucketKeyLabels      = []byte("labels")
	bucketKeyCreatedAt   = []byte("createdat")
	bucketKeyUpdatedAt   = []byte("updatedat")
)

// ReadLabels reads the labels key from the bucket
// Uses the key "labels"
func ReadLabels(bkt *bolt.Bucket) (map[string]string, error) {
	return readMap(bkt, bucketKeyLabels)
}

// ReadAnnotations reads the OCI Descriptor Annotations key from the bucket
// Uses the key "annotations"
func ReadAnnotations(bkt *bolt.Bucket) (map[string]string, error) {
	return readMap(bkt, bucketKeyAnnotations)
}

func readMap(bkt *bolt.Bucket, bucketName []byte) (map[string]string, error) {
	lbkt := bkt.Bucket(bucketName)
	if lbkt == nil {
		return nil, nil
	}
	labels := map[string]string{}
	if err := lbkt.ForEach(func(k, v []byte) error {
		labels[string(k)] = string(v)
		return nil
	}); err != nil {
		return nil, err
	}
	return labels, nil
}

// WriteLabels will write a new labels bucket to the provided bucket at key
// bucketKeyLabels, replacing the contents of the bucket with the provided map.
//
// The provided map labels will be modified to have the final contents of the
// bucket. Typically, this removes zero-value entries.
// Uses the key "labels"
func WriteLabels(bkt *bolt.Bucket, labels map[string]string) error {
	return writeMap(bkt, bucketKeyLabels, labels)
}

// WriteAnnotations writes the OCI Descriptor Annotations
func WriteAnnotations(bkt *bolt.Bucket, labels map[string]string) error {
	return writeMap(bkt, bucketKeyAnnotations, labels)
}

func writeMap(bkt *bolt.Bucket, bucketName []byte, labels map[string]string) error {
	// Remove existing labels to keep from merging
	if lbkt := bkt.Bucket(bucketName); lbkt != nil {
		if err := bkt.DeleteBucket(bucketName); err != nil {
			return err
		}
	}

	if len(labels) == 0 {
		return nil
	}

	lbkt, err := bkt.CreateBucket(bucketName)
	if err != nil {
		return err
	}

	for k, v := range labels {
		if v == "" {
			delete(labels, k) // remove since we don't actually set it
			continue
		}

		if err := lbkt.Put([]byte(k), []byte(v)); err != nil {
			return errors.Wrapf(err, "failed to set label %q=%q", k, v)
		}
	}

	return nil
}

// ReadTimestamps reads created and updated timestamps from a bucket.
// Uses keys "createdat" and "updatedat"
func ReadTimestamps(bkt *bolt.Bucket, created, updated *time.Time) error {
	for _, f := range []struct {
		b []byte
		t *time.Time
	}{
		{bucketKeyCreatedAt, created},
		{bucketKeyUpdatedAt, updated},
	} {
		v := bkt.Get(f.b)
		if v != nil {
			if err := f.t.UnmarshalBinary(v); err != nil {
				return err
			}
		}
	}
	return nil
}

// WriteTimestamps writes created and updated timestamps to a bucket.
// Uses keys "createdat" and "updatedat"
func WriteTimestamps(bkt *bolt.Bucket, created, updated time.Time) error {
	createdAt, err := created.MarshalBinary()
	if err != nil {
		return err
	}
	updatedAt, err := updated.MarshalBinary()
	if err != nil {
		return err
	}
	for _, v := range [][2][]byte{
		{bucketKeyCreatedAt, createdAt},
		{bucketKeyUpdatedAt, updatedAt},
	} {
		if err := bkt.Put(v[0], v[1]); err != nil {
			return err
		}
	}

	return nil
}
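Editor's sketch: a label round trip through the helpers above, against a throwaway bbolt file. The database path and bucket name are arbitrary.

package main

import (
	"fmt"

	"github.com/containerd/containerd/metadata/boltutil"
	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/labels-demo.db", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if err := db.Update(func(tx *bolt.Tx) error {
		bkt, err := tx.CreateBucketIfNotExists([]byte("demo"))
		if err != nil {
			return err
		}
		// Zero-value entries are dropped from both the bucket and the map.
		if err := boltutil.WriteLabels(bkt, map[string]string{"a": "1", "b": ""}); err != nil {
			return err
		}
		got, err := boltutil.ReadLabels(bkt)
		fmt.Println(got, err) // map[a:1] <nil>
		return err
	}); err != nil {
		panic(err)
	}
}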
272
vendor/github.com/containerd/containerd/metadata/buckets.go
generated
vendored
Normal file
@@ -0,0 +1,272 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package metadata stores all labels and object specific metadata by namespace.
// This package also contains the main garbage collection logic for cleaning up
// resources consistently and atomically. Resources used by backends will be
// tracked in the metadata store to be exposed to consumers of this package.
//
// The layout where a "/" delineates a bucket is described in the following
// section. Please try to follow this as closely as possible when adding
// functionality. We can bolster this with helpers and more structure if that
// becomes an issue.
//
// Generically, we try to do the following:
//
// <version>/<namespace>/<object>/<key> -> <field>
//
// version: Currently, this is "v1". Additions can be made to v1 in a backwards
// compatible way. If the layout changes, a new version must be made, along
// with a migration.
//
// namespace: the namespace to which this object belongs.
//
// object: defines which object set is stored in the bucket. There are two
// special objects, "labels" and "indexes". The "labels" bucket stores the
// labels for the parent namespace. The "indexes" object is reserved for
// indexing objects, if we require in the future.
//
// key: object-specific key identifying the storage bucket for the objects
// contents.
//
// Below is the current database schema. This should be updated each time
// the structure is changed in addition to adding a migration and incrementing
// the database version. Note that `╘══*...*` refers to maps with arbitrary
// keys.
// ├──version : <varint> - Latest version, see migrations
// └──v1 - Schema version bucket
//    ╘══*namespace*
//       ├──labels
//       │  ╘══*key* : <string> - Label value
//       ├──image
//       │  ╘══*image name*
//       │     ├──createdat : <binary time> - Created at
//       │     ├──updatedat : <binary time> - Updated at
//       │     ├──target
//       │     │  ├──digest : <digest> - Descriptor digest
//       │     │  ├──mediatype : <string> - Descriptor media type
//       │     │  └──size : <varint> - Descriptor size
//       │     └──labels
//       │        ╘══*key* : <string> - Label value
//       ├──containers
//       │  ╘══*container id*
//       │     ├──createdat : <binary time> - Created at
//       │     ├──updatedat : <binary time> - Updated at
//       │     ├──spec : <binary> - Proto marshaled spec
//       │     ├──image : <string> - Image name
//       │     ├──snapshotter : <string> - Snapshotter name
//       │     ├──snapshotKey : <string> - Snapshot key
//       │     ├──runtime
//       │     │  ├──name : <string> - Runtime name
//       │     │  ├──extensions
//       │     │  │  ╘══*name* : <binary> - Proto marshaled extension
//       │     │  └──options : <binary> - Proto marshaled options
//       │     └──labels
//       │        ╘══*key* : <string> - Label value
//       ├──snapshots
//       │  ╘══*snapshotter*
//       │     ╘══*snapshot key*
//       │        ├──name : <string> - Snapshot name in backend
//       │        ├──createdat : <binary time> - Created at
//       │        ├──updatedat : <binary time> - Updated at
//       │        ├──parent : <string> - Parent snapshot name
//       │        ├──children
//       │        │  ╘══*snapshot key* : <nil> - Child snapshot reference
//       │        └──labels
//       │           ╘══*key* : <string> - Label value
//       ├──content
//       │  ├──blob
//       │  │  ╘══*blob digest*
//       │  │     ├──createdat : <binary time> - Created at
//       │  │     ├──updatedat : <binary time> - Updated at
//       │  │     ├──size : <varint> - Blob size
//       │  │     └──labels
//       │  │        ╘══*key* : <string> - Label value
//       │  └──ingests
//       │     ╘══*ingest reference*
//       │        ├──ref : <string> - Ingest reference in backend
//       │        ├──expireat : <binary time> - Time to expire ingest
//       │        └──expected : <digest> - Expected commit digest
//       └──leases
//          ╘══*lease id*
//             ├──createdat : <binary time> - Created at
//             ├──labels
//             │  ╘══*key* : <string> - Label value
//             ├──snapshots
//             │  ╘══*snapshotter*
//             │     ╘══*snapshot key* : <nil> - Snapshot reference
//             ├──content
//             │  ╘══*blob digest* : <nil> - Content blob reference
//             └──ingests
//                ╘══*ingest reference* : <nil> - Content ingest reference
package metadata

import (
	digest "github.com/opencontainers/go-digest"
	bolt "go.etcd.io/bbolt"
)

var (
	bucketKeyVersion          = []byte(schemaVersion)
	bucketKeyDBVersion        = []byte("version")    // stores the version of the schema
	bucketKeyObjectLabels     = []byte("labels")     // stores the labels for a namespace.
	bucketKeyObjectImages     = []byte("images")     // stores image objects
	bucketKeyObjectContainers = []byte("containers") // stores container objects
	bucketKeyObjectSnapshots  = []byte("snapshots")  // stores snapshot references
	bucketKeyObjectContent    = []byte("content")    // stores content references
	bucketKeyObjectBlob       = []byte("blob")       // stores content links
	bucketKeyObjectIngests    = []byte("ingests")    // stores ingest objects
	bucketKeyObjectLeases     = []byte("leases")     // stores leases

	bucketKeyDigest      = []byte("digest")
	bucketKeyMediaType   = []byte("mediatype")
	bucketKeySize        = []byte("size")
	bucketKeyImage       = []byte("image")
	bucketKeyRuntime     = []byte("runtime")
	bucketKeyName        = []byte("name")
	bucketKeyParent      = []byte("parent")
	bucketKeyChildren    = []byte("children")
	bucketKeyOptions     = []byte("options")
	bucketKeySpec        = []byte("spec")
	bucketKeySnapshotKey = []byte("snapshotKey")
	bucketKeySnapshotter = []byte("snapshotter")
	bucketKeyTarget      = []byte("target")
	bucketKeyExtensions  = []byte("extensions")
	bucketKeyCreatedAt   = []byte("createdat")
	bucketKeyExpected    = []byte("expected")
	bucketKeyRef         = []byte("ref")
	bucketKeyExpireAt    = []byte("expireat")

	deprecatedBucketKeyObjectIngest = []byte("ingest") // stores ingest links, deprecated in v1.2
)

func getBucket(tx *bolt.Tx, keys ...[]byte) *bolt.Bucket {
	bkt := tx.Bucket(keys[0])

	for _, key := range keys[1:] {
		if bkt == nil {
			break
		}
		bkt = bkt.Bucket(key)
	}

	return bkt
}

func createBucketIfNotExists(tx *bolt.Tx, keys ...[]byte) (*bolt.Bucket, error) {
	bkt, err := tx.CreateBucketIfNotExists(keys[0])
	if err != nil {
		return nil, err
	}

	for _, key := range keys[1:] {
		bkt, err = bkt.CreateBucketIfNotExists(key)
		if err != nil {
			return nil, err
		}
	}

	return bkt, nil
}

func namespaceLabelsBucketPath(namespace string) [][]byte {
	return [][]byte{bucketKeyVersion, []byte(namespace), bucketKeyObjectLabels}
}

func withNamespacesLabelsBucket(tx *bolt.Tx, namespace string, fn func(bkt *bolt.Bucket) error) error {
	bkt, err := createBucketIfNotExists(tx, namespaceLabelsBucketPath(namespace)...)
	if err != nil {
		return err
	}

	return fn(bkt)
}

func getNamespaceLabelsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
	return getBucket(tx, namespaceLabelsBucketPath(namespace)...)
}

func imagesBucketPath(namespace string) [][]byte {
	return [][]byte{bucketKeyVersion, []byte(namespace), bucketKeyObjectImages}
}

func createImagesBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) {
	return createBucketIfNotExists(tx, imagesBucketPath(namespace)...)
}

func getImagesBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
	return getBucket(tx, imagesBucketPath(namespace)...)
}

func createContainersBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) {
	return createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers)
}

func getContainersBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers)
}

func getContainerBucket(tx *bolt.Tx, namespace, id string) *bolt.Bucket {
	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers, []byte(id))
}

func createSnapshotterBucket(tx *bolt.Tx, namespace, snapshotter string) (*bolt.Bucket, error) {
	bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots, []byte(snapshotter))
	if err != nil {
		return nil, err
	}
	return bkt, nil
}

func getSnapshottersBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots)
}

func getSnapshotterBucket(tx *bolt.Tx, namespace, snapshotter string) *bolt.Bucket {
	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots, []byte(snapshotter))
}

func createBlobBucket(tx *bolt.Tx, namespace string, dgst digest.Digest) (*bolt.Bucket, error) {
	bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob)
	if err != nil {
		return nil, err
	}
	return bkt.CreateBucket([]byte(dgst.String()))
}

func getBlobsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob)
}

func getBlobBucket(tx *bolt.Tx, namespace string, dgst digest.Digest) *bolt.Bucket {
	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(dgst.String()))
}

func getIngestsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngests)
}

func createIngestBucket(tx *bolt.Tx, namespace, ref string) (*bolt.Bucket, error) {
	bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(ref))
	if err != nil {
		return nil, err
	}
	return bkt, nil
}

func getIngestBucket(tx *bolt.Tx, namespace, ref string) *bolt.Bucket {
	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(ref))
}
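Editor's sketch: navigating the documented v1/<namespace>/images layout directly with bbolt, mirroring what the unexported getImagesBucket does. The bucket names follow the schema comment above (schemaVersion is assumed to be "v1"); the database path and "default" namespace are placeholders, and this should only be run against a copy of a metadata database, not one in use.

package main

import (
	"fmt"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("meta.db", 0600, nil) // path to a copied containerd metadata database
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if err := db.View(func(tx *bolt.Tx) error {
		// v1 -> <namespace> -> images, exactly as in the schema comment.
		bkt := tx.Bucket([]byte("v1"))
		if bkt == nil {
			return nil
		}
		if bkt = bkt.Bucket([]byte("default")); bkt == nil { // namespace
			return nil
		}
		if bkt = bkt.Bucket([]byte("images")); bkt == nil {
			return nil
		}
		return bkt.ForEach(func(k, _ []byte) error {
			fmt.Println("image:", string(k))
			return nil
		})
	}); err != nil {
		panic(err)
	}
}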
457
vendor/github.com/containerd/containerd/metadata/containers.go
generated
vendored
Normal file
@@ -0,0 +1,457 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package metadata

import (
	"context"
	"strings"
	"sync/atomic"
	"time"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/filters"
	"github.com/containerd/containerd/identifiers"
	"github.com/containerd/containerd/labels"
	"github.com/containerd/containerd/metadata/boltutil"
	"github.com/containerd/containerd/namespaces"
	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)

type containerStore struct {
	db *DB
}

// NewContainerStore returns a Store backed by an underlying bolt DB
func NewContainerStore(db *DB) containers.Store {
	return &containerStore{
		db: db,
	}
}

func (s *containerStore) Get(ctx context.Context, id string) (containers.Container, error) {
	namespace, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return containers.Container{}, err
	}

	container := containers.Container{ID: id}

	if err := view(ctx, s.db, func(tx *bolt.Tx) error {
		bkt := getContainerBucket(tx, namespace, id)
		if bkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "container %q in namespace %q", id, namespace)
		}

		if err := readContainer(&container, bkt); err != nil {
			return errors.Wrapf(err, "failed to read container %q", id)
		}

		return nil
	}); err != nil {
		return containers.Container{}, err
	}

	return container, nil
}

func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.Container, error) {
	namespace, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return nil, err
	}

	filter, err := filters.ParseAll(fs...)
	if err != nil {
		return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
	}

	var m []containers.Container

	if err := view(ctx, s.db, func(tx *bolt.Tx) error {
		bkt := getContainersBucket(tx, namespace)
		if bkt == nil {
			return nil // empty store
		}

		return bkt.ForEach(func(k, v []byte) error {
			cbkt := bkt.Bucket(k)
			if cbkt == nil {
				return nil
			}
			container := containers.Container{ID: string(k)}

			if err := readContainer(&container, cbkt); err != nil {
				return errors.Wrapf(err, "failed to read container %q", string(k))
			}

			if filter.Match(adaptContainer(container)) {
				m = append(m, container)
			}
			return nil
		})
	}); err != nil {
		return nil, err
	}

	return m, nil
}

func (s *containerStore) Create(ctx context.Context, container containers.Container) (containers.Container, error) {
	namespace, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return containers.Container{}, err
	}

	if err := validateContainer(&container); err != nil {
		return containers.Container{}, errors.Wrap(err, "create container failed validation")
	}

	if err := update(ctx, s.db, func(tx *bolt.Tx) error {
		bkt, err := createContainersBucket(tx, namespace)
		if err != nil {
			return err
		}

		cbkt, err := bkt.CreateBucket([]byte(container.ID))
		if err != nil {
			if err == bolt.ErrBucketExists {
				err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID)
			}
			return err
		}

		container.CreatedAt = time.Now().UTC()
		container.UpdatedAt = container.CreatedAt
		if err := writeContainer(cbkt, &container); err != nil {
			return errors.Wrapf(err, "failed to write container %q", container.ID)
		}

		return nil
	}); err != nil {
		return containers.Container{}, err
	}

	return container, nil
}

func (s *containerStore) Update(ctx context.Context, container containers.Container, fieldpaths ...string) (containers.Container, error) {
	namespace, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return containers.Container{}, err
	}

	if container.ID == "" {
		return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "must specify a container id")
	}

	var updated containers.Container
	if err := update(ctx, s.db, func(tx *bolt.Tx) error {
		bkt := getContainersBucket(tx, namespace)
		if bkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "cannot update container %q in namespace %q", container.ID, namespace)
		}

		cbkt := bkt.Bucket([]byte(container.ID))
		if cbkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID)
		}

		if err := readContainer(&updated, cbkt); err != nil {
			return errors.Wrapf(err, "failed to read container %q", container.ID)
		}
		createdat := updated.CreatedAt
		updated.ID = container.ID

		if len(fieldpaths) == 0 {
			// only allow updates to these fields on full replace.
			fieldpaths = []string{"labels", "spec", "extensions", "image", "snapshotkey"}

			// Fields that are immutable must cause an error when no field paths
			// are provided. This allows these fields to become mutable in the
			// future.
			if updated.Snapshotter != container.Snapshotter {
				return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter field is immutable")
			}

			if updated.Runtime.Name != container.Runtime.Name {
				return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name field is immutable")
			}
		}

		// apply the field mask. If you update this code, you better follow the
		// field mask rules in field_mask.proto. If you don't know what this
		// is, do not update this code.
		for _, path := range fieldpaths {
			if strings.HasPrefix(path, "labels.") {
				if updated.Labels == nil {
					updated.Labels = map[string]string{}
				}
				key := strings.TrimPrefix(path, "labels.")
				updated.Labels[key] = container.Labels[key]
				continue
			}

			if strings.HasPrefix(path, "extensions.") {
				if updated.Extensions == nil {
					updated.Extensions = map[string]types.Any{}
				}
				key := strings.TrimPrefix(path, "extensions.")
				updated.Extensions[key] = container.Extensions[key]
				continue
			}

			switch path {
			case "labels":
				updated.Labels = container.Labels
			case "spec":
				updated.Spec = container.Spec
			case "extensions":
				updated.Extensions = container.Extensions
			case "image":
				updated.Image = container.Image
			case "snapshotkey":
				updated.SnapshotKey = container.SnapshotKey
			default:
				return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID)
			}
		}

		if err := validateContainer(&updated); err != nil {
			return errors.Wrap(err, "update failed validation")
		}

		updated.CreatedAt = createdat
		updated.UpdatedAt = time.Now().UTC()
		if err := writeContainer(cbkt, &updated); err != nil {
			return errors.Wrapf(err, "failed to write container %q", container.ID)
		}

		return nil
	}); err != nil {
		return containers.Container{}, err
	}

	return updated, nil
}

func (s *containerStore) Delete(ctx context.Context, id string) error {
	namespace, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return err
	}

	return update(ctx, s.db, func(tx *bolt.Tx) error {
		bkt := getContainersBucket(tx, namespace)
		if bkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %q in namespace %q", id, namespace)
		}

		if err := bkt.DeleteBucket([]byte(id)); err != nil {
			if err == bolt.ErrBucketNotFound {
				err = errors.Wrapf(errdefs.ErrNotFound, "container %v", id)
			}
			return err
		}

		atomic.AddUint32(&s.db.dirty, 1)

		return nil
	})
}

func validateContainer(container *containers.Container) error {
	if err := identifiers.Validate(container.ID); err != nil {
		return errors.Wrap(err, "container.ID")
	}

	for k := range container.Extensions {
		if k == "" {
			return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Extension keys must not be zero-length")
		}
	}

	// image has no validation
	for k, v := range container.Labels {
		if err := labels.Validate(k, v); err != nil {
			return errors.Wrapf(err, "containers.Labels")
		}
	}

	if container.Runtime.Name == "" {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name must be set")
	}

	if container.Spec == nil {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Spec must be set")
	}

	if container.SnapshotKey != "" && container.Snapshotter == "" {
		return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set if container.SnapshotKey is set")
	}

	return nil
}

func readContainer(container *containers.Container, bkt *bolt.Bucket) error {
	labels, err := boltutil.ReadLabels(bkt)
	if err != nil {
		return err
	}
	container.Labels = labels

	if err := boltutil.ReadTimestamps(bkt, &container.CreatedAt, &container.UpdatedAt); err != nil {
		return err
	}

	return bkt.ForEach(func(k, v []byte) error {
		switch string(k) {
		case string(bucketKeyImage):
			container.Image = string(v)
		case string(bucketKeyRuntime):
			rbkt := bkt.Bucket(bucketKeyRuntime)
			if rbkt == nil {
				return nil // skip runtime. should be an error?
			}

			n := rbkt.Get(bucketKeyName)
			if n != nil {
				container.Runtime.Name = string(n)
			}

			obkt := rbkt.Get(bucketKeyOptions)
			if obkt == nil {
				return nil
			}

			var any types.Any
			if err := proto.Unmarshal(obkt, &any); err != nil {
				return err
			}
			container.Runtime.Options = &any
		case string(bucketKeySpec):
			var any types.Any
			if err := proto.Unmarshal(v, &any); err != nil {
				return err
			}
			container.Spec = &any
		case string(bucketKeySnapshotKey):
			container.SnapshotKey = string(v)
		case string(bucketKeySnapshotter):
			container.Snapshotter = string(v)
		case string(bucketKeyExtensions):
			ebkt := bkt.Bucket(bucketKeyExtensions)
			if ebkt == nil {
				return nil
			}

			extensions := make(map[string]types.Any)
			if err := ebkt.ForEach(func(k, v []byte) error {
				var a types.Any
				if err := proto.Unmarshal(v, &a); err != nil {
					return err
				}

				extensions[string(k)] = a
				return nil
			}); err != nil {
				return err
			}

			container.Extensions = extensions
		}

		return nil
	})
}

func writeContainer(bkt *bolt.Bucket, container *containers.Container) error {
	if err := boltutil.WriteTimestamps(bkt, container.CreatedAt, container.UpdatedAt); err != nil {
		return err
	}

	if container.Spec != nil {
		spec, err := container.Spec.Marshal()
		if err != nil {
			return err
		}

		if err := bkt.Put(bucketKeySpec, spec); err != nil {
			return err
		}
	}

	for _, v := range [][2][]byte{
		{bucketKeyImage, []byte(container.Image)},
		{bucketKeySnapshotter, []byte(container.Snapshotter)},
		{bucketKeySnapshotKey, []byte(container.SnapshotKey)},
	} {
		if err := bkt.Put(v[0], v[1]); err != nil {
			return err
		}
	}

	if rbkt := bkt.Bucket(bucketKeyRuntime); rbkt != nil {
		if err := bkt.DeleteBucket(bucketKeyRuntime); err != nil {
			return err
		}
	}

	rbkt, err := bkt.CreateBucket(bucketKeyRuntime)
	if err != nil {
		return err
	}

	if err := rbkt.Put(bucketKeyName, []byte(container.Runtime.Name)); err != nil {
		return err
	}

	if len(container.Extensions) > 0 {
		ebkt, err := bkt.CreateBucketIfNotExists(bucketKeyExtensions)
		if err != nil {
			return err
		}

		for name, ext := range container.Extensions {
			p, err := proto.Marshal(&ext)
			if err != nil {
				return err
			}

			if err := ebkt.Put([]byte(name), p); err != nil {
				return err
			}
		}
	}

	if container.Runtime.Options != nil {
		data, err := proto.Marshal(container.Runtime.Options)
		if err != nil {
			return err
		}

		if err := rbkt.Put(bucketKeyOptions, data); err != nil {
			return err
		}
	}

	return boltutil.WriteLabels(bkt, container.Labels)
}
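Editor's sketch: the field-mask behavior of containerStore.Update. With the "labels.stage" field path, only that label key is written; every other field of the stored container is preserved. The container ID, label values, and bumpStageLabel wrapper are invented, and the *metadata.DB handle is assumed to come from metadata.NewDB elsewhere.

package example

import (
	"context"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/metadata"
	"github.com/containerd/containerd/namespaces"
)

// bumpStageLabel changes a single label on an already-stored container.
func bumpStageLabel(ctx context.Context, db *metadata.DB) (containers.Container, error) {
	ctx = namespaces.WithNamespace(ctx, "default")
	store := metadata.NewContainerStore(db)
	return store.Update(ctx,
		containers.Container{
			ID:     "builder",
			Labels: map[string]string{"stage": "compile"},
		},
		"labels.stage", // field mask: touch only this label key
	)
}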
891
vendor/github.com/containerd/containerd/metadata/content.go
generated
vendored
Normal file
@@ -0,0 +1,891 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package metadata

import (
	"context"
	"encoding/binary"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/filters"
	"github.com/containerd/containerd/labels"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/metadata/boltutil"
	"github.com/containerd/containerd/namespaces"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)
type contentStore struct {
	content.Store
	db     *DB
	shared bool
	l      sync.RWMutex
}

// newContentStore returns a namespaced content store using an existing
// content store interface.
//
// policy defines the sharing behavior for content between namespaces. Both
// modes result in shared storage in the backend for committed content. Choose
// "shared" to prevent separate namespaces from having to pull the same content
// twice. Choose "isolated" if the content must not be shared between
// namespaces.
//
// If the policy is "shared", writes will try to resolve the "expected" digest
// against the backend, allowing imports of content from other namespaces. In
// "isolated" mode, the client must prove they have the content by providing
// the entire blob before the content can be added to another namespace.
//
// Since we have only two policies right now, it's simpler to represent the
// policy internally as a bool.
func newContentStore(db *DB, shared bool, cs content.Store) *contentStore {
	return &contentStore{
		Store:  cs,
		db:     db,
		shared: shared,
	}
}
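A hedged sketch of how the two policies get selected in practice: metadata.NewDB (defined in db.go later in this diff) defaults to the shared policy and hands the resolved flag to newContentStore; passing WithPolicyIsolated flips it. The bolt path is a placeholder, and cs/ss are assumed to be an existing content store and snapshotter map supplied by the caller.

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/metadata"
	"github.com/containerd/containerd/snapshots"
	bolt "go.etcd.io/bbolt"
)

// openMetaDB wires up a metadata DB with the isolated policy.
func openMetaDB(ctx context.Context, cs content.Store, ss map[string]snapshots.Snapshotter) (*metadata.DB, error) {
	bdb, err := bolt.Open("/var/lib/example/meta.db", 0600, nil) // hypothetical path
	if err != nil {
		return nil, err
	}

	// Default policy is "shared"; WithPolicyIsolated opts out of
	// cross-namespace content reuse.
	mdb := metadata.NewDB(bdb, cs, ss, metadata.WithPolicyIsolated)
	if err := mdb.Init(ctx); err != nil {
		return nil, err
	}
	return mdb, nil
}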
func (cs *contentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return content.Info{}, err
	}

	var info content.Info
	if err := view(ctx, cs.db, func(tx *bolt.Tx) error {
		bkt := getBlobBucket(tx, ns, dgst)
		if bkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst)
		}

		info.Digest = dgst
		return readInfo(&info, bkt)
	}); err != nil {
		return content.Info{}, err
	}

	return info, nil
}

func (cs *contentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return content.Info{}, err
	}

	cs.l.RLock()
	defer cs.l.RUnlock()

	updated := content.Info{
		Digest: info.Digest,
	}
	if err := update(ctx, cs.db, func(tx *bolt.Tx) error {
		bkt := getBlobBucket(tx, ns, info.Digest)
		if bkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", info.Digest)
		}

		if err := readInfo(&updated, bkt); err != nil {
			return errors.Wrapf(err, "info %q", info.Digest)
		}

		if len(fieldpaths) > 0 {
			for _, path := range fieldpaths {
				if strings.HasPrefix(path, "labels.") {
					if updated.Labels == nil {
						updated.Labels = map[string]string{}
					}

					key := strings.TrimPrefix(path, "labels.")
					updated.Labels[key] = info.Labels[key]
					continue
				}

				switch path {
				case "labels":
					updated.Labels = info.Labels
				default:
					return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest)
				}
			}
		} else {
			// Set mutable fields
			updated.Labels = info.Labels
		}
		if err := validateInfo(&updated); err != nil {
			return err
		}

		updated.UpdatedAt = time.Now().UTC()
		return writeInfo(&updated, bkt)
	}); err != nil {
		return content.Info{}, err
	}
	return updated, nil
}
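Update only mutates the fields named by fieldpaths; a "labels.<key>" path replaces just that one label. A hedged usage sketch (the helper name is made up; "containerd.io/gc.root" is a real containerd label used here purely as an example):

import (
	"context"

	"github.com/containerd/containerd/content"
	digest "github.com/opencontainers/go-digest"
)

// markRoot flips a single label on an existing blob; every other label is
// left untouched because only one "labels.<key>" fieldpath is named.
func markRoot(ctx context.Context, cs content.Store, dgst digest.Digest) error {
	_, err := cs.Update(ctx, content.Info{
		Digest: dgst,
		Labels: map[string]string{"containerd.io/gc.root": "true"},
	}, "labels.containerd.io/gc.root")
	return err
}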
func (cs *contentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error {
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return err
	}

	filter, err := filters.ParseAll(fs...)
	if err != nil {
		return err
	}

	// TODO: Batch results to keep from reading all info into memory
	var infos []content.Info
	if err := view(ctx, cs.db, func(tx *bolt.Tx) error {
		bkt := getBlobsBucket(tx, ns)
		if bkt == nil {
			return nil
		}

		return bkt.ForEach(func(k, v []byte) error {
			dgst, err := digest.Parse(string(k))
			if err != nil {
				// Not a digest, skip
				return nil
			}
			bbkt := bkt.Bucket(k)
			if bbkt == nil {
				return nil
			}
			info := content.Info{
				Digest: dgst,
			}
			if err := readInfo(&info, bkt.Bucket(k)); err != nil {
				return err
			}
			if filter.Match(adaptContentInfo(info)) {
				infos = append(infos, info)
			}
			return nil
		})
	}); err != nil {
		return err
	}

	for _, info := range infos {
		if err := fn(info); err != nil {
			return err
		}
	}

	return nil
}
func (cs *contentStore) Delete(ctx context.Context, dgst digest.Digest) error {
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return err
	}

	cs.l.RLock()
	defer cs.l.RUnlock()

	return update(ctx, cs.db, func(tx *bolt.Tx) error {
		bkt := getBlobBucket(tx, ns, dgst)
		if bkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst)
		}

		if err := getBlobsBucket(tx, ns).DeleteBucket([]byte(dgst.String())); err != nil {
			return err
		}
		if err := removeContentLease(ctx, tx, dgst); err != nil {
			return err
		}

		// Mark content store as dirty for triggering garbage collection
		atomic.AddUint32(&cs.db.dirty, 1)
		cs.db.dirtyCS = true

		return nil
	})
}
func (cs *contentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return nil, err
	}

	filter, err := filters.ParseAll(fs...)
	if err != nil {
		return nil, err
	}

	brefs := map[string]string{}
	if err := view(ctx, cs.db, func(tx *bolt.Tx) error {
		bkt := getIngestsBucket(tx, ns)
		if bkt == nil {
			return nil
		}

		return bkt.ForEach(func(k, v []byte) error {
			if v == nil {
				// TODO(dmcgowan): match name and potentially labels here
				brefs[string(k)] = string(bkt.Bucket(k).Get(bucketKeyRef))
			}
			return nil
		})
	}); err != nil {
		return nil, err
	}

	statuses := make([]content.Status, 0, len(brefs))
	for k, bref := range brefs {
		status, err := cs.Store.Status(ctx, bref)
		if err != nil {
			if errdefs.IsNotFound(err) {
				continue
			}
			return nil, err
		}
		status.Ref = k

		if filter.Match(adaptContentStatus(status)) {
			statuses = append(statuses, status)
		}
	}

	return statuses, nil
}

func getRef(tx *bolt.Tx, ns, ref string) string {
	bkt := getIngestBucket(tx, ns, ref)
	if bkt == nil {
		return ""
	}
	v := bkt.Get(bucketKeyRef)
	if len(v) == 0 {
		return ""
	}
	return string(v)
}
func (cs *contentStore) Status(ctx context.Context, ref string) (content.Status, error) {
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return content.Status{}, err
	}

	var bref string
	if err := view(ctx, cs.db, func(tx *bolt.Tx) error {
		bref = getRef(tx, ns, ref)
		if bref == "" {
			return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref)
		}

		return nil
	}); err != nil {
		return content.Status{}, err
	}

	st, err := cs.Store.Status(ctx, bref)
	if err != nil {
		return content.Status{}, err
	}
	st.Ref = ref
	return st, nil
}
func (cs *contentStore) Abort(ctx context.Context, ref string) error {
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return err
	}

	cs.l.RLock()
	defer cs.l.RUnlock()

	return update(ctx, cs.db, func(tx *bolt.Tx) error {
		ibkt := getIngestsBucket(tx, ns)
		if ibkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref)
		}
		bkt := ibkt.Bucket([]byte(ref))
		if bkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref)
		}
		bref := string(bkt.Get(bucketKeyRef))
		if bref == "" {
			return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref)
		}
		expected := string(bkt.Get(bucketKeyExpected))
		if err := ibkt.DeleteBucket([]byte(ref)); err != nil {
			return err
		}

		if err := removeIngestLease(ctx, tx, ref); err != nil {
			return err
		}

		// if not shared content, delete active ingest on backend
		if expected == "" {
			return cs.Store.Abort(ctx, bref)
		}

		return nil
	})
}
func (cs *contentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
	var wOpts content.WriterOpts
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil, err
		}
	}
	// TODO(AkihiroSuda): we could create a random string or one calculated based on the context
	// https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
	if wOpts.Ref == "" {
		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
	}
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return nil, err
	}

	cs.l.RLock()
	defer cs.l.RUnlock()

	var (
		w      content.Writer
		exists bool
		bref   string
	)
	if err := update(ctx, cs.db, func(tx *bolt.Tx) error {
		var shared bool
		if wOpts.Desc.Digest != "" {
			cbkt := getBlobBucket(tx, ns, wOpts.Desc.Digest)
			if cbkt != nil {
				// Add content to lease to prevent other reference removals
				// from affecting this object during a provided lease
				if err := addContentLease(ctx, tx, wOpts.Desc.Digest); err != nil {
					return errors.Wrap(err, "unable to lease content")
				}
				// Return error outside of transaction to ensure
				// commit succeeds with the lease.
				exists = true
				return nil
			}

			if cs.shared {
				if st, err := cs.Store.Info(ctx, wOpts.Desc.Digest); err == nil {
					// Ensure the expected size is the same; a mismatched
					// size is likely an error, but the caller must
					// resolve it on commit.
					if wOpts.Desc.Size == 0 || wOpts.Desc.Size == st.Size {
						shared = true
						wOpts.Desc.Size = st.Size
					}
				}
			}
		}

		bkt, err := createIngestBucket(tx, ns, wOpts.Ref)
		if err != nil {
			return err
		}

		leased, err := addIngestLease(ctx, tx, wOpts.Ref)
		if err != nil {
			return err
		}

		brefb := bkt.Get(bucketKeyRef)
		if brefb == nil {
			sid, err := bkt.NextSequence()
			if err != nil {
				return err
			}

			bref = createKey(sid, ns, wOpts.Ref)
			if err := bkt.Put(bucketKeyRef, []byte(bref)); err != nil {
				return err
			}
		} else {
			bref = string(brefb)
		}
		if !leased {
			// Add a timestamp to allow aborting once stale.
			// When a lease is set, the ingest should instead be aborted
			// after the lease it belonged to is deleted.
			// Expiration could be made configurable in the future to
			// give more control to the daemon, however leases
			// already give users more control of expiration.
			expireAt := time.Now().UTC().Add(24 * time.Hour)
			if err := writeExpireAt(expireAt, bkt); err != nil {
				return err
			}
		}

		if shared {
			if err := bkt.Put(bucketKeyExpected, []byte(wOpts.Desc.Digest)); err != nil {
				return err
			}
		} else {
			// Do not use the passed in expected value here since it was
			// already checked against the user metadata. The content must
			// be committed in the namespace before it will be seen as
			// available in the current namespace.
			desc := wOpts.Desc
			desc.Digest = ""
			w, err = cs.Store.Writer(ctx, content.WithRef(bref), content.WithDescriptor(desc))
		}
		return err
	}); err != nil {
		return nil, err
	}
	if exists {
		return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", wOpts.Desc.Digest)
	}

	return &namespacedWriter{
		ctx:       ctx,
		ref:       wOpts.Ref,
		namespace: ns,
		db:        cs.db,
		provider:  cs.Store,
		l:         &cs.l,
		w:         w,
		bref:      bref,
		started:   time.Now(),
		desc:      wOpts.Desc,
	}, nil
}
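A hedged sketch of driving this Writer from client code: open an ingest with a ref and an expected descriptor, stream the bytes, then Commit with the expected digest. The helper name, ref string, and inputs are placeholders; only the calls Writer, Write, Commit, and the AlreadyExists check come from the API shown above.

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// writeBlob ingests one blob through the namespaced store; the ref string is
// arbitrary but must be non-empty, per the check in Writer above.
func writeBlob(ctx context.Context, cs content.Store, data []byte, expected digest.Digest) error {
	w, err := cs.Writer(ctx,
		content.WithRef("example-ingest-ref"),
		content.WithDescriptor(ocispec.Descriptor{Size: int64(len(data)), Digest: expected}))
	if err != nil {
		if errdefs.IsAlreadyExists(err) {
			return nil // blob already visible in this namespace
		}
		return err
	}
	defer w.Close()

	if _, err := w.Write(data); err != nil {
		return err
	}
	// Commit validates size and digest, then records the blob in the namespace.
	return w.Commit(ctx, int64(len(data)), expected)
}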
type namespacedWriter struct {
	ctx       context.Context
	ref       string
	namespace string
	db        transactor
	provider  interface {
		content.Provider
		content.Ingester
	}
	l *sync.RWMutex

	w content.Writer

	bref    string
	started time.Time
	desc    ocispec.Descriptor
}
func (nw *namespacedWriter) Close() error {
	if nw.w != nil {
		return nw.w.Close()
	}
	return nil
}

func (nw *namespacedWriter) Write(p []byte) (int, error) {
	// if no writer, first copy and unshare before performing write
	if nw.w == nil {
		if len(p) == 0 {
			return 0, nil
		}

		if err := nw.createAndCopy(nw.ctx, nw.desc); err != nil {
			return 0, err
		}
	}

	return nw.w.Write(p)
}

func (nw *namespacedWriter) Digest() digest.Digest {
	if nw.w != nil {
		return nw.w.Digest()
	}
	return nw.desc.Digest
}

func (nw *namespacedWriter) Truncate(size int64) error {
	if nw.w != nil {
		return nw.w.Truncate(size)
	}
	desc := nw.desc
	desc.Size = size
	desc.Digest = ""
	return nw.createAndCopy(nw.ctx, desc)
}

func (nw *namespacedWriter) createAndCopy(ctx context.Context, desc ocispec.Descriptor) error {
	nwDescWithoutDigest := desc
	nwDescWithoutDigest.Digest = ""
	w, err := nw.provider.Writer(ctx, content.WithRef(nw.bref), content.WithDescriptor(nwDescWithoutDigest))
	if err != nil {
		return err
	}

	if desc.Size > 0 {
		ra, err := nw.provider.ReaderAt(ctx, nw.desc)
		if err != nil {
			return err
		}
		defer ra.Close()

		if err := content.CopyReaderAt(w, ra, desc.Size); err != nil {
			// Close the freshly created writer, not nw.w: createAndCopy is
			// only reached while nw.w is still nil, so calling nw.w.Close()
			// here would panic.
			w.Close()
			nw.w = nil
			return err
		}
	}
	nw.w = w

	return nil
}
func (nw *namespacedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	ctx = namespaces.WithNamespace(ctx, nw.namespace)

	nw.l.RLock()
	defer nw.l.RUnlock()

	var innerErr error

	if err := update(ctx, nw.db, func(tx *bolt.Tx) error {
		dgst, err := nw.commit(ctx, tx, size, expected, opts...)
		if err != nil {
			if !errdefs.IsAlreadyExists(err) {
				return err
			}
			innerErr = err
		}
		bkt := getIngestsBucket(tx, nw.namespace)
		if bkt != nil {
			if err := bkt.DeleteBucket([]byte(nw.ref)); err != nil && err != bolt.ErrBucketNotFound {
				return err
			}
		}
		if err := removeIngestLease(ctx, tx, nw.ref); err != nil {
			return err
		}
		return addContentLease(ctx, tx, dgst)
	}); err != nil {
		return err
	}

	return innerErr
}
func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, expected digest.Digest, opts ...content.Opt) (digest.Digest, error) {
	var base content.Info
	for _, opt := range opts {
		if err := opt(&base); err != nil {
			if nw.w != nil {
				nw.w.Close()
			}
			return "", err
		}
	}
	if err := validateInfo(&base); err != nil {
		if nw.w != nil {
			nw.w.Close()
		}
		return "", err
	}

	var actual digest.Digest
	if nw.w == nil {
		if size != 0 && size != nw.desc.Size {
			return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, nw.desc.Size, size)
		}
		if expected != "" && expected != nw.desc.Digest {
			return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q unexpected digest", nw.ref)
		}
		size = nw.desc.Size
		actual = nw.desc.Digest
	} else {
		status, err := nw.w.Status()
		if err != nil {
			nw.w.Close()
			return "", err
		}
		if size != 0 && size != status.Offset {
			nw.w.Close()
			return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "%q failed size validation: %v != %v", nw.ref, status.Offset, size)
		}
		size = status.Offset

		if err := nw.w.Commit(ctx, size, expected); err != nil && !errdefs.IsAlreadyExists(err) {
			return "", err
		}
		actual = nw.w.Digest()
	}

	bkt, err := createBlobBucket(tx, nw.namespace, actual)
	if err != nil {
		if err == bolt.ErrBucketExists {
			return actual, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", actual)
		}
		return "", err
	}

	commitTime := time.Now().UTC()

	sizeEncoded, err := encodeInt(size)
	if err != nil {
		return "", err
	}

	if err := boltutil.WriteTimestamps(bkt, commitTime, commitTime); err != nil {
		return "", err
	}
	if err := boltutil.WriteLabels(bkt, base.Labels); err != nil {
		return "", err
	}
	return actual, bkt.Put(bucketKeySize, sizeEncoded)
}
func (nw *namespacedWriter) Status() (st content.Status, err error) {
	if nw.w != nil {
		st, err = nw.w.Status()
	} else {
		st.Offset = nw.desc.Size
		st.Total = nw.desc.Size
		st.StartedAt = nw.started
		st.UpdatedAt = nw.started
		st.Expected = nw.desc.Digest
	}
	if err == nil {
		st.Ref = nw.ref
	}
	return
}
func (cs *contentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
	if err := cs.checkAccess(ctx, desc.Digest); err != nil {
		return nil, err
	}
	return cs.Store.ReaderAt(ctx, desc)
}

func (cs *contentStore) checkAccess(ctx context.Context, dgst digest.Digest) error {
	ns, err := namespaces.NamespaceRequired(ctx)
	if err != nil {
		return err
	}

	return view(ctx, cs.db, func(tx *bolt.Tx) error {
		bkt := getBlobBucket(tx, ns, dgst)
		if bkt == nil {
			return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst)
		}
		return nil
	})
}
func validateInfo(info *content.Info) error {
	for k, v := range info.Labels {
		// Reject the info if any label fails validation (the original
		// condition inverted this check and would never fire).
		if err := labels.Validate(k, v); err != nil {
			return errors.Wrapf(err, "info.Labels")
		}
	}

	return nil
}
func readInfo(info *content.Info, bkt *bolt.Bucket) error {
	if err := boltutil.ReadTimestamps(bkt, &info.CreatedAt, &info.UpdatedAt); err != nil {
		return err
	}

	labels, err := boltutil.ReadLabels(bkt)
	if err != nil {
		return err
	}
	info.Labels = labels

	if v := bkt.Get(bucketKeySize); len(v) > 0 {
		info.Size, _ = binary.Varint(v)
	}

	return nil
}

func writeInfo(info *content.Info, bkt *bolt.Bucket) error {
	if err := boltutil.WriteTimestamps(bkt, info.CreatedAt, info.UpdatedAt); err != nil {
		return err
	}

	if err := boltutil.WriteLabels(bkt, info.Labels); err != nil {
		return errors.Wrapf(err, "writing labels for info %v", info.Digest)
	}

	// Write size
	sizeEncoded, err := encodeInt(info.Size)
	if err != nil {
		return err
	}

	return bkt.Put(bucketKeySize, sizeEncoded)
}
func readExpireAt(bkt *bolt.Bucket) (*time.Time, error) {
	v := bkt.Get(bucketKeyExpireAt)
	if v == nil {
		return nil, nil
	}
	t := &time.Time{}
	if err := t.UnmarshalBinary(v); err != nil {
		return nil, err
	}
	return t, nil
}

func writeExpireAt(expire time.Time, bkt *bolt.Bucket) error {
	expireAt, err := expire.MarshalBinary()
	if err != nil {
		return err
	}
	return bkt.Put(bucketKeyExpireAt, expireAt)
}
func (cs *contentStore) garbageCollect(ctx context.Context) (d time.Duration, err error) {
	cs.l.Lock()
	t1 := time.Now()
	defer func() {
		if err == nil {
			d = time.Since(t1)
		}
		cs.l.Unlock()
	}()

	contentSeen := map[string]struct{}{}
	ingestSeen := map[string]struct{}{}
	if err := cs.db.View(func(tx *bolt.Tx) error {
		v1bkt := tx.Bucket(bucketKeyVersion)
		if v1bkt == nil {
			return nil
		}

		// iterate through each namespace
		v1c := v1bkt.Cursor()

		for k, v := v1c.First(); k != nil; k, v = v1c.Next() {
			if v != nil {
				continue
			}

			cbkt := v1bkt.Bucket(k).Bucket(bucketKeyObjectContent)
			if cbkt == nil {
				continue
			}
			bbkt := cbkt.Bucket(bucketKeyObjectBlob)
			if bbkt != nil {
				if err := bbkt.ForEach(func(ck, cv []byte) error {
					if cv == nil {
						contentSeen[string(ck)] = struct{}{}
					}
					return nil
				}); err != nil {
					return err
				}
			}

			ibkt := cbkt.Bucket(bucketKeyObjectIngests)
			if ibkt != nil {
				if err := ibkt.ForEach(func(ref, v []byte) error {
					if v == nil {
						bkt := ibkt.Bucket(ref)
						// expected here may be from a different namespace,
						// so it must be explicitly retained from the ingest
						// in case it was removed from the other namespace
						expected := bkt.Get(bucketKeyExpected)
						if len(expected) > 0 {
							contentSeen[string(expected)] = struct{}{}
						}
						bref := bkt.Get(bucketKeyRef)
						if len(bref) > 0 {
							ingestSeen[string(bref)] = struct{}{}
						}
					}
					return nil
				}); err != nil {
					return err
				}
			}
		}

		return nil
	}); err != nil {
		return 0, err
	}

	err = cs.Store.Walk(ctx, func(info content.Info) error {
		if _, ok := contentSeen[info.Digest.String()]; !ok {
			if err := cs.Store.Delete(ctx, info.Digest); err != nil {
				return err
			}
			log.G(ctx).WithField("digest", info.Digest).Debug("removed content")
		}
		return nil
	})
	if err != nil {
		return
	}

	// If the content store has implemented a more efficient walk function
	// then use that; else fall back to reading all statuses, which may
	// cause reading of unneeded metadata.
	type statusWalker interface {
		WalkStatusRefs(context.Context, func(string) error) error
	}
	if w, ok := cs.Store.(statusWalker); ok {
		err = w.WalkStatusRefs(ctx, func(ref string) error {
			if _, ok := ingestSeen[ref]; !ok {
				if err := cs.Store.Abort(ctx, ref); err != nil {
					return err
				}
				log.G(ctx).WithField("ref", ref).Debug("cleanup aborting ingest")
			}
			return nil
		})
	} else {
		var statuses []content.Status
		statuses, err = cs.Store.ListStatuses(ctx)
		if err != nil {
			return 0, err
		}
		for _, status := range statuses {
			if _, ok := ingestSeen[status.Ref]; !ok {
				if err = cs.Store.Abort(ctx, status.Ref); err != nil {
					return
				}
				log.G(ctx).WithField("ref", status.Ref).Debug("cleanup aborting ingest")
			}
		}
	}
	return
}
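The statusWalker assertion above is the usual Go pattern for optional interface upgrades: a backend opts into a faster path simply by implementing one extra method. A hedged sketch of a store opting in (the type and field names are illustrative, not part of containerd):

import (
	"context"
	"sync"

	"github.com/containerd/containerd/content"
)

// fastStore tracks live ingest refs in memory and advertises them through
// WalkStatusRefs, letting the collector above skip the ListStatuses fallback.
type fastStore struct {
	content.Store
	mu   sync.Mutex
	refs map[string]struct{}
}

func (s *fastStore) WalkStatusRefs(ctx context.Context, fn func(string) error) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	for ref := range s.refs {
		if err := fn(ref); err != nil {
			return err
		}
	}
	return nil
}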
444
vendor/github.com/containerd/containerd/metadata/db.go
generated
vendored
Normal file
@@ -0,0 +1,444 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package metadata

import (
	"context"
	"encoding/binary"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/gc"
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/snapshots"
	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)
const (
	// schemaVersion represents the schema version of
	// the database. This schema version represents the
	// structure of the data in the database. The schema
	// can evolve at any time, but any backwards
	// incompatible changes or structural changes require
	// bumping the schema version.
	schemaVersion = "v1"

	// dbVersion represents updates to the schema
	// version which are additions and compatible with
	// prior versions of the same schema.
	dbVersion = 3
)
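dbVersion is persisted under bucketKeyDBVersion through the package's encodeInt helper, which is not shown in this diff. A minimal sketch of the same round trip with the standard library, assuming encodeInt is a thin wrapper over binary.PutVarint (the signed-varint encoding matches how Init below reads it back with binary.Varint):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	const dbVersion = 3 // mirrors the constant above

	// Encode the way encodeInt presumably does: a signed varint.
	buf := make([]byte, binary.MaxVarintLen64)
	encoded := buf[:binary.PutVarint(buf, int64(dbVersion))]

	// Decode the way Init does with binary.Varint.
	v, _ := binary.Varint(encoded)
	fmt.Println("decoded db version:", v) // 3
}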
// DBOpt configures how we set up the DB
type DBOpt func(*dbOptions)

// WithPolicyIsolated isolates contents between namespaces
func WithPolicyIsolated(o *dbOptions) {
	o.shared = false
}

// dbOptions configure db options.
type dbOptions struct {
	shared bool
}
// DB represents a metadata database backed by a bolt
// database. The database is fully namespaced and stores
// image, container, namespace, snapshot, and content data
// while proxying data shared across namespaces to backend
// datastores for content and snapshots.
type DB struct {
	db *bolt.DB
	ss map[string]*snapshotter
	cs *contentStore

	// wlock is used to protect access to the data structures during garbage
	// collection. While the wlock is held no writable transactions can be
	// opened, preventing changes from occurring between the mark and
	// sweep phases without preventing read transactions.
	wlock sync.RWMutex

	// dirty flag indicates that references have been removed which require
	// a garbage collection to ensure the database is clean. This tracks
	// the number of dirty operations. This should be updated and read
	// atomically if outside of wlock.Lock.
	dirty uint32

	// dirtySS and dirtyCS flags keep track of datastores which have had
	// deletions since the last garbage collection. These datastores will
	// be garbage collected during the next garbage collection. These
	// should only be updated inside of a write transaction or wlock.Lock.
	dirtySS map[string]struct{}
	dirtyCS bool

	// mutationCallbacks are called after each mutation with a flag
	// indicating whether any dirty flags are set
	mutationCallbacks []func(bool)

	dbopts dbOptions
}
// NewDB creates a new metadata database using the provided
// bolt database, content store, and snapshotters.
func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshots.Snapshotter, opts ...DBOpt) *DB {
	m := &DB{
		db:      db,
		ss:      make(map[string]*snapshotter, len(ss)),
		dirtySS: map[string]struct{}{},
		dbopts: dbOptions{
			shared: true,
		},
	}

	for _, opt := range opts {
		opt(&m.dbopts)
	}

	// Initialize data stores
	m.cs = newContentStore(m, m.dbopts.shared, cs)
	for name, sn := range ss {
		m.ss[name] = newSnapshotter(m, name, sn)
	}

	return m
}
// Init ensures the database is at the correct version
// and performs any needed migrations.
func (m *DB) Init(ctx context.Context) error {
	// errSkip is used when no migration or version needs to be written
	// to the database and the transaction can be immediately rolled
	// back rather than performing a much slower and unnecessary commit.
	var errSkip = errors.New("skip update")

	err := m.db.Update(func(tx *bolt.Tx) error {
		var (
			// current schema and version
			schema  = "v0"
			version = 0
		)

		// i represents the index of the first migration
		// which must be run to get the database up to date.
		// The migration's version will be checked in reverse
		// order, decrementing i for each migration which
		// represents a version newer than the current
		// database version
		i := len(migrations)

		for ; i > 0; i-- {
			migration := migrations[i-1]

			bkt := tx.Bucket([]byte(migration.schema))
			if bkt == nil {
				// Hasn't encountered another schema, go to next migration
				if schema == "v0" {
					continue
				}
				break
			}
			if schema == "v0" {
				schema = migration.schema
				vb := bkt.Get(bucketKeyDBVersion)
				if vb != nil {
					v, _ := binary.Varint(vb)
					version = int(v)
				}
			}

			if version >= migration.version {
				break
			}
		}

		// Previous version of database found
		if schema != "v0" {
			updates := migrations[i:]

			// No migration updates, return immediately
			if len(updates) == 0 {
				return errSkip
			}

			for _, m := range updates {
				t0 := time.Now()
				if err := m.migrate(tx); err != nil {
					return errors.Wrapf(err, "failed to migrate to %s.%d", m.schema, m.version)
				}
				log.G(ctx).WithField("d", time.Since(t0)).Debugf("finished database migration to %s.%d", m.schema, m.version)
			}
		}

		bkt, err := tx.CreateBucketIfNotExists(bucketKeyVersion)
		if err != nil {
			return err
		}

		versionEncoded, err := encodeInt(dbVersion)
		if err != nil {
			return err
		}

		return bkt.Put(bucketKeyDBVersion, versionEncoded)
	})
	if err == errSkip {
		err = nil
	}
	return err
}
// ContentStore returns a namespaced content store
// proxied to a content store.
func (m *DB) ContentStore() content.Store {
	if m.cs == nil {
		return nil
	}
	return m.cs
}

// Snapshotter returns a namespaced snapshotter for
// the requested snapshotter name, proxied to a snapshotter.
func (m *DB) Snapshotter(name string) snapshots.Snapshotter {
	sn, ok := m.ss[name]
	if !ok {
		return nil
	}
	return sn
}

// Snapshotters returns all available snapshotters.
func (m *DB) Snapshotters() map[string]snapshots.Snapshotter {
	ss := make(map[string]snapshots.Snapshotter, len(m.ss))
	for n, sn := range m.ss {
		ss[n] = sn
	}
	return ss
}
// View runs a readonly transaction on the metadata store.
func (m *DB) View(fn func(*bolt.Tx) error) error {
	return m.db.View(fn)
}

// Update runs a writable transaction on the metadata store.
func (m *DB) Update(fn func(*bolt.Tx) error) error {
	m.wlock.RLock()
	defer m.wlock.RUnlock()
	err := m.db.Update(fn)
	if err == nil {
		dirty := atomic.LoadUint32(&m.dirty) > 0
		for _, fn := range m.mutationCallbacks {
			fn(dirty)
		}
	}

	return err
}
// RegisterMutationCallback registers a function to be called after each
// metadata mutation has been performed.
//
// The callback receives a boolean argument indicating whether a deletion
// has occurred since the last garbage collection.
func (m *DB) RegisterMutationCallback(fn func(bool)) {
	m.wlock.Lock()
	m.mutationCallbacks = append(m.mutationCallbacks, fn)
	m.wlock.Unlock()
}
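A hedged sketch of using this hook: schedule a garbage collection whenever a mutation reports pending deletions. The helper name and the channel-based scheduler are illustrative, not part of this package.

import "github.com/containerd/containerd/metadata"

// watchMutations returns a channel that fires when a GC pass is needed.
func watchMutations(mdb *metadata.DB) <-chan struct{} {
	gcNeeded := make(chan struct{}, 1)
	mdb.RegisterMutationCallback(func(deleted bool) {
		if deleted {
			select {
			case gcNeeded <- struct{}{}:
			default: // a collection is already pending
			}
		}
	})
	return gcNeeded
}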
// GCStats holds the duration for the different phases of the garbage collector
type GCStats struct {
	MetaD     time.Duration
	ContentD  time.Duration
	SnapshotD map[string]time.Duration
}

// Elapsed returns the duration which elapsed during a collection
func (s GCStats) Elapsed() time.Duration {
	return s.MetaD
}
// GarbageCollect starts garbage collection
func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) {
	m.wlock.Lock()
	t1 := time.Now()

	marked, err := m.getMarked(ctx)
	if err != nil {
		m.wlock.Unlock()
		return nil, err
	}

	if err := m.db.Update(func(tx *bolt.Tx) error {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		rm := func(ctx context.Context, n gc.Node) error {
			if _, ok := marked[n]; ok {
				return nil
			}

			if n.Type == ResourceSnapshot {
				if idx := strings.IndexRune(n.Key, '/'); idx > 0 {
					m.dirtySS[n.Key[:idx]] = struct{}{}
				}
			} else if n.Type == ResourceContent || n.Type == ResourceIngest {
				m.dirtyCS = true
			}
			return remove(ctx, tx, n)
		}

		if err := scanAll(ctx, tx, rm); err != nil {
			return errors.Wrap(err, "failed to scan and remove")
		}

		return nil
	}); err != nil {
		m.wlock.Unlock()
		return nil, err
	}

	var stats GCStats
	var wg sync.WaitGroup

	// reset dirty, no need for atomic inside of wlock.Lock
	m.dirty = 0

	if len(m.dirtySS) > 0 {
		var sl sync.Mutex
		stats.SnapshotD = map[string]time.Duration{}
		wg.Add(len(m.dirtySS))
		for snapshotterName := range m.dirtySS {
			log.G(ctx).WithField("snapshotter", snapshotterName).Debug("schedule snapshotter cleanup")
			go func(snapshotterName string) {
				st1 := time.Now()
				m.cleanupSnapshotter(snapshotterName)

				sl.Lock()
				stats.SnapshotD[snapshotterName] = time.Since(st1)
				sl.Unlock()

				wg.Done()
			}(snapshotterName)
		}
		m.dirtySS = map[string]struct{}{}
	}

	if m.dirtyCS {
		wg.Add(1)
		log.G(ctx).Debug("schedule content cleanup")
		go func() {
			ct1 := time.Now()
			m.cleanupContent()
			stats.ContentD = time.Since(ct1)
			wg.Done()
		}()
		m.dirtyCS = false
	}

	stats.MetaD = time.Since(t1)
	m.wlock.Unlock()

	wg.Wait()

	return stats, err
}
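GarbageCollect returns its result as a gc.Stats; the concrete value is the GCStats defined above, so a caller can type-assert to reach the per-phase durations. A hedged sketch (the helper name is made up; mdb is a *metadata.DB):

import (
	"context"

	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/metadata"
)

// collect runs one collection pass and logs the phase timings.
func collect(ctx context.Context, mdb *metadata.DB) error {
	stats, err := mdb.GarbageCollect(ctx)
	if err != nil {
		return err
	}
	if gs, ok := stats.(metadata.GCStats); ok {
		log.G(ctx).WithField("metadata", gs.MetaD).
			WithField("content", gs.ContentD).
			Debug("garbage collection finished")
	}
	return nil
}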
func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) {
	var marked map[gc.Node]struct{}
	if err := m.db.View(func(tx *bolt.Tx) error {
		ctx, cancel := context.WithCancel(ctx)
		defer cancel()

		var (
			nodes []gc.Node
			wg    sync.WaitGroup
			roots = make(chan gc.Node)
		)
		wg.Add(1)
		go func() {
			defer wg.Done()
			for n := range roots {
				nodes = append(nodes, n)
			}
		}()
		// Call roots
		if err := scanRoots(ctx, tx, roots); err != nil {
			cancel()
			return err
		}
		close(roots)
		wg.Wait()

		refs := func(n gc.Node) ([]gc.Node, error) {
			var sn []gc.Node
			if err := references(ctx, tx, n, func(nn gc.Node) {
				sn = append(sn, nn)
			}); err != nil {
				return nil, err
			}
			return sn, nil
		}

		reachable, err := gc.Tricolor(nodes, refs)
		if err != nil {
			return err
		}
		marked = reachable
		return nil
	}); err != nil {
		return nil, err
	}
	return marked, nil
}
func (m *DB) cleanupSnapshotter(name string) (time.Duration, error) {
	ctx := context.Background()
	sn, ok := m.ss[name]
	if !ok {
		return 0, nil
	}

	d, err := sn.garbageCollect(ctx)
	logger := log.G(ctx).WithField("snapshotter", name)
	if err != nil {
		logger.WithError(err).Warn("snapshot garbage collection failed")
	} else {
		logger.WithField("d", d).Debugf("snapshot garbage collected")
	}
	return d, err
}

func (m *DB) cleanupContent() (time.Duration, error) {
	ctx := context.Background()
	if m.cs == nil {
		return 0, nil
	}

	d, err := m.cs.garbageCollect(ctx)
	if err != nil {
		log.G(ctx).WithError(err).Warn("content garbage collection failed")
	} else {
		log.G(ctx).WithField("d", d).Debugf("content garbage collected")
	}

	return d, err
}
Some files were not shown because too many files have changed in this diff.