Merge pull request #3668 from deitch/build-image

This commit is contained in:
Avi Deitcher 2021-05-18 22:36:34 +03:00 committed by GitHub
commit 1096165072
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 177 additions and 139 deletions

View File

@ -6,8 +6,10 @@ import (
"io"
"github.com/containerd/containerd/reference"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/tarball"
lktspec "github.com/linuxkit/linuxkit/src/cmd/linuxkit/spec"
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
)
@ -70,6 +72,28 @@ func (c ImageSource) TarReader() (io.ReadCloser, error) {
return mutate.Extract(image), nil
}
// V1TarReader returns an io.ReadCloser that streams the image as a v1
// tarball (the format produced by `docker save` / consumed by `docker load`).
func (c ImageSource) V1TarReader() (io.ReadCloser, error) {
	imageName := c.ref.String()
	refName, err := name.ParseReference(imageName)
	if err != nil {
		return nil, fmt.Errorf("error parsing image name: %v", err)
	}
	// get a reference to the image
	image, err := c.provider.findImage(imageName, c.architecture)
	if err != nil {
		return nil, err
	}
	// convert the writer to a reader
	r, w := io.Pipe()
	go func() {
		// Propagate any tarball.Write failure to the reader side of the
		// pipe; a plain Close() would make the reader see a clean EOF on a
		// truncated tarball. CloseWithError(nil) behaves like Close().
		w.CloseWithError(tarball.Write(refName, image, w))
	}()
	return r, nil
}
// Descriptor return the descriptor of the image.
func (c ImageSource) Descriptor() *v1.Descriptor {
return c.descriptor

View File

@ -88,6 +88,16 @@ func Export(container string) (io.ReadCloser, error) {
return responseBody, err
}
// Save saves the provided image ref via the Docker API and returns a
// reader over the resulting tar stream (equivalent to `docker save`).
// The caller is responsible for closing the returned reader.
func Save(image string) (io.ReadCloser, error) {
	log.Debugf("docker save: %s", image)
	cli, err := Client()
	if err != nil {
		// include the underlying cause rather than discarding it
		return nil, errors.New("could not initialize Docker API client: " + err.Error())
	}
	return cli.ImageSave(context.Background(), []string{image})
}
// Rm remove the given container from docker.
func Rm(container string) error {
log.Debugf("docker rm: %s", container)

View File

@ -77,6 +77,11 @@ func (d ImageSource) TarReader() (io.ReadCloser, error) {
}, nil
}
// V1TarReader returns an io.ReadCloser that streams the `docker save`
// output for this image reference.
func (d ImageSource) V1TarReader() (io.ReadCloser, error) {
	ref := d.ref.String()
	return Save(ref)
}
// Descriptor return the descriptor of the image.
func (d ImageSource) Descriptor() *v1.Descriptor {
return nil

View File

@ -33,24 +33,31 @@ func pkgBuildPush(args []string, withPush bool) {
flags.PrintDefaults()
}
force := flags.Bool("force", false, "Force rebuild")
force := flags.Bool("force", false, "Force rebuild even if image is in local cache")
docker := flags.Bool("docker", false, "Store the built image in the docker image cache instead of the default linuxkit cache")
platforms := flags.String("platforms", "", "Which platforms to build for, defaults to all of those for which the package can be built")
skipPlatforms := flags.String("skip-platforms", "", "Platforms that should be skipped, even if present in build.yml")
builders := flags.String("builders", "", "Which builders to use for which platforms, e.g. linux/arm64=docker-context-arm64, overrides defaults and environment variables, see https://github.com/linuxkit/linuxkit/blob/master/docs/packages.md#Providing-native-builder-nodes")
buildCacheDir := flags.String("cache", defaultLinuxkitCache(), "Directory for storing built image, incompatible with --docker")
// some logic clarification:
// pkg build - always builds unless the image is in the cache
// pkg build --force - always builds, even if the image is in the cache
// pkg push - always builds unless the image is in the cache
// pkg push --force - always builds, even if the image is in the cache
// pkg push --nobuild - skips the build; fails if the image is not in the cache
// pkg push --nobuild --force - nonsensical (the flags conflict)
var (
release *string
nobuild, manifest, image *bool
imageRef = false
release *string
nobuild, manifest *bool
nobuildRef = false
)
image = &imageRef
nobuild = &nobuildRef
if withPush {
release = flags.String("release", "", "Release the given version")
nobuild = flags.Bool("nobuild", false, "Skip the build")
nobuild = flags.Bool("nobuild", false, "Skip building the image before pushing, conflicts with -force")
manifest = flags.Bool("manifest", true, "Create and push multi-arch manifest")
image = flags.Bool("image", true, "Build and push image for the current platform")
}
pkgs, err := pkglib.NewFromCLI(flags, args...)
@ -59,10 +66,12 @@ func pkgBuildPush(args []string, withPush bool) {
os.Exit(1)
}
var opts []pkglib.BuildOpt
if *image {
opts = append(opts, pkglib.WithBuildImage())
if *nobuild && *force {
fmt.Fprint(os.Stderr, "flags -force and -nobuild conflict")
os.Exit(1)
}
var opts []pkglib.BuildOpt
if *force {
opts = append(opts, pkglib.WithBuildForce())
}

View File

@ -68,14 +68,6 @@ func WithBuildPush() BuildOpt {
}
}
// WithBuildImage returns a BuildOpt that requests the image itself be built.
func WithBuildImage() BuildOpt {
	opt := func(bo *buildOpts) error {
		bo.image = true
		return nil
	}
	return opt
}
// WithBuildManifest creates a multi-arch manifest for the image
func WithBuildManifest() BuildOpt {
return func(bo *buildOpts) error {
@ -231,7 +223,7 @@ func (p Pkg) Build(bos ...BuildOpt) error {
}
}
if bo.image && !skipBuild {
if !skipBuild {
fmt.Fprintf(writer, "building %s\n", ref)
var (
args []string
@ -297,7 +289,7 @@ func (p Pkg) Build(bos ...BuildOpt) error {
// if requested docker, load the image up
if bo.targetDocker {
cacheSource := c.NewSource(&ref, arch, desc)
reader, err := cacheSource.TarReader()
reader, err := cacheSource.V1TarReader()
if err != nil {
return fmt.Errorf("unable to get reader from cache: %v", err)
}
@ -391,8 +383,6 @@ func (p Pkg) buildArch(d dockerRunner, c lktspec.CacheProvider, arch string, arg
// find the desired builder
builderName := getBuilderForPlatform(arch, bo.builders)
d.setBuildCtx(&buildCtx{sources: p.sources})
// set the target
var (
buildxOutput string
@ -427,10 +417,11 @@ func (p Pkg) buildArch(d dockerRunner, c lktspec.CacheProvider, arch string, arg
})
args = append(args, fmt.Sprintf("--output=%s", buildxOutput))
buildCtx := &buildCtx{sources: p.sources}
platform := fmt.Sprintf("linux/%s", arch)
archArgs := append(args, "--platform")
archArgs = append(archArgs, platform)
if err := d.build(tagArch, p.path, builderName, platform, stdout, archArgs...); err != nil {
if err := d.build(tagArch, p.path, builderName, platform, buildCtx.Reader(), stdout, archArgs...); err != nil {
stdoutCloser()
if strings.Contains(err.Error(), "executor failed running [/dev/.buildkit_qemu_emulator") {
return nil, fmt.Errorf("buildkit was unable to emulate %s. check binfmt has been set up and works for this platform: %v", platform, err)
@ -449,71 +440,90 @@ func (p Pkg) buildArch(d dockerRunner, c lktspec.CacheProvider, arch string, arg
type buildCtx struct {
sources []pkgSource
err error
r io.ReadCloser
}
// Copy iterates over the sources, tars up the content after rewriting the paths.
// Reader gets an io.Reader by iterating over the sources, tarring up the content after rewriting the paths.
// It assumes that sources is sane, ie is well formed and the first part is an absolute path
// and that it exists. NewFromCLI() ensures that.
func (c *buildCtx) Copy(w io.WriteCloser) error {
func (c *buildCtx) Reader() io.ReadCloser {
r, w := io.Pipe()
tw := tar.NewWriter(w)
defer func() {
tw.Close()
w.Close()
}()
for _, s := range c.sources {
log.Debugf("Adding to build context: %s -> %s", s.src, s.dst)
go func() {
defer func() {
tw.Close()
w.Close()
}()
for _, s := range c.sources {
log.Debugf("Adding to build context: %s -> %s", s.src, s.dst)
f := func(p string, i os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("ctx: Walk error on %s: %v", p, err)
}
var link string
if i.Mode()&os.ModeSymlink != 0 {
var err error
link, err = os.Readlink(p)
f := func(p string, i os.FileInfo, err error) error {
if err != nil {
return fmt.Errorf("ctx: Failed to read symlink %s: %v", p, err)
return fmt.Errorf("ctx: Walk error on %s: %v", p, err)
}
}
h, err := tar.FileInfoHeader(i, link)
if err != nil {
return fmt.Errorf("ctx: Converting FileInfo for %s: %v", p, err)
}
rel, err := filepath.Rel(s.src, p)
if err != nil {
return err
}
h.Name = filepath.ToSlash(filepath.Join(s.dst, rel))
if err := tw.WriteHeader(h); err != nil {
return fmt.Errorf("ctx: Writing header for %s: %v", p, err)
}
var link string
if i.Mode()&os.ModeSymlink != 0 {
var err error
link, err = os.Readlink(p)
if err != nil {
return fmt.Errorf("ctx: Failed to read symlink %s: %v", p, err)
}
}
if !i.Mode().IsRegular() {
h, err := tar.FileInfoHeader(i, link)
if err != nil {
return fmt.Errorf("ctx: Converting FileInfo for %s: %v", p, err)
}
rel, err := filepath.Rel(s.src, p)
if err != nil {
return err
}
h.Name = filepath.ToSlash(filepath.Join(s.dst, rel))
if err := tw.WriteHeader(h); err != nil {
return fmt.Errorf("ctx: Writing header for %s: %v", p, err)
}
if !i.Mode().IsRegular() {
return nil
}
f, err := os.Open(p)
if err != nil {
return fmt.Errorf("ctx: Open %s: %v", p, err)
}
defer f.Close()
_, err = io.Copy(tw, f)
if err != nil {
return fmt.Errorf("ctx: Writing %s: %v", p, err)
}
return nil
}
f, err := os.Open(p)
if err != nil {
return fmt.Errorf("ctx: Open %s: %v", p, err)
if err := filepath.Walk(s.src, f); err != nil {
c.err = err
return
}
defer f.Close()
_, err = io.Copy(tw, f)
if err != nil {
return fmt.Errorf("ctx: Writing %s: %v", p, err)
}
return nil
}
}()
c.r = r
return c
}
if err := filepath.Walk(s.src, f); err != nil {
return err
}
// Read wraps the usual read, but allows us to surface an error recorded
// by the background tar-writing goroutine (stored in c.err).
func (c *buildCtx) Read(data []byte) (n int, err error) {
	if c.err != nil {
		// BUG FIX: return the stored error, not the nil named return value
		return 0, c.err
	}
	return c.r.Read(data)
}
return nil
// Close closes the underlying pipe reader for the build context stream.
func (c *buildCtx) Close() error {
	err := c.r.Close()
	return err
}
// getBuilderForPlatform given an arch, find the context for the desired builder.

View File

@ -25,7 +25,6 @@ type dockerMocker struct {
enableTag bool
enableBuild bool
enablePull bool
ctx buildContext
fixedReadName string
builds []buildLog
}
@ -51,7 +50,7 @@ func (d *dockerMocker) tag(ref, tag string) error {
d.images[tag] = d.images[ref]
return nil
}
func (d *dockerMocker) build(tag, pkg, dockerContext, platform string, stdout io.Writer, opts ...string) error {
func (d *dockerMocker) build(tag, pkg, dockerContext, platform string, stdin io.Reader, stdout io.Writer, opts ...string) error {
if !d.enableBuild {
return errors.New("build disabled")
}
@ -86,10 +85,6 @@ func (d *dockerMocker) pull(img string) (bool, error) {
}
return false, errors.New("failed to pull")
}
func (d *dockerMocker) setBuildCtx(ctx buildContext) {
d.ctx = ctx
}
type cacheMocker struct {
enablePush bool
@ -275,6 +270,9 @@ func (c cacheMockerSource) Config() (imagespec.ImageConfig, error) {
// TarReader is not implemented by the cache mock.
func (c cacheMockerSource) TarReader() (io.ReadCloser, error) {
	var rc io.ReadCloser
	return rc, errors.New("unsupported")
}
// V1TarReader is not implemented by the cache mock.
func (c cacheMockerSource) V1TarReader() (io.ReadCloser, error) {
	var rc io.ReadCloser
	return rc, errors.New("unsupported")
}
// Descriptor returns the descriptor stored on the mock source.
func (c cacheMockerSource) Descriptor() *registry.Descriptor {
	d := c.descriptor
	return d
}
@ -303,9 +301,9 @@ func TestBuild(t *testing.T) {
{"no build cache", Pkg{org: "foo", image: "bar", hash: "abc", arches: []string{"amd64"}, commitHash: "HEAD"}, nil, []string{"amd64"}, &dockerMocker{supportBuildKit: false}, &cacheMocker{}, "must provide linuxkit build cache"},
{"unsupported buildkit", Pkg{org: "foo", image: "bar", hash: "abc", arches: []string{"amd64"}, commitHash: "HEAD"}, []BuildOpt{WithBuildCacheDir(cacheDir)}, []string{"amd64"}, &dockerMocker{supportBuildKit: false}, &cacheMocker{}, "buildkit not supported, check docker version"},
{"load docker without local platform", Pkg{org: "foo", image: "bar", hash: "abc", arches: []string{"amd64", "arm64"}, commitHash: "HEAD"}, []BuildOpt{WithBuildCacheDir(cacheDir), WithBuildTargetDockerCache()}, []string{nonLocal}, &dockerMocker{supportBuildKit: false}, &cacheMocker{}, "must build for local platform"},
{"amd64", Pkg{org: "foo", image: "bar", hash: "abc", arches: []string{"amd64", "arm64"}, commitHash: "HEAD"}, []BuildOpt{WithBuildCacheDir(cacheDir), WithBuildImage()}, []string{"amd64"}, &dockerMocker{supportBuildKit: true, enableBuild: true}, &cacheMocker{enableImagePull: false, enableImageLoad: true, enableIndexWrite: true}, ""},
{"arm64", Pkg{org: "foo", image: "bar", hash: "abc", arches: []string{"amd64", "arm64"}, commitHash: "HEAD"}, []BuildOpt{WithBuildCacheDir(cacheDir), WithBuildImage()}, []string{"arm64"}, &dockerMocker{supportBuildKit: true, enableBuild: true}, &cacheMocker{enableImagePull: false, enableImageLoad: true, enableIndexWrite: true}, ""},
{"amd64 and arm64", Pkg{org: "foo", image: "bar", hash: "abc", arches: []string{"amd64", "arm64"}, commitHash: "HEAD"}, []BuildOpt{WithBuildCacheDir(cacheDir), WithBuildImage()}, []string{"amd64", "arm64"}, &dockerMocker{supportBuildKit: true, enableBuild: true}, &cacheMocker{enableImagePull: false, enableImageLoad: true, enableIndexWrite: true}, ""},
{"amd64", Pkg{org: "foo", image: "bar", hash: "abc", arches: []string{"amd64", "arm64"}, commitHash: "HEAD"}, []BuildOpt{WithBuildCacheDir(cacheDir)}, []string{"amd64"}, &dockerMocker{supportBuildKit: true, enableBuild: true}, &cacheMocker{enableImagePull: false, enableImageLoad: true, enableIndexWrite: true}, ""},
{"arm64", Pkg{org: "foo", image: "bar", hash: "abc", arches: []string{"amd64", "arm64"}, commitHash: "HEAD"}, []BuildOpt{WithBuildCacheDir(cacheDir)}, []string{"arm64"}, &dockerMocker{supportBuildKit: true, enableBuild: true}, &cacheMocker{enableImagePull: false, enableImageLoad: true, enableIndexWrite: true}, ""},
{"amd64 and arm64", Pkg{org: "foo", image: "bar", hash: "abc", arches: []string{"amd64", "arm64"}, commitHash: "HEAD"}, []BuildOpt{WithBuildCacheDir(cacheDir)}, []string{"amd64", "arm64"}, &dockerMocker{supportBuildKit: true, enableBuild: true}, &cacheMocker{enableImagePull: false, enableImageLoad: true, enableIndexWrite: true}, ""},
}
for _, tt := range tests {
t.Run(tt.msg, func(t *testing.T) {

View File

@ -19,7 +19,6 @@ import (
versioncompare "github.com/hashicorp/go-version"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/registry"
log "github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
const (
@ -34,18 +33,14 @@ var platforms = []string{
type dockerRunner interface {
buildkitCheck() error
tag(ref, tag string) error
build(tag, pkg, dockerContext, platform string, stdout io.Writer, opts ...string) error
build(tag, pkg, dockerContext, platform string, stdin io.Reader, stdout io.Writer, opts ...string) error
save(tgt string, refs ...string) error
load(src io.Reader) error
pull(img string) (bool, error)
setBuildCtx(ctx buildContext)
}
type dockerRunnerImpl struct {
cache bool
// Optional build context to use
ctx buildContext
}
type buildContext interface {
@ -80,8 +75,11 @@ var proxyEnvVars = []string{
"ALL_PROXY",
}
func (dr *dockerRunnerImpl) command(stdout, stderr io.Writer, args ...string) error {
func (dr *dockerRunnerImpl) command(stdin io.Reader, stdout, stderr io.Writer, args ...string) error {
cmd := exec.Command("docker", args...)
if stdin == nil {
stdin = os.Stdin
}
if stdout == nil {
stdout = os.Stdout
}
@ -90,56 +88,26 @@ func (dr *dockerRunnerImpl) command(stdout, stderr io.Writer, args ...string) er
}
cmd.Stdout = stdout
cmd.Stderr = stderr
cmd.Stdin = stdin
cmd.Env = os.Environ()
var eg errgroup.Group
// special handling for build-args
if args[0] == "buildx" && args[1] == "build" {
buildArgs := []string{}
for _, proxyVarName := range proxyEnvVars {
if value, ok := os.LookupEnv(proxyVarName); ok {
buildArgs = append(buildArgs,
[]string{"--build-arg", fmt.Sprintf("%s=%s", proxyVarName, value)}...)
}
}
// cannot use usual append(append( because it overwrites part of it
newArgs := make([]string, len(cmd.Args)+len(buildArgs))
copy(newArgs[:2], cmd.Args[:2])
copy(newArgs[2:], buildArgs)
copy(newArgs[2+len(buildArgs):], cmd.Args[2:])
cmd.Args = newArgs
if dr.ctx != nil {
stdin, err := cmd.StdinPipe()
if err != nil {
return err
}
eg.Go(func() error {
defer stdin.Close()
return dr.ctx.Copy(stdin)
})
cmd.Args = append(cmd.Args[:len(cmd.Args)-1], "-")
}
}
log.Debugf("Executing: %v", cmd.Args)
if err := cmd.Run(); err != nil {
err := cmd.Run()
if err != nil {
if isExecErrNotFound(err) {
return fmt.Errorf("linuxkit pkg requires docker to be installed")
}
return err
}
return eg.Wait()
return nil
}
// versionCheck returns the client version and server version, and compares them both
// against the minimum required version.
func (dr *dockerRunnerImpl) versionCheck(version string) (string, string, error) {
var stdout bytes.Buffer
if err := dr.command(&stdout, nil, "version", "--format", "json"); err != nil {
if err := dr.command(nil, &stdout, nil, "version", "--format", "json"); err != nil {
return "", "", err
}
@ -200,7 +168,7 @@ func (dr *dockerRunnerImpl) versionCheck(version string) (string, string, error)
// of docker in Actions, which makes it difficult to tell if buildkit is supported.
// See https://github.community/t/what-really-is-docker-3-0-6/16171
func (dr *dockerRunnerImpl) buildkitCheck() error {
return dr.command(ioutil.Discard, ioutil.Discard, "buildx", "ls")
return dr.command(nil, ioutil.Discard, ioutil.Discard, "buildx", "ls")
}
// builder ensure that a builder exists. Works as follows.
@ -216,7 +184,7 @@ func (dr *dockerRunnerImpl) builder(dockerContext, platform string) (string, err
// if we were given a context, we must find a builder and use it, or create one and use it
if dockerContext != "" {
// does the context exist?
if err := dr.command(ioutil.Discard, ioutil.Discard, "context", "inspect", dockerContext); err != nil {
if err := dr.command(nil, ioutil.Discard, ioutil.Discard, "context", "inspect", dockerContext); err != nil {
return "", fmt.Errorf("provided docker context '%s' not found", dockerContext)
}
builderName = fmt.Sprintf("%s-%s-%s-builder", buildkitBuilderName, dockerContext, strings.ReplaceAll(platform, "/", "-"))
@ -228,7 +196,7 @@ func (dr *dockerRunnerImpl) builder(dockerContext, platform string) (string, err
// no provided dockerContext, so look for one based on platform-specific name
dockerContext = fmt.Sprintf("%s-%s", buildkitBuilderName, strings.ReplaceAll(platform, "/", "-"))
if err := dr.command(ioutil.Discard, ioutil.Discard, "context", "inspect", dockerContext); err == nil {
if err := dr.command(nil, ioutil.Discard, ioutil.Discard, "context", "inspect", dockerContext); err == nil {
// we found an appropriately named context, so let us try to use it or error out
builderName = fmt.Sprintf("%s-builder", dockerContext)
if err := dr.builderEnsureContainer(builderName, platform, dockerContext, args...); err == nil {
@ -250,7 +218,7 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(name, platform, dockerContext
// if no error, then we have a builder already
// inspect it to make sure it is of the right type
var b bytes.Buffer
if err := dr.command(&b, ioutil.Discard, "buildx", "inspect", name); err != nil {
if err := dr.command(nil, &b, ioutil.Discard, "buildx", "inspect", name); err != nil {
// we did not have the named builder, so create the builder
args = append(args, "--name", name)
msg := fmt.Sprintf("creating builder '%s'", name)
@ -265,7 +233,7 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(name, platform, dockerContext
msg = fmt.Sprintf("%s based on docker context '%s'", msg, dockerContext)
}
fmt.Println(msg)
return dr.command(ioutil.Discard, ioutil.Discard, args...)
return dr.command(nil, ioutil.Discard, ioutil.Discard, args...)
}
// if we got here, we found a builder already, so let us check its type
var (
@ -295,7 +263,7 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(name, platform, dockerContext
}
func (dr *dockerRunnerImpl) pull(img string) (bool, error) {
err := dr.command(nil, nil, "image", "pull", img)
err := dr.command(nil, nil, nil, "image", "pull", img)
if err == nil {
return true, nil
}
@ -308,7 +276,7 @@ func (dr *dockerRunnerImpl) pull(img string) (bool, error) {
}
func (dr dockerRunnerImpl) push(img string) error {
return dr.command(nil, nil, "image", "push", img)
return dr.command(nil, nil, nil, "image", "push", img)
}
func (dr *dockerRunnerImpl) pushWithManifest(img, suffix string, pushImage, pushManifest bool) error {
@ -341,10 +309,10 @@ func (dr *dockerRunnerImpl) pushWithManifest(img, suffix string, pushImage, push
func (dr *dockerRunnerImpl) tag(ref, tag string) error {
fmt.Printf("Tagging %s as %s\n", ref, tag)
return dr.command(nil, nil, "image", "tag", ref, tag)
return dr.command(nil, nil, nil, "image", "tag", ref, tag)
}
func (dr *dockerRunnerImpl) build(tag, pkg, dockerContext, platform string, stdout io.Writer, opts ...string) error {
func (dr *dockerRunnerImpl) build(tag, pkg, dockerContext, platform string, stdin io.Reader, stdout io.Writer, opts ...string) error {
// ensure we have a builder
builderName, err := dr.builder(dockerContext, platform)
if err != nil {
@ -352,28 +320,37 @@ func (dr *dockerRunnerImpl) build(tag, pkg, dockerContext, platform string, stdo
}
args := []string{"buildx", "build"}
for _, proxyVarName := range proxyEnvVars {
if value, ok := os.LookupEnv(proxyVarName); ok {
args = append(args,
[]string{"--build-arg", fmt.Sprintf("%s=%s", proxyVarName, value)}...)
}
}
if !dr.cache {
args = append(args, "--no-cache")
}
args = append(args, opts...)
args = append(args, fmt.Sprintf("--builder=%s", builderName))
args = append(args, "-t", tag, pkg)
args = append(args, "-t", tag)
// should docker read from the build path or stdin?
buildPath := pkg
if stdin != nil {
buildPath = "-"
}
args = append(args, buildPath)
fmt.Printf("building for platform %s using builder %s\n", platform, builderName)
return dr.command(stdout, nil, args...)
return dr.command(stdin, stdout, nil, args...)
}
func (dr *dockerRunnerImpl) save(tgt string, refs ...string) error {
args := append([]string{"image", "save", "-o", tgt}, refs...)
return dr.command(nil, nil, args...)
return dr.command(nil, nil, nil, args...)
}
func (dr *dockerRunnerImpl) load(src io.Reader) error {
args := []string{"image", "load"}
dr.ctx = &readerCtx{
reader: src,
}
return dr.command(nil, nil, args...)
}
func (dr *dockerRunnerImpl) setBuildCtx(ctx buildContext) {
dr.ctx = ctx
return dr.command(src, nil, nil, args...)
}

View File

@ -10,7 +10,12 @@ import (
// ImageSource interface to an image. It can have its config read, and its containers
// can be read via an io.ReadCloser tar stream.
type ImageSource interface {
	// Config get the config for the image
	Config() (imagespec.ImageConfig, error)
	// TarReader get the flattened filesystem of the image as a tar stream.
	TarReader() (io.ReadCloser, error)
	// Descriptor get the v1.Descriptor of the image
	Descriptor() *v1.Descriptor
	// V1TarReader get the image as a v1 tarball, also compatible with `docker load`
	V1TarReader() (io.ReadCloser, error)
}