pkglib/build: make dockerRunner public

There is already a public method "WithBuildDocker", so it makes sense
for its parameter's type to be public as well, so that users of this
method can actually supply their own implementation.

Signed-off-by: Christoph Ostarek <christoph@zededa.com>
This commit is contained in:
Christoph Ostarek
2025-09-12 14:09:54 +02:00
committed by Avi Deitcher
parent 2ed6850397
commit bb0cf49975
7 changed files with 48 additions and 48 deletions

View File

@@ -38,7 +38,7 @@ type buildOpts struct {
cacheProvider spec.CacheProvider
platforms []imagespec.Platform
builders map[string]string
runner dockerRunner
runner DockerRunner
writer io.Writer
builderImage string
builderConfigPath string
@@ -137,7 +137,7 @@ func WithBuildBuilders(builders map[string]string) BuildOpt {
}
// WithBuildDocker provides a docker runner to use. If nil, defaults to the current platform
func WithBuildDocker(runner dockerRunner) BuildOpt {
func WithBuildDocker(runner DockerRunner) BuildOpt {
return func(bo *buildOpts) error {
bo.runner = runner
return nil
@@ -333,7 +333,7 @@ func (p Pkg) Build(bos ...BuildOpt) error {
}
}
if err := d.contextSupportCheck(); err != nil {
if err := d.ContextSupportCheck(); err != nil {
return fmt.Errorf("contexts not supported, check docker version: %v", err)
}
@@ -538,11 +538,11 @@ func (p Pkg) Build(bos ...BuildOpt) error {
if err != nil {
return fmt.Errorf("unable to get reader from cache: %v", err)
}
if err := d.load(reader); err != nil {
if err := d.Load(reader); err != nil {
return err
}
if platform.Architecture == arch {
err = d.tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), p.FullTag())
err = d.Tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), p.FullTag())
if err != nil {
return err
}
@@ -617,11 +617,11 @@ func (p Pkg) Build(bos ...BuildOpt) error {
// if one of the arch equals with system will add tag without suffix
if bo.targetDocker {
for _, platform := range bo.platforms {
if err := d.tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), fmt.Sprintf("%s-%s", fullRelTag, platform.Architecture)); err != nil {
if err := d.Tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), fmt.Sprintf("%s-%s", fullRelTag, platform.Architecture)); err != nil {
return err
}
if platform.Architecture == arch {
if err := d.tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), fullRelTag); err != nil {
if err := d.Tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), fullRelTag); err != nil {
return err
}
}
@@ -646,7 +646,7 @@ func (p Pkg) Build(bos ...BuildOpt) error {
// C - manifest, saved in cache as is, referenced by the index (E), and returned as a descriptor
// D - attestations (if any), saved in cache as is, referenced by the index (E), and returned as a descriptor
// E - index, saved in cache as is, stored in cache as tag "image:tag-arch", *not* returned as a descriptor
func (p Pkg) buildArch(ctx context.Context, d dockerRunner, c spec.CacheProvider, builderImage, builderConfigPath, arch string, restart bool, writer io.Writer, bo buildOpts, imageBuildOpts spec.ImageBuildOptions) ([]registry.Descriptor, error) {
func (p Pkg) buildArch(ctx context.Context, d DockerRunner, c spec.CacheProvider, builderImage, builderConfigPath, arch string, restart bool, writer io.Writer, bo buildOpts, imageBuildOpts spec.ImageBuildOptions) ([]registry.Descriptor, error) {
var (
tagArch string
tag = p.FullTag()
@@ -715,7 +715,7 @@ func (p Pkg) buildArch(ctx context.Context, d dockerRunner, c spec.CacheProvider
imageBuildOpts.Dockerfile = bo.dockerfile
if err := d.build(ctx, tagArch, p.path, builderName, builderImage, builderConfigPath, platform, restart, bo.preCacheImages, passCache, buildCtx.Reader(), stdout, bo.sbomScan, bo.sbomScannerImage, bo.progress, imageBuildOpts); err != nil {
if err := d.Build(ctx, tagArch, p.path, builderName, builderImage, builderConfigPath, platform, restart, bo.preCacheImages, passCache, buildCtx.Reader(), stdout, bo.sbomScan, bo.sbomScannerImage, bo.progress, imageBuildOpts); err != nil {
stdoutCloser()
if strings.Contains(err.Error(), "executor failed running [/dev/.buildkit_qemu_emulator") {
return nil, fmt.Errorf("buildkit was unable to emulate %s. check binfmt has been set up and works for this platform: %v", platform, err)

View File

@@ -40,23 +40,23 @@ type buildLog struct {
platform string
}
func (d *dockerMocker) tag(ref, tag string) error {
func (d *dockerMocker) Tag(ref, tag string) error {
if !d.enableTag {
return errors.New("tags not allowed")
}
d.images[tag] = d.images[ref]
return nil
}
func (d *dockerMocker) contextSupportCheck() error {
func (d *dockerMocker) ContextSupportCheck() error {
if d.supportContexts {
return nil
}
return errors.New("contexts not supported")
}
func (d *dockerMocker) builder(_ context.Context, _, _, _, _ string, _ bool) (*buildkitClient.Client, error) {
func (d *dockerMocker) Builder(_ context.Context, _, _, _, _ string, _ bool) (*buildkitClient.Client, error) {
return nil, fmt.Errorf("not implemented")
}
func (d *dockerMocker) build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, builderRestart, preCacheImages bool, c spec.CacheProvider, r io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progress string, imageBuildOpts spec.ImageBuildOptions) error {
func (d *dockerMocker) Build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, builderRestart, preCacheImages bool, c spec.CacheProvider, r io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progress string, imageBuildOpts spec.ImageBuildOptions) error {
if !d.enableBuild {
return errors.New("build disabled")
}
@@ -201,7 +201,7 @@ func (d *dockerMocker) build(ctx context.Context, tag, pkg, dockerContext, build
}
return nil
}
func (d *dockerMocker) save(tgt string, refs ...string) error {
func (d *dockerMocker) Save(tgt string, refs ...string) error {
var b []byte
for _, ref := range refs {
if data, ok := d.images[ref]; ok {
@@ -212,7 +212,7 @@ func (d *dockerMocker) save(tgt string, refs ...string) error {
}
return os.WriteFile(tgt, b, 0666)
}
func (d *dockerMocker) load(src io.Reader) error {
func (d *dockerMocker) Load(src io.Reader) error {
b, err := io.ReadAll(src)
if err != nil {
return err
@@ -220,7 +220,7 @@ func (d *dockerMocker) load(src io.Reader) error {
d.images[d.fixedReadName] = b
return nil
}
func (d *dockerMocker) pull(img string) (bool, error) {
func (d *dockerMocker) Pull(img string) (bool, error) {
if d.enablePull {
b := make([]byte, 256)
_, _ = rand.Read(b)

View File

@@ -87,7 +87,7 @@ func newDockerDepends(pkgPath string, pi *pkgInfo) (dockerDepends, error) {
}
// Do ensures that any dependencies the package has declared are met.
func (dd dockerDepends) Do(d dockerRunner) error {
func (dd dockerDepends) Do(d DockerRunner) error {
if len(dd.images) == 0 {
return nil
}
@@ -107,7 +107,7 @@ func (dd dockerDepends) Do(d dockerRunner) error {
var refs []string
for _, s := range dd.images {
if ok, err := d.pull(s.String()); !ok || err != nil {
if ok, err := d.Pull(s.String()); !ok || err != nil {
if err != nil {
return err
}
@@ -119,14 +119,14 @@ func (dd dockerDepends) Do(d dockerRunner) error {
bn := filepath.Base(s.Locator) + "@" + s.Digest().String()
path := filepath.Join(dd.path, bn+".tar")
fmt.Printf("Adding %q as dependency\n", bn)
if err := d.save(path, s.String()); err != nil {
if err := d.Save(path, s.String()); err != nil {
return err
}
}
}
if !dd.dir {
if err := d.save(dd.path, refs...); err != nil {
if err := d.Save(dd.path, refs...); err != nil {
return err
}
}

View File

@@ -18,12 +18,12 @@ import (
_ "github.com/moby/buildkit/client/connhelper/ssh"
)
type dockerRunner interface {
tag(ref, tag string) error
build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, r io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, platformType string, imageBuildOpts spec.ImageBuildOptions) error
save(tgt string, refs ...string) error
load(src io.Reader) error
pull(img string) (bool, error)
contextSupportCheck() error
builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error)
type DockerRunner interface {
Tag(ref, tag string) error
Build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, r io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, platformType string, imageBuildOpts spec.ImageBuildOptions) error
Save(tgt string, refs ...string) error
Load(src io.Reader) error
Pull(img string) (bool, error)
ContextSupportCheck() error
Builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error)
}

View File

@@ -25,11 +25,11 @@ import (
type dockerDryRunnerImpl struct {
}
func newDockerDryRunner() dockerRunner {
func newDockerDryRunner() DockerRunner {
return &dockerDryRunnerImpl{}
}
func (dr *dockerDryRunnerImpl) contextSupportCheck() error {
func (dr *dockerDryRunnerImpl) ContextSupportCheck() error {
return nil
}
@@ -46,19 +46,19 @@ func (dr *dockerDryRunnerImpl) contextSupportCheck() error {
// 1. if dockerContext is provided, try to create a builder with that context; if it succeeds, we are done; if not, return an error.
// 2. try to find an existing named runner with the pattern; if it succeeds, we are done; if not, try next.
// 3. try to create a generic builder using the default context named "linuxkit".
func (dr *dockerDryRunnerImpl) builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error) {
func (dr *dockerDryRunnerImpl) Builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error) {
return nil, nil
}
func (dr *dockerDryRunnerImpl) pull(img string) (bool, error) {
func (dr *dockerDryRunnerImpl) Pull(img string) (bool, error) {
return false, errors.New("not implemented")
}
func (dr *dockerDryRunnerImpl) tag(ref, tag string) error {
func (dr *dockerDryRunnerImpl) Tag(ref, tag string) error {
return errors.New("not implemented")
}
func (dr *dockerDryRunnerImpl) build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, stdin io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progressType string, imageBuildOpts spec.ImageBuildOptions) error {
func (dr *dockerDryRunnerImpl) Build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, stdin io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progressType string, imageBuildOpts spec.ImageBuildOptions) error {
// build args
var buildArgs []string
for k, v := range imageBuildOpts.BuildArgs {
@@ -85,10 +85,10 @@ func (dr *dockerDryRunnerImpl) build(ctx context.Context, tag, pkg, dockerContex
return nil
}
func (dr *dockerDryRunnerImpl) save(tgt string, refs ...string) error {
func (dr *dockerDryRunnerImpl) Save(tgt string, refs ...string) error {
return errors.New("not implemented")
}
func (dr *dockerDryRunnerImpl) load(src io.Reader) error {
func (dr *dockerDryRunnerImpl) Load(src io.Reader) error {
return errors.New("not implemented")
}

View File

@@ -71,7 +71,7 @@ type dockerRunnerImpl struct {
cache bool
}
func newDockerRunner(cache bool) dockerRunner {
func newDockerRunner(cache bool) DockerRunner {
return &dockerRunnerImpl{cache: cache}
}
@@ -199,14 +199,14 @@ func (dr *dockerRunnerImpl) versionCheck(version string) (string, string, error)
return clientVersionString, serverVersionString, nil
}
// contextSupportCheck checks if contexts are supported. This is necessary because github uses some strange versions
// ContextSupportCheck checks if contexts are supported. This is necessary because github uses some strange versions
// of docker in Actions, which makes it difficult to tell if context is supported.
// See https://github.community/t/what-really-is-docker-3-0-6/16171
func (dr *dockerRunnerImpl) contextSupportCheck() error {
func (dr *dockerRunnerImpl) ContextSupportCheck() error {
return dr.command(nil, io.Discard, io.Discard, "context", "ls")
}
// builder ensure that a builder container exists or return an error.
// Builder ensure that a builder container exists or return an error.
//
// Process:
//
@@ -219,7 +219,7 @@ func (dr *dockerRunnerImpl) contextSupportCheck() error {
// 1. if dockerContext is provided, try to create a builder with that context; if it succeeds, we are done; if not, return an error.
// 2. try to find an existing named runner with the pattern; if it succeeds, we are done; if not, try next.
// 3. try to create a generic builder using the default context named "linuxkit".
func (dr *dockerRunnerImpl) builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error) {
func (dr *dockerRunnerImpl) Builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error) {
// if we were given a context, we must find a builder and use it, or create one and use it
if dockerContext != "" {
// does the context exist?
@@ -460,7 +460,7 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(ctx context.Context, name, im
}
}
func (dr *dockerRunnerImpl) pull(img string) (bool, error) {
func (dr *dockerRunnerImpl) Pull(img string) (bool, error) {
err := dr.command(nil, nil, nil, "image", "pull", img)
if err == nil {
return true, nil
@@ -502,14 +502,14 @@ func (dr *dockerRunnerImpl) pushWithManifest(img, suffix string, pushImage, push
return nil
}
func (dr *dockerRunnerImpl) tag(ref, tag string) error {
func (dr *dockerRunnerImpl) Tag(ref, tag string) error {
fmt.Printf("Tagging %s as %s\n", ref, tag)
return dr.command(nil, nil, nil, "image", "tag", ref, tag)
}
func (dr *dockerRunnerImpl) build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, stdin io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progressType string, imageBuildOpts spec.ImageBuildOptions) error {
func (dr *dockerRunnerImpl) Build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, stdin io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progressType string, imageBuildOpts spec.ImageBuildOptions) error {
// ensure we have a builder
client, err := dr.builder(ctx, dockerContext, builderImage, builderConfigPath, platform, restart)
client, err := dr.Builder(ctx, dockerContext, builderImage, builderConfigPath, platform, restart)
if err != nil {
return fmt.Errorf("unable to ensure builder container: %v", err)
}
@@ -757,12 +757,12 @@ func (dr *dockerRunnerImpl) build(ctx context.Context, tag, pkg, dockerContext,
return err
}
func (dr *dockerRunnerImpl) save(tgt string, refs ...string) error {
func (dr *dockerRunnerImpl) Save(tgt string, refs ...string) error {
args := append([]string{"image", "save", "-o", tgt}, refs...)
return dr.command(nil, nil, nil, args...)
}
func (dr *dockerRunnerImpl) load(src io.Reader) error {
func (dr *dockerRunnerImpl) Load(src io.Reader) error {
args := []string{"image", "load"}
return dr.command(src, nil, nil, args...)
}

View File

@@ -100,7 +100,7 @@ func getClientForPlatform(ctx context.Context, buildersMap map[string]string, bu
}
dr := newDockerRunner(false)
builderName := getBuilderForPlatform(p.Architecture, buildersMap)
client, err := dr.builder(ctx, builderName, builderImage, builderConfigPath, platform, false)
client, err := dr.Builder(ctx, builderName, builderImage, builderConfigPath, platform, false)
if err != nil {
return nil, fmt.Errorf("unable to ensure builder container: %v", err)
}