5 Commits

Author SHA1 Message Date
Christoph Ostarek
43200ea634 pkglib: untangle WithBuildDocker and DryRun
this makes it possible for a user of this API to
build their own DryRunner

also make newDockerRunner public as well to be consistent

Signed-off-by: Christoph Ostarek <christoph@zededa.com>
2025-09-15 15:49:40 +03:00
Christoph Ostarek
bb0cf49975 pkglib/build: make dockerRunner public
there is already a public method "WithBuildDocker",
so it makes sense that the parameter definition is public as well
so that a user of this method can actually use it

Signed-off-by: Christoph Ostarek <christoph@zededa.com>
2025-09-15 15:49:40 +03:00
Avi Deitcher
2ed6850397 when comparing builder config files, ensure only compare post-processed
Signed-off-by: Avi Deitcher <avi@deitcher.net>
2025-09-14 10:20:55 +03:00
Avi Deitcher
a755fd917d validate linuxkit yaml for unknown fields
Signed-off-by: Avi Deitcher <avi@deitcher.net>
2025-09-05 15:12:54 +02:00
Avi Deitcher
358803fcc7 validate yaml for extraneous fields in pkg build
Signed-off-by: Avi Deitcher <avi@deitcher.net>
2025-09-05 15:12:54 +02:00
21 changed files with 281 additions and 77 deletions

View File

@@ -1,3 +1,4 @@
image: memlogd
binds:
config:
binds:
- /var/run:/var/run

View File

@@ -360,8 +360,9 @@ func NewConfig(config []byte, packageFinder spec.PackageResolver) (Moby, error)
// Parse raw yaml
var rawYaml interface{}
err := yaml.Unmarshal(config, &rawYaml)
if err != nil {
dec := yaml.NewDecoder(bytes.NewReader(config))
dec.KnownFields(true)
if err := dec.Decode(&rawYaml); err != nil {
return m, err
}

View File

@@ -38,7 +38,7 @@ type buildOpts struct {
cacheProvider spec.CacheProvider
platforms []imagespec.Platform
builders map[string]string
runner dockerRunner
runner DockerRunner
writer io.Writer
builderImage string
builderConfigPath string
@@ -137,7 +137,7 @@ func WithBuildBuilders(builders map[string]string) BuildOpt {
}
// WithBuildDocker provides a docker runner to use. If nil, defaults to the current platform
func WithBuildDocker(runner dockerRunner) BuildOpt {
func WithBuildDocker(runner DockerRunner) BuildOpt {
return func(bo *buildOpts) error {
bo.runner = runner
return nil
@@ -319,10 +319,10 @@ func (p Pkg) Build(bos ...BuildOpt) error {
d := bo.runner
switch {
case bo.dryRun:
d = newDockerDryRunner()
case d == nil && bo.dryRun:
d = NewDockerDryRunner()
case d == nil:
d = newDockerRunner(p.cache)
d = NewDockerRunner(p.cache)
}
c := bo.cacheProvider
@@ -333,7 +333,7 @@ func (p Pkg) Build(bos ...BuildOpt) error {
}
}
if err := d.contextSupportCheck(); err != nil {
if err := d.ContextSupportCheck(); err != nil {
return fmt.Errorf("contexts not supported, check docker version: %v", err)
}
@@ -538,11 +538,11 @@ func (p Pkg) Build(bos ...BuildOpt) error {
if err != nil {
return fmt.Errorf("unable to get reader from cache: %v", err)
}
if err := d.load(reader); err != nil {
if err := d.Load(reader); err != nil {
return err
}
if platform.Architecture == arch {
err = d.tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), p.FullTag())
err = d.Tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), p.FullTag())
if err != nil {
return err
}
@@ -617,11 +617,11 @@ func (p Pkg) Build(bos ...BuildOpt) error {
// if one of the architectures equals the system architecture, also add a tag without the arch suffix
if bo.targetDocker {
for _, platform := range bo.platforms {
if err := d.tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), fmt.Sprintf("%s-%s", fullRelTag, platform.Architecture)); err != nil {
if err := d.Tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), fmt.Sprintf("%s-%s", fullRelTag, platform.Architecture)); err != nil {
return err
}
if platform.Architecture == arch {
if err := d.tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), fullRelTag); err != nil {
if err := d.Tag(fmt.Sprintf("%s-%s", p.FullTag(), platform.Architecture), fullRelTag); err != nil {
return err
}
}
@@ -646,7 +646,7 @@ func (p Pkg) Build(bos ...BuildOpt) error {
// C - manifest, saved in cache as is, referenced by the index (E), and returned as a descriptor
// D - attestations (if any), saved in cache as is, referenced by the index (E), and returned as a descriptor
// E - index, saved in cache as is, stored in cache as tag "image:tag-arch", *not* returned as a descriptor
func (p Pkg) buildArch(ctx context.Context, d dockerRunner, c spec.CacheProvider, builderImage, builderConfigPath, arch string, restart bool, writer io.Writer, bo buildOpts, imageBuildOpts spec.ImageBuildOptions) ([]registry.Descriptor, error) {
func (p Pkg) buildArch(ctx context.Context, d DockerRunner, c spec.CacheProvider, builderImage, builderConfigPath, arch string, restart bool, writer io.Writer, bo buildOpts, imageBuildOpts spec.ImageBuildOptions) ([]registry.Descriptor, error) {
var (
tagArch string
tag = p.FullTag()
@@ -715,7 +715,7 @@ func (p Pkg) buildArch(ctx context.Context, d dockerRunner, c spec.CacheProvider
imageBuildOpts.Dockerfile = bo.dockerfile
if err := d.build(ctx, tagArch, p.path, builderName, builderImage, builderConfigPath, platform, restart, bo.preCacheImages, passCache, buildCtx.Reader(), stdout, bo.sbomScan, bo.sbomScannerImage, bo.progress, imageBuildOpts); err != nil {
if err := d.Build(ctx, tagArch, p.path, builderName, builderImage, builderConfigPath, platform, restart, bo.preCacheImages, passCache, buildCtx.Reader(), stdout, bo.sbomScan, bo.sbomScannerImage, bo.progress, imageBuildOpts); err != nil {
stdoutCloser()
if strings.Contains(err.Error(), "executor failed running [/dev/.buildkit_qemu_emulator") {
return nil, fmt.Errorf("buildkit was unable to emulate %s. check binfmt has been set up and works for this platform: %v", platform, err)

View File

@@ -40,23 +40,23 @@ type buildLog struct {
platform string
}
func (d *dockerMocker) tag(ref, tag string) error {
func (d *dockerMocker) Tag(ref, tag string) error {
if !d.enableTag {
return errors.New("tags not allowed")
}
d.images[tag] = d.images[ref]
return nil
}
func (d *dockerMocker) contextSupportCheck() error {
func (d *dockerMocker) ContextSupportCheck() error {
if d.supportContexts {
return nil
}
return errors.New("contexts not supported")
}
func (d *dockerMocker) builder(_ context.Context, _, _, _, _ string, _ bool) (*buildkitClient.Client, error) {
func (d *dockerMocker) Builder(_ context.Context, _, _, _, _ string, _ bool) (*buildkitClient.Client, error) {
return nil, fmt.Errorf("not implemented")
}
func (d *dockerMocker) build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, builderRestart, preCacheImages bool, c spec.CacheProvider, r io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progress string, imageBuildOpts spec.ImageBuildOptions) error {
func (d *dockerMocker) Build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, builderRestart, preCacheImages bool, c spec.CacheProvider, r io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progress string, imageBuildOpts spec.ImageBuildOptions) error {
if !d.enableBuild {
return errors.New("build disabled")
}
@@ -201,7 +201,7 @@ func (d *dockerMocker) build(ctx context.Context, tag, pkg, dockerContext, build
}
return nil
}
func (d *dockerMocker) save(tgt string, refs ...string) error {
func (d *dockerMocker) Save(tgt string, refs ...string) error {
var b []byte
for _, ref := range refs {
if data, ok := d.images[ref]; ok {
@@ -212,7 +212,7 @@ func (d *dockerMocker) save(tgt string, refs ...string) error {
}
return os.WriteFile(tgt, b, 0666)
}
func (d *dockerMocker) load(src io.Reader) error {
func (d *dockerMocker) Load(src io.Reader) error {
b, err := io.ReadAll(src)
if err != nil {
return err
@@ -220,7 +220,7 @@ func (d *dockerMocker) load(src io.Reader) error {
d.images[d.fixedReadName] = b
return nil
}
func (d *dockerMocker) pull(img string) (bool, error) {
func (d *dockerMocker) Pull(img string) (bool, error) {
if d.enablePull {
b := make([]byte, 256)
_, _ = rand.Read(b)

View File

@@ -87,7 +87,7 @@ func newDockerDepends(pkgPath string, pi *pkgInfo) (dockerDepends, error) {
}
// Do ensures that any dependencies the package has declared are met.
func (dd dockerDepends) Do(d dockerRunner) error {
func (dd dockerDepends) Do(d DockerRunner) error {
if len(dd.images) == 0 {
return nil
}
@@ -107,7 +107,7 @@ func (dd dockerDepends) Do(d dockerRunner) error {
var refs []string
for _, s := range dd.images {
if ok, err := d.pull(s.String()); !ok || err != nil {
if ok, err := d.Pull(s.String()); !ok || err != nil {
if err != nil {
return err
}
@@ -119,14 +119,14 @@ func (dd dockerDepends) Do(d dockerRunner) error {
bn := filepath.Base(s.Locator) + "@" + s.Digest().String()
path := filepath.Join(dd.path, bn+".tar")
fmt.Printf("Adding %q as dependency\n", bn)
if err := d.save(path, s.String()); err != nil {
if err := d.Save(path, s.String()); err != nil {
return err
}
}
}
if !dd.dir {
if err := d.save(dd.path, refs...); err != nil {
if err := d.Save(dd.path, refs...); err != nil {
return err
}
}

View File

@@ -18,12 +18,12 @@ import (
_ "github.com/moby/buildkit/client/connhelper/ssh"
)
type dockerRunner interface {
tag(ref, tag string) error
build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, r io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, platformType string, imageBuildOpts spec.ImageBuildOptions) error
save(tgt string, refs ...string) error
load(src io.Reader) error
pull(img string) (bool, error)
contextSupportCheck() error
builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error)
type DockerRunner interface {
Tag(ref, tag string) error
Build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, r io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, platformType string, imageBuildOpts spec.ImageBuildOptions) error
Save(tgt string, refs ...string) error
Load(src io.Reader) error
Pull(img string) (bool, error)
ContextSupportCheck() error
Builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error)
}

View File

@@ -25,11 +25,11 @@ import (
type dockerDryRunnerImpl struct {
}
func newDockerDryRunner() dockerRunner {
func NewDockerDryRunner() DockerRunner {
return &dockerDryRunnerImpl{}
}
func (dr *dockerDryRunnerImpl) contextSupportCheck() error {
func (dr *dockerDryRunnerImpl) ContextSupportCheck() error {
return nil
}
@@ -46,19 +46,19 @@ func (dr *dockerDryRunnerImpl) contextSupportCheck() error {
// 1. if dockerContext is provided, try to create a builder with that context; if it succeeds, we are done; if not, return an error.
// 2. try to find an existing named runner with the pattern; if it succeeds, we are done; if not, try next.
// 3. try to create a generic builder using the default context named "linuxkit".
func (dr *dockerDryRunnerImpl) builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error) {
func (dr *dockerDryRunnerImpl) Builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error) {
return nil, nil
}
func (dr *dockerDryRunnerImpl) pull(img string) (bool, error) {
func (dr *dockerDryRunnerImpl) Pull(img string) (bool, error) {
return false, errors.New("not implemented")
}
func (dr *dockerDryRunnerImpl) tag(ref, tag string) error {
func (dr *dockerDryRunnerImpl) Tag(ref, tag string) error {
return errors.New("not implemented")
}
func (dr *dockerDryRunnerImpl) build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, stdin io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progressType string, imageBuildOpts spec.ImageBuildOptions) error {
func (dr *dockerDryRunnerImpl) Build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, stdin io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progressType string, imageBuildOpts spec.ImageBuildOptions) error {
// build args
var buildArgs []string
for k, v := range imageBuildOpts.BuildArgs {
@@ -85,10 +85,10 @@ func (dr *dockerDryRunnerImpl) build(ctx context.Context, tag, pkg, dockerContex
return nil
}
func (dr *dockerDryRunnerImpl) save(tgt string, refs ...string) error {
func (dr *dockerDryRunnerImpl) Save(tgt string, refs ...string) error {
return errors.New("not implemented")
}
func (dr *dockerDryRunnerImpl) load(src io.Reader) error {
func (dr *dockerDryRunnerImpl) Load(src io.Reader) error {
return errors.New("not implemented")
}

View File

@@ -57,20 +57,21 @@ import (
)
const (
buildkitBuilderName = "linuxkit-builder"
buildkitSocketPath = "/run/buildkit/buildkitd.sock"
buildkitWaitServer = 30 // seconds
buildkitCheckInterval = 1 // seconds
sbomFrontEndKey = "attest:sbom"
buildkitConfigDir = "/etc/buildkit"
buildkitConfigPath = buildkitConfigDir + "/buildkitd.toml"
buildkitBuilderName = "linuxkit-builder"
buildkitSocketPath = "/run/buildkit/buildkitd.sock"
buildkitWaitServer = 30 // seconds
buildkitCheckInterval = 1 // seconds
sbomFrontEndKey = "attest:sbom"
buildkitConfigDir = "/etc/buildkit"
buildkitConfigFileName = "buildkitd.toml"
buildkitConfigPath = buildkitConfigDir + "/" + buildkitConfigFileName
)
type dockerRunnerImpl struct {
cache bool
}
func newDockerRunner(cache bool) dockerRunner {
func NewDockerRunner(cache bool) DockerRunner {
return &dockerRunnerImpl{cache: cache}
}
@@ -198,14 +199,14 @@ func (dr *dockerRunnerImpl) versionCheck(version string) (string, string, error)
return clientVersionString, serverVersionString, nil
}
// contextSupportCheck checks if contexts are supported. This is necessary because github uses some strange versions
// ContextSupportCheck checks if contexts are supported. This is necessary because github uses some strange versions
// of docker in Actions, which makes it difficult to tell if context is supported.
// See https://github.community/t/what-really-is-docker-3-0-6/16171
func (dr *dockerRunnerImpl) contextSupportCheck() error {
func (dr *dockerRunnerImpl) ContextSupportCheck() error {
return dr.command(nil, io.Discard, io.Discard, "context", "ls")
}
// builder ensure that a builder container exists or return an error.
// Builder ensures that a builder container exists, or returns an error.
//
// Process:
//
@@ -218,7 +219,7 @@ func (dr *dockerRunnerImpl) contextSupportCheck() error {
// 1. if dockerContext is provided, try to create a builder with that context; if it succeeds, we are done; if not, return an error.
// 2. try to find an existing named runner with the pattern; if it succeeds, we are done; if not, try next.
// 3. try to create a generic builder using the default context named "linuxkit".
func (dr *dockerRunnerImpl) builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error) {
func (dr *dockerRunnerImpl) Builder(ctx context.Context, dockerContext, builderImage, builderConfigPath, platform string, restart bool) (*buildkitClient.Client, error) {
// if we were given a context, we must find a builder and use it, or create one and use it
if dockerContext != "" {
// does the context exist?
@@ -277,6 +278,7 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(ctx context.Context, name, im
for range buildKitCheckRetryCount {
var b bytes.Buffer
var cid string
var filesToLoadIntoContainer map[string][]byte
if err := dr.command(nil, &b, io.Discard, "--context", dockerContext, "container", "inspect", name); err == nil {
// we already have a container named "linuxkit-builder" in the provided context.
// get its state and config
@@ -295,16 +297,25 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(ctx context.Context, name, im
// if it is provided, we assume it is false until proven true
log.Debugf("checking if configPath %s is correct in container %s", configPath, name)
configPathCorrect = false
if err := dr.command(nil, &b, io.Discard, "--context", dockerContext, "container", "exec", name, "cat", buildkitConfigPath); err == nil {
var configB bytes.Buffer
// we cannot exactly use the local config file, as it gets modified to get loaded into the container
// so we preprocess it using the same library that would load it up
filesToLoadIntoContainer, err = confutil.LoadConfigFiles(configPath)
if err != nil {
return nil, fmt.Errorf("failed to load buildkit config file %s: %v", configPath, err)
}
if err := dr.command(nil, &configB, io.Discard, "--context", dockerContext, "container", "exec", name, "cat", buildkitConfigPath); err == nil {
// sha256sum the config file to see if it matches the provided configPath
containerConfigFileHash := sha256.Sum256(b.Bytes())
containerConfigFileHash := sha256.Sum256(configB.Bytes())
log.Debugf("container %s has configPath %s with sha256sum %x", name, buildkitConfigPath, containerConfigFileHash)
configFileContents, err := os.ReadFile(configPath)
if err != nil {
return nil, fmt.Errorf("unable to read buildkit config file %s: %v", configPath, err)
log.Tracef("container %s has configPath %s with contents:\n%s", name, buildkitConfigPath, configB.String())
configFileContents, ok := filesToLoadIntoContainer[buildkitConfigFileName]
if !ok {
return nil, fmt.Errorf("unable to read provided buildkit config file %s: %v", configPath, err)
}
localConfigFileHash := sha256.Sum256(configFileContents)
log.Debugf("local %s has configPath %s with sha256sum %x", name, configPath, localConfigFileHash)
log.Tracef("local %s has configPath %s with contents:\n%s", name, buildkitConfigPath, string(configFileContents))
if bytes.Equal(containerConfigFileHash[:], localConfigFileHash[:]) {
log.Debugf("configPath %s in container %s matches local configPath %s", buildkitConfigPath, name, configPath)
configPathCorrect = true
@@ -314,8 +325,6 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(ctx context.Context, name, im
} else {
log.Debugf("could not read configPath %s from container %s, assuming it is not correct", buildkitConfigPath, name)
}
// now rewrite and copy over certs, if needed
//https://github.com/docker/buildx/blob/master/util/confutil/container.go#L27
}
switch {
@@ -338,7 +347,7 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(ctx context.Context, name, im
stop = isRunning
remove = true
case !configPathCorrect:
fmt.Printf("existing container has wrong configPath mount, restarting\n")
fmt.Printf("existing container has wrong configPath contents, restarting\n")
recreate = true
stop = isRunning
remove = true
@@ -405,11 +414,7 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(ctx context.Context, name, im
}
// copy in the buildkit config file, if provided
if configPath != "" {
files, err := confutil.LoadConfigFiles(configPath)
if err != nil {
return nil, fmt.Errorf("failed to load buildkit config file %s: %v", configPath, err)
}
if err := dr.copyFilesToContainer(name, files); err != nil {
if err := dr.copyFilesToContainer(name, filesToLoadIntoContainer); err != nil {
return nil, fmt.Errorf("failed to copy buildkit config file %s and certificates into container %s: %v", configPath, name, err)
}
}
@@ -455,7 +460,7 @@ func (dr *dockerRunnerImpl) builderEnsureContainer(ctx context.Context, name, im
}
}
func (dr *dockerRunnerImpl) pull(img string) (bool, error) {
func (dr *dockerRunnerImpl) Pull(img string) (bool, error) {
err := dr.command(nil, nil, nil, "image", "pull", img)
if err == nil {
return true, nil
@@ -497,14 +502,14 @@ func (dr *dockerRunnerImpl) pushWithManifest(img, suffix string, pushImage, push
return nil
}
func (dr *dockerRunnerImpl) tag(ref, tag string) error {
func (dr *dockerRunnerImpl) Tag(ref, tag string) error {
fmt.Printf("Tagging %s as %s\n", ref, tag)
return dr.command(nil, nil, nil, "image", "tag", ref, tag)
}
func (dr *dockerRunnerImpl) build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, stdin io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progressType string, imageBuildOpts spec.ImageBuildOptions) error {
func (dr *dockerRunnerImpl) Build(ctx context.Context, tag, pkg, dockerContext, builderImage, builderConfigPath, platform string, restart, preCacheImages bool, c spec.CacheProvider, stdin io.Reader, stdout io.Writer, sbomScan bool, sbomScannerImage, progressType string, imageBuildOpts spec.ImageBuildOptions) error {
// ensure we have a builder
client, err := dr.builder(ctx, dockerContext, builderImage, builderConfigPath, platform, restart)
client, err := dr.Builder(ctx, dockerContext, builderImage, builderConfigPath, platform, restart)
if err != nil {
return fmt.Errorf("unable to ensure builder container: %v", err)
}
@@ -752,12 +757,12 @@ func (dr *dockerRunnerImpl) build(ctx context.Context, tag, pkg, dockerContext,
return err
}
func (dr *dockerRunnerImpl) save(tgt string, refs ...string) error {
func (dr *dockerRunnerImpl) Save(tgt string, refs ...string) error {
args := append([]string{"image", "save", "-o", tgt}, refs...)
return dr.command(nil, nil, nil, args...)
}
func (dr *dockerRunnerImpl) load(src io.Reader) error {
func (dr *dockerRunnerImpl) Load(src io.Reader) error {
args := []string{"image", "load"}
return dr.command(src, nil, nil, args...)
}

View File

@@ -10,7 +10,7 @@ import (
"strings"
"text/template"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/moby"
"github.com/linuxkit/linuxkit/src/cmd/linuxkit/util"
@@ -157,7 +157,9 @@ func NewFromConfig(cfg PkglibConfig, args ...string) ([]Pkg, error) {
return nil, err
}
if err := yaml.Unmarshal(b, &pi); err != nil {
dec := yaml.NewDecoder(bytes.NewReader(b))
dec.KnownFields(true)
if err := dec.Decode(&pi); err != nil {
return nil, err
}

View File

@@ -98,9 +98,9 @@ func getClientForPlatform(ctx context.Context, buildersMap map[string]string, bu
if err != nil {
return nil, fmt.Errorf("failed to parse platform: %s", err)
}
dr := newDockerRunner(false)
dr := NewDockerRunner(false)
builderName := getBuilderForPlatform(p.Architecture, buildersMap)
client, err := dr.builder(ctx, builderName, builderImage, builderConfigPath, platform, false)
client, err := dr.Builder(ctx, builderName, builderImage, builderConfigPath, platform, false)
if err != nil {
return nil, fmt.Errorf("unable to ensure builder container: %v", err)
}

View File

@@ -0,0 +1,2 @@
FROM alpine:3.22

View File

@@ -0,0 +1,7 @@
image: build-args-test
network: true
arches:
- amd64
- arm64
# should error out on this unknown field
unknownField: "abc"

View File

@@ -0,0 +1,19 @@
#!/bin/sh
# SUMMARY: Check that a pkg build fails when the build.yml contains unknown fields
# LABELS:
# REPEAT:
set -ex
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
# Test code goes here
RESULT=$(linuxkit pkg build --force . 2>&1 || echo FAILED)
if [ "${RESULT}" != "FAILED" ]; then
echo "Build should have failed with invalid yaml, instead was ${RESULT}"
fi
echo "Summary: correctly detected invalid yaml"
exit 0

View File

@@ -0,0 +1,39 @@
unknownField: "abc"
kernel:
image: linuxkit/kernel:6.6.71
cmdline: "console=tty0 console=ttyS0 console=ttyAMA0"
init:
- linuxkit/init:680da6e6f79bb8236a095147d532cd2160e23c9f
- linuxkit/runc:2dfee46421e963d6c0d946137e46fe36fa606d29
- linuxkit/containerd:838b745e38e43309393675ce3cf04bee9047eb91
- linuxkit/ca-certificates:a4f15fe71bb0ad7560ff78f48504dd2af500a442
onboot:
- name: sysctl
image: linuxkit/sysctl:2fad4cdf96faa97bf7888696b8c3ca00f98137af
- name: dhcpcd
image: linuxkit/dhcpcd:4681273eeea47c26d980958656e60fe70d49e318
command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
onshutdown:
- name: shutdown
image: busybox:latest
command: ["/bin/echo", "so long and thanks for all the fish"]
services:
- name: getty
image: linuxkit/getty:37a16fb37f56ad0aee6532c1a39d780416f7fb80
env:
- INSECURE=true
- name: rngd
image: linuxkit/rngd:80f22b0f60d23c29ce28d06674bc77fe3775a38b
- name: nginx
image: nginx:1.19.5-alpine
capabilities:
- CAP_NET_BIND_SERVICE
- CAP_CHOWN
- CAP_SETUID
- CAP_SETGID
- CAP_DAC_OVERRIDE
binds:
- /etc/resolv.conf:/etc/resolv.conf
files:
- path: etc/linuxkit-config
metadata: yaml

View File

@@ -0,0 +1,19 @@
#!/bin/sh
# SUMMARY: Check that linuxkit build fails when the yaml contains unknown fields
# LABELS:
# REPEAT:
set -ex
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
# Test code goes here
RESULT=$(linuxkit build linuxkit.yaml 2>&1 || echo FAILED)
if [ "${RESULT}" != "FAILED" ]; then
echo "Build should have failed with invalid yaml, instead was ${RESULT}"
fi
echo "Summary: correctly detected invalid yaml"
exit 0

View File

@@ -0,0 +1 @@
docker-config/

View File

@@ -0,0 +1,2 @@
FROM alpine:3.21
RUN echo hi

View File

@@ -0,0 +1,2 @@
org: linuxkit
image: builder-config

View File

@@ -0,0 +1,18 @@
# it does not matter what these contents are, as long as they are valid and can be processed
# and are different than the ones in buildkitd.toml
debug = true
# trace = true
insecure-entitlements = [ "network.host", "security.insecure" ]
[worker.oci]
max-parallelism = 48
[worker.oci.gcpulimits]
enabled = true
[log]
# log formatter: json or text
format = "json"
[registry."172.17.0.2:5001"]
insecure = true
http = true

View File

@@ -0,0 +1,17 @@
# it does not matter what these contents are, as long as they are valid and can be processed
debug = true
# trace = true
insecure-entitlements = [ "network.host", "security.insecure" ]
[worker.oci]
max-parallelism = 56
[worker.oci.gcpulimits]
enabled = false
[log]
# log formatter: json or text
format = "text"
[registry."172.17.0.2:5000"]
insecure = true
http = true

View File

@@ -0,0 +1,69 @@
#!/bin/sh
# SUMMARY: Check that the builder container is reused for an unchanged buildkitd.toml and recreated when the config changes
# LABELS:
set -e
# Source libraries. Uncomment if needed/defined
#. "${RT_LIB}"
. "${RT_PROJECT_ROOT}/_lib/lib.sh"
clean_up() {
[ -n "${CACHEDIR}" ] && rm -rf "${CACHEDIR}"
}
trap clean_up EXIT
# determine platform
ARCH=$(uname -m)
if [ "${ARCH}" = "x86_64" ]; then
ARCH="amd64"
elif [ "${ARCH}" = "aarch64" ]; then
ARCH="arm64"
fi
PLATFORM="linux/${ARCH}"
CACHEDIR=$(mktemp -d)
# tests:
# 1. build the local package with the custom buildkitd.toml - should succeed
# 2. rebuild the local package with the same buildkitd.toml - should succeed without starting a new builder container
# 3. rebuild the local package with the different buildkitd-2.toml - should succeed after starting a new builder container
if ! linuxkit --verbose 3 --cache "${CACHEDIR}" pkg build --platforms "${PLATFORM}" \
--builder-config "$(pwd)/buildkitd.toml" --force \
.; then
echo "Build 1 failed"
exit 1
fi
CID1=$(docker inspect linuxkit-builder --format '{{.ID}}')
# get the builder container ID after the second build
if ! linuxkit --verbose 3 --cache "${CACHEDIR}" pkg build --platforms "${PLATFORM}" \
--builder-config "$(pwd)/buildkitd.toml" --force \
.; then
echo "Build 2 failed"
exit 1
fi
CID2=$(docker inspect linuxkit-builder --format '{{.ID}}')
if ! linuxkit --verbose 3 --cache "${CACHEDIR}" pkg build --platforms "${PLATFORM}" \
--builder-config "$(pwd)/buildkitd-2.toml" --force \
.; then
echo "Build 3 failed"
exit 1
fi
CID3=$(docker inspect linuxkit-builder --format '{{.ID}}')
# CID1 and CID2 should match, CID3 should not
echo "CID1: ${CID1}"
echo "CID2: ${CID2}"
echo "CID3: ${CID3}"
if [ "${CID1}" = "${CID2}" ] && [ "${CID2}" != "${CID3}" ]; then
echo "Build 1 and 2 used the same builder container, but Build 3 used a different one"
else
echo "Unexpected builder container behavior"
exit 1
fi
exit 0