Update gomod and vendor

This commit is contained in:
Ettore Di Giacinto
2021-01-20 12:36:07 +01:00
parent 163f93067c
commit c24a3a35f1
149 changed files with 6 additions and 16940 deletions

View File

@@ -1,57 +0,0 @@
package client
import (
"os"
"path/filepath"
"github.com/containerd/containerd/snapshots/overlay"
"github.com/genuinetools/img/types"
"github.com/moby/buildkit/control"
"github.com/moby/buildkit/session"
"github.com/sirupsen/logrus"
)
// Client holds the information for the client we will use for communicating
// with the buildkit controller.
type Client struct {
	// backend is the snapshotter backend name; "auto" is resolved to a
	// concrete backend in New before it is stored here.
	backend string
	// localDirs maps session directory names to local paths that are
	// synced into build sessions (see Session).
	localDirs map[string]string
	// root is the state directory holding metadata, content, and snapshots.
	root string
	// sessionManager is lazily created by getSessionManager.
	sessionManager *session.Manager
	// controller is the lazily created buildkit controller (createController).
	controller *control.Controller
}
// New returns a new client for communicating with the buildkit controller.
func New(root, backend string, localDirs map[string]string) (*Client, error) {
	// Name for the directory executor.
	const name = "runc"

	// Resolve the "auto" backend into a concrete snapshotter backend,
	// preferring overlayfs whenever the kernel supports it here.
	if backend == types.AutoBackend {
		if overlay.Supported(root) == nil {
			backend = types.OverlayFSBackend
		} else {
			backend = types.NativeBackend
		}
		logrus.Debugf("using backend: %s", backend)
	}

	// Create the root state directory for this executor/backend pair.
	root = filepath.Join(root, name, backend)
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}

	// Assemble the client; the session manager and controller are created
	// lazily on first use.
	return &Client{
		backend:   backend,
		root:      root,
		localDirs: localDirs,
	}, nil
}
// Close safely closes the client.
// This used to shut down the FUSE server but since that was removed
// it is basically a no-op now, retained so existing callers keep working.
func (c *Client) Close() {}

View File

@@ -1,83 +0,0 @@
package client
import (
"fmt"
"path/filepath"
"github.com/containerd/containerd/remotes/docker"
"github.com/moby/buildkit/cache/remotecache"
inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline"
localremotecache "github.com/moby/buildkit/cache/remotecache/local"
registryremotecache "github.com/moby/buildkit/cache/remotecache/registry"
"github.com/moby/buildkit/control"
"github.com/moby/buildkit/frontend"
"github.com/moby/buildkit/frontend/dockerfile/builder"
"github.com/moby/buildkit/frontend/gateway"
"github.com/moby/buildkit/frontend/gateway/forwarder"
"github.com/moby/buildkit/solver/bboltcachestorage"
"github.com/moby/buildkit/worker"
"github.com/moby/buildkit/worker/base"
)
// createController builds the buildkit controller that serves the Solve,
// Status, and DiskUsage calls, and caches it on the client. It wires a
// single worker, the dockerfile/gateway frontends, and the cache
// import/export backends together.
func (c *Client) createController() error {
	sm, err := c.getSessionManager()
	if err != nil {
		return fmt.Errorf("creating session manager failed: %v", err)
	}

	// Create the worker opts, with an executor so the controller is able to
	// actually run build steps.
	opt, err := c.createWorkerOpt(true)
	if err != nil {
		return fmt.Errorf("creating worker opt failed: %v", err)
	}

	// Create the new worker.
	w, err := base.NewWorker(opt)
	if err != nil {
		return fmt.Errorf("creating worker failed: %v", err)
	}

	// Create the worker controller and register our single worker with it.
	wc := &worker.Controller{}
	if err := wc.Add(w); err != nil {
		return fmt.Errorf("adding worker to worker controller failed: %v", err)
	}

	// Add the frontends: dockerfile.v0 builds Dockerfiles via the buildkit
	// dockerfile builder; gateway.v0 is the generic gateway frontend.
	frontends := map[string]frontend.Frontend{}
	frontends["dockerfile.v0"] = forwarder.NewGatewayForwarder(wc, builder.Build)
	frontends["gateway.v0"] = gateway.NewGatewayFrontend(wc)

	// Create the cache storage, backed by a bbolt file under the client root.
	cacheStorage, err := bboltcachestorage.NewStore(filepath.Join(c.root, "cache.db"))
	if err != nil {
		return err
	}

	// Cache export backends: inline, local directory, and registry.
	remoteCacheExporterFuncs := map[string]remotecache.ResolveCacheExporterFunc{
		"inline":   inlineremotecache.ResolveCacheExporterFunc(),
		"local":    localremotecache.ResolveCacheExporterFunc(sm),
		"registry": registryremotecache.ResolveCacheExporterFunc(sm, docker.ConfigureDefaultRegistries()),
	}
	// Cache import backends; note there is no "inline" importer.
	remoteCacheImporterFuncs := map[string]remotecache.ResolveCacheImporterFunc{
		"local":    localremotecache.ResolveCacheImporterFunc(sm),
		"registry": registryremotecache.ResolveCacheImporterFunc(sm, opt.ContentStore, docker.ConfigureDefaultRegistries()),
	}

	// Create the controller.
	controller, err := control.NewController(control.Opt{
		SessionManager:            sm,
		WorkerController:          wc,
		Frontends:                 frontends,
		ResolveCacheExporterFuncs: remoteCacheExporterFuncs,
		ResolveCacheImporterFuncs: remoteCacheImporterFuncs,
		CacheKeyStorage:           cacheStorage,
	})
	if err != nil {
		return fmt.Errorf("creating new controller failed: %v", err)
	}

	// Set the controller for the client.
	c.controller = controller
	return nil
}

View File

@@ -1,26 +0,0 @@
package client
import (
"context"
"fmt"
controlapi "github.com/moby/buildkit/api/services/control"
)
// DiskUsage returns the disk usage being consumed by the buildkit controller.
func (c *Client) DiskUsage(ctx context.Context, req *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) {
	// Instantiate the controller lazily on first use.
	if c.controller == nil {
		if err := c.createController(); err != nil {
			return nil, err
		}
	}

	// Delegate the disk-usage query to the controller.
	usage, duErr := c.controller.DiskUsage(ctx, req)
	if duErr != nil {
		return nil, fmt.Errorf("getting disk usage failed: %v", duErr)
	}
	return usage, nil
}

View File

@@ -1,67 +0,0 @@
package client
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/images"
ctdmetadata "github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/platforms"
bolt "go.etcd.io/bbolt"
)
// ListedImage represents an image structure returned from ListImages.
// It extends containerd/images.Image with extra fields.
type ListedImage struct {
	images.Image

	// ContentSize is the image's content size as computed by
	// images.Image.Size over the content store (see ListImages and Pull).
	ContentSize int64
}
// ListImages returns the images from the image store.
//
// The backing bolt metadata database is opened read-only for the duration of
// the call; if it does not exist yet, an empty result is returned.
func (c *Client) ListImages(ctx context.Context, filters ...string) ([]ListedImage, error) {
	dbPath := filepath.Join(c.root, "containerdmeta.db")
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		// The metadata database does not exist so we should just return as
		// if there were no results.
		return nil, nil
	}

	// Open the bolt database for metadata.
	// Since we are only listing we can open it as read-only.
	db, err := bolt.Open(dbPath, 0644, &bolt.Options{ReadOnly: true})
	if err != nil {
		return nil, fmt.Errorf("opening boltdb failed: %v", err)
	}
	// Release the handle (and the file lock) when we are done; the original
	// code leaked it on every call.
	defer db.Close()

	// Create the content store locally.
	contentStore, err := local.NewStore(filepath.Join(c.root, "content"))
	if err != nil {
		return nil, fmt.Errorf("creating content store failed: %v", err)
	}

	// Create the database for metadata.
	mdb := ctdmetadata.NewDB(db, contentStore, nil)

	// Create the image store.
	imageStore := ctdmetadata.NewImageStore(mdb)

	// List the images in the image store.
	i, err := imageStore.List(ctx, filters...)
	if err != nil {
		return nil, fmt.Errorf("listing images with filters (%s) failed: %v", strings.Join(filters, ", "), err)
	}

	// Compute each image's content size; like the original, an empty list is
	// returned as a non-nil slice.
	listedImages := make([]ListedImage, 0, len(i))
	for _, image := range i {
		size, err := image.Size(ctx, contentStore, platforms.Default())
		if err != nil {
			return nil, fmt.Errorf("calculating size of image %s failed: %v", image.Name, err)
		}
		listedImages = append(listedImages, ListedImage{Image: image, ContentSize: size})
	}
	return listedImages, nil
}

View File

@@ -1,61 +0,0 @@
package client
import (
"context"
"fmt"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/worker/base"
"golang.org/x/sync/errgroup"
)
// Prune calls Prune on the worker.
func (c *Client) Prune(ctx context.Context) ([]*controlapi.UsageRecord, error) {
ch := make(chan client.UsageInfo)
// Create the worker opts.
opt, err := c.createWorkerOpt(false)
if err != nil {
return nil, fmt.Errorf("creating worker opt failed: %v", err)
}
// Create the new worker.
w, err := base.NewWorker(opt)
if err != nil {
return nil, fmt.Errorf("creating worker failed: %v", err)
}
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
// Call prune on the worker.
return w.Prune(ctx, ch)
})
eg2, ctx := errgroup.WithContext(ctx)
eg2.Go(func() error {
defer close(ch)
return eg.Wait()
})
usage := []*controlapi.UsageRecord{}
eg2.Go(func() error {
for r := range ch {
usage = append(usage, &controlapi.UsageRecord{
ID: r.ID,
Mutable: r.Mutable,
InUse: r.InUse,
Size_: r.Size,
Parent: r.Parent,
UsageCount: int64(r.UsageCount),
Description: r.Description,
CreatedAt: r.CreatedAt,
LastUsedAt: r.LastUsedAt,
})
}
return nil
})
return usage, eg2.Wait()
}

View File

@@ -1,117 +0,0 @@
package client
import (
"context"
"fmt"
"github.com/containerd/containerd/platforms"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/exporter"
imageexporter "github.com/moby/buildkit/exporter/containerimage"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/source/containerimage"
)
// Pull retrieves an image from a remote registry into the client's local
// image store and returns the stored image together with its content size.
func (c *Client) Pull(ctx context.Context, image string) (*ListedImage, error) {
	sm, err := c.getSessionManager()
	if err != nil {
		return nil, err
	}

	// Parse the image name and tag.
	named, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return nil, fmt.Errorf("parsing image name %q failed: %v", image, err)
	}
	// Add the latest tag if they did not provide one.
	named = reference.TagNameOnly(named)
	image = named.String()

	// Get the identifier for the image.
	identifier, err := source.NewImageIdentifier(image)
	if err != nil {
		return nil, err
	}

	// Create the worker opts. No executor is needed for pulling.
	opt, err := c.createWorkerOpt(false)
	if err != nil {
		return nil, fmt.Errorf("creating worker opt failed: %v", err)
	}

	// Create a cache manager over the worker's stores so pulled layers can
	// be tracked as cache refs.
	cm, err := cache.NewManager(cache.ManagerOpt{
		Snapshotter:    opt.Snapshotter,
		MetadataStore:  opt.MetadataStore,
		ContentStore:   opt.ContentStore,
		LeaseManager:   opt.LeaseManager,
		GarbageCollect: opt.GarbageCollect,
		Applier:        opt.Applier,
	})
	if err != nil {
		return nil, err
	}

	// Create the source for the pull.
	srcOpt := containerimage.SourceOpt{
		Snapshotter:   opt.Snapshotter,
		ContentStore:  opt.ContentStore,
		Applier:       opt.Applier,
		CacheAccessor: cm,
		ImageStore:    opt.ImageStore,
		RegistryHosts: opt.RegistryHosts,
		LeaseManager:  opt.LeaseManager,
	}
	src, err := containerimage.NewSource(srcOpt)
	if err != nil {
		return nil, err
	}

	// Resolve the identifier and snapshot it — presumably this is what
	// performs the actual fetch of the image content; confirm against the
	// buildkit containerimage source.
	s, err := src.Resolve(ctx, identifier, sm)
	if err != nil {
		return nil, err
	}
	ref, err := s.Snapshot(ctx)
	if err != nil {
		return nil, err
	}

	// Create the exporter for the pull, which writes the pulled snapshot
	// back out under the requested name.
	iw, err := imageexporter.NewImageWriter(imageexporter.WriterOpt{
		Snapshotter:  opt.Snapshotter,
		ContentStore: opt.ContentStore,
		Differ:       opt.Differ,
	})
	if err != nil {
		return nil, err
	}
	expOpt := imageexporter.Opt{
		SessionManager: sm,
		ImageWriter:    iw,
		Images:         opt.ImageStore,
		RegistryHosts:  opt.RegistryHosts,
		LeaseManager:   opt.LeaseManager,
	}
	exp, err := imageexporter.New(expOpt)
	if err != nil {
		return nil, err
	}
	e, err := exp.Resolve(ctx, map[string]string{"name": image})
	if err != nil {
		return nil, err
	}
	if _, err := e.Export(ctx, exporter.Source{Ref: ref}); err != nil {
		return nil, err
	}

	// Get the image back out of the image store and compute its size.
	img, err := opt.ImageStore.Get(ctx, image)
	if err != nil {
		return nil, fmt.Errorf("getting image %s from image store failed: %v", image, err)
	}
	size, err := img.Size(ctx, opt.ContentStore, platforms.Default())
	if err != nil {
		return nil, fmt.Errorf("calculating size of image %s failed: %v", img.Name, err)
	}
	return &ListedImage{Image: img, ContentSize: size}, nil
}

View File

@@ -1,38 +0,0 @@
package client
import (
"context"
"fmt"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/util/push"
)
// Push sends an image to a remote registry.
func (c *Client) Push(ctx context.Context, image string, insecure bool) error {
	// Normalize the image reference, defaulting to the "latest" tag when
	// none was supplied.
	named, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return fmt.Errorf("parsing image name %q failed: %v", image, err)
	}
	image = reference.TagNameOnly(named).String()

	// Build the worker options; no executor is required for pushing.
	workerOpt, err := c.createWorkerOpt(false)
	if err != nil {
		return fmt.Errorf("creating worker opt failed: %v", err)
	}

	// Look the image up in the local image store.
	img, err := workerOpt.ImageStore.Get(ctx, image)
	if err != nil {
		return fmt.Errorf("getting image %q failed: %v", image, err)
	}

	sessionManager, err := c.getSessionManager()
	if err != nil {
		return err
	}

	// Push the image's target digest out of the content store.
	return push.Push(ctx, sessionManager, workerOpt.ContentStore, img.Target.Digest, image, insecure, workerOpt.RegistryHosts, false)
}

View File

@@ -1,34 +0,0 @@
package client
import (
"context"
"fmt"
"github.com/containerd/containerd/images"
"github.com/docker/distribution/reference"
)
// RemoveImage removes image from the image store.
func (c *Client) RemoveImage(ctx context.Context, image string) error {
	// Normalize the reference, defaulting to the "latest" tag when none was
	// supplied.
	named, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return fmt.Errorf("parsing image name %q failed: %v", image, err)
	}
	image = reference.TagNameOnly(named).String()

	// Build the worker options; no executor is needed for deletion.
	workerOpt, err := c.createWorkerOpt(false)
	if err != nil {
		return fmt.Errorf("creating worker opt failed: %v", err)
	}

	// Remove the image from the image store, using a synchronous delete as
	// before.
	if err := workerOpt.ImageStore.Delete(ctx, image, images.SynchronousDelete()); err != nil {
		return fmt.Errorf("removing image failed: %v", err)
	}
	return nil
}

View File

@@ -1,53 +0,0 @@
package client
import (
"context"
"errors"
"fmt"
"io"
"github.com/containerd/containerd/images/archive"
"github.com/docker/distribution/reference"
)
// SaveImage exports an image as a tarball which can then be imported by docker.
func (c *Client) SaveImage(ctx context.Context, image, format string, writer io.WriteCloser) error {
	// Normalize the reference, defaulting to the "latest" tag when none was
	// supplied.
	named, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return fmt.Errorf("parsing image name %q failed: %v", image, err)
	}
	image = reference.TagNameOnly(named).String()

	// Build the worker options; no executor is needed for exporting.
	opt, err := c.createWorkerOpt(false)
	if err != nil {
		return fmt.Errorf("creating worker opt failed: %v", err)
	}
	if opt.ImageStore == nil {
		return errors.New("image store is nil")
	}

	exportOpts := []archive.ExportOpt{
		archive.WithImage(opt.ImageStore, image),
	}
	// "docker" needs no extra options; "oci" additionally skips the Docker
	// manifest; anything else is rejected.
	if format == "oci" {
		exportOpts = append(exportOpts, archive.WithSkipDockerManifest())
	} else if format != "docker" {
		return fmt.Errorf("%q is not a valid format", format)
	}

	if err := archive.Export(ctx, opt.ContentStore, writer, exportOpts...); err != nil {
		return fmt.Errorf("exporting image %s failed: %v", image, err)
	}
	return writer.Close()
}

View File

@@ -1,49 +0,0 @@
package client
import (
"context"
"os"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/session/testutil"
"github.com/pkg/errors"
)
// getSessionManager lazily creates the shared session manager on first use
// and returns the cached instance afterwards.
func (c *Client) getSessionManager() (*session.Manager, error) {
	if c.sessionManager != nil {
		return c.sessionManager, nil
	}
	mgr, err := session.NewManager()
	if err != nil {
		return nil, err
	}
	c.sessionManager = mgr
	return c.sessionManager, nil
}
// Session creates the session manager and returns the session and its
// dialer.
func (c *Client) Session(ctx context.Context) (*session.Session, session.Dialer, error) {
	mgr, err := c.getSessionManager()
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create session manager")
	}

	sess, err := session.NewSession(ctx, "img", "")
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create session")
	}

	// Expose the client's local directories to the build over the session.
	dirs := make([]filesync.SyncedDir, 0, len(c.localDirs))
	for name, dir := range c.localDirs {
		dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: dir})
	}
	sess.Allow(filesync.NewFSSyncProvider(dirs))

	// Attach the Docker auth provider so the session can answer registry
	// credential requests.
	sess.Allow(authprovider.NewDockerAuthProvider(os.Stderr))

	return sess, sessionDialer(sess, mgr), nil
}
// sessionDialer returns a dialer that routes session connections straight to
// the manager's HandleConn — presumably an in-process stream; see buildkit's
// session/testutil package.
func sessionDialer(s *session.Session, m *session.Manager) session.Dialer {
	// FIXME: rename testutil
	return session.Dialer(testutil.TestStream(testutil.Handler(m.HandleConn)))
}

View File

@@ -1,68 +0,0 @@
package client
import (
"context"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
)
// Solve calls Solve on the controller.
// Status updates are streamed to ch; ch is closed before Solve returns.
func (c *Client) Solve(ctx context.Context, req *controlapi.SolveRequest, ch chan *controlapi.StatusResponse) error {
	defer close(ch)

	// Lazily create the controller on first use.
	if c.controller == nil {
		if err := c.createController(); err != nil {
			return err
		}
	}

	// statusCtx deliberately derives from Background rather than ctx so the
	// status stream can keep draining after the build context errors.
	statusCtx, cancelStatus := context.WithCancel(context.Background())
	eg, ctx := errgroup.WithContext(ctx)
	eg.Go(func() error {
		defer func() { // make sure the Status ends cleanly on build errors
			go func() {
				// Give the status stream a grace period to drain before
				// cancelling it.
				<-time.After(3 * time.Second)
				cancelStatus()
			}()
		}()
		_, err := c.controller.Solve(ctx, req)
		if err != nil {
			return errors.Wrap(err, "failed to solve")
		}
		return nil
	})
	eg.Go(func() error {
		// Serve the build's status stream into ch until statusCtx is
		// cancelled; controlStatusServer adapts the channel to the grpc
		// stream interface.
		srv := &controlStatusServer{
			ctx: statusCtx,
			ch:  ch,
		}
		return c.controller.Status(&controlapi.StatusRequest{
			Ref: req.Ref,
		}, srv)
	})
	return eg.Wait()
}
// controlStatusServer adapts a channel to the grpc stream interface expected
// by controller.Status so status responses can be consumed in-process.
type controlStatusServer struct {
	// ctx bounds the lifetime of the status stream (see Solve's statusCtx).
	ctx context.Context
	// ch receives each status response as it is sent.
	ch chan *controlapi.StatusResponse

	grpc.ServerStream // dummy embed to satisfy the stream interface
}

// SendMsg forwards a generic message to Send, asserting its concrete type.
func (x *controlStatusServer) SendMsg(m interface{}) error {
	return x.Send(m.(*controlapi.StatusResponse))
}

// Send delivers a status response to the channel consumer; it blocks until
// the consumer reads it.
func (x *controlStatusServer) Send(m *controlapi.StatusResponse) error {
	x.ch <- m
	return nil
}

// Context returns the stream's context.
func (x *controlStatusServer) Context() context.Context {
	return x.ctx
}

View File

@@ -1,68 +0,0 @@
package client
import (
"context"
"errors"
"fmt"
"time"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/docker/distribution/reference"
)
// TagImage creates a reference to an image with a specific name in the image store.
func (c *Client) TagImage(ctx context.Context, src, dest string) error {
	// Normalize both references, defaulting missing tags to "latest".
	src, err := normalizeImageName(src)
	if err != nil {
		return err
	}
	dest, err = normalizeImageName(dest)
	if err != nil {
		return err
	}

	// Create the worker opts; no executor is required for tagging.
	opt, err := c.createWorkerOpt(false)
	if err != nil {
		return fmt.Errorf("creating worker opt failed: %v", err)
	}
	if opt.ImageStore == nil {
		return errors.New("image store is nil")
	}

	// Get the source image.
	image, err := opt.ImageStore.Get(ctx, src)
	if err != nil {
		return fmt.Errorf("getting image %s from image store failed: %v", src, err)
	}

	// Point dest at the source image's target: update in place when it
	// already exists, otherwise create it.
	img := images.Image{
		Name:      dest,
		Target:    image.Target,
		CreatedAt: time.Now(),
	}
	if _, err := opt.ImageStore.Update(ctx, img); err != nil {
		if !errdefs.IsNotFound(err) {
			return fmt.Errorf("updating image store for %s failed: %v", dest, err)
		}
		if _, err := opt.ImageStore.Create(ctx, img); err != nil {
			return fmt.Errorf("creating image in image store for %s failed: %v", dest, err)
		}
	}
	return nil
}

// normalizeImageName parses name and appends the default "latest" tag when
// none was supplied, returning the fully qualified reference string.
func normalizeImageName(name string) (string, error) {
	named, err := reference.ParseNormalizedNamed(name)
	if err != nil {
		return "", fmt.Errorf("parsing image name %q failed: %v", name, err)
	}
	return reference.TagNameOnly(named).String(), nil
}

View File

@@ -1,75 +0,0 @@
package client
import (
"context"
"errors"
"fmt"
"os"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
)
// Unpack exports an image to a rootfs destination directory. The destination
// must not already exist; layers are applied in manifest order.
func (c *Client) Unpack(ctx context.Context, image, dest string) error {
	if len(dest) < 1 {
		return errors.New("destination directory for rootfs cannot be empty")
	}
	if _, err := os.Stat(dest); err == nil {
		return fmt.Errorf("destination directory already exists: %s", dest)
	}

	// Parse the image name and tag.
	named, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return fmt.Errorf("parsing image name %q failed: %v", image, err)
	}
	// Add the latest tag if they did not provide one.
	named = reference.TagNameOnly(named)
	image = named.String()

	// Create the worker opts.
	opt, err := c.createWorkerOpt(true)
	if err != nil {
		return fmt.Errorf("creating worker opt failed: %v", err)
	}
	if opt.ImageStore == nil {
		return errors.New("image store is nil")
	}

	img, err := opt.ImageStore.Get(ctx, image)
	if err != nil {
		return fmt.Errorf("getting image %s from image store failed: %v", image, err)
	}

	manifest, err := images.Manifest(ctx, opt.ContentStore, img.Target, platforms.Default())
	if err != nil {
		return fmt.Errorf("getting image manifest failed: %v", err)
	}

	for _, desc := range manifest.Layers {
		logrus.Debugf("Unpacking layer %s", desc.Digest.String())

		// Read the blob from the content store.
		layer, err := opt.ContentStore.ReaderAt(ctx, desc)
		if err != nil {
			return fmt.Errorf("getting reader for digest %s failed: %v", desc.Digest.String(), err)
		}

		// Unpack the tarfile to the rootfs path.
		// FROM: https://godoc.org/github.com/moby/moby/pkg/archive#TarOptions
		err = archive.Untar(content.NewReader(layer), dest, &archive.TarOptions{
			NoLchown: true,
		})
		// Close the reader before handling the error so we do not leak one
		// open ReaderAt per layer (the original never closed them).
		layer.Close()
		if err != nil {
			return fmt.Errorf("extracting tar for %s to directory %s failed: %v", desc.Digest.String(), dest, err)
		}
	}
	return nil
}

View File

@@ -1,156 +0,0 @@
package client
import (
"context"
"fmt"
"github.com/containerd/containerd/remotes/docker"
"github.com/moby/buildkit/util/leaseutil"
"os/exec"
"path/filepath"
"syscall"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/diff/apply"
"github.com/containerd/containerd/diff/walking"
ctdmetadata "github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/platforms"
ctdsnapshot "github.com/containerd/containerd/snapshots"
"github.com/containerd/containerd/snapshots/native"
"github.com/containerd/containerd/snapshots/overlay"
"github.com/genuinetools/img/types"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/executor"
executoroci "github.com/moby/buildkit/executor/oci"
"github.com/moby/buildkit/executor/runcexecutor"
containerdsnapshot "github.com/moby/buildkit/snapshot/containerd"
"github.com/moby/buildkit/util/binfmt_misc"
"github.com/moby/buildkit/util/network/netproviders"
"github.com/moby/buildkit/worker/base"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/sirupsen/logrus"
bolt "go.etcd.io/bbolt"
)
// createWorkerOpt creates a base.WorkerOpt to be used for a new worker.
// withExecutor controls whether a runc executor is constructed; callers that
// only read or write the stores (push, pull, tag, save, ...) pass false.
func (c *Client) createWorkerOpt(withExecutor bool) (opt base.WorkerOpt, err error) {
	// Create the metadata store.
	md, err := metadata.NewStore(filepath.Join(c.root, "metadata.db"))
	if err != nil {
		return opt, err
	}

	snapshotRoot := filepath.Join(c.root, "snapshots")
	// Rootless when the parent user namespace euid is non-zero.
	unprivileged := system.GetParentNSeuid() != 0

	// Create the snapshotter matching the backend chosen in New.
	var (
		s ctdsnapshot.Snapshotter
	)
	switch c.backend {
	case types.NativeBackend:
		s, err = native.NewSnapshotter(snapshotRoot)
	case types.OverlayFSBackend:
		// On some distros such as Ubuntu overlayfs can be mounted without privileges
		s, err = overlay.NewSnapshotter(snapshotRoot)
	default:
		// "auto" backend needs to be already resolved on Client instantiation
		return opt, fmt.Errorf("%s is not a valid snapshots backend", c.backend)
	}
	if err != nil {
		return opt, fmt.Errorf("creating %s snapshotter failed: %v", c.backend, err)
	}

	// Optionally create the runc executor.
	var exe executor.Executor
	if withExecutor {
		exeOpt := runcexecutor.Opt{
			Root:        filepath.Join(c.root, "executor"),
			Rootless:    unprivileged,
			ProcessMode: processMode(),
		}
		np, err := netproviders.Providers(netproviders.Opt{Mode: "auto"})
		if err != nil {
			return base.WorkerOpt{}, err
		}
		exe, err = runcexecutor.New(exeOpt, np)
		if err != nil {
			return opt, err
		}
	}

	// Create the content store locally.
	contentStore, err := local.NewStore(filepath.Join(c.root, "content"))
	if err != nil {
		return opt, err
	}

	// Open the bolt database for metadata.
	// NOTE(review): db is never closed on the error paths below; on success
	// it must stay open for the lifetime of mdb — confirm intended ownership.
	db, err := bolt.Open(filepath.Join(c.root, "containerdmeta.db"), 0644, nil)
	if err != nil {
		return opt, err
	}

	// Create the new database for metadata, wrapping the content store and
	// the snapshotter.
	mdb := ctdmetadata.NewDB(db, contentStore, map[string]ctdsnapshot.Snapshotter{
		c.backend: s,
	})
	if err := mdb.Init(context.TODO()); err != nil {
		return opt, err
	}

	// Create the image store, and rebind the content store through the
	// metadata DB under the "buildkit" namespace.
	imageStore := ctdmetadata.NewImageStore(mdb)
	contentStore = containerdsnapshot.NewContentStore(mdb.ContentStore(), "buildkit")

	// Worker identity and labels.
	id, err := base.ID(c.root)
	if err != nil {
		return opt, err
	}
	xlabels := base.Labels("oci", c.backend)

	// Collect the platforms this host reports as supported (via binfmt_misc).
	var supportedPlatforms []specs.Platform
	for _, p := range binfmt_misc.SupportedPlatforms(false) {
		parsed, err := platforms.Parse(p)
		if err != nil {
			return opt, err
		}
		supportedPlatforms = append(supportedPlatforms, platforms.Normalize(parsed))
	}

	// Assemble the worker options from everything built above.
	opt = base.WorkerOpt{
		ID:             id,
		Labels:         xlabels,
		MetadataStore:  md,
		Executor:       exe,
		Snapshotter:    containerdsnapshot.NewSnapshotter(c.backend, mdb.Snapshotter(c.backend), "buildkit", nil),
		ContentStore:   contentStore,
		Applier:        apply.NewFileSystemApplier(contentStore),
		Differ:         walking.NewWalkingDiff(contentStore),
		ImageStore:     imageStore,
		Platforms:      supportedPlatforms,
		RegistryHosts:  docker.ConfigureDefaultRegistries(),
		LeaseManager:   leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), "buildkit"),
		GarbageCollect: mdb.GarbageCollect,
	}
	return opt, err
}
// processMode probes whether a process sandbox is usable by attempting to
// mount a fresh procfs inside new PID and mount namespaces; when that fails
// (e.g. masked procfs) the no-sandbox mode is returned.
func processMode() executoroci.ProcessMode {
	cmd := exec.Command("mount", "-t", "proc", "none", "/proc")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Pdeathsig:    syscall.SIGKILL,
		Cloneflags:   syscall.CLONE_NEWPID,
		Unshareflags: syscall.CLONE_NEWNS,
	}
	out, err := cmd.CombinedOutput()
	if err != nil {
		logrus.Warnf("Process sandbox is not available, consider unmasking procfs: %v", string(out))
		return executoroci.NoProcessSandbox
	}
	return executoroci.ProcessSandbox
}