Update gomod and vendor
905 vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go (generated, vendored, new file)
@@ -0,0 +1,905 @@
package ops

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"os"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/containerd/containerd/mount"
	"github.com/containerd/containerd/platforms"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/locker"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/metadata"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/executor"
	"github.com/moby/buildkit/identity"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/secrets"
	"github.com/moby/buildkit/session/sshforward"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/solver/llbsolver"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/progress/logs"
	utilsystem "github.com/moby/buildkit/util/system"
	"github.com/moby/buildkit/worker"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/opencontainers/runc/libcontainer/system"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	bolt "go.etcd.io/bbolt"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

const execCacheType = "buildkit.exec.v0"

type execOp struct {
	op        *pb.ExecOp
	cm        cache.Manager
	sm        *session.Manager
	md        *metadata.Store
	exec      executor.Executor
	w         worker.Worker
	platform  *pb.Platform
	numInputs int

	cacheMounts   map[string]*cacheRefShare
	cacheMountsMu sync.Mutex
}

func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
		return nil, err
	}
	return &execOp{
		op:          op.Exec,
		cm:          cm,
		sm:          sm,
		md:          md,
		exec:        exec,
		numInputs:   len(v.Inputs()),
		w:           w,
		platform:    platform,
		cacheMounts: map[string]*cacheRefShare{},
	}, nil
}

func cloneExecOp(old *pb.ExecOp) pb.ExecOp {
	n := *old
	meta := *n.Meta
	meta.ExtraHosts = nil
	for i := range n.Meta.ExtraHosts {
		h := *n.Meta.ExtraHosts[i]
		meta.ExtraHosts = append(meta.ExtraHosts, &h)
	}
	n.Meta = &meta
	n.Mounts = nil
	for i := range n.Mounts {
		m := *n.Mounts[i]
		n.Mounts = append(n.Mounts, &m)
	}
	return n
}

func (e *execOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) {
	op := cloneExecOp(e.op)
	for i := range op.Meta.ExtraHosts {
		h := op.Meta.ExtraHosts[i]
		h.IP = ""
		op.Meta.ExtraHosts[i] = h
	}
	for i := range op.Mounts {
		op.Mounts[i].Selector = ""
	}
	op.Meta.ProxyEnv = nil

	p := platforms.DefaultSpec()
	if e.platform != nil {
		p = specs.Platform{
			OS:           e.platform.OS,
			Architecture: e.platform.Architecture,
			Variant:      e.platform.Variant,
		}
	}

	dt, err := json.Marshal(struct {
		Type    string
		Exec    *pb.ExecOp
		OS      string
		Arch    string
		Variant string `json:",omitempty"`
	}{
		Type:    execCacheType,
		Exec:    &op,
		OS:      p.OS,
		Arch:    p.Architecture,
		Variant: p.Variant,
	})
	if err != nil {
		return nil, false, err
	}

	cm := &solver.CacheMap{
		Digest: digest.FromBytes(dt),
		Deps: make([]struct {
			Selector          digest.Digest
			ComputeDigestFunc solver.ResultBasedCacheFunc
		}, e.numInputs),
	}

	deps, err := e.getMountDeps()
	if err != nil {
		return nil, false, err
	}

	for i, dep := range deps {
		if len(dep.Selectors) != 0 {
			dgsts := make([][]byte, 0, len(dep.Selectors))
			for _, p := range dep.Selectors {
				dgsts = append(dgsts, []byte(p))
			}
			cm.Deps[i].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0}))
		}
		if !dep.NoContentBasedHash {
			cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors)))
		}
	}

	return cm, true, nil
}

func dedupePaths(inp []string) []string {
	old := make(map[string]struct{}, len(inp))
	for _, p := range inp {
		old[p] = struct{}{}
	}
	paths := make([]string, 0, len(old))
	for p1 := range old {
		var skip bool
		for p2 := range old {
			if p1 != p2 && strings.HasPrefix(p1, p2+"/") {
				skip = true
				break
			}
		}
		if !skip {
			paths = append(paths, p1)
		}
	}
	sort.Slice(paths, func(i, j int) bool {
		return paths[i] < paths[j]
	})
	return paths
}

func toSelectors(p []string) []llbsolver.Selector {
	sel := make([]llbsolver.Selector, 0, len(p))
	for _, p := range p {
		sel = append(sel, llbsolver.Selector{Path: p, FollowLinks: true})
	}
	return sel
}

type dep struct {
	Selectors          []string
	NoContentBasedHash bool
}

func (e *execOp) getMountDeps() ([]dep, error) {
	deps := make([]dep, e.numInputs)
	for _, m := range e.op.Mounts {
		if m.Input == pb.Empty {
			continue
		}
		if int(m.Input) >= len(deps) {
			return nil, errors.Errorf("invalid mount input %v", m)
		}

		sel := m.Selector
		if sel != "" {
			sel = path.Join("/", sel)
			deps[m.Input].Selectors = append(deps[m.Input].Selectors, sel)
		}

		if (!m.Readonly || m.Dest == pb.RootMount) && m.Output != -1 { // exclude read-only rootfs && read-write mounts
			deps[m.Input].NoContentBasedHash = true
		}
	}
	return deps, nil
}

func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
	g := &cacheRefGetter{
		locker:          &e.cacheMountsMu,
		cacheMounts:     e.cacheMounts,
		cm:              e.cm,
		md:              e.md,
		globalCacheRefs: sharedCacheRefs,
		name:            fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")),
	}
	return g.getRefCacheDir(ctx, ref, id, sharing)
}

type cacheRefGetter struct {
	locker          sync.Locker
	cacheMounts     map[string]*cacheRefShare
	cm              cache.Manager
	md              *metadata.Store
	globalCacheRefs *cacheRefs
	name            string
}

func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
	key := "cache-dir:" + id
	if ref != nil {
		key += ":" + ref.ID()
	}
	mu := g.locker
	mu.Lock()
	defer mu.Unlock()

	if ref, ok := g.cacheMounts[key]; ok {
		return ref.clone(), nil
	}
	defer func() {
		if err == nil {
			share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}}
			g.cacheMounts[key] = share
			mref = share.clone()
		}
	}()

	switch sharing {
	case pb.CacheSharingOpt_SHARED:
		return g.globalCacheRefs.get(key, func() (cache.MutableRef, error) {
			return g.getRefCacheDirNoCache(ctx, key, ref, id, false)
		})
	case pb.CacheSharingOpt_PRIVATE:
		return g.getRefCacheDirNoCache(ctx, key, ref, id, false)
	case pb.CacheSharingOpt_LOCKED:
		return g.getRefCacheDirNoCache(ctx, key, ref, id, true)
	default:
		return nil, errors.Errorf("invalid cache sharing option: %s", sharing.String())
	}
}

func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, block bool) (cache.MutableRef, error) {
	makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
		return g.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain)
	}

	cacheRefsLocker.Lock(key)
	defer cacheRefsLocker.Unlock(key)
	for {
		sis, err := g.md.Search(key)
		if err != nil {
			return nil, err
		}
		locked := false
		for _, si := range sis {
			if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil {
				logrus.Debugf("reusing ref for cache dir: %s", mRef.ID())
				return mRef, nil
			} else if errors.Cause(err) == cache.ErrLocked {
				locked = true
			}
		}
		if block && locked {
			cacheRefsLocker.Unlock(key)
			select {
			case <-ctx.Done():
				cacheRefsLocker.Lock(key)
				return nil, ctx.Err()
			case <-time.After(100 * time.Millisecond):
				cacheRefsLocker.Lock(key)
			}
		} else {
			break
		}
	}
	mRef, err := makeMutable(ref)
	if err != nil {
		return nil, err
	}

	si, _ := g.md.Get(mRef.ID())
	v, err := metadata.NewValue(key)
	if err != nil {
		mRef.Release(context.TODO())
		return nil, err
	}
	v.Index = key
	if err := si.Update(func(b *bolt.Bucket) error {
		return si.SetValue(b, key, v)
	}); err != nil {
		mRef.Release(context.TODO())
		return nil, err
	}
	return mRef, nil
}

func (e *execOp) getSSHMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) {
	sessionID := session.FromContext(ctx)
	if sessionID == "" {
		return nil, errors.New("could not access local files without session")
	}

	timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	caller, err := e.sm.Get(timeoutCtx, sessionID)
	if err != nil {
		return nil, err
	}

	if err := sshforward.CheckSSHID(ctx, caller, m.SSHOpt.ID); err != nil {
		if m.SSHOpt.Optional {
			return nil, nil
		}
		if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.Unimplemented {
			return nil, errors.Errorf("no SSH key %q forwarded from the client", m.SSHOpt.ID)
		}
		return nil, err
	}

	return &sshMount{mount: m, caller: caller, idmap: e.cm.IdentityMapping()}, nil
}

type sshMount struct {
	mount  *pb.Mount
	caller session.Caller
	idmap  *idtools.IdentityMapping
}

func (sm *sshMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	return &sshMountInstance{sm: sm, idmap: sm.idmap}, nil
}

type sshMountInstance struct {
	sm    *sshMount
	idmap *idtools.IdentityMapping
}

func (sm *sshMountInstance) Mount() ([]mount.Mount, func() error, error) {
	ctx, cancel := context.WithCancel(context.TODO())

	uid := int(sm.sm.mount.SSHOpt.Uid)
	gid := int(sm.sm.mount.SSHOpt.Gid)

	if sm.idmap != nil {
		identity, err := sm.idmap.ToHost(idtools.Identity{
			UID: uid,
			GID: gid,
		})
		if err != nil {
			return nil, nil, err
		}
		uid = identity.UID
		gid = identity.GID
	}

	sock, cleanup, err := sshforward.MountSSHSocket(ctx, sm.sm.caller, sshforward.SocketOpt{
		ID:   sm.sm.mount.SSHOpt.ID,
		UID:  uid,
		GID:  gid,
		Mode: int(sm.sm.mount.SSHOpt.Mode & 0777),
	})
	if err != nil {
		cancel()
		return nil, nil, err
	}
	release := func() error {
		var err error
		if cleanup != nil {
			err = cleanup()
		}
		cancel()
		return err
	}

	return []mount.Mount{{
		Type:    "bind",
		Source:  sock,
		Options: []string{"rbind"},
	}}, release, nil
}

func (sm *sshMountInstance) IdentityMapping() *idtools.IdentityMapping {
	return sm.idmap
}

func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) {
	if m.SecretOpt == nil {
		return nil, errors.Errorf("invalid secret mount options")
	}
	sopt := *m.SecretOpt

	id := sopt.ID
	if id == "" {
		return nil, errors.Errorf("secret ID missing from mount options")
	}

	sessionID := session.FromContext(ctx)
	if sessionID == "" {
		return nil, errors.New("could not access local files without session")
	}

	timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	caller, err := e.sm.Get(timeoutCtx, sessionID)
	if err != nil {
		return nil, err
	}

	dt, err := secrets.GetSecret(ctx, caller, id)
	if err != nil {
		if errors.Cause(err) == secrets.ErrNotFound && m.SecretOpt.Optional {
			return nil, nil
		}
		return nil, err
	}

	return &secretMount{mount: m, data: dt, idmap: e.cm.IdentityMapping()}, nil
}

type secretMount struct {
	mount *pb.Mount
	data  []byte
	idmap *idtools.IdentityMapping
}

func (sm *secretMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	return &secretMountInstance{sm: sm, idmap: sm.idmap}, nil
}

type secretMountInstance struct {
	sm    *secretMount
	root  string
	idmap *idtools.IdentityMapping
}

func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) {
	dir, err := ioutil.TempDir("", "buildkit-secrets")
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to create temp dir")
	}
	cleanupDir := func() error {
		return os.RemoveAll(dir)
	}

	if err := os.Chmod(dir, 0711); err != nil {
		cleanupDir()
		return nil, nil, err
	}

	tmpMount := mount.Mount{
		Type:    "tmpfs",
		Source:  "tmpfs",
		Options: []string{"nodev", "nosuid", "noexec", fmt.Sprintf("uid=%d,gid=%d", os.Geteuid(), os.Getegid())},
	}

	if system.RunningInUserNS() {
		tmpMount.Options = nil
	}

	if err := mount.All([]mount.Mount{tmpMount}, dir); err != nil {
		cleanupDir()
		return nil, nil, errors.Wrap(err, "unable to setup secret mount")
	}
	sm.root = dir

	cleanup := func() error {
		if err := mount.Unmount(dir, 0); err != nil {
			return err
		}
		return cleanupDir()
	}

	randID := identity.NewID()
	fp := filepath.Join(dir, randID)
	if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil {
		cleanup()
		return nil, nil, err
	}

	uid := int(sm.sm.mount.SecretOpt.Uid)
	gid := int(sm.sm.mount.SecretOpt.Gid)

	if sm.idmap != nil {
		identity, err := sm.idmap.ToHost(idtools.Identity{
			UID: uid,
			GID: gid,
		})
		if err != nil {
			cleanup()
			return nil, nil, err
		}
		uid = identity.UID
		gid = identity.GID
	}

	if err := os.Chown(fp, uid, gid); err != nil {
		cleanup()
		return nil, nil, err
	}

	if err := os.Chmod(fp, os.FileMode(sm.sm.mount.SecretOpt.Mode&0777)); err != nil {
		cleanup()
		return nil, nil, err
	}

	return []mount.Mount{{
		Type:    "bind",
		Source:  fp,
		Options: []string{"ro", "rbind", "nodev", "nosuid", "noexec"},
	}}, cleanup, nil
}

func (sm *secretMountInstance) IdentityMapping() *idtools.IdentityMapping {
	return sm.idmap
}

func addDefaultEnvvar(env []string, k, v string) []string {
	for _, e := range env {
		if strings.HasPrefix(e, k+"=") {
			return env
		}
	}
	return append(env, k+"="+v)
}

func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) {
	var mounts []executor.Mount
	var root cache.Mountable
	var readonlyRootFS bool

	var outputs []cache.Ref

	defer func() {
		for _, o := range outputs {
			if o != nil {
				go o.Release(context.TODO())
			}
		}
	}()

	// loop over all mounts, fill in mounts, root and outputs
	for _, m := range e.op.Mounts {
		var mountable cache.Mountable
		var ref cache.ImmutableRef

		if m.Dest == pb.RootMount && m.MountType != pb.MountType_BIND {
			return nil, errors.Errorf("invalid mount type %s for %s", m.MountType.String(), m.Dest)
		}

		// if mount is based on input validate and load it
		if m.Input != pb.Empty {
			if int(m.Input) > len(inputs) {
				return nil, errors.Errorf("missing input %d", m.Input)
			}
			inp := inputs[int(m.Input)]
			workerRef, ok := inp.Sys().(*worker.WorkerRef)
			if !ok {
				return nil, errors.Errorf("invalid reference for exec %T", inp.Sys())
			}
			ref = workerRef.ImmutableRef
			mountable = ref
		}

		makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
			desc := fmt.Sprintf("mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " "))
			return e.cm.New(ctx, ref, cache.WithDescription(desc))
		}

		switch m.MountType {
		case pb.MountType_BIND:
			// if mount creates an output
			if m.Output != pb.SkipOutput {
				// if it is readonly and not root then output is the input
				if m.Readonly && ref != nil && m.Dest != pb.RootMount {
					outputs = append(outputs, ref.Clone())
				} else {
					// otherwise output and mount is the mutable child
					active, err := makeMutable(ref)
					if err != nil {
						return nil, err
					}
					outputs = append(outputs, active)
					mountable = active
				}
			} else if (!m.Readonly || ref == nil) && m.Dest != pb.RootMount {
				// this case is empty readonly scratch without output that is not really useful for anything but don't error
				active, err := makeMutable(ref)
				if err != nil {
					return nil, err
				}
				defer active.Release(context.TODO())
				mountable = active
			}

		case pb.MountType_CACHE:
			if m.CacheOpt == nil {
				return nil, errors.Errorf("missing cache mount options")
			}
			mRef, err := e.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m, m.CacheOpt.Sharing)
			if err != nil {
				return nil, err
			}
			mountable = mRef
			defer func() {
				go mRef.Release(context.TODO())
			}()
			if m.Output != pb.SkipOutput && ref != nil {
				outputs = append(outputs, ref.Clone())
			}

		case pb.MountType_TMPFS:
			mountable = newTmpfs(e.cm.IdentityMapping())

		case pb.MountType_SECRET:
			secretMount, err := e.getSecretMountable(ctx, m)
			if err != nil {
				return nil, err
			}
			if secretMount == nil {
				continue
			}
			mountable = secretMount

		case pb.MountType_SSH:
			sshMount, err := e.getSSHMountable(ctx, m)
			if err != nil {
				return nil, err
			}
			if sshMount == nil {
				continue
			}
			mountable = sshMount

		default:
			return nil, errors.Errorf("mount type %s not implemented", m.MountType)
		}

		// validate that there is a mount
		if mountable == nil {
			return nil, errors.Errorf("mount %s has no input", m.Dest)
		}

		// if dest is root we need mutable ref even if there is no output
		if m.Dest == pb.RootMount {
			root = mountable
			readonlyRootFS = m.Readonly
			if m.Output == pb.SkipOutput && readonlyRootFS {
				active, err := makeMutable(ref)
				if err != nil {
					return nil, err
				}
				defer func() {
					go active.Release(context.TODO())
				}()
				root = active
			}
		} else {
			mounts = append(mounts, executor.Mount{Src: mountable, Dest: m.Dest, Readonly: m.Readonly, Selector: m.Selector})
		}
	}

	// sort mounts so parents are mounted first
	sort.Slice(mounts, func(i, j int) bool {
		return mounts[i].Dest < mounts[j].Dest
	})

	extraHosts, err := parseExtraHosts(e.op.Meta.ExtraHosts)
	if err != nil {
		return nil, err
	}

	meta := executor.Meta{
		Args:           e.op.Meta.Args,
		Env:            e.op.Meta.Env,
		Cwd:            e.op.Meta.Cwd,
		User:           e.op.Meta.User,
		ReadonlyRootFS: readonlyRootFS,
		ExtraHosts:     extraHosts,
		NetMode:        e.op.Network,
		SecurityMode:   e.op.Security,
	}

	if e.op.Meta.ProxyEnv != nil {
		meta.Env = append(meta.Env, proxyEnvList(e.op.Meta.ProxyEnv)...)
	}
	meta.Env = addDefaultEnvvar(meta.Env, "PATH", utilsystem.DefaultPathEnv)

	stdout, stderr := logs.NewLogStreams(ctx, os.Getenv("BUILDKIT_DEBUG_EXEC_OUTPUT") == "1")
	defer stdout.Close()
	defer stderr.Close()

	if err := e.exec.Exec(ctx, meta, root, mounts, nil, stdout, stderr); err != nil {
		return nil, errors.Wrapf(err, "executor failed running %v", meta.Args)
	}

	refs := []solver.Result{}
	for i, out := range outputs {
		if mutable, ok := out.(cache.MutableRef); ok {
			ref, err := mutable.Commit(ctx)
			if err != nil {
				return nil, errors.Wrapf(err, "error committing %s", mutable.ID())
			}
			refs = append(refs, worker.NewWorkerRefResult(ref, e.w))
		} else {
			refs = append(refs, worker.NewWorkerRefResult(out.(cache.ImmutableRef), e.w))
		}
		outputs[i] = nil
	}
	return refs, nil
}

func proxyEnvList(p *pb.ProxyEnv) []string {
	out := []string{}
	if v := p.HttpProxy; v != "" {
		out = append(out, "HTTP_PROXY="+v, "http_proxy="+v)
	}
	if v := p.HttpsProxy; v != "" {
		out = append(out, "HTTPS_PROXY="+v, "https_proxy="+v)
	}
	if v := p.FtpProxy; v != "" {
		out = append(out, "FTP_PROXY="+v, "ftp_proxy="+v)
	}
	if v := p.NoProxy; v != "" {
		out = append(out, "NO_PROXY="+v, "no_proxy="+v)
	}
	return out
}

func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable {
	return &tmpfs{idmap: idmap}
}

type tmpfs struct {
	idmap *idtools.IdentityMapping
}

func (f *tmpfs) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	return &tmpfsMount{readonly: readonly, idmap: f.idmap}, nil
}

type tmpfsMount struct {
	readonly bool
	idmap    *idtools.IdentityMapping
}

func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) {
	opt := []string{"nosuid"}
	if m.readonly {
		opt = append(opt, "ro")
	}
	return []mount.Mount{{
		Type:    "tmpfs",
		Source:  "tmpfs",
		Options: opt,
	}}, func() error { return nil }, nil
}

func (m *tmpfsMount) IdentityMapping() *idtools.IdentityMapping {
	return m.idmap
}

var cacheRefsLocker = locker.New()
var sharedCacheRefs = &cacheRefs{}

type cacheRefs struct {
	mu     sync.Mutex
	shares map[string]*cacheRefShare
}

// ClearActiveCacheMounts clears shared cache mounts currently in use.
// Caller needs to hold CacheMountsLocker before calling
func ClearActiveCacheMounts() {
	sharedCacheRefs.shares = nil
}

func CacheMountsLocker() sync.Locker {
	return &sharedCacheRefs.mu
}

func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.shares == nil {
		r.shares = map[string]*cacheRefShare{}
	}

	share, ok := r.shares[key]
	if ok {
		return share.clone(), nil
	}

	mref, err := fn()
	if err != nil {
		return nil, err
	}

	share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}}
	r.shares[key] = share
	return share.clone(), nil
}

type cacheRefShare struct {
	cache.MutableRef
	mu   sync.Mutex
	refs map[*cacheRef]struct{}
	main *cacheRefs
	key  string
}

func (r *cacheRefShare) clone() cache.MutableRef {
	cacheRef := &cacheRef{cacheRefShare: r}
	if cacheRefCloneHijack != nil {
		cacheRefCloneHijack()
	}
	r.mu.Lock()
	r.refs[cacheRef] = struct{}{}
	r.mu.Unlock()
	return cacheRef
}

func (r *cacheRefShare) release(ctx context.Context) error {
	if r.main != nil {
		delete(r.main.shares, r.key)
	}
	return r.MutableRef.Release(ctx)
}

var cacheRefReleaseHijack func()
var cacheRefCloneHijack func()

type cacheRef struct {
	*cacheRefShare
}

func (r *cacheRef) Release(ctx context.Context) error {
	if r.main != nil {
		r.main.mu.Lock()
		defer r.main.mu.Unlock()
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.refs, r)
	if len(r.refs) == 0 {
		if cacheRefReleaseHijack != nil {
			cacheRefReleaseHijack()
		}
		return r.release(ctx)
	}
	return nil
}

func parseExtraHosts(ips []*pb.HostIP) ([]executor.HostIP, error) {
	out := make([]executor.HostIP, len(ips))
	for i, hip := range ips {
		ip := net.ParseIP(hip.IP)
		if ip == nil {
			return nil, errors.Errorf("failed to parse IP %s", hip.IP)
		}
		out[i] = executor.HostIP{
			IP:   ip,
			Host: hip.Host,
		}
	}
	return out, nil
}