Mirror of https://github.com/mudler/luet.git (synced 2025-09-06 01:30:29 +00:00)
Update gomod and vendor
vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go (generated, vendored, new file, 324 lines added)
@@ -0,0 +1,324 @@
package llbsolver

import (
	"context"
	"fmt"
	"io"
	"strings"
	"sync"
	"time"

	"github.com/containerd/containerd/platforms"
	"github.com/mitchellh/hashstructure"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/remotecache"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/executor"
	"github.com/moby/buildkit/frontend"
	gw "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/tracing"
	"github.com/moby/buildkit/worker"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

type llbBridge struct {
	builder                   solver.Builder
	frontends                 map[string]frontend.Frontend
	resolveWorker             func() (worker.Worker, error)
	eachWorker                func(func(worker.Worker) error) error
	resolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc
	cms                       map[string]solver.CacheManager
	cmsMu                     sync.Mutex
	platforms                 []specs.Platform
	sm                        *session.Manager
}

func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry) (solver.CachedResult, error) {
	w, err := b.resolveWorker()
	if err != nil {
		return nil, err
	}
	ent, err := loadEntitlements(b.builder)
	if err != nil {
		return nil, err
	}
	var cms []solver.CacheManager
	for _, im := range cacheImports {
		cmID, err := cmKey(im)
		if err != nil {
			return nil, err
		}
		b.cmsMu.Lock()
		var cm solver.CacheManager
		if prevCm, ok := b.cms[cmID]; !ok {
			func(cmID string, im gw.CacheOptionsEntry) {
				cm = newLazyCacheManager(cmID, func() (solver.CacheManager, error) {
					var cmNew solver.CacheManager
					if err := inVertexContext(b.builder.Context(context.TODO()), "importing cache manifest from "+cmID, "", func(ctx context.Context) error {
						resolveCI, ok := b.resolveCacheImporterFuncs[im.Type]
						if !ok {
							return errors.Errorf("unknown cache importer: %s", im.Type)
						}
						ci, desc, err := resolveCI(ctx, im.Attrs)
						if err != nil {
							return err
						}
						cmNew, err = ci.Resolve(ctx, desc, cmID, w)
						return err
					}); err != nil {
						logrus.Debugf("error while importing cache manifest from cmId=%s: %v", cmID, err)
						return nil, err
					}
					return cmNew, nil
				})
			}(cmID, im)
			b.cms[cmID] = cm
		} else {
			cm = prevCm
		}
		cms = append(cms, cm)
		b.cmsMu.Unlock()
	}
	dpc := &detectPrunedCacheID{}

	edge, err := Load(def, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps())
	if err != nil {
		return nil, errors.Wrap(err, "failed to load LLB")
	}

	if len(dpc.ids) > 0 {
		ids := make([]string, 0, len(dpc.ids))
		for id := range dpc.ids {
			ids = append(ids, id)
		}
		if err := b.eachWorker(func(w worker.Worker) error {
			return w.PruneCacheMounts(ctx, ids)
		}); err != nil {
			return nil, err
		}
	}

	res, err := b.builder.Build(ctx, edge)
	if err != nil {
		return nil, err
	}
	wr, ok := res.Sys().(*worker.WorkerRef)
	if !ok {
		return nil, errors.Errorf("invalid reference for exporting: %T", res.Sys())
	}
	if wr.ImmutableRef != nil {
		if err := wr.ImmutableRef.Finalize(ctx, false); err != nil {
			return nil, err
		}
	}
	return res, err
}

func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *frontend.Result, err error) {
	if req.Definition != nil && req.Definition.Def != nil && req.Frontend != "" {
		return nil, errors.New("cannot solve with both Definition and Frontend specified")
	}

	if req.Definition != nil && req.Definition.Def != nil {
		res = &frontend.Result{Ref: newResultProxy(b, req)}
	} else if req.Frontend != "" {
		f, ok := b.frontends[req.Frontend]
		if !ok {
			return nil, errors.Errorf("invalid frontend: %s", req.Frontend)
		}
		res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to solve with frontend %s", req.Frontend)
		}
	} else {
		return &frontend.Result{}, nil
	}

	return
}

type resultProxy struct {
	cb       func(context.Context) (solver.CachedResult, error)
	def      *pb.Definition
	g        flightcontrol.Group
	mu       sync.Mutex
	released bool
	v        solver.CachedResult
	err      error
}

func newResultProxy(b *llbBridge, req frontend.SolveRequest) *resultProxy {
	return &resultProxy{
		def: req.Definition,
		cb: func(ctx context.Context) (solver.CachedResult, error) {
			return b.loadResult(ctx, req.Definition, req.CacheImports)
		},
	}
}

func (rp *resultProxy) Definition() *pb.Definition {
	return rp.def
}

func (rp *resultProxy) Release(ctx context.Context) error {
	rp.mu.Lock()
	defer rp.mu.Unlock()
	if rp.v != nil {
		if rp.released {
			logrus.Warnf("release of already released result")
		}
		if err := rp.v.Release(ctx); err != nil {
			return err
		}
	}
	rp.released = true
	return nil
}

func (rp *resultProxy) Result(ctx context.Context) (solver.CachedResult, error) {
	r, err := rp.g.Do(ctx, "result", func(ctx context.Context) (interface{}, error) {
		rp.mu.Lock()
		if rp.released {
			rp.mu.Unlock()
			return nil, errors.Errorf("accessing released result")
		}
		if rp.v != nil || rp.err != nil {
			rp.mu.Unlock()
			return rp.v, rp.err
		}
		rp.mu.Unlock()
		v, err := rp.cb(ctx)
		if err != nil {
			select {
			case <-ctx.Done():
				if strings.Contains(err.Error(), context.Canceled.Error()) {
					return v, err
				}
			default:
			}
		}
		rp.mu.Lock()
		if rp.released {
			if v != nil {
				v.Release(context.TODO())
			}
			rp.mu.Unlock()
			return nil, errors.Errorf("evaluating released result")
		}
		rp.v = v
		rp.err = err
		rp.mu.Unlock()
		return v, err
	})
	if r != nil {
		return r.(solver.CachedResult), nil
	}
	return nil, err
}

func (s *llbBridge) Exec(ctx context.Context, meta executor.Meta, root cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) (err error) {
	w, err := s.resolveWorker()
	if err != nil {
		return err
	}
	span, ctx := tracing.StartSpan(ctx, strings.Join(meta.Args, " "))
	err = w.Exec(ctx, meta, root, stdin, stdout, stderr)
	tracing.FinishWithError(span, err)
	return err
}

func (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) {
	w, err := s.resolveWorker()
	if err != nil {
		return "", nil, err
	}
	if opt.LogName == "" {
		opt.LogName = fmt.Sprintf("resolve image config for %s", ref)
	}
	id := ref // make a deterministic ID for avoiding duplicates
	if platform := opt.Platform; platform == nil {
		id += platforms.Format(platforms.DefaultSpec())
	} else {
		id += platforms.Format(*platform)
	}
	err = inVertexContext(s.builder.Context(ctx), opt.LogName, id, func(ctx context.Context) error {
		dgst, config, err = w.ResolveImageConfig(ctx, ref, opt, s.sm)
		return err
	})
	return dgst, config, err
}

type lazyCacheManager struct {
	id   string
	main solver.CacheManager

	waitCh chan struct{}
	err    error
}

func (lcm *lazyCacheManager) ID() string {
	return lcm.id
}
func (lcm *lazyCacheManager) Query(inp []solver.CacheKeyWithSelector, inputIndex solver.Index, dgst digest.Digest, outputIndex solver.Index) ([]*solver.CacheKey, error) {
	lcm.wait()
	if lcm.main == nil {
		return nil, nil
	}
	return lcm.main.Query(inp, inputIndex, dgst, outputIndex)
}
func (lcm *lazyCacheManager) Records(ck *solver.CacheKey) ([]*solver.CacheRecord, error) {
	lcm.wait()
	if lcm.main == nil {
		return nil, nil
	}
	return lcm.main.Records(ck)
}
func (lcm *lazyCacheManager) Load(ctx context.Context, rec *solver.CacheRecord) (solver.Result, error) {
	if err := lcm.wait(); err != nil {
		return nil, err
	}
	return lcm.main.Load(ctx, rec)
}
func (lcm *lazyCacheManager) Save(key *solver.CacheKey, s solver.Result, createdAt time.Time) (*solver.ExportableCacheKey, error) {
	if err := lcm.wait(); err != nil {
		return nil, err
	}
	return lcm.main.Save(key, s, createdAt)
}

func (lcm *lazyCacheManager) wait() error {
	<-lcm.waitCh
	return lcm.err
}

func newLazyCacheManager(id string, fn func() (solver.CacheManager, error)) solver.CacheManager {
	lcm := &lazyCacheManager{id: id, waitCh: make(chan struct{})}
	go func() {
		defer close(lcm.waitCh)
		cm, err := fn()
		if err != nil {
			lcm.err = err
			return
		}
		lcm.main = cm
	}()
	return lcm
}

func cmKey(im gw.CacheOptionsEntry) (string, error) {
	if im.Type == "registry" && im.Attrs["ref"] != "" {
		return im.Attrs["ref"], nil
	}
	i, err := hashstructure.Hash(im, nil)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s:%d", im.Type, i), nil
}
vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go (generated, vendored, new file, 342 lines added)
@@ -0,0 +1,342 @@
package file

import (
	"context"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/containerd/continuity/fs"
	"github.com/docker/docker/pkg/idtools"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
	"github.com/moby/buildkit/solver/pb"
	"github.com/pkg/errors"
	copy "github.com/tonistiigi/fsutil/copy"
)

func timestampToTime(ts int64) *time.Time {
	if ts == -1 {
		return nil
	}
	tm := time.Unix(ts/1e9, ts%1e9)
	return &tm
}

func mapUserToChowner(user *copy.User, idmap *idtools.IdentityMapping) (copy.Chowner, error) {
	if user == nil {
		return func(old *copy.User) (*copy.User, error) {
			if old == nil {
				if idmap == nil {
					return nil, nil
				}
				old = &copy.User{} // root
				// non-nil old is already mapped
				if idmap != nil {
					identity, err := idmap.ToHost(idtools.Identity{
						UID: old.Uid,
						GID: old.Gid,
					})
					if err != nil {
						return nil, err
					}
					return &copy.User{Uid: identity.UID, Gid: identity.GID}, nil
				}
			}
			return old, nil
		}, nil
	}
	u := *user
	if idmap != nil {
		identity, err := idmap.ToHost(idtools.Identity{
			UID: user.Uid,
			GID: user.Gid,
		})
		if err != nil {
			return nil, err
		}
		u.Uid = identity.UID
		u.Gid = identity.GID
	}
	return func(*copy.User) (*copy.User, error) {
		return &u, nil
	}, nil
}

func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.User, idmap *idtools.IdentityMapping) error {
	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
	if err != nil {
		return err
	}

	ch, err := mapUserToChowner(user, idmap)
	if err != nil {
		return err
	}

	if action.MakeParents {
		if err := copy.MkdirAll(p, os.FileMode(action.Mode)&0777, ch, timestampToTime(action.Timestamp)); err != nil {
			return err
		}
	} else {
		if err := os.Mkdir(p, os.FileMode(action.Mode)&0777); err != nil {
			if os.IsExist(err) {
				return nil
			}
			return err
		}
		if err := copy.Chown(p, nil, ch); err != nil {
			return err
		}
		if err := copy.Utimes(p, timestampToTime(action.Timestamp)); err != nil {
			return err
		}
	}

	return nil
}

func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.User, idmap *idtools.IdentityMapping) error {
	p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
	if err != nil {
		return err
	}

	ch, err := mapUserToChowner(user, idmap)
	if err != nil {
		return err
	}

	if err := ioutil.WriteFile(p, action.Data, os.FileMode(action.Mode)&0777); err != nil {
		return err
	}

	if err := copy.Chown(p, nil, ch); err != nil {
		return err
	}

	if err := copy.Utimes(p, timestampToTime(action.Timestamp)); err != nil {
		return err
	}

	return nil
}

func rm(ctx context.Context, d string, action pb.FileActionRm) error {
	if action.AllowWildcard {
		src := cleanPath(action.Path)
		m, err := copy.ResolveWildcards(d, src, false)
		if err != nil {
			return err
		}

		for _, s := range m {
			if err := rmPath(d, s, action.AllowNotFound); err != nil {
				return err
			}
		}

		return nil
	}

	return rmPath(d, action.Path, action.AllowNotFound)
}

func rmPath(root, src string, allowNotFound bool) error {
	p, err := fs.RootPath(root, filepath.Join(filepath.Join("/", src)))
	if err != nil {
		return err
	}

	if err := os.RemoveAll(p); err != nil {
		if os.IsNotExist(errors.Cause(err)) && allowNotFound {
			return nil
		}
		return err
	}

	return nil
}

func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.User, idmap *idtools.IdentityMapping) error {
	srcPath := cleanPath(action.Src)
	destPath := cleanPath(action.Dest)

	if !action.CreateDestPath {
		p, err := fs.RootPath(dest, filepath.Join(filepath.Join("/", action.Dest)))
		if err != nil {
			return err
		}
		if _, err := os.Lstat(filepath.Dir(p)); err != nil {
			return errors.Wrapf(err, "failed to stat %s", action.Dest)
		}
	}

	xattrErrorHandler := func(dst, src, key string, err error) error {
		log.Println(err)
		return nil
	}

	ch, err := mapUserToChowner(u, idmap)
	if err != nil {
		return err
	}

	opt := []copy.Opt{
		func(ci *copy.CopyInfo) {
			ci.Chown = ch
			ci.Utime = timestampToTime(action.Timestamp)
			if m := int(action.Mode); m != -1 {
				ci.Mode = &m
			}
			ci.CopyDirContents = action.DirCopyContents
			ci.FollowLinks = action.FollowSymlink
		},
		copy.WithXAttrErrorHandler(xattrErrorHandler),
	}

	if !action.AllowWildcard {
		if action.AttemptUnpackDockerCompatibility {
			if ok, err := unpack(ctx, src, srcPath, dest, destPath, ch, timestampToTime(action.Timestamp)); err != nil {
				return err
			} else if ok {
				return nil
			}
		}
		return copy.Copy(ctx, src, srcPath, dest, destPath, opt...)
	}

	m, err := copy.ResolveWildcards(src, srcPath, action.FollowSymlink)
	if err != nil {
		return err
	}

	if len(m) == 0 {
		if action.AllowEmptyWildcard {
			return nil
		}
		return errors.Errorf("%s not found", srcPath)
	}

	for _, s := range m {
		if action.AttemptUnpackDockerCompatibility {
			if ok, err := unpack(ctx, src, s, dest, destPath, ch, timestampToTime(action.Timestamp)); err != nil {
				return err
			} else if ok {
				continue
			}
		}
		if err := copy.Copy(ctx, src, s, dest, destPath, opt...); err != nil {
			return err
		}
	}

	return nil
}

func cleanPath(s string) string {
	s2 := filepath.Join("/", s)
	if strings.HasSuffix(s, "/.") {
		if s2 != "/" {
			s2 += "/"
		}
		s2 += "."
	} else if strings.HasSuffix(s, "/") && s2 != "/" {
		s2 += "/"
	}
	return s2
}

type Backend struct {
}

func (fb *Backend) Mkdir(ctx context.Context, m, user, group fileoptypes.Mount, action pb.FileActionMkDir) error {
	mnt, ok := m.(*Mount)
	if !ok {
		return errors.Errorf("invalid mount type %T", m)
	}

	lm := snapshot.LocalMounter(mnt.m)
	dir, err := lm.Mount()
	if err != nil {
		return err
	}
	defer lm.Unmount()

	u, err := readUser(action.Owner, user, group)
	if err != nil {
		return err
	}

	return mkdir(ctx, dir, action, u, mnt.m.IdentityMapping())
}

func (fb *Backend) Mkfile(ctx context.Context, m, user, group fileoptypes.Mount, action pb.FileActionMkFile) error {
	mnt, ok := m.(*Mount)
	if !ok {
		return errors.Errorf("invalid mount type %T", m)
	}

	lm := snapshot.LocalMounter(mnt.m)
	dir, err := lm.Mount()
	if err != nil {
		return err
	}
	defer lm.Unmount()

	u, err := readUser(action.Owner, user, group)
	if err != nil {
		return err
	}

	return mkfile(ctx, dir, action, u, mnt.m.IdentityMapping())
}
func (fb *Backend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileActionRm) error {
	mnt, ok := m.(*Mount)
	if !ok {
		return errors.Errorf("invalid mount type %T", m)
	}

	lm := snapshot.LocalMounter(mnt.m)
	dir, err := lm.Mount()
	if err != nil {
		return err
	}
	defer lm.Unmount()

	return rm(ctx, dir, action)
}
func (fb *Backend) Copy(ctx context.Context, m1, m2, user, group fileoptypes.Mount, action pb.FileActionCopy) error {
	mnt1, ok := m1.(*Mount)
	if !ok {
		return errors.Errorf("invalid mount type %T", m1)
	}
	mnt2, ok := m2.(*Mount)
	if !ok {
		return errors.Errorf("invalid mount type %T", m2)
	}

	lm := snapshot.LocalMounter(mnt1.m)
	src, err := lm.Mount()
	if err != nil {
		return err
	}
	defer lm.Unmount()

	lm2 := snapshot.LocalMounter(mnt2.m)
	dest, err := lm2.Mount()
	if err != nil {
		return err
	}
	defer lm2.Unmount()

	u, err := readUser(action.Owner, user, group)
	if err != nil {
		return err
	}

	return docopy(ctx, src, dest, action, u, mnt2.m.IdentityMapping())
}
vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go (generated, vendored, new file, 70 lines added)
@@ -0,0 +1,70 @@
package file

import (
	"context"

	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
	"github.com/pkg/errors"
)

func NewRefManager(cm cache.Manager) *RefManager {
	return &RefManager{cm: cm}
}

type RefManager struct {
	cm cache.Manager
}

func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool) (fileoptypes.Mount, error) {
	ir, ok := ref.(cache.ImmutableRef)
	if !ok && ref != nil {
		return nil, errors.Errorf("invalid ref type: %T", ref)
	}

	if ir != nil && readonly {
		m, err := ir.Mount(ctx, readonly)
		if err != nil {
			return nil, err
		}
		return &Mount{m: m}, nil
	}

	mr, err := rm.cm.New(ctx, ir, cache.WithDescription("fileop target"), cache.CachePolicyRetain)
	if err != nil {
		return nil, err
	}
	m, err := mr.Mount(ctx, readonly)
	if err != nil {
		return nil, err
	}
	return &Mount{m: m, mr: mr}, nil
}

func (rm *RefManager) Commit(ctx context.Context, mount fileoptypes.Mount) (fileoptypes.Ref, error) {
	m, ok := mount.(*Mount)
	if !ok {
		return nil, errors.Errorf("invalid mount type %T", mount)
	}
	if m.mr == nil {
		return nil, errors.Errorf("invalid mount without active ref for commit")
	}
	defer func() {
		m.mr = nil
	}()
	return m.mr.Commit(ctx)
}

type Mount struct {
	m  snapshot.Mountable
	mr cache.MutableRef
}

func (m *Mount) Release(ctx context.Context) error {
	if m.mr != nil {
		return m.mr.Release(ctx)
	}
	return nil
}
func (m *Mount) IsFileOpMount() {}
vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go (generated, vendored, new file, 61 lines added)
@@ -0,0 +1,61 @@
package file

import (
	"archive/tar"
	"context"
	"os"
	"time"

	"github.com/containerd/continuity/fs"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	copy "github.com/tonistiigi/fsutil/copy"
)

func unpack(ctx context.Context, srcRoot string, src string, destRoot string, dest string, ch copy.Chowner, tm *time.Time) (bool, error) {
	src, err := fs.RootPath(srcRoot, src)
	if err != nil {
		return false, err
	}
	if !isArchivePath(src) {
		return false, nil
	}

	dest, err = fs.RootPath(destRoot, dest)
	if err != nil {
		return false, err
	}
	if err := copy.MkdirAll(dest, 0755, ch, tm); err != nil {
		return false, err
	}

	file, err := os.Open(src)
	if err != nil {
		return false, err
	}
	defer file.Close()

	return true, chrootarchive.Untar(file, dest, nil)
}

func isArchivePath(path string) bool {
	fi, err := os.Lstat(path)
	if err != nil {
		return false
	}
	if fi.Mode()&os.ModeType != 0 {
		return false
	}
	file, err := os.Open(path)
	if err != nil {
		return false
	}
	defer file.Close()
	rdr, err := archive.DecompressStream(file)
	if err != nil {
		return false
	}
	r := tar.NewReader(rdr)
	_, err = r.Next()
	return err == nil
}
vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go (generated, vendored, new file, 119 lines added)
@@ -0,0 +1,119 @@
package file

import (
	"os"

	"github.com/containerd/continuity/fs"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
	"github.com/moby/buildkit/solver/pb"
	"github.com/opencontainers/runc/libcontainer/user"
	"github.com/pkg/errors"
	copy "github.com/tonistiigi/fsutil/copy"
)

func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error) {
	if chopt == nil {
		return nil, nil
	}
	var us copy.User
	if chopt.User != nil {
		switch u := chopt.User.User.(type) {
		case *pb.UserOpt_ByName:
			if mu == nil {
				return nil, errors.Errorf("invalid missing user mount")
			}
			mmu, ok := mu.(*Mount)
			if !ok {
				return nil, errors.Errorf("invalid mount type %T", mu)
			}
			lm := snapshot.LocalMounter(mmu.m)
			dir, err := lm.Mount()
			if err != nil {
				return nil, err
			}
			defer lm.Unmount()

			passwdPath, err := user.GetPasswdPath()
			if err != nil {
				return nil, err
			}

			passwdPath, err = fs.RootPath(dir, passwdPath)
			if err != nil {
				return nil, err
			}

			ufile, err := os.Open(passwdPath)
			if err != nil {
				return nil, err
			}
			defer ufile.Close()

			users, err := user.ParsePasswdFilter(ufile, func(uu user.User) bool {
				return uu.Name == u.ByName.Name
			})
			if err != nil {
				return nil, err
			}

			if len(users) > 0 {
				us.Uid = users[0].Uid
				us.Gid = users[0].Gid
			}
		case *pb.UserOpt_ByID:
			us.Uid = int(u.ByID)
			us.Gid = int(u.ByID)
		}
	}

	if chopt.Group != nil {
		switch u := chopt.Group.User.(type) {
		case *pb.UserOpt_ByName:
			if mg == nil {
				return nil, errors.Errorf("invalid missing group mount")
			}
			mmg, ok := mg.(*Mount)
			if !ok {
				return nil, errors.Errorf("invalid mount type %T", mg)
			}
			lm := snapshot.LocalMounter(mmg.m)
			dir, err := lm.Mount()
			if err != nil {
				return nil, err
			}
			defer lm.Unmount()

			groupPath, err := user.GetGroupPath()
			if err != nil {
				return nil, err
			}

			groupPath, err = fs.RootPath(dir, groupPath)
			if err != nil {
				return nil, err
			}

			gfile, err := os.Open(groupPath)
			if err != nil {
				return nil, err
			}
			defer gfile.Close()

			groups, err := user.ParseGroupFilter(gfile, func(g user.Group) bool {
				return g.Name == u.ByName.Name
			})
			if err != nil {
				return nil, err
			}

			if len(groups) > 0 {
				us.Gid = groups[0].Gid
			}
		case *pb.UserOpt_ByID:
			us.Gid = int(u.ByID)
		}
	}

	return &us, nil
}
vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go (generated, vendored, new file, 14 lines added)
@@ -0,0 +1,14 @@
// +build !linux

package file

import (
	"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
	"github.com/moby/buildkit/solver/pb"
	"github.com/pkg/errors"
	copy "github.com/tonistiigi/fsutil/copy"
)

func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error) {
	return nil, errors.New("only implemented in linux")
}
vendor/github.com/moby/buildkit/solver/llbsolver/ops/build.go (generated, vendored, new file, 141 lines added)
@@ -0,0 +1,141 @@
package ops

import (
	"context"
	"encoding/json"
	"os"

	"github.com/containerd/continuity/fs"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/frontend"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/solver/llbsolver"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/worker"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

const buildCacheType = "buildkit.build.v0"

type buildOp struct {
	op *pb.BuildOp
	b  frontend.FrontendLLBBridge
	v  solver.Vertex
}

func NewBuildOp(v solver.Vertex, op *pb.Op_Build, b frontend.FrontendLLBBridge, _ worker.Worker) (solver.Op, error) {
	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
		return nil, err
	}
	return &buildOp{
		op: op.Build,
		b:  b,
		v:  v,
	}, nil
}

func (b *buildOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) {
	dt, err := json.Marshal(struct {
		Type string
		Exec *pb.BuildOp
	}{
		Type: buildCacheType,
		Exec: b.op,
	})
	if err != nil {
		return nil, false, err
	}

	return &solver.CacheMap{
		Digest: digest.FromBytes(dt),
		Deps: make([]struct {
			Selector          digest.Digest
			ComputeDigestFunc solver.ResultBasedCacheFunc
		}, len(b.v.Inputs())),
	}, true, nil
}

func (b *buildOp) Exec(ctx context.Context, inputs []solver.Result) (outputs []solver.Result, retErr error) {
	if b.op.Builder != pb.LLBBuilder {
		return nil, errors.Errorf("only LLB builder is currently allowed")
	}

	builderInputs := b.op.Inputs
	llbDef, ok := builderInputs[pb.LLBDefinitionInput]
	if !ok {
		return nil, errors.Errorf("no llb definition input %s found", pb.LLBDefinitionInput)
	}

	i := int(llbDef.Input)
	if i >= len(inputs) {
		return nil, errors.Errorf("invalid index %v", i) // TODO: this should be validated before
	}
	inp := inputs[i]

	ref, ok := inp.Sys().(*worker.WorkerRef)
	if !ok {
		return nil, errors.Errorf("invalid reference for build %T", inp.Sys())
	}

	mount, err := ref.ImmutableRef.Mount(ctx, true)
	if err != nil {
		return nil, err
	}

	lm := snapshot.LocalMounter(mount)

	root, err := lm.Mount()
	if err != nil {
		return nil, err
	}

	defer func() {
		if retErr != nil && lm != nil {
			lm.Unmount()
		}
	}()

	fn := pb.LLBDefaultDefinitionFile
	if override, ok := b.op.Attrs[pb.AttrLLBDefinitionFilename]; ok {
		fn = override
	}

	newfn, err := fs.RootPath(root, fn)
	if err != nil {
		return nil, errors.Wrapf(err, "working dir %s points to invalid target", fn)
	}

	f, err := os.Open(newfn)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to open %s", newfn)
	}

	def, err := llb.ReadFrom(f)
	if err != nil {
		f.Close()
		return nil, err
	}
	f.Close()
	lm.Unmount()
	lm = nil

	newRes, err := b.b.Solve(ctx, frontend.SolveRequest{
		Definition: def.ToPB(),
	})
	if err != nil {
		return nil, err
	}

	for _, r := range newRes.Refs {
		r.Release(context.TODO())
	}

	r, err := newRes.Ref.Result(ctx)
	if err != nil {
		return nil, err
	}

	return []solver.Result{r}, err
}
vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go (generated, vendored, new file, 905 lines added)
@@ -0,0 +1,905 @@
|
||||
package ops
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/locker"
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/cache/metadata"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/executor"
|
||||
"github.com/moby/buildkit/identity"
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/session/secrets"
|
||||
"github.com/moby/buildkit/session/sshforward"
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
"github.com/moby/buildkit/solver"
|
||||
"github.com/moby/buildkit/solver/llbsolver"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/progress/logs"
|
||||
utilsystem "github.com/moby/buildkit/util/system"
|
||||
"github.com/moby/buildkit/worker"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const execCacheType = "buildkit.exec.v0"
|
||||
|
||||
type execOp struct {
|
||||
op *pb.ExecOp
|
||||
cm cache.Manager
|
||||
sm *session.Manager
|
||||
md *metadata.Store
|
||||
exec executor.Executor
|
||||
w worker.Worker
|
||||
platform *pb.Platform
|
||||
numInputs int
|
||||
|
||||
cacheMounts map[string]*cacheRefShare
|
||||
cacheMountsMu sync.Mutex
|
||||
}
|
||||
|
||||
func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) {
|
||||
if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &execOp{
|
||||
op: op.Exec,
|
||||
cm: cm,
|
||||
sm: sm,
|
||||
md: md,
|
||||
exec: exec,
|
||||
numInputs: len(v.Inputs()),
|
||||
w: w,
|
||||
platform: platform,
|
||||
cacheMounts: map[string]*cacheRefShare{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func cloneExecOp(old *pb.ExecOp) pb.ExecOp {
|
||||
n := *old
|
||||
meta := *n.Meta
|
||||
meta.ExtraHosts = nil
|
||||
for i := range n.Meta.ExtraHosts {
|
||||
h := *n.Meta.ExtraHosts[i]
|
||||
meta.ExtraHosts = append(meta.ExtraHosts, &h)
|
||||
}
|
||||
n.Meta = &meta
|
||||
n.Mounts = nil
|
||||
for i := range n.Mounts {
|
||||
m := *n.Mounts[i]
|
||||
n.Mounts = append(n.Mounts, &m)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (e *execOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) {
|
||||
op := cloneExecOp(e.op)
|
||||
for i := range op.Meta.ExtraHosts {
|
||||
h := op.Meta.ExtraHosts[i]
|
||||
h.IP = ""
|
||||
op.Meta.ExtraHosts[i] = h
|
||||
}
|
||||
for i := range op.Mounts {
|
||||
op.Mounts[i].Selector = ""
|
||||
}
|
||||
op.Meta.ProxyEnv = nil
|
||||
|
||||
p := platforms.DefaultSpec()
|
||||
if e.platform != nil {
|
||||
p = specs.Platform{
|
||||
OS: e.platform.OS,
|
||||
Architecture: e.platform.Architecture,
|
||||
Variant: e.platform.Variant,
|
||||
}
|
||||
}
|
||||
|
||||
dt, err := json.Marshal(struct {
|
||||
Type string
|
||||
Exec *pb.ExecOp
|
||||
OS string
|
||||
Arch string
|
||||
Variant string `json:",omitempty"`
|
||||
}{
|
||||
Type: execCacheType,
|
||||
Exec: &op,
|
||||
OS: p.OS,
|
||||
Arch: p.Architecture,
|
||||
Variant: p.Variant,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
cm := &solver.CacheMap{
|
||||
Digest: digest.FromBytes(dt),
|
||||
Deps: make([]struct {
|
||||
Selector digest.Digest
|
||||
ComputeDigestFunc solver.ResultBasedCacheFunc
|
||||
}, e.numInputs),
|
||||
}
|
||||
|
||||
deps, err := e.getMountDeps()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
for i, dep := range deps {
|
||||
if len(dep.Selectors) != 0 {
|
||||
dgsts := make([][]byte, 0, len(dep.Selectors))
|
||||
for _, p := range dep.Selectors {
|
||||
dgsts = append(dgsts, []byte(p))
|
||||
}
|
||||
cm.Deps[i].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0}))
|
||||
}
|
||||
if !dep.NoContentBasedHash {
|
||||
cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors)))
|
||||
}
|
||||
}
|
||||
|
||||
return cm, true, nil
|
||||
}
|
||||
|
||||
func dedupePaths(inp []string) []string {
|
||||
old := make(map[string]struct{}, len(inp))
|
||||
for _, p := range inp {
|
||||
old[p] = struct{}{}
|
||||
}
|
||||
paths := make([]string, 0, len(old))
|
||||
for p1 := range old {
|
||||
var skip bool
|
||||
for p2 := range old {
|
||||
if p1 != p2 && strings.HasPrefix(p1, p2+"/") {
|
||||
skip = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !skip {
|
||||
paths = append(paths, p1)
|
||||
}
|
||||
}
|
||||
sort.Slice(paths, func(i, j int) bool {
|
||||
return paths[i] < paths[j]
|
||||
})
|
||||
return paths
|
||||
}
|
||||
|
||||
func toSelectors(p []string) []llbsolver.Selector {
|
||||
sel := make([]llbsolver.Selector, 0, len(p))
|
||||
for _, p := range p {
|
||||
sel = append(sel, llbsolver.Selector{Path: p, FollowLinks: true})
|
||||
}
|
||||
return sel
|
||||
}
|
||||
|
||||
type dep struct {
|
||||
Selectors []string
|
||||
NoContentBasedHash bool
|
||||
}
|
||||
|
||||
func (e *execOp) getMountDeps() ([]dep, error) {
|
||||
deps := make([]dep, e.numInputs)
|
||||
for _, m := range e.op.Mounts {
|
||||
if m.Input == pb.Empty {
|
||||
continue
|
||||
}
|
||||
if int(m.Input) >= len(deps) {
|
||||
return nil, errors.Errorf("invalid mountinput %v", m)
|
||||
}
|
||||
|
||||
sel := m.Selector
|
||||
if sel != "" {
|
||||
sel = path.Join("/", sel)
|
||||
deps[m.Input].Selectors = append(deps[m.Input].Selectors, sel)
|
||||
}
|
||||
|
||||
if (!m.Readonly || m.Dest == pb.RootMount) && m.Output != -1 { // exclude read-only rootfs && read-write mounts
|
||||
deps[m.Input].NoContentBasedHash = true
|
||||
}
|
||||
}
|
||||
return deps, nil
|
||||
}
|
||||
|
||||
func (e *execOp) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, m *pb.Mount, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
|
||||
g := &cacheRefGetter{
|
||||
locker: &e.cacheMountsMu,
|
||||
cacheMounts: e.cacheMounts,
|
||||
cm: e.cm,
|
||||
md: e.md,
|
||||
globalCacheRefs: sharedCacheRefs,
|
||||
name: fmt.Sprintf("cached mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")),
|
||||
}
|
||||
return g.getRefCacheDir(ctx, ref, id, sharing)
|
||||
}
|
||||
|
||||
type cacheRefGetter struct {
|
||||
locker sync.Locker
|
||||
cacheMounts map[string]*cacheRefShare
|
||||
cm cache.Manager
|
||||
md *metadata.Store
|
||||
globalCacheRefs *cacheRefs
|
||||
name string
|
||||
}
|
||||
|
||||
func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) {
|
||||
key := "cache-dir:" + id
|
||||
if ref != nil {
|
||||
key += ":" + ref.ID()
|
||||
}
|
||||
mu := g.locker
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
if ref, ok := g.cacheMounts[key]; ok {
|
||||
return ref.clone(), nil
|
||||
}
|
||||
defer func() {
|
||||
if err == nil {
|
||||
share := &cacheRefShare{MutableRef: mref, refs: map[*cacheRef]struct{}{}}
|
||||
g.cacheMounts[key] = share
|
||||
mref = share.clone()
|
||||
}
|
||||
}()
|
||||
|
||||
switch sharing {
|
||||
case pb.CacheSharingOpt_SHARED:
|
||||
return g.globalCacheRefs.get(key, func() (cache.MutableRef, error) {
|
||||
return g.getRefCacheDirNoCache(ctx, key, ref, id, false)
|
||||
})
|
||||
case pb.CacheSharingOpt_PRIVATE:
|
||||
return g.getRefCacheDirNoCache(ctx, key, ref, id, false)
|
||||
case pb.CacheSharingOpt_LOCKED:
|
||||
return g.getRefCacheDirNoCache(ctx, key, ref, id, true)
|
||||
default:
|
||||
return nil, errors.Errorf("invalid cache sharing option: %s", sharing.String())
|
||||
}
|
||||
}
|
||||
|
||||
func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, ref cache.ImmutableRef, id string, block bool) (cache.MutableRef, error) {
|
||||
makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
|
||||
return g.cm.New(ctx, ref, cache.WithRecordType(client.UsageRecordTypeCacheMount), cache.WithDescription(g.name), cache.CachePolicyRetain)
|
||||
}
|
||||
|
||||
cacheRefsLocker.Lock(key)
|
||||
defer cacheRefsLocker.Unlock(key)
|
||||
for {
|
||||
sis, err := g.md.Search(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
locked := false
|
||||
for _, si := range sis {
|
||||
if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil {
|
||||
logrus.Debugf("reusing ref for cache dir: %s", mRef.ID())
|
||||
return mRef, nil
|
||||
} else if errors.Cause(err) == cache.ErrLocked {
|
||||
locked = true
|
||||
}
|
||||
}
|
||||
if block && locked {
|
||||
cacheRefsLocker.Unlock(key)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
cacheRefsLocker.Lock(key)
|
||||
return nil, ctx.Err()
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
cacheRefsLocker.Lock(key)
|
||||
}
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
mRef, err := makeMutable(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
si, _ := g.md.Get(mRef.ID())
|
||||
v, err := metadata.NewValue(key)
|
||||
if err != nil {
|
||||
mRef.Release(context.TODO())
|
||||
return nil, err
|
||||
}
|
||||
v.Index = key
|
||||
if err := si.Update(func(b *bolt.Bucket) error {
|
||||
return si.SetValue(b, key, v)
|
||||
}); err != nil {
|
||||
mRef.Release(context.TODO())
|
||||
return nil, err
|
||||
}
|
||||
return mRef, nil
|
||||
}
|
||||
|
||||
func (e *execOp) getSSHMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) {
|
||||
sessionID := session.FromContext(ctx)
|
||||
if sessionID == "" {
|
||||
return nil, errors.New("could not access local files without session")
|
||||
}
|
||||
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
caller, err := e.sm.Get(timeoutCtx, sessionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := sshforward.CheckSSHID(ctx, caller, m.SSHOpt.ID); err != nil {
|
||||
if m.SSHOpt.Optional {
|
||||
return nil, nil
|
||||
}
|
||||
if st, ok := status.FromError(errors.Cause(err)); ok && st.Code() == codes.Unimplemented {
|
||||
return nil, errors.Errorf("no SSH key %q forwarded from the client", m.SSHOpt.ID)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &sshMount{mount: m, caller: caller, idmap: e.cm.IdentityMapping()}, nil
|
||||
}
|
||||
|
||||
type sshMount struct {
|
||||
mount *pb.Mount
|
||||
caller session.Caller
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (sm *sshMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
|
||||
return &sshMountInstance{sm: sm, idmap: sm.idmap}, nil
|
||||
}
|
||||
|
||||
type sshMountInstance struct {
|
||||
sm *sshMount
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (sm *sshMountInstance) Mount() ([]mount.Mount, func() error, error) {
|
||||
ctx, cancel := context.WithCancel(context.TODO())
|
||||
|
||||
uid := int(sm.sm.mount.SSHOpt.Uid)
|
||||
gid := int(sm.sm.mount.SSHOpt.Gid)
|
||||
|
||||
if sm.idmap != nil {
|
||||
identity, err := sm.idmap.ToHost(idtools.Identity{
|
||||
UID: uid,
|
||||
GID: gid,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
uid = identity.UID
|
||||
gid = identity.GID
|
||||
}
|
||||
|
||||
sock, cleanup, err := sshforward.MountSSHSocket(ctx, sm.sm.caller, sshforward.SocketOpt{
|
||||
ID: sm.sm.mount.SSHOpt.ID,
|
||||
UID: uid,
|
||||
GID: gid,
|
||||
Mode: int(sm.sm.mount.SSHOpt.Mode & 0777),
|
||||
})
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, nil, err
|
||||
}
|
||||
release := func() error {
|
||||
var err error
|
||||
if cleanup != nil {
|
||||
err = cleanup()
|
||||
}
|
||||
cancel()
|
||||
return err
|
||||
}
|
||||
|
||||
return []mount.Mount{{
|
||||
Type: "bind",
|
||||
Source: sock,
|
||||
Options: []string{"rbind"},
|
||||
}}, release, nil
|
||||
}
|
||||
|
||||
func (sm *sshMountInstance) IdentityMapping() *idtools.IdentityMapping {
|
||||
return sm.idmap
|
||||
}
|
||||
|
||||
func (e *execOp) getSecretMountable(ctx context.Context, m *pb.Mount) (cache.Mountable, error) {
|
||||
if m.SecretOpt == nil {
|
||||
return nil, errors.Errorf("invalid sercet mount options")
|
||||
}
|
||||
sopt := *m.SecretOpt
|
||||
|
||||
id := sopt.ID
|
||||
if id == "" {
|
||||
return nil, errors.Errorf("secret ID missing from mount options")
|
||||
}
|
||||
|
||||
sessionID := session.FromContext(ctx)
|
||||
if sessionID == "" {
|
||||
return nil, errors.New("could not access local files without session")
|
||||
}
|
||||
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
caller, err := e.sm.Get(timeoutCtx, sessionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dt, err := secrets.GetSecret(ctx, caller, id)
|
||||
if err != nil {
|
||||
if errors.Cause(err) == secrets.ErrNotFound && m.SecretOpt.Optional {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &secretMount{mount: m, data: dt, idmap: e.cm.IdentityMapping()}, nil
|
||||
}
|
||||
|
||||
type secretMount struct {
|
||||
mount *pb.Mount
|
||||
data []byte
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (sm *secretMount) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
|
||||
return &secretMountInstance{sm: sm, idmap: sm.idmap}, nil
|
||||
}
|
||||
|
||||
type secretMountInstance struct {
|
||||
sm *secretMount
|
||||
root string
|
||||
idmap *idtools.IdentityMapping
|
||||
}
|
||||
|
||||
func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) {
|
||||
dir, err := ioutil.TempDir("", "buildkit-secrets")
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "failed to create temp dir")
|
||||
}
|
||||
cleanupDir := func() error {
|
||||
return os.RemoveAll(dir)
|
||||
}
|
||||
|
||||
if err := os.Chmod(dir, 0711); err != nil {
|
||||
cleanupDir()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
tmpMount := mount.Mount{
|
||||
Type: "tmpfs",
|
||||
Source: "tmpfs",
|
||||
Options: []string{"nodev", "nosuid", "noexec", fmt.Sprintf("uid=%d,gid=%d", os.Geteuid(), os.Getegid())},
|
||||
}
|
||||
|
||||
if system.RunningInUserNS() {
|
||||
tmpMount.Options = nil
|
||||
}
|
||||
|
||||
if err := mount.All([]mount.Mount{tmpMount}, dir); err != nil {
|
||||
cleanupDir()
|
||||
return nil, nil, errors.Wrap(err, "unable to setup secret mount")
|
||||
}
|
||||
sm.root = dir
|
||||
|
||||
cleanup := func() error {
|
||||
if err := mount.Unmount(dir, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
return cleanupDir()
|
||||
}
|
||||
|
||||
randID := identity.NewID()
|
||||
fp := filepath.Join(dir, randID)
|
||||
if err := ioutil.WriteFile(fp, sm.sm.data, 0600); err != nil {
|
||||
cleanup()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
uid := int(sm.sm.mount.SecretOpt.Uid)
|
||||
gid := int(sm.sm.mount.SecretOpt.Gid)
|
||||
|
||||
if sm.idmap != nil {
|
||||
identity, err := sm.idmap.ToHost(idtools.Identity{
|
||||
UID: uid,
|
||||
GID: gid,
|
||||
})
|
||||
if err != nil {
|
||||
cleanup()
|
||||
return nil, nil, err
|
||||
}
|
||||
uid = identity.UID
|
||||
gid = identity.GID
|
||||
}
|
||||
|
||||
if err := os.Chown(fp, uid, gid); err != nil {
|
||||
cleanup()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if err := os.Chmod(fp, os.FileMode(sm.sm.mount.SecretOpt.Mode&0777)); err != nil {
|
||||
cleanup()
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return []mount.Mount{{
|
||||
Type: "bind",
|
||||
Source: fp,
|
||||
Options: []string{"ro", "rbind", "nodev", "nosuid", "noexec"},
|
||||
}}, cleanup, nil
|
||||
}
|
||||
|
||||
func (sm *secretMountInstance) IdentityMapping() *idtools.IdentityMapping {
|
||||
return sm.idmap
|
||||
}
|
||||
|
||||
func addDefaultEnvvar(env []string, k, v string) []string {
|
||||
for _, e := range env {
|
||||
if strings.HasPrefix(e, k+"=") {
|
||||
return env
|
||||
}
|
||||
}
|
||||
return append(env, k+"="+v)
|
||||
}
|
||||
|
||||
func (e *execOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) {
|
||||
var mounts []executor.Mount
|
||||
var root cache.Mountable
|
||||
var readonlyRootFS bool
|
||||
|
||||
var outputs []cache.Ref
|
||||
|
||||
defer func() {
|
||||
for _, o := range outputs {
|
||||
if o != nil {
|
||||
go o.Release(context.TODO())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// loop over all mounts, fill in mounts, root and outputs
|
||||
for _, m := range e.op.Mounts {
|
||||
var mountable cache.Mountable
|
||||
var ref cache.ImmutableRef
|
||||
|
||||
if m.Dest == pb.RootMount && m.MountType != pb.MountType_BIND {
|
||||
return nil, errors.Errorf("invalid mount type %s for %s", m.MountType.String(), m.Dest)
|
||||
}
|
||||
|
||||
// if mount is based on input validate and load it
|
||||
if m.Input != pb.Empty {
|
||||
if int(m.Input) > len(inputs) {
|
||||
return nil, errors.Errorf("missing input %d", m.Input)
|
||||
}
|
||||
inp := inputs[int(m.Input)]
|
||||
workerRef, ok := inp.Sys().(*worker.WorkerRef)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid reference for exec %T", inp.Sys())
|
||||
}
|
||||
ref = workerRef.ImmutableRef
|
||||
mountable = ref
|
||||
}
|
||||
|
||||
makeMutable := func(ref cache.ImmutableRef) (cache.MutableRef, error) {
|
||||
desc := fmt.Sprintf("mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " "))
|
||||
return e.cm.New(ctx, ref, cache.WithDescription(desc))
|
||||
}
|
||||
|
||||
switch m.MountType {
|
||||
case pb.MountType_BIND:
|
||||
// if mount creates an output
|
||||
if m.Output != pb.SkipOutput {
|
||||
// it it is readonly and not root then output is the input
|
||||
if m.Readonly && ref != nil && m.Dest != pb.RootMount {
|
||||
outputs = append(outputs, ref.Clone())
|
||||
} else {
|
||||
// otherwise output and mount is the mutable child
|
||||
active, err := makeMutable(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
outputs = append(outputs, active)
|
||||
mountable = active
|
||||
}
|
||||
} else if (!m.Readonly || ref == nil) && m.Dest != pb.RootMount {
|
||||
// this case is empty readonly scratch without output that is not really useful for anything but don't error
|
||||
active, err := makeMutable(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer active.Release(context.TODO())
|
||||
mountable = active
|
||||
}
|
||||
|
||||
case pb.MountType_CACHE:
|
||||
if m.CacheOpt == nil {
|
||||
return nil, errors.Errorf("missing cache mount options")
|
||||
}
|
||||
mRef, err := e.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m, m.CacheOpt.Sharing)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mountable = mRef
|
||||
defer func() {
|
||||
go mRef.Release(context.TODO())
|
||||
}()
|
||||
if m.Output != pb.SkipOutput && ref != nil {
|
||||
outputs = append(outputs, ref.Clone())
|
||||
}
|
||||
|
||||
case pb.MountType_TMPFS:
|
||||
mountable = newTmpfs(e.cm.IdentityMapping())
|
||||
|
||||
case pb.MountType_SECRET:
|
||||
secretMount, err := e.getSecretMountable(ctx, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if secretMount == nil {
|
||||
continue
|
||||
}
|
||||
mountable = secretMount
|
||||
|
||||
case pb.MountType_SSH:
|
||||
sshMount, err := e.getSSHMountable(ctx, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if sshMount == nil {
|
||||
continue
|
||||
}
|
||||
mountable = sshMount
|
||||
|
||||
default:
|
||||
return nil, errors.Errorf("mount type %s not implemented", m.MountType)
|
||||
}
|
||||
|
||||
// validate that there is a mount
|
||||
if mountable == nil {
|
||||
return nil, errors.Errorf("mount %s has no input", m.Dest)
|
||||
}
|
||||
|
||||
// if dest is root we need mutable ref even if there is no output
|
||||
if m.Dest == pb.RootMount {
|
||||
root = mountable
|
||||
readonlyRootFS = m.Readonly
|
||||
if m.Output == pb.SkipOutput && readonlyRootFS {
|
||||
active, err := makeMutable(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
go active.Release(context.TODO())
|
||||
}()
|
||||
root = active
|
||||
}
|
||||
} else {
|
||||
mounts = append(mounts, executor.Mount{Src: mountable, Dest: m.Dest, Readonly: m.Readonly, Selector: m.Selector})
|
||||
}
|
||||
}
|
||||
|
||||
// sort mounts so parents are mounted first
|
||||
	sort.Slice(mounts, func(i, j int) bool {
		return mounts[i].Dest < mounts[j].Dest
	})

	extraHosts, err := parseExtraHosts(e.op.Meta.ExtraHosts)
	if err != nil {
		return nil, err
	}

	meta := executor.Meta{
		Args:           e.op.Meta.Args,
		Env:            e.op.Meta.Env,
		Cwd:            e.op.Meta.Cwd,
		User:           e.op.Meta.User,
		ReadonlyRootFS: readonlyRootFS,
		ExtraHosts:     extraHosts,
		NetMode:        e.op.Network,
		SecurityMode:   e.op.Security,
	}

	if e.op.Meta.ProxyEnv != nil {
		meta.Env = append(meta.Env, proxyEnvList(e.op.Meta.ProxyEnv)...)
	}
	meta.Env = addDefaultEnvvar(meta.Env, "PATH", utilsystem.DefaultPathEnv)

	stdout, stderr := logs.NewLogStreams(ctx, os.Getenv("BUILDKIT_DEBUG_EXEC_OUTPUT") == "1")
	defer stdout.Close()
	defer stderr.Close()

	if err := e.exec.Exec(ctx, meta, root, mounts, nil, stdout, stderr); err != nil {
		return nil, errors.Wrapf(err, "executor failed running %v", meta.Args)
	}

	refs := []solver.Result{}
	for i, out := range outputs {
		if mutable, ok := out.(cache.MutableRef); ok {
			ref, err := mutable.Commit(ctx)
			if err != nil {
				return nil, errors.Wrapf(err, "error committing %s", mutable.ID())
			}
			refs = append(refs, worker.NewWorkerRefResult(ref, e.w))
		} else {
			refs = append(refs, worker.NewWorkerRefResult(out.(cache.ImmutableRef), e.w))
		}
		outputs[i] = nil
	}
	return refs, nil
}

func proxyEnvList(p *pb.ProxyEnv) []string {
	out := []string{}
	if v := p.HttpProxy; v != "" {
		out = append(out, "HTTP_PROXY="+v, "http_proxy="+v)
	}
	if v := p.HttpsProxy; v != "" {
		out = append(out, "HTTPS_PROXY="+v, "https_proxy="+v)
	}
	if v := p.FtpProxy; v != "" {
		out = append(out, "FTP_PROXY="+v, "ftp_proxy="+v)
	}
	if v := p.NoProxy; v != "" {
		out = append(out, "NO_PROXY="+v, "no_proxy="+v)
	}
	return out
}

func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable {
	return &tmpfs{idmap: idmap}
}

type tmpfs struct {
	idmap *idtools.IdentityMapping
}

func (f *tmpfs) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	return &tmpfsMount{readonly: readonly, idmap: f.idmap}, nil
}

type tmpfsMount struct {
	readonly bool
	idmap    *idtools.IdentityMapping
}

func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) {
	opt := []string{"nosuid"}
	if m.readonly {
		opt = append(opt, "ro")
	}
	return []mount.Mount{{
		Type:    "tmpfs",
		Source:  "tmpfs",
		Options: opt,
	}}, func() error { return nil }, nil
}

func (m *tmpfsMount) IdentityMapping() *idtools.IdentityMapping {
	return m.idmap
}

var cacheRefsLocker = locker.New()
var sharedCacheRefs = &cacheRefs{}

type cacheRefs struct {
	mu     sync.Mutex
	shares map[string]*cacheRefShare
}

// ClearActiveCacheMounts clears shared cache mounts currently in use.
// Caller needs to hold CacheMountsLocker before calling.
func ClearActiveCacheMounts() {
	sharedCacheRefs.shares = nil
}

func CacheMountsLocker() sync.Locker {
	return &sharedCacheRefs.mu
}

func (r *cacheRefs) get(key string, fn func() (cache.MutableRef, error)) (cache.MutableRef, error) {
	r.mu.Lock()
	defer r.mu.Unlock()

	if r.shares == nil {
		r.shares = map[string]*cacheRefShare{}
	}

	share, ok := r.shares[key]
	if ok {
		return share.clone(), nil
	}

	mref, err := fn()
	if err != nil {
		return nil, err
	}

	share = &cacheRefShare{MutableRef: mref, main: r, key: key, refs: map[*cacheRef]struct{}{}}
	r.shares[key] = share
	return share.clone(), nil
}

type cacheRefShare struct {
	cache.MutableRef
	mu   sync.Mutex
	refs map[*cacheRef]struct{}
	main *cacheRefs
	key  string
}

func (r *cacheRefShare) clone() cache.MutableRef {
	cacheRef := &cacheRef{cacheRefShare: r}
	if cacheRefCloneHijack != nil {
		cacheRefCloneHijack()
	}
	r.mu.Lock()
	r.refs[cacheRef] = struct{}{}
	r.mu.Unlock()
	return cacheRef
}

func (r *cacheRefShare) release(ctx context.Context) error {
	if r.main != nil {
		delete(r.main.shares, r.key)
	}
	return r.MutableRef.Release(ctx)
}

var cacheRefReleaseHijack func()
var cacheRefCloneHijack func()

type cacheRef struct {
	*cacheRefShare
}

func (r *cacheRef) Release(ctx context.Context) error {
	if r.main != nil {
		r.main.mu.Lock()
		defer r.main.mu.Unlock()
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	delete(r.refs, r)
	if len(r.refs) == 0 {
		if cacheRefReleaseHijack != nil {
			cacheRefReleaseHijack()
		}
		return r.release(ctx)
	}
	return nil
}

func parseExtraHosts(ips []*pb.HostIP) ([]executor.HostIP, error) {
	out := make([]executor.HostIP, len(ips))
	for i, hip := range ips {
		ip := net.ParseIP(hip.IP)
		if ip == nil {
			return nil, errors.Errorf("failed to parse IP %s", hip.IP)
		}
		out[i] = executor.HostIP{
			IP:   ip,
			Host: hip.Host,
		}
	}
	return out, nil
}
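The cacheRefs/cacheRefShare/cacheRef types above implement a small reference-counting wrapper so that several concurrent exec ops can share one mutable cache mount and the underlying ref is released only when the last user lets go. Below is a minimal, dependency-free sketch of that pattern (not part of the vendored source; all names are illustrative, and a plain struct stands in for cache.MutableRef):

package main

import (
	"fmt"
	"sync"
)

// resource stands in for cache.MutableRef in this sketch.
type resource struct{ id string }

func (r *resource) close() { fmt.Println("released", r.id) }

type share struct {
	res  *resource
	mu   sync.Mutex
	refs int
}

// clone hands out one more handle to the shared resource.
func (s *share) clone() *handle {
	s.mu.Lock()
	s.refs++
	s.mu.Unlock()
	return &handle{share: s}
}

type handle struct{ *share }

// release drops one handle; the resource is closed only when the last
// handle is gone, mirroring cacheRef.Release above.
func (h *handle) release() {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.refs--
	if h.refs == 0 {
		h.res.close()
	}
}

func main() {
	s := &share{res: &resource{id: "cache-mount"}}
	a, b := s.clone(), s.clone()
	a.release() // still held by b
	b.release() // prints "released cache-mount"
}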
583
vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go
generated
vendored
Normal file
@@ -0,0 +1,583 @@
|
||||
package ops
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/cache/metadata"
|
||||
"github.com/moby/buildkit/solver"
|
||||
"github.com/moby/buildkit/solver/llbsolver"
|
||||
"github.com/moby/buildkit/solver/llbsolver/file"
|
||||
"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/flightcontrol"
|
||||
"github.com/moby/buildkit/worker"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const fileCacheType = "buildkit.file.v0"
|
||||
|
||||
type fileOp struct {
|
||||
op *pb.FileOp
|
||||
md *metadata.Store
|
||||
w worker.Worker
|
||||
solver *FileOpSolver
|
||||
numInputs int
|
||||
}
|
||||
|
||||
func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, md *metadata.Store, w worker.Worker) (solver.Op, error) {
|
||||
if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &fileOp{
|
||||
op: op.File,
|
||||
md: md,
|
||||
numInputs: len(v.Inputs()),
|
||||
w: w,
|
||||
solver: NewFileOpSolver(&file.Backend{}, file.NewRefManager(cm)),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) {
|
||||
selectors := map[int]map[llbsolver.Selector]struct{}{}
|
||||
invalidSelectors := map[int]struct{}{}
|
||||
|
||||
actions := make([][]byte, 0, len(f.op.Actions))
|
||||
|
||||
markInvalid := func(idx pb.InputIndex) {
|
||||
if idx != -1 {
|
||||
invalidSelectors[int(idx)] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for _, action := range f.op.Actions {
|
||||
var dt []byte
|
||||
var err error
|
||||
switch a := action.Action.(type) {
|
||||
case *pb.FileAction_Mkdir:
|
||||
p := *a.Mkdir
|
||||
markInvalid(action.Input)
|
||||
processOwner(p.Owner, selectors)
|
||||
dt, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
case *pb.FileAction_Mkfile:
|
||||
p := *a.Mkfile
|
||||
markInvalid(action.Input)
|
||||
processOwner(p.Owner, selectors)
|
||||
dt, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
case *pb.FileAction_Rm:
|
||||
p := *a.Rm
|
||||
markInvalid(action.Input)
|
||||
dt, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
case *pb.FileAction_Copy:
|
||||
p := *a.Copy
|
||||
markInvalid(action.Input)
|
||||
processOwner(p.Owner, selectors)
|
||||
if action.SecondaryInput != -1 && int(action.SecondaryInput) < f.numInputs {
|
||||
addSelector(selectors, int(action.SecondaryInput), p.Src, p.AllowWildcard, p.FollowSymlink)
|
||||
p.Src = path.Base(p.Src)
|
||||
}
|
||||
dt, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
|
||||
actions = append(actions, dt)
|
||||
}
|
||||
|
||||
dt, err := json.Marshal(struct {
|
||||
Type string
|
||||
Actions [][]byte
|
||||
}{
|
||||
Type: fileCacheType,
|
||||
Actions: actions,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
cm := &solver.CacheMap{
|
||||
Digest: digest.FromBytes(dt),
|
||||
Deps: make([]struct {
|
||||
Selector digest.Digest
|
||||
ComputeDigestFunc solver.ResultBasedCacheFunc
|
||||
}, f.numInputs),
|
||||
}
|
||||
|
||||
for idx, m := range selectors {
|
||||
if _, ok := invalidSelectors[idx]; ok {
|
||||
continue
|
||||
}
|
||||
dgsts := make([][]byte, 0, len(m))
|
||||
for k := range m {
|
||||
dgsts = append(dgsts, []byte(k.Path))
|
||||
}
|
||||
sort.Slice(dgsts, func(i, j int) bool {
|
||||
return bytes.Compare(dgsts[i], dgsts[j]) > 0
|
||||
})
|
||||
cm.Deps[idx].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0}))
|
||||
|
||||
cm.Deps[idx].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupeSelectors(m))
|
||||
}
|
||||
|
||||
return cm, true, nil
|
||||
}
|
||||
|
||||
func (f *fileOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) {
|
||||
inpRefs := make([]fileoptypes.Ref, 0, len(inputs))
|
||||
for _, inp := range inputs {
|
||||
workerRef, ok := inp.Sys().(*worker.WorkerRef)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid reference for exec %T", inp.Sys())
|
||||
}
|
||||
inpRefs = append(inpRefs, workerRef.ImmutableRef)
|
||||
}
|
||||
|
||||
outs, err := f.solver.Solve(ctx, inpRefs, f.op.Actions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
outResults := make([]solver.Result, 0, len(outs))
|
||||
for _, out := range outs {
|
||||
outResults = append(outResults, worker.NewWorkerRefResult(out.(cache.ImmutableRef), f.w))
|
||||
}
|
||||
|
||||
return outResults, nil
|
||||
}
|
||||
|
||||
func addSelector(m map[int]map[llbsolver.Selector]struct{}, idx int, sel string, wildcard, followLinks bool) {
|
||||
mm, ok := m[idx]
|
||||
if !ok {
|
||||
mm = map[llbsolver.Selector]struct{}{}
|
||||
m[idx] = mm
|
||||
}
|
||||
s := llbsolver.Selector{Path: sel}
|
||||
|
||||
if wildcard && containsWildcards(sel) {
|
||||
s.Wildcard = true
|
||||
}
|
||||
if followLinks {
|
||||
s.FollowLinks = true
|
||||
}
|
||||
mm[s] = struct{}{}
|
||||
}
|
||||
|
||||
func containsWildcards(name string) bool {
|
||||
isWindows := runtime.GOOS == "windows"
|
||||
for i := 0; i < len(name); i++ {
|
||||
ch := name[i]
|
||||
if ch == '\\' && !isWindows {
|
||||
i++
|
||||
} else if ch == '*' || ch == '?' || ch == '[' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func dedupeSelectors(m map[llbsolver.Selector]struct{}) []llbsolver.Selector {
|
||||
paths := make([]string, 0, len(m))
|
||||
pathsFollow := make([]string, 0, len(m))
|
||||
for sel := range m {
|
||||
if !sel.Wildcard {
|
||||
if sel.FollowLinks {
|
||||
pathsFollow = append(pathsFollow, sel.Path)
|
||||
} else {
|
||||
paths = append(paths, sel.Path)
|
||||
}
|
||||
}
|
||||
}
|
||||
paths = dedupePaths(paths)
|
||||
pathsFollow = dedupePaths(pathsFollow)
|
||||
selectors := make([]llbsolver.Selector, 0, len(m))
|
||||
|
||||
for _, p := range paths {
|
||||
selectors = append(selectors, llbsolver.Selector{Path: p})
|
||||
}
|
||||
for _, p := range pathsFollow {
|
||||
selectors = append(selectors, llbsolver.Selector{Path: p, FollowLinks: true})
|
||||
}
|
||||
|
||||
for sel := range m {
|
||||
if sel.Wildcard {
|
||||
selectors = append(selectors, sel)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(selectors, func(i, j int) bool {
|
||||
return selectors[i].Path < selectors[j].Path
|
||||
})
|
||||
|
||||
return selectors
|
||||
}
|
||||
|
||||
func processOwner(chopt *pb.ChownOpt, selectors map[int]map[llbsolver.Selector]struct{}) error {
|
||||
if chopt == nil {
|
||||
return nil
|
||||
}
|
||||
if chopt.User != nil {
|
||||
if u, ok := chopt.User.User.(*pb.UserOpt_ByName); ok {
|
||||
if u.ByName.Input < 0 {
|
||||
return errors.Errorf("invalid user index %d", u.ByName.Input)
|
||||
}
|
||||
addSelector(selectors, int(u.ByName.Input), "/etc/passwd", false, true)
|
||||
}
|
||||
}
|
||||
if chopt.Group != nil {
|
||||
if u, ok := chopt.Group.User.(*pb.UserOpt_ByName); ok {
|
||||
if u.ByName.Input < 0 {
|
||||
return errors.Errorf("invalid user index %d", u.ByName.Input)
|
||||
}
|
||||
addSelector(selectors, int(u.ByName.Input), "/etc/group", false, true)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewFileOpSolver(b fileoptypes.Backend, r fileoptypes.RefManager) *FileOpSolver {
|
||||
return &FileOpSolver{
|
||||
b: b,
|
||||
r: r,
|
||||
outs: map[int]int{},
|
||||
ins: map[int]input{},
|
||||
}
|
||||
}
|
||||
|
||||
type FileOpSolver struct {
|
||||
b fileoptypes.Backend
|
||||
r fileoptypes.RefManager
|
||||
|
||||
mu sync.Mutex
|
||||
outs map[int]int
|
||||
ins map[int]input
|
||||
g flightcontrol.Group
|
||||
}
|
||||
|
||||
type input struct {
|
||||
requiresCommit bool
|
||||
mount fileoptypes.Mount
|
||||
ref fileoptypes.Ref
|
||||
}
|
||||
|
||||
func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, actions []*pb.FileAction) ([]fileoptypes.Ref, error) {
|
||||
for i, a := range actions {
|
||||
if int(a.Input) < -1 || int(a.Input) >= len(inputs)+len(actions) {
|
||||
return nil, errors.Errorf("invalid input index %d, %d provided", a.Input, len(inputs)+len(actions))
|
||||
}
|
||||
if int(a.SecondaryInput) < -1 || int(a.SecondaryInput) >= len(inputs)+len(actions) {
|
||||
return nil, errors.Errorf("invalid secondary input index %d, %d provided", a.Input, len(inputs))
|
||||
}
|
||||
|
||||
inp, ok := s.ins[int(a.Input)]
|
||||
if ok {
|
||||
inp.requiresCommit = true
|
||||
}
|
||||
s.ins[int(a.Input)] = inp
|
||||
|
||||
inp, ok = s.ins[int(a.SecondaryInput)]
|
||||
if ok {
|
||||
inp.requiresCommit = true
|
||||
}
|
||||
s.ins[int(a.SecondaryInput)] = inp
|
||||
|
||||
if a.Output != -1 {
|
||||
if _, ok := s.outs[int(a.Output)]; ok {
|
||||
return nil, errors.Errorf("duplicate output %d", a.Output)
|
||||
}
|
||||
idx := len(inputs) + i
|
||||
s.outs[int(a.Output)] = idx
|
||||
s.ins[idx] = input{requiresCommit: true}
|
||||
}
|
||||
}
|
||||
|
||||
if len(s.outs) == 0 {
|
||||
return nil, errors.Errorf("no outputs specified")
|
||||
}
|
||||
|
||||
for i := 0; i < len(s.outs); i++ {
|
||||
if _, ok := s.outs[i]; !ok {
|
||||
return nil, errors.Errorf("missing output index %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
for _, in := range s.ins {
|
||||
if in.ref == nil && in.mount != nil {
|
||||
in.mount.Release(context.TODO())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
outs := make([]fileoptypes.Ref, len(s.outs))
|
||||
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
for i, idx := range s.outs {
|
||||
func(i, idx int) {
|
||||
eg.Go(func() error {
|
||||
if err := s.validate(idx, inputs, actions, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
inp, err := s.getInput(ctx, idx, inputs, actions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
outs[i] = inp.ref
|
||||
return nil
|
||||
})
|
||||
}(i, idx)
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
for _, r := range outs {
|
||||
if r != nil {
|
||||
r.Release(context.TODO())
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return outs, nil
|
||||
}
|
||||
|
||||
func (s *FileOpSolver) validate(idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction, loaded []int) error {
|
||||
for _, check := range loaded {
|
||||
if idx == check {
|
||||
return errors.Errorf("loop from index %d", idx)
|
||||
}
|
||||
}
|
||||
if idx < len(inputs) {
|
||||
return nil
|
||||
}
|
||||
loaded = append(loaded, idx)
|
||||
action := actions[idx-len(inputs)]
|
||||
for _, inp := range []int{int(action.Input), int(action.SecondaryInput)} {
|
||||
if err := s.validate(inp, inputs, actions, loaded); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction) (input, error) {
|
||||
inp, err := s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (_ interface{}, err error) {
|
||||
s.mu.Lock()
|
||||
inp := s.ins[idx]
|
||||
s.mu.Unlock()
|
||||
if inp.mount != nil || inp.ref != nil {
|
||||
return inp, nil
|
||||
}
|
||||
|
||||
if idx < len(inputs) {
|
||||
inp.ref = inputs[idx]
|
||||
s.mu.Lock()
|
||||
s.ins[idx] = inp
|
||||
s.mu.Unlock()
|
||||
return inp, nil
|
||||
}
|
||||
|
||||
var inpMount, inpMountSecondary fileoptypes.Mount
|
||||
var toRelease []fileoptypes.Mount
|
||||
var inpMountPrepared bool
|
||||
defer func() {
|
||||
for _, m := range toRelease {
|
||||
m.Release(context.TODO())
|
||||
}
|
||||
if err != nil && inpMount != nil && inpMountPrepared {
|
||||
inpMount.Release(context.TODO())
|
||||
}
|
||||
}()
|
||||
|
||||
action := actions[idx-len(inputs)]
|
||||
|
||||
loadInput := func(ctx context.Context) func() error {
|
||||
return func() error {
|
||||
inp, err := s.getInput(ctx, int(action.Input), inputs, actions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if inp.ref != nil {
|
||||
m, err := s.r.Prepare(ctx, inp.ref, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
inpMount = m
|
||||
inpMountPrepared = true
|
||||
return nil
|
||||
}
|
||||
inpMount = inp.mount
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
loadSecondaryInput := func(ctx context.Context) func() error {
|
||||
return func() error {
|
||||
inp, err := s.getInput(ctx, int(action.SecondaryInput), inputs, actions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if inp.ref != nil {
|
||||
m, err := s.r.Prepare(ctx, inp.ref, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
inpMountSecondary = m
|
||||
toRelease = append(toRelease, m)
|
||||
return nil
|
||||
}
|
||||
inpMountSecondary = inp.mount
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
loadUser := func(ctx context.Context, uopt *pb.UserOpt) (fileoptypes.Mount, error) {
|
||||
if uopt == nil {
|
||||
return nil, nil
|
||||
}
|
||||
switch u := uopt.User.(type) {
|
||||
case *pb.UserOpt_ByName:
|
||||
var m fileoptypes.Mount
|
||||
if u.ByName.Input < 0 {
|
||||
return nil, errors.Errorf("invalid user index: %d", u.ByName.Input)
|
||||
}
|
||||
inp, err := s.getInput(ctx, int(u.ByName.Input), inputs, actions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if inp.ref != nil {
|
||||
mm, err := s.r.Prepare(ctx, inp.ref, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
toRelease = append(toRelease, mm)
|
||||
m = mm
|
||||
} else {
|
||||
m = inp.mount
|
||||
}
|
||||
return m, nil
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
loadOwner := func(ctx context.Context, chopt *pb.ChownOpt) (fileoptypes.Mount, fileoptypes.Mount, error) {
|
||||
if chopt == nil {
|
||||
return nil, nil, nil
|
||||
}
|
||||
um, err := loadUser(ctx, chopt.User)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
gm, err := loadUser(ctx, chopt.Group)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return um, gm, nil
|
||||
}
|
||||
|
||||
if action.Input != -1 && action.SecondaryInput != -1 {
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
eg.Go(loadInput(ctx))
|
||||
eg.Go(loadSecondaryInput(ctx))
|
||||
if err := eg.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if action.Input != -1 {
|
||||
if err := loadInput(ctx)(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if action.SecondaryInput != -1 {
|
||||
if err := loadSecondaryInput(ctx)(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if inpMount == nil {
|
||||
m, err := s.r.Prepare(ctx, nil, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inpMount = m
|
||||
inpMountPrepared = true
|
||||
}
|
||||
|
||||
switch a := action.Action.(type) {
|
||||
case *pb.FileAction_Mkdir:
|
||||
user, group, err := loadOwner(ctx, a.Mkdir.Owner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.b.Mkdir(ctx, inpMount, user, group, *a.Mkdir); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *pb.FileAction_Mkfile:
|
||||
user, group, err := loadOwner(ctx, a.Mkfile.Owner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.b.Mkfile(ctx, inpMount, user, group, *a.Mkfile); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *pb.FileAction_Rm:
|
||||
if err := s.b.Rm(ctx, inpMount, *a.Rm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *pb.FileAction_Copy:
|
||||
if inpMountSecondary == nil {
|
||||
m, err := s.r.Prepare(ctx, nil, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inpMountSecondary = m
|
||||
}
|
||||
user, group, err := loadOwner(ctx, a.Copy.Owner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := s.b.Copy(ctx, inpMountSecondary, inpMount, user, group, *a.Copy); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("invalid action type %T", action.Action)
|
||||
}
|
||||
|
||||
if inp.requiresCommit {
|
||||
ref, err := s.r.Commit(ctx, inpMount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inp.ref = ref
|
||||
} else {
|
||||
inp.mount = inpMount
|
||||
}
|
||||
s.mu.Lock()
|
||||
s.ins[idx] = inp
|
||||
s.mu.Unlock()
|
||||
return inp, nil
|
||||
})
|
||||
if err != nil {
|
||||
return input{}, err
|
||||
}
|
||||
return inp.(input), err
|
||||
}
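FileOpSolver.getInput above funnels every lookup for the same input index through flightcontrol.Group, so concurrent outputs that depend on the same intermediate action compute it only once. A rough, self-contained sketch of that deduplication idea, using golang.org/x/sync/singleflight as a stand-in for BuildKit's flightcontrol package (the key format and helper names here are made up):

package main

import (
	"fmt"
	"sync"

	"golang.org/x/sync/singleflight"
)

var g singleflight.Group

// buildInput pretends to be an expensive step such as preparing a mount
// and running a file action for input index idx.
func buildInput(idx int) (string, error) {
	return fmt.Sprintf("result-for-%d", idx), nil
}

func getInput(idx int) (string, error) {
	// All concurrent callers asking for the same index share one execution.
	v, err, _ := g.Do(fmt.Sprintf("inp-%d", idx), func() (interface{}, error) {
		return buildInput(idx)
	})
	if err != nil {
		return "", err
	}
	return v.(string), nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			out, _ := getInput(7)
			fmt.Println(out)
		}()
	}
	wg.Wait()
}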
28
vendor/github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes/types.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
package fileoptypes

import (
	"context"

	"github.com/moby/buildkit/solver/pb"
)

type Ref interface {
	Release(context.Context) error
}

type Mount interface {
	IsFileOpMount()
	Release(context.Context) error
}

type Backend interface {
	Mkdir(context.Context, Mount, Mount, Mount, pb.FileActionMkDir) error
	Mkfile(context.Context, Mount, Mount, Mount, pb.FileActionMkFile) error
	Rm(context.Context, Mount, pb.FileActionRm) error
	Copy(context.Context, Mount, Mount, Mount, Mount, pb.FileActionCopy) error
}

type RefManager interface {
	Prepare(ctx context.Context, ref Ref, readonly bool) (Mount, error)
	Commit(ctx context.Context, mount Mount) (Ref, error)
}
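These interfaces separate where the data lives (RefManager turns refs into mounts and back) from what is done to it (Backend performs the individual file actions). A simplified sketch of how a solver drives them, using trimmed-down hypothetical interfaces rather than the real pb types (not part of the vendored source):

package main

import (
	"context"
	"fmt"
)

// Trimmed-down stand-ins for fileoptypes.Ref and fileoptypes.Mount.
type Ref interface{ ID() string }
type Mount interface{ Path() string }

type RefManager interface {
	Prepare(ctx context.Context, ref Ref, readonly bool) (Mount, error)
	Commit(ctx context.Context, m Mount) (Ref, error)
}

type Backend interface {
	Mkdir(ctx context.Context, m Mount, path string) error
}

// runMkdir shows the Prepare -> action -> Commit flow that FileOpSolver follows.
func runMkdir(ctx context.Context, r RefManager, b Backend, in Ref, dir string) (Ref, error) {
	m, err := r.Prepare(ctx, in, false) // writable working mount
	if err != nil {
		return nil, err
	}
	if err := b.Mkdir(ctx, m, dir); err != nil {
		return nil, err
	}
	return r.Commit(ctx, m) // snapshot the mutated mount into a new ref
}

func main() { fmt.Println("see runMkdir for the Prepare -> action -> Commit flow") }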
92
vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go
generated
vendored
Normal file
@@ -0,0 +1,92 @@
package ops

import (
	"context"
	"strings"
	"sync"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/solver/llbsolver"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/worker"
	digest "github.com/opencontainers/go-digest"
)

const sourceCacheType = "buildkit.source.v0"

type sourceOp struct {
	mu       sync.Mutex
	op       *pb.Op_Source
	platform *pb.Platform
	sm       *source.Manager
	src      source.SourceInstance
	sessM    *session.Manager
	w        worker.Worker
}

func NewSourceOp(_ solver.Vertex, op *pb.Op_Source, platform *pb.Platform, sm *source.Manager, sessM *session.Manager, w worker.Worker) (solver.Op, error) {
	if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil {
		return nil, err
	}
	return &sourceOp{
		op:       op,
		sm:       sm,
		w:        w,
		sessM:    sessM,
		platform: platform,
	}, nil
}

func (s *sourceOp) instance(ctx context.Context) (source.SourceInstance, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.src != nil {
		return s.src, nil
	}
	id, err := source.FromLLB(s.op, s.platform)
	if err != nil {
		return nil, err
	}
	src, err := s.sm.Resolve(ctx, id, s.sessM)
	if err != nil {
		return nil, err
	}
	s.src = src
	return s.src, nil
}

func (s *sourceOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) {
	src, err := s.instance(ctx)
	if err != nil {
		return nil, false, err
	}
	k, done, err := src.CacheKey(ctx, index)
	if err != nil {
		return nil, false, err
	}

	dgst := digest.FromBytes([]byte(sourceCacheType + ":" + k))

	if strings.HasPrefix(k, "session:") {
		dgst = digest.Digest("random:" + strings.TrimPrefix(dgst.String(), dgst.Algorithm().String()+":"))
	}

	return &solver.CacheMap{
		// TODO: add os/arch
		Digest: dgst,
	}, done, nil
}

func (s *sourceOp) Exec(ctx context.Context, _ []solver.Result) (outputs []solver.Result, err error) {
	src, err := s.instance(ctx)
	if err != nil {
		return nil, err
	}
	ref, err := src.Snapshot(ctx)
	if err != nil {
		return nil, err
	}
	return []solver.Result{worker.NewWorkerRefResult(ref, s.w)}, nil
}
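CacheMap above hashes the source cache key into a content digest, and keys that start with "session:" (session-scoped sources) are rewritten to a "random:" pseudo-digest so they never match across builds. A small runnable illustration of that key handling using the go-digest package; the example key strings are made up:

package main

import (
	"fmt"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

const sourceCacheType = "buildkit.source.v0"

// cacheDigest mirrors the key handling in sourceOp.CacheMap: stable keys map
// to a sha256 digest, session-scoped keys are rewritten to a "random:" value.
func cacheDigest(k string) digest.Digest {
	dgst := digest.FromBytes([]byte(sourceCacheType + ":" + k))
	if strings.HasPrefix(k, "session:") {
		dgst = digest.Digest("random:" + strings.TrimPrefix(dgst.String(), dgst.Algorithm().String()+":"))
	}
	return dgst
}

func main() {
	fmt.Println(cacheDigest("docker-image://docker.io/library/alpine:latest"))
	fmt.Println(cacheDigest("session:local:context"))
}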
74
vendor/github.com/moby/buildkit/solver/llbsolver/result.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
package llbsolver

import (
	"bytes"
	"context"
	"path"

	"github.com/moby/buildkit/cache/contenthash"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/worker"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
)

type Selector struct {
	Path        string
	Wildcard    bool
	FollowLinks bool
}

func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc {
	return func(ctx context.Context, res solver.Result) (digest.Digest, error) {
		ref, ok := res.Sys().(*worker.WorkerRef)
		if !ok {
			return "", errors.Errorf("invalid reference: %T", res)
		}

		if len(selectors) == 0 {
			selectors = []Selector{{}}
		}

		dgsts := make([][]byte, len(selectors))

		eg, ctx := errgroup.WithContext(ctx)

		for i, sel := range selectors {
			// FIXME(tonistiigi): enabling this parallelization seems to create wrong results for some big inputs(like gobuild)
			// func(i int) {
			// eg.Go(func() error {
			if !sel.Wildcard {
				dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks)
				if err != nil {
					return "", err
				}
				dgsts[i] = []byte(dgst)
			} else {
				dgst, err := contenthash.ChecksumWildcard(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks)
				if err != nil {
					return "", err
				}
				dgsts[i] = []byte(dgst)
			}
			// return nil
			// })
			// }(i)
		}

		if err := eg.Wait(); err != nil {
			return "", err
		}

		return digest.FromBytes(bytes.Join(dgsts, []byte{0})), nil
	}
}

func workerRefConverter(ctx context.Context, res solver.Result) (*solver.Remote, error) {
	ref, ok := res.Sys().(*worker.WorkerRef)
	if !ok {
		return nil, errors.Errorf("invalid result: %T", res.Sys())
	}

	return ref.Worker.GetRemote(ctx, ref.ImmutableRef, true)
}
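NewContentHashFunc checksums every selector path and then folds the per-path digests into a single cache digest by joining them with a zero byte. The folding step in isolation, runnable, with placeholder per-path digests standing in for the contenthash results (not part of the vendored source):

package main

import (
	"bytes"
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Pretend these came from contenthash.Checksum for two selector paths.
	perPath := [][]byte{
		[]byte(digest.FromString("content of /etc/passwd")),
		[]byte(digest.FromString("content of /src")),
	}
	// Joining with a 0 separator keeps the combined key order-sensitive and
	// unambiguous, matching the final step of NewContentHashFunc.
	combined := digest.FromBytes(bytes.Join(perPath, []byte{0}))
	fmt.Println(combined)
}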
418
vendor/github.com/moby/buildkit/solver/llbsolver/solver.go
generated
vendored
Normal file
@@ -0,0 +1,418 @@
|
||||
package llbsolver
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/cache/remotecache"
|
||||
"github.com/moby/buildkit/client"
|
||||
controlgateway "github.com/moby/buildkit/control/gateway"
|
||||
"github.com/moby/buildkit/exporter"
|
||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
||||
"github.com/moby/buildkit/frontend"
|
||||
"github.com/moby/buildkit/frontend/gateway"
|
||||
"github.com/moby/buildkit/session"
|
||||
"github.com/moby/buildkit/solver"
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
"github.com/moby/buildkit/util/progress"
|
||||
"github.com/moby/buildkit/worker"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const keyEntitlements = "llb.entitlements"
|
||||
|
||||
type ExporterRequest struct {
|
||||
Exporter exporter.ExporterInstance
|
||||
CacheExporter remotecache.Exporter
|
||||
CacheExportMode solver.CacheExportMode
|
||||
}
|
||||
|
||||
// ResolveWorkerFunc returns default worker for the temporary default non-distributed use cases
|
||||
type ResolveWorkerFunc func() (worker.Worker, error)
|
||||
|
||||
type Solver struct {
|
||||
workerController *worker.Controller
|
||||
solver *solver.Solver
|
||||
resolveWorker ResolveWorkerFunc
|
||||
eachWorker func(func(worker.Worker) error) error
|
||||
frontends map[string]frontend.Frontend
|
||||
resolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc
|
||||
platforms []specs.Platform
|
||||
gatewayForwarder *controlgateway.GatewayForwarder
|
||||
sm *session.Manager
|
||||
entitlements []string
|
||||
}
|
||||
|
||||
func New(wc *worker.Controller, f map[string]frontend.Frontend, cache solver.CacheManager, resolveCI map[string]remotecache.ResolveCacheImporterFunc, gatewayForwarder *controlgateway.GatewayForwarder, sm *session.Manager, ents []string) (*Solver, error) {
|
||||
s := &Solver{
|
||||
workerController: wc,
|
||||
resolveWorker: defaultResolver(wc),
|
||||
eachWorker: allWorkers(wc),
|
||||
frontends: f,
|
||||
resolveCacheImporterFuncs: resolveCI,
|
||||
gatewayForwarder: gatewayForwarder,
|
||||
sm: sm,
|
||||
entitlements: ents,
|
||||
}
|
||||
|
||||
// executing is currently only allowed on default worker
|
||||
w, err := wc.GetDefault()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.platforms = w.Platforms(false)
|
||||
|
||||
s.solver = solver.NewSolver(solver.SolverOpt{
|
||||
ResolveOpFunc: s.resolver(),
|
||||
DefaultCache: cache,
|
||||
})
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *Solver) resolver() solver.ResolveOpFunc {
|
||||
return func(v solver.Vertex, b solver.Builder) (solver.Op, error) {
|
||||
w, err := s.resolveWorker()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return w.ResolveOp(v, s.Bridge(b), s.sm)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Solver) Bridge(b solver.Builder) frontend.FrontendLLBBridge {
|
||||
return &llbBridge{
|
||||
builder: b,
|
||||
frontends: s.frontends,
|
||||
resolveWorker: s.resolveWorker,
|
||||
eachWorker: s.eachWorker,
|
||||
resolveCacheImporterFuncs: s.resolveCacheImporterFuncs,
|
||||
cms: map[string]solver.CacheManager{},
|
||||
platforms: s.platforms,
|
||||
sm: s.sm,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Solver) Solve(ctx context.Context, id string, req frontend.SolveRequest, exp ExporterRequest, ent []entitlements.Entitlement) (*client.SolveResponse, error) {
|
||||
j, err := s.solver.NewJob(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer j.Discard()
|
||||
|
||||
set, err := entitlements.WhiteList(ent, supportedEntitlements(s.entitlements))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
j.SetValue(keyEntitlements, set)
|
||||
|
||||
j.SessionID = session.FromContext(ctx)
|
||||
|
||||
var res *frontend.Result
|
||||
if s.gatewayForwarder != nil && req.Definition == nil && req.Frontend == "" {
|
||||
fwd := gateway.NewBridgeForwarder(ctx, s.Bridge(j), s.workerController, req.FrontendInputs)
|
||||
defer fwd.Discard()
|
||||
if err := s.gatewayForwarder.RegisterBuild(ctx, id, fwd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer s.gatewayForwarder.UnregisterBuild(ctx, id)
|
||||
|
||||
var err error
|
||||
select {
|
||||
case <-fwd.Done():
|
||||
res, err = fwd.Result()
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
res, err = s.Bridge(j).Solve(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
res.EachRef(func(ref solver.ResultProxy) error {
|
||||
go ref.Release(context.TODO())
|
||||
return nil
|
||||
})
|
||||
}()
|
||||
|
||||
eg, ctx2 := errgroup.WithContext(ctx)
|
||||
res.EachRef(func(ref solver.ResultProxy) error {
|
||||
eg.Go(func() error {
|
||||
_, err := ref.Result(ctx2)
|
||||
return err
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err := eg.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var exporterResponse map[string]string
|
||||
if e := exp.Exporter; e != nil {
|
||||
inp := exporter.Source{
|
||||
Metadata: res.Metadata,
|
||||
}
|
||||
if inp.Metadata == nil {
|
||||
inp.Metadata = make(map[string][]byte)
|
||||
}
|
||||
if res := res.Ref; res != nil {
|
||||
r, err := res.Result(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workerRef, ok := r.Sys().(*worker.WorkerRef)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid reference: %T", r.Sys())
|
||||
}
|
||||
inp.Ref = workerRef.ImmutableRef
|
||||
|
||||
dt, err := inlineCache(ctx, exp.CacheExporter, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dt != nil {
|
||||
inp.Metadata[exptypes.ExporterInlineCache] = dt
|
||||
}
|
||||
}
|
||||
if res.Refs != nil {
|
||||
m := make(map[string]cache.ImmutableRef, len(res.Refs))
|
||||
for k, res := range res.Refs {
|
||||
if res == nil {
|
||||
m[k] = nil
|
||||
} else {
|
||||
r, err := res.Result(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
workerRef, ok := r.Sys().(*worker.WorkerRef)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid reference: %T", r.Sys())
|
||||
}
|
||||
m[k] = workerRef.ImmutableRef
|
||||
|
||||
dt, err := inlineCache(ctx, exp.CacheExporter, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dt != nil {
|
||||
inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, k)] = dt
|
||||
}
|
||||
}
|
||||
}
|
||||
inp.Refs = m
|
||||
}
|
||||
|
||||
if err := inVertexContext(j.Context(ctx), e.Name(), "", func(ctx context.Context) error {
|
||||
exporterResponse, err = e.Export(ctx, inp)
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var cacheExporterResponse map[string]string
|
||||
if e := exp.CacheExporter; e != nil {
|
||||
if err := inVertexContext(j.Context(ctx), "exporting cache", "", func(ctx context.Context) error {
|
||||
prepareDone := oneOffProgress(ctx, "preparing build cache for export")
|
||||
if err := res.EachRef(func(res solver.ResultProxy) error {
|
||||
r, err := res.Result(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// all keys have same export chain so exporting others is not needed
|
||||
_, err = r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
|
||||
Convert: workerRefConverter,
|
||||
Mode: exp.CacheExportMode,
|
||||
})
|
||||
return err
|
||||
}); err != nil {
|
||||
return prepareDone(err)
|
||||
}
|
||||
prepareDone(nil)
|
||||
cacheExporterResponse, err = e.Finalize(ctx)
|
||||
return err
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if exporterResponse == nil {
|
||||
exporterResponse = make(map[string]string)
|
||||
}
|
||||
|
||||
for k, v := range res.Metadata {
|
||||
if strings.HasPrefix(k, "frontend.") {
|
||||
exporterResponse[k] = string(v)
|
||||
}
|
||||
}
|
||||
for k, v := range cacheExporterResponse {
|
||||
if strings.HasPrefix(k, "cache.") {
|
||||
exporterResponse[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return &client.SolveResponse{
|
||||
ExporterResponse: exporterResponse,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult) ([]byte, error) {
|
||||
if efl, ok := e.(interface {
|
||||
ExportForLayers([]digest.Digest) ([]byte, error)
|
||||
}); ok {
|
||||
workerRef, ok := res.Sys().(*worker.WorkerRef)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid reference: %T", res.Sys())
|
||||
}
|
||||
|
||||
remote, err := workerRef.Worker.GetRemote(ctx, workerRef.ImmutableRef, true)
|
||||
if err != nil || remote == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
digests := make([]digest.Digest, 0, len(remote.Descriptors))
|
||||
for _, desc := range remote.Descriptors {
|
||||
digests = append(digests, desc.Digest)
|
||||
}
|
||||
|
||||
if _, err := res.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{
|
||||
Convert: workerRefConverter,
|
||||
Mode: solver.CacheExportModeMin,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return efl.ExportForLayers(digests)
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (s *Solver) Status(ctx context.Context, id string, statusChan chan *client.SolveStatus) error {
|
||||
j, err := s.solver.Get(id)
|
||||
if err != nil {
|
||||
close(statusChan)
|
||||
return err
|
||||
}
|
||||
return j.Status(ctx, statusChan)
|
||||
}
|
||||
|
||||
func defaultResolver(wc *worker.Controller) ResolveWorkerFunc {
|
||||
return func() (worker.Worker, error) {
|
||||
return wc.GetDefault()
|
||||
}
|
||||
}
|
||||
func allWorkers(wc *worker.Controller) func(func(w worker.Worker) error) error {
|
||||
return func(f func(worker.Worker) error) error {
|
||||
all, err := wc.List()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, w := range all {
|
||||
if err := f(w); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func oneOffProgress(ctx context.Context, id string) func(err error) error {
|
||||
pw, _, _ := progress.FromContext(ctx)
|
||||
now := time.Now()
|
||||
st := progress.Status{
|
||||
Started: &now,
|
||||
}
|
||||
pw.Write(id, st)
|
||||
return func(err error) error {
|
||||
// TODO: set error on status
|
||||
now := time.Now()
|
||||
st.Completed = &now
|
||||
pw.Write(id, st)
|
||||
pw.Close()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func inVertexContext(ctx context.Context, name, id string, f func(ctx context.Context) error) error {
|
||||
if id == "" {
|
||||
id = name
|
||||
}
|
||||
v := client.Vertex{
|
||||
Digest: digest.FromBytes([]byte(id)),
|
||||
Name: name,
|
||||
}
|
||||
pw, _, ctx := progress.FromContext(ctx, progress.WithMetadata("vertex", v.Digest))
|
||||
notifyStarted(ctx, &v, false)
|
||||
defer pw.Close()
|
||||
err := f(ctx)
|
||||
notifyCompleted(ctx, &v, err, false)
|
||||
return err
|
||||
}
|
||||
|
||||
func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) {
|
||||
pw, _, _ := progress.FromContext(ctx)
|
||||
defer pw.Close()
|
||||
now := time.Now()
|
||||
v.Started = &now
|
||||
v.Completed = nil
|
||||
v.Cached = cached
|
||||
pw.Write(v.Digest.String(), *v)
|
||||
}
|
||||
|
||||
func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bool) {
|
||||
pw, _, _ := progress.FromContext(ctx)
|
||||
defer pw.Close()
|
||||
now := time.Now()
|
||||
if v.Started == nil {
|
||||
v.Started = &now
|
||||
}
|
||||
v.Completed = &now
|
||||
v.Cached = cached
|
||||
if err != nil {
|
||||
v.Error = err.Error()
|
||||
}
|
||||
pw.Write(v.Digest.String(), *v)
|
||||
}
|
||||
|
||||
func supportedEntitlements(ents []string) []entitlements.Entitlement {
|
||||
out := []entitlements.Entitlement{} // nil means no filter
|
||||
for _, e := range ents {
|
||||
if e == string(entitlements.EntitlementNetworkHost) {
|
||||
out = append(out, entitlements.EntitlementNetworkHost)
|
||||
}
|
||||
if e == string(entitlements.EntitlementSecurityInsecure) {
|
||||
out = append(out, entitlements.EntitlementSecurityInsecure)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func loadEntitlements(b solver.Builder) (entitlements.Set, error) {
|
||||
var ent entitlements.Set = map[entitlements.Entitlement]struct{}{}
|
||||
err := b.EachValue(context.TODO(), keyEntitlements, func(v interface{}) error {
|
||||
set, ok := v.(entitlements.Set)
|
||||
if !ok {
|
||||
return errors.Errorf("invalid entitlements %T", v)
|
||||
}
|
||||
for k := range set {
|
||||
ent[k] = struct{}{}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ent, nil
|
||||
}
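The oneOffProgress helper in solver.go returns a closure that stamps a start time immediately and a completion time when the step finishes, which keeps call sites to a single deferred-style line. A dependency-free sketch of the same closure pattern, with the progress writer replaced by plain logging (names are illustrative, not the vendored API):

package main

import (
	"fmt"
	"time"
)

// oneOff reports a named step as started now and returns a function that
// marks it completed (optionally with an error), echoing oneOffProgress.
func oneOff(id string) func(err error) error {
	started := time.Now()
	fmt.Printf("[%s] started\n", id)
	return func(err error) error {
		fmt.Printf("[%s] done in %s (err=%v)\n", id, time.Since(started), err)
		return err
	}
}

func main() {
	done := oneOff("preparing build cache for export")
	time.Sleep(10 * time.Millisecond) // stand-in for the real work
	_ = done(nil)
}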
353
vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go
generated
vendored
Normal file
@@ -0,0 +1,353 @@
|
||||
package llbsolver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/moby/buildkit/solver"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/source"
|
||||
"github.com/moby/buildkit/util/binfmt_misc"
|
||||
"github.com/moby/buildkit/util/entitlements"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type vertex struct {
|
||||
sys interface{}
|
||||
options solver.VertexOptions
|
||||
inputs []solver.Edge
|
||||
digest digest.Digest
|
||||
name string
|
||||
}
|
||||
|
||||
func (v *vertex) Digest() digest.Digest {
|
||||
return v.digest
|
||||
}
|
||||
|
||||
func (v *vertex) Sys() interface{} {
|
||||
return v.sys
|
||||
}
|
||||
|
||||
func (v *vertex) Options() solver.VertexOptions {
|
||||
return v.options
|
||||
}
|
||||
|
||||
func (v *vertex) Inputs() []solver.Edge {
|
||||
return v.inputs
|
||||
}
|
||||
|
||||
func (v *vertex) Name() string {
|
||||
if name, ok := v.options.Description["llb.customname"]; ok {
|
||||
return name
|
||||
}
|
||||
return v.name
|
||||
}
|
||||
|
||||
type LoadOpt func(*pb.Op, *pb.OpMetadata, *solver.VertexOptions) error
|
||||
|
||||
func WithValidateCaps() LoadOpt {
|
||||
cs := pb.Caps.CapSet(pb.Caps.All())
|
||||
return func(_ *pb.Op, md *pb.OpMetadata, opt *solver.VertexOptions) error {
|
||||
if md != nil {
|
||||
for c := range md.Caps {
|
||||
if err := cs.Supports(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func WithCacheSources(cms []solver.CacheManager) LoadOpt {
|
||||
return func(_ *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {
|
||||
opt.CacheSources = cms
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func RuntimePlatforms(p []specs.Platform) LoadOpt {
|
||||
var defaultPlatform *pb.Platform
|
||||
pp := make([]specs.Platform, len(p))
|
||||
for i := range p {
|
||||
pp[i] = platforms.Normalize(p[i])
|
||||
}
|
||||
return func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {
|
||||
if op.Platform == nil {
|
||||
if defaultPlatform == nil {
|
||||
p := platforms.DefaultSpec()
|
||||
defaultPlatform = &pb.Platform{
|
||||
OS: p.OS,
|
||||
Architecture: p.Architecture,
|
||||
Variant: p.Variant,
|
||||
}
|
||||
}
|
||||
op.Platform = defaultPlatform
|
||||
}
|
||||
platform := specs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant}
|
||||
normalizedPlatform := platforms.Normalize(platform)
|
||||
|
||||
op.Platform = &pb.Platform{
|
||||
OS: normalizedPlatform.OS,
|
||||
Architecture: normalizedPlatform.Architecture,
|
||||
Variant: normalizedPlatform.Variant,
|
||||
}
|
||||
|
||||
if _, ok := op.Op.(*pb.Op_Exec); ok {
|
||||
var found bool
|
||||
for _, pp := range pp {
|
||||
if pp.OS == op.Platform.OS && pp.Architecture == op.Platform.Architecture && pp.Variant == op.Platform.Variant {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
if !binfmt_misc.Check(normalizedPlatform) {
|
||||
return errors.Errorf("runtime execution on platform %s not supported", platforms.Format(specs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant}))
|
||||
} else {
|
||||
pp = append(pp, normalizedPlatform)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func ValidateEntitlements(ent entitlements.Set) LoadOpt {
|
||||
return func(op *pb.Op, _ *pb.OpMetadata, opt *solver.VertexOptions) error {
|
||||
switch op := op.Op.(type) {
|
||||
case *pb.Op_Exec:
|
||||
if op.Exec.Network == pb.NetMode_HOST {
|
||||
if !ent.Allowed(entitlements.EntitlementNetworkHost) {
|
||||
return errors.Errorf("%s is not allowed", entitlements.EntitlementNetworkHost)
|
||||
}
|
||||
}
|
||||
|
||||
if op.Exec.Security == pb.SecurityMode_INSECURE {
|
||||
if !ent.Allowed(entitlements.EntitlementSecurityInsecure) {
|
||||
return errors.Errorf("%s is not allowed", entitlements.EntitlementSecurityInsecure)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type detectPrunedCacheID struct {
|
||||
ids map[string]struct{}
|
||||
}
|
||||
|
||||
func (dpc *detectPrunedCacheID) Load(op *pb.Op, md *pb.OpMetadata, opt *solver.VertexOptions) error {
|
||||
if md == nil || !md.IgnoreCache {
|
||||
return nil
|
||||
}
|
||||
switch op := op.Op.(type) {
|
||||
case *pb.Op_Exec:
|
||||
for _, m := range op.Exec.GetMounts() {
|
||||
if m.MountType == pb.MountType_CACHE {
|
||||
if m.CacheOpt != nil {
|
||||
id := m.CacheOpt.ID
|
||||
if id == "" {
|
||||
id = m.Dest
|
||||
}
|
||||
if dpc.ids == nil {
|
||||
dpc.ids = map[string]struct{}{}
|
||||
}
|
||||
dpc.ids[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func Load(def *pb.Definition, opts ...LoadOpt) (solver.Edge, error) {
|
||||
return loadLLB(def, func(dgst digest.Digest, pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error) {
|
||||
opMetadata := def.Metadata[dgst]
|
||||
vtx, err := newVertex(dgst, pbOp, &opMetadata, load, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return vtx, nil
|
||||
})
|
||||
}
|
||||
|
||||
func newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(digest.Digest) (solver.Vertex, error), opts ...LoadOpt) (*vertex, error) {
|
||||
opt := solver.VertexOptions{}
|
||||
if opMeta != nil {
|
||||
opt.IgnoreCache = opMeta.IgnoreCache
|
||||
opt.Description = opMeta.Description
|
||||
if opMeta.ExportCache != nil {
|
||||
opt.ExportCache = &opMeta.ExportCache.Value
|
||||
}
|
||||
}
|
||||
for _, fn := range opts {
|
||||
if err := fn(op, opMeta, &opt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
vtx := &vertex{sys: op, options: opt, digest: dgst, name: llbOpName(op)}
|
||||
for _, in := range op.Inputs {
|
||||
sub, err := load(in.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vtx.inputs = append(vtx.inputs, solver.Edge{Index: solver.Index(in.Index), Vertex: sub})
|
||||
}
|
||||
return vtx, nil
|
||||
}
|
||||
|
||||
// loadLLB loads LLB.
|
||||
// fn is executed sequentially.
|
||||
func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Digest) (solver.Vertex, error)) (solver.Vertex, error)) (solver.Edge, error) {
|
||||
if len(def.Def) == 0 {
|
||||
return solver.Edge{}, errors.New("invalid empty definition")
|
||||
}
|
||||
|
||||
allOps := make(map[digest.Digest]*pb.Op)
|
||||
|
||||
var dgst digest.Digest
|
||||
|
||||
for _, dt := range def.Def {
|
||||
var op pb.Op
|
||||
if err := (&op).Unmarshal(dt); err != nil {
|
||||
return solver.Edge{}, errors.Wrap(err, "failed to parse llb proto op")
|
||||
}
|
||||
dgst = digest.FromBytes(dt)
|
||||
allOps[dgst] = &op
|
||||
}
|
||||
|
||||
if len(allOps) < 2 {
|
||||
return solver.Edge{}, errors.Errorf("invalid LLB with %d vertexes", len(allOps))
|
||||
}
|
||||
|
||||
lastOp := allOps[dgst]
|
||||
delete(allOps, dgst)
|
||||
if len(lastOp.Inputs) == 0 {
|
||||
return solver.Edge{}, errors.Errorf("invalid LLB with no inputs on last vertex")
|
||||
}
|
||||
dgst = lastOp.Inputs[0].Digest
|
||||
|
||||
cache := make(map[digest.Digest]solver.Vertex)
|
||||
|
||||
var rec func(dgst digest.Digest) (solver.Vertex, error)
|
||||
rec = func(dgst digest.Digest) (solver.Vertex, error) {
|
||||
if v, ok := cache[dgst]; ok {
|
||||
return v, nil
|
||||
}
|
||||
op, ok := allOps[dgst]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid missing input digest %s", dgst)
|
||||
}
|
||||
|
||||
if err := ValidateOp(op); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v, err := fn(dgst, op, rec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cache[dgst] = v
|
||||
return v, nil
|
||||
}
|
||||
|
||||
v, err := rec(dgst)
|
||||
if err != nil {
|
||||
return solver.Edge{}, err
|
||||
}
|
||||
return solver.Edge{Vertex: v, Index: solver.Index(lastOp.Inputs[0].Index)}, nil
|
||||
}
|
||||
|
||||
func llbOpName(op *pb.Op) string {
|
||||
switch op := op.Op.(type) {
|
||||
case *pb.Op_Source:
|
||||
if id, err := source.FromLLB(op, nil); err == nil {
|
||||
if id, ok := id.(*source.LocalIdentifier); ok {
|
||||
if len(id.IncludePatterns) == 1 {
|
||||
return op.Source.Identifier + " (" + id.IncludePatterns[0] + ")"
|
||||
}
|
||||
}
|
||||
}
|
||||
return op.Source.Identifier
|
||||
case *pb.Op_Exec:
|
||||
return strings.Join(op.Exec.Meta.Args, " ")
|
||||
case *pb.Op_File:
|
||||
return fileOpName(op.File.Actions)
|
||||
case *pb.Op_Build:
|
||||
return "build"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
func ValidateOp(op *pb.Op) error {
|
||||
if op == nil {
|
||||
return errors.Errorf("invalid nil op")
|
||||
}
|
||||
|
||||
switch op := op.Op.(type) {
|
||||
case *pb.Op_Source:
|
||||
if op.Source == nil {
|
||||
return errors.Errorf("invalid nil source op")
|
||||
}
|
||||
case *pb.Op_Exec:
|
||||
if op.Exec == nil {
|
||||
return errors.Errorf("invalid nil exec op")
|
||||
}
|
||||
if op.Exec.Meta == nil {
|
||||
return errors.Errorf("invalid exec op with no meta")
|
||||
}
|
||||
if len(op.Exec.Meta.Args) == 0 {
|
||||
return errors.Errorf("invalid exec op with no args")
|
||||
}
|
||||
if len(op.Exec.Mounts) == 0 {
|
||||
return errors.Errorf("invalid exec op with no mounts")
|
||||
}
|
||||
|
||||
isRoot := false
|
||||
for _, m := range op.Exec.Mounts {
|
||||
if m.Dest == pb.RootMount {
|
||||
isRoot = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isRoot {
|
||||
return errors.Errorf("invalid exec op with no rootfs")
|
||||
}
|
||||
case *pb.Op_File:
|
||||
if op.File == nil {
|
||||
return errors.Errorf("invalid nil file op")
|
||||
}
|
||||
if len(op.File.Actions) == 0 {
|
||||
return errors.Errorf("invalid file op with no actions")
|
||||
}
|
||||
case *pb.Op_Build:
|
||||
if op.Build == nil {
|
||||
return errors.Errorf("invalid nil build op")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func fileOpName(actions []*pb.FileAction) string {
|
||||
names := make([]string, 0, len(actions))
|
||||
for _, action := range actions {
|
||||
switch a := action.Action.(type) {
|
||||
case *pb.FileAction_Mkdir:
|
||||
names = append(names, fmt.Sprintf("mkdir %s", a.Mkdir.Path))
|
||||
case *pb.FileAction_Mkfile:
|
||||
names = append(names, fmt.Sprintf("mkfile %s", a.Mkfile.Path))
|
||||
case *pb.FileAction_Rm:
|
||||
names = append(names, fmt.Sprintf("rm %s", a.Rm.Path))
|
||||
case *pb.FileAction_Copy:
|
||||
names = append(names, fmt.Sprintf("copy %s %s", a.Copy.Src, a.Copy.Dest))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(names, ", ")
|
||||
}
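loadLLB above walks the op graph backwards from the last vertex, memoizing every digest it has already converted so that shared subtrees become a single vertex. The same recursive-with-cache shape, reduced to a toy DAG keyed by strings (structure and names are illustrative only, not the vendored code):

package main

import "fmt"

type node struct {
	name   string
	inputs []string
}

type vertex struct {
	name   string
	inputs []*vertex
}

// build converts the flat op map into linked vertices, visiting each key once.
func build(all map[string]node, key string, cache map[string]*vertex) (*vertex, error) {
	if v, ok := cache[key]; ok {
		return v, nil
	}
	n, ok := all[key]
	if !ok {
		return nil, fmt.Errorf("invalid missing input %s", key)
	}
	v := &vertex{name: n.name}
	for _, in := range n.inputs {
		sub, err := build(all, in, cache)
		if err != nil {
			return nil, err
		}
		v.inputs = append(v.inputs, sub)
	}
	cache[key] = v
	return v, nil
}

func main() {
	ops := map[string]node{
		"a": {name: "source"},
		"b": {name: "exec", inputs: []string{"a"}},
		"c": {name: "copy", inputs: []string{"a", "b"}},
	}
	root, err := build(ops, "c", map[string]*vertex{})
	fmt.Println(root.name, len(root.inputs), err)
}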