Mirror of https://github.com/mudler/luet.git (synced 2025-09-03 16:25:19 +00:00)
Support priv/unpriv image extraction
Optionally add back privileged extraction, which can be enabled with LUET_PRIVILEGED_EXTRACT=true.
Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
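The LUET_PRIVILEGED_EXTRACT toggle itself lives in luet's own code, not in the vendored buildkit files below. As a rough sketch only (the function names and parsing here are hypothetical; only the variable name LUET_PRIVILEGED_EXTRACT comes from the commit message), an environment-variable gate between the privileged and unprivileged extraction paths could look like this:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// privilegedExtractEnabled reports whether LUET_PRIVILEGED_EXTRACT asks for
// the privileged extraction path. Any value strconv.ParseBool accepts as true
// ("1", "true", "TRUE", ...) enables it; everything else keeps the default
// unprivileged path. The exact parsing used by luet may differ.
func privilegedExtractEnabled() bool {
	v, err := strconv.ParseBool(os.Getenv("LUET_PRIVILEGED_EXTRACT"))
	return err == nil && v
}

func main() {
	if privilegedExtractEnabled() {
		fmt.Println("extracting image with the privileged code path")
		return
	}
	fmt.Println("extracting image with the unprivileged (default) code path")
}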
295 vendor/github.com/moby/buildkit/source/containerimage/pull.go (generated, vendored, new file)
@@ -0,0 +1,295 @@
package containerimage

import (
	"context"
	"encoding/json"
	"runtime"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/docker/distribution/reference"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/imageutil"
	"github.com/moby/buildkit/util/leaseutil"
	"github.com/moby/buildkit/util/progress"
	"github.com/moby/buildkit/util/pull"
	"github.com/moby/buildkit/util/winlayers"
	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// TODO: break apart containerd specifics like contentstore so the resolver
// code can be used with any implementation

type SourceOpt struct {
	Snapshotter snapshot.Snapshotter
	ContentStore content.Store
	Applier diff.Applier
	CacheAccessor cache.Accessor
	ImageStore images.Store // optional
	RegistryHosts docker.RegistryHosts
	LeaseManager leases.Manager
}

type imageSource struct {
	SourceOpt
	g flightcontrol.Group
}

func NewSource(opt SourceOpt) (source.Source, error) {
	is := &imageSource{
		SourceOpt: opt,
	}

	return is, nil
}

func (is *imageSource) ID() string {
	return source.DockerImageScheme
}

func (is *imageSource) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager) (digest.Digest, []byte, error) {
	type t struct {
		dgst digest.Digest
		dt []byte
	}
	key := ref
	if platform := opt.Platform; platform != nil {
		key += platforms.Format(*platform)
	}

	rm, err := source.ParseImageResolveMode(opt.ResolveMode)
	if err != nil {
		return "", nil, err
	}

	res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) {
		dgst, dt, err := imageutil.Config(ctx, ref, pull.NewResolver(ctx, is.RegistryHosts, sm, is.ImageStore, rm, ref), is.ContentStore, is.LeaseManager, opt.Platform)
		if err != nil {
			return nil, err
		}
		return &t{dgst: dgst, dt: dt}, nil
	})
	if err != nil {
		return "", nil, err
	}
	typed := res.(*t)
	return typed.dgst, typed.dt, nil
}

func (is *imageSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) {
	imageIdentifier, ok := id.(*source.ImageIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid image identifier %v", id)
	}

	platform := platforms.DefaultSpec()
	if imageIdentifier.Platform != nil {
		platform = *imageIdentifier.Platform
	}

	pullerUtil := &pull.Puller{
		Snapshotter: is.Snapshotter,
		ContentStore: is.ContentStore,
		Applier: is.Applier,
		Src: imageIdentifier.Reference,
		Resolver: pull.NewResolver(ctx, is.RegistryHosts, sm, is.ImageStore, imageIdentifier.ResolveMode, imageIdentifier.Reference.String()),
		Platform: &platform,
	}
	p := &puller{
		CacheAccessor: is.CacheAccessor,
		Puller: pullerUtil,
		Platform: platform,
		id: imageIdentifier,
		LeaseManager: is.LeaseManager,
	}
	return p, nil
}

type puller struct {
	CacheAccessor cache.Accessor
	LeaseManager leases.Manager
	Platform specs.Platform
	id *source.ImageIdentifier
	*pull.Puller
}

func mainManifestKey(ctx context.Context, desc specs.Descriptor, platform specs.Platform) (digest.Digest, error) {
	dt, err := json.Marshal(struct {
		Digest digest.Digest
		OS string
		Arch string
		Variant string `json:",omitempty"`
	}{
		Digest: desc.Digest,
		OS: platform.OS,
		Arch: platform.Architecture,
		Variant: platform.Variant,
	})
	if err != nil {
		return "", err
	}
	return digest.FromBytes(dt), nil
}

func (p *puller) CacheKey(ctx context.Context, index int) (string, bool, error) {
	_, desc, err := p.Puller.Resolve(ctx)
	if err != nil {
		return "", false, err
	}
	if index == 0 || desc.Digest == "" {
		k, err := mainManifestKey(ctx, desc, p.Platform)
		if err != nil {
			return "", false, err
		}
		return k.String(), false, nil
	}
	ref, err := reference.ParseNormalizedNamed(p.Src.String())
	if err != nil {
		return "", false, err
	}
	ref, err = reference.WithDigest(ref, desc.Digest)
	if err != nil {
		return "", false, nil
	}
	_, dt, err := imageutil.Config(ctx, ref.String(), p.Resolver, p.ContentStore, p.LeaseManager, &p.Platform)
	if err != nil {
		return "", false, err
	}

	k := cacheKeyFromConfig(dt).String()
	if k == "" {
		k, err := mainManifestKey(ctx, desc, p.Platform)
		if err != nil {
			return "", false, err
		}
		return k.String(), true, nil
	}
	return k, true, nil
}

func (p *puller) Snapshot(ctx context.Context) (ir cache.ImmutableRef, err error) {
	layerNeedsTypeWindows := false
	if platform := p.Puller.Platform; platform != nil {
		if platform.OS == "windows" && runtime.GOOS != "windows" {
			ctx = winlayers.UseWindowsLayerMode(ctx)
			layerNeedsTypeWindows = true
		}
	}

	// workaround for gcr, authentication not supported on blob endpoints
	pull.EnsureManifestRequested(ctx, p.Puller.Resolver, p.Puller.Src.String())

	ctx, done, err := leaseutil.WithLease(ctx, p.LeaseManager, leaseutil.MakeTemporary)
	if err != nil {
		return nil, err
	}
	defer done(ctx)

	pulled, err := p.Puller.Pull(ctx)
	if err != nil {
		return nil, err
	}
	if len(pulled.Layers) == 0 {
		return nil, nil
	}

	extractDone := oneOffProgress(ctx, "unpacking "+pulled.Ref)
	var current cache.ImmutableRef
	defer func() {
		if err != nil && current != nil {
			current.Release(context.TODO())
		}
		extractDone(err)
	}()
	for _, l := range pulled.Layers {
		ref, err := p.CacheAccessor.GetByBlob(ctx, l, current, cache.WithDescription("pulled from "+pulled.Ref))
		if err != nil {
			return nil, err
		}
		if err := ref.Extract(ctx); err != nil {
			ref.Release(context.TODO())
			return nil, err
		}
		if current != nil {
			current.Release(context.TODO())
		}
		current = ref
	}

	for _, desc := range pulled.MetadataBlobs {
		if err := p.LeaseManager.AddResource(ctx, leases.Lease{ID: current.ID()}, leases.Resource{
			ID: desc.Digest.String(),
			Type: "content",
		}); err != nil {
			return nil, err
		}
	}

	if layerNeedsTypeWindows && current != nil {
		if err := markRefLayerTypeWindows(current); err != nil {
			return nil, err
		}
	}

	if p.id.RecordType != "" && cache.GetRecordType(current) == "" {
		if err := cache.SetRecordType(current, p.id.RecordType); err != nil {
			return nil, err
		}
	}

	return current, nil
}

func markRefLayerTypeWindows(ref cache.ImmutableRef) error {
	if parent := ref.Parent(); parent != nil {
		defer parent.Release(context.TODO())
		if err := markRefLayerTypeWindows(parent); err != nil {
			return err
		}
	}
	return cache.SetLayerType(ref, "windows")
}

// cacheKeyFromConfig returns a stable digest from image config. If image config
// is a known oci image we will use chainID of layers.
func cacheKeyFromConfig(dt []byte) digest.Digest {
	var img specs.Image
	err := json.Unmarshal(dt, &img)
	if err != nil {
		return digest.FromBytes(dt)
	}
	if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 {
		return ""
	}
	return identity.ChainID(img.RootFS.DiffIDs)
}

func oneOffProgress(ctx context.Context, id string) func(err error) error {
	pw, _, _ := progress.FromContext(ctx)
	now := time.Now()
	st := progress.Status{
		Started: &now,
	}
	pw.Write(id, st)
	return func(err error) error {
		// TODO: set error on status
		now := time.Now()
		st.Completed = &now
		pw.Write(id, st)
		pw.Close()
		return err
	}
}
441 vendor/github.com/moby/buildkit/source/git/gitsource.go (generated, vendored, new file)
@@ -0,0 +1,441 @@
package git

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strings"

	"github.com/docker/docker/pkg/locker"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/metadata"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/identity"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/util/progress/logs"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	bolt "go.etcd.io/bbolt"
)

var validHex = regexp.MustCompile(`^[a-f0-9]{40}$`)

type Opt struct {
	CacheAccessor cache.Accessor
	MetadataStore *metadata.Store
}

type gitSource struct {
	md *metadata.Store
	cache cache.Accessor
	locker *locker.Locker
}

// Supported returns nil if the system supports Git source
func Supported() error {
	if err := exec.Command("git", "version").Run(); err != nil {
		return errors.Wrap(err, "failed to find git binary")
	}
	return nil
}

func NewSource(opt Opt) (source.Source, error) {
	gs := &gitSource{
		md: opt.MetadataStore,
		cache: opt.CacheAccessor,
		locker: locker.New(),
	}
	return gs, nil
}

func (gs *gitSource) ID() string {
	return source.GitScheme
}

// needs to be called with repo lock
func (gs *gitSource) mountRemote(ctx context.Context, remote string) (target string, release func(), retErr error) {
	remoteKey := "git-remote::" + remote

	sis, err := gs.md.Search(remoteKey)
	if err != nil {
		return "", nil, errors.Wrapf(err, "failed to search metadata for %s", remote)
	}

	var remoteRef cache.MutableRef
	for _, si := range sis {
		remoteRef, err = gs.cache.GetMutable(ctx, si.ID())
		if err != nil {
			if cache.IsLocked(err) {
				// should never really happen as no other function should access this metadata, but lets be graceful
				logrus.Warnf("mutable ref for %s %s was locked: %v", remote, si.ID(), err)
				continue
			}
			return "", nil, errors.Wrapf(err, "failed to get mutable ref for %s", remote)
		}
		break
	}

	initializeRepo := false
	if remoteRef == nil {
		remoteRef, err = gs.cache.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", remote)))
		if err != nil {
			return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", remote)
		}
		initializeRepo = true
	}

	releaseRemoteRef := func() {
		remoteRef.Release(context.TODO())
	}

	defer func() {
		if retErr != nil && remoteRef != nil {
			releaseRemoteRef()
		}
	}()

	mount, err := remoteRef.Mount(ctx, false)
	if err != nil {
		return "", nil, err
	}

	lm := snapshot.LocalMounter(mount)
	dir, err := lm.Mount()
	if err != nil {
		return "", nil, err
	}

	defer func() {
		if retErr != nil {
			lm.Unmount()
		}
	}()

	if initializeRepo {
		if _, err := gitWithinDir(ctx, dir, "", "init", "--bare"); err != nil {
			return "", nil, errors.Wrapf(err, "failed to init repo at %s", dir)
		}

		if _, err := gitWithinDir(ctx, dir, "", "remote", "add", "origin", remote); err != nil {
			return "", nil, errors.Wrapf(err, "failed add origin repo at %s", dir)
		}

		// same new remote metadata
		si, _ := gs.md.Get(remoteRef.ID())
		v, err := metadata.NewValue(remoteKey)
		v.Index = remoteKey
		if err != nil {
			return "", nil, err
		}

		if err := si.Update(func(b *bolt.Bucket) error {
			return si.SetValue(b, "git-remote", v)
		}); err != nil {
			return "", nil, err
		}
	}
	return dir, func() {
		lm.Unmount()
		releaseRemoteRef()
	}, nil
}

type gitSourceHandler struct {
	*gitSource
	src source.GitIdentifier
	cacheKey string
}

func (gs *gitSourceHandler) shaToCacheKey(sha string) string {
	key := sha
	if gs.src.KeepGitDir {
		key += ".git"
	}
	return key
}

func (gs *gitSource) Resolve(ctx context.Context, id source.Identifier, _ *session.Manager) (source.SourceInstance, error) {
	gitIdentifier, ok := id.(*source.GitIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid git identifier %v", id)
	}

	return &gitSourceHandler{
		src: *gitIdentifier,
		gitSource: gs,
	}, nil
}

func (gs *gitSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) {
	remote := gs.src.Remote
	ref := gs.src.Ref
	if ref == "" {
		ref = "master"
	}
	gs.locker.Lock(remote)
	defer gs.locker.Unlock(remote)

	if isCommitSHA(ref) {
		ref = gs.shaToCacheKey(ref)
		gs.cacheKey = ref
		return ref, true, nil
	}

	gitDir, unmountGitDir, err := gs.mountRemote(ctx, remote)
	if err != nil {
		return "", false, err
	}
	defer unmountGitDir()

	// TODO: should we assume that remote tag is immutable? add a timer?

	buf, err := gitWithinDir(ctx, gitDir, "", "ls-remote", "origin", ref)
	if err != nil {
		return "", false, errors.Wrapf(err, "failed to fetch remote %s", remote)
	}
	out := buf.String()
	idx := strings.Index(out, "\t")
	if idx == -1 {
		return "", false, errors.Errorf("failed to find commit SHA from output: %s", string(out))
	}

	sha := string(out[:idx])
	if !isCommitSHA(sha) {
		return "", false, errors.Errorf("invalid commit sha %q", sha)
	}
	sha = gs.shaToCacheKey(sha)
	gs.cacheKey = sha
	return sha, true, nil
}

func (gs *gitSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRef, retErr error) {
	ref := gs.src.Ref
	if ref == "" {
		ref = "master"
	}

	cacheKey := gs.cacheKey
	if cacheKey == "" {
		var err error
		cacheKey, _, err = gs.CacheKey(ctx, 0)
		if err != nil {
			return nil, err
		}
	}

	snapshotKey := "git-snapshot::" + cacheKey + ":" + gs.src.Subdir
	gs.locker.Lock(snapshotKey)
	defer gs.locker.Unlock(snapshotKey)

	sis, err := gs.md.Search(snapshotKey)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to search metadata for %s", snapshotKey)
	}
	if len(sis) > 0 {
		return gs.cache.Get(ctx, sis[0].ID())
	}

	gs.locker.Lock(gs.src.Remote)
	defer gs.locker.Unlock(gs.src.Remote)
	gitDir, unmountGitDir, err := gs.mountRemote(ctx, gs.src.Remote)
	if err != nil {
		return nil, err
	}
	defer unmountGitDir()

	doFetch := true
	if isCommitSHA(ref) {
		// skip fetch if commit already exists
		if _, err := gitWithinDir(ctx, gitDir, "", "cat-file", "-e", ref+"^{commit}"); err == nil {
			doFetch = false
		}
	}

	if doFetch {
		// make sure no old lock files have leaked
		os.RemoveAll(filepath.Join(gitDir, "shallow.lock"))

		args := []string{"fetch"}
		if !isCommitSHA(ref) { // TODO: find a branch from ls-remote?
			args = append(args, "--depth=1", "--no-tags")
		} else {
			if _, err := os.Lstat(filepath.Join(gitDir, "shallow")); err == nil {
				args = append(args, "--unshallow")
			}
		}
		args = append(args, "origin")
		if !isCommitSHA(ref) {
			args = append(args, "--force", ref+":tags/"+ref)
			// local refs are needed so they would be advertised on next fetches. Force is used
			// in case the ref is a branch and it now points to a different commit sha
			// TODO: is there a better way to do this?
		}
		if _, err := gitWithinDir(ctx, gitDir, "", args...); err != nil {
			return nil, errors.Wrapf(err, "failed to fetch remote %s", gs.src.Remote)
		}
	}

	checkoutRef, err := gs.cache.New(ctx, nil, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref)))
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create new mutable for %s", gs.src.Remote)
	}

	defer func() {
		if retErr != nil && checkoutRef != nil {
			checkoutRef.Release(context.TODO())
		}
	}()

	mount, err := checkoutRef.Mount(ctx, false)
	if err != nil {
		return nil, err
	}
	lm := snapshot.LocalMounter(mount)
	checkoutDir, err := lm.Mount()
	if err != nil {
		return nil, err
	}
	defer func() {
		if retErr != nil && lm != nil {
			lm.Unmount()
		}
	}()

	if gs.src.KeepGitDir {
		checkoutDirGit := filepath.Join(checkoutDir, ".git")
		if err := os.MkdirAll(checkoutDir, 0711); err != nil {
			return nil, err
		}
		_, err = gitWithinDir(ctx, checkoutDirGit, "", "init")
		if err != nil {
			return nil, err
		}
		_, err = gitWithinDir(ctx, checkoutDirGit, "", "remote", "add", "origin", gitDir)
		if err != nil {
			return nil, err
		}
		pullref := ref
		if isCommitSHA(ref) {
			pullref = "refs/buildkit/" + identity.NewID()
			_, err = gitWithinDir(ctx, gitDir, "", "update-ref", pullref, ref)
			if err != nil {
				return nil, err
			}
		} else {
			pullref += ":" + pullref
		}
		_, err = gitWithinDir(ctx, checkoutDirGit, "", "fetch", "-u", "--depth=1", "origin", pullref)
		if err != nil {
			return nil, err
		}
		_, err = gitWithinDir(ctx, checkoutDirGit, checkoutDir, "checkout", "FETCH_HEAD")
		if err != nil {
			return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote)
		}
		gitDir = checkoutDirGit
	} else {
		_, err = gitWithinDir(ctx, gitDir, checkoutDir, "checkout", ref, "--", ".")
		if err != nil {
			return nil, errors.Wrapf(err, "failed to checkout remote %s", gs.src.Remote)
		}
	}

	_, err = gitWithinDir(ctx, gitDir, checkoutDir, "submodule", "update", "--init", "--recursive", "--depth=1")
	if err != nil {
		return nil, errors.Wrapf(err, "failed to update submodules for %s", gs.src.Remote)
	}

	if idmap := mount.IdentityMapping(); idmap != nil {
		u := idmap.RootPair()
		err := filepath.Walk(gitDir, func(p string, f os.FileInfo, err error) error {
			return os.Lchown(p, u.UID, u.GID)
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to remap git checkout")
		}
	}

	lm.Unmount()
	lm = nil

	snap, err := checkoutRef.Commit(ctx)
	if err != nil {
		return nil, err
	}
	checkoutRef = nil

	defer func() {
		if retErr != nil {
			snap.Release(context.TODO())
		}
	}()

	si, _ := gs.md.Get(snap.ID())
	v, err := metadata.NewValue(snapshotKey)
	v.Index = snapshotKey
	if err != nil {
		return nil, err
	}
	if err := si.Update(func(b *bolt.Bucket) error {
		return si.SetValue(b, "git-snapshot", v)
	}); err != nil {
		return nil, err
	}

	return snap, nil
}

func isCommitSHA(str string) bool {
	return validHex.MatchString(str)
}

func gitWithinDir(ctx context.Context, gitDir, workDir string, args ...string) (*bytes.Buffer, error) {
	a := []string{"--git-dir", gitDir}
	if workDir != "" {
		a = append(a, "--work-tree", workDir)
	}
	return git(ctx, workDir, append(a, args...)...)
}

func git(ctx context.Context, dir string, args ...string) (*bytes.Buffer, error) {
	for {
		stdout, stderr := logs.NewLogStreams(ctx, false)
		defer stdout.Close()
		defer stderr.Close()
		cmd := exec.Command("git", args...)
		cmd.Dir = dir // some commands like submodule require this
		buf := bytes.NewBuffer(nil)
		errbuf := bytes.NewBuffer(nil)
		cmd.Stdout = io.MultiWriter(stdout, buf)
		cmd.Stderr = io.MultiWriter(stderr, errbuf)
		// remote git commands spawn helper processes that inherit FDs and don't
		// handle parent death signal so exec.CommandContext can't be used
		err := runProcessGroup(ctx, cmd)
		if err != nil {
			if strings.Contains(errbuf.String(), "--depth") || strings.Contains(errbuf.String(), "shallow") {
				if newArgs := argsNoDepth(args); len(args) > len(newArgs) {
					args = newArgs
					continue
				}
			}
		}
		return buf, err
	}
}

func argsNoDepth(args []string) []string {
	out := make([]string, 0, len(args))
	for _, a := range args {
		if a != "--depth=1" {
			out = append(out, a)
		}
	}
	return out
}
35 vendor/github.com/moby/buildkit/source/git/gitsource_unix.go (generated, vendored, new file)
@@ -0,0 +1,35 @@
// +build !windows

package git

import (
	"context"
	"os/exec"
	"syscall"
	"time"
)

func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error {
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	if err := cmd.Start(); err != nil {
		return err
	}
	waitDone := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			syscall.Kill(-cmd.Process.Pid, syscall.SIGTERM)
			go func() {
				select {
				case <-waitDone:
				case <-time.After(10 * time.Second):
					syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)
				}
			}()
		case <-waitDone:
		}
	}()
	err := cmd.Wait()
	close(waitDone)
	return err
}
23 vendor/github.com/moby/buildkit/source/git/gitsource_windows.go (generated, vendored, new file)
@@ -0,0 +1,23 @@
// +build windows

package git

import (
	"context"
	"os/exec"
)

func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error {
	if err := cmd.Start(); err != nil {
		return err
	}
	waitDone := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			cmd.Process.Kill()
		case <-waitDone:
		}
	}()
	return cmd.Wait()
}
70 vendor/github.com/moby/buildkit/source/gitidentifier.go (generated, vendored, new file)
@@ -0,0 +1,70 @@
package source

import (
	"net/url"
	"strings"

	"github.com/pkg/errors"
)

type GitIdentifier struct {
	Remote string
	Ref string
	Subdir string
	KeepGitDir bool
}

func NewGitIdentifier(remoteURL string) (*GitIdentifier, error) {
	repo := GitIdentifier{}

	if !isGitTransport(remoteURL) {
		remoteURL = "https://" + remoteURL
	}

	var fragment string
	if strings.HasPrefix(remoteURL, "git@") {
		// git@.. is not an URL, so cannot be parsed as URL
		parts := strings.SplitN(remoteURL, "#", 2)

		repo.Remote = parts[0]
		if len(parts) == 2 {
			fragment = parts[1]
		}
		repo.Ref, repo.Subdir = getRefAndSubdir(fragment)
	} else {
		u, err := url.Parse(remoteURL)
		if err != nil {
			return nil, err
		}

		repo.Ref, repo.Subdir = getRefAndSubdir(u.Fragment)
		u.Fragment = ""
		repo.Remote = u.String()
	}
	if repo.Subdir != "" {
		return nil, errors.Errorf("subdir not supported yet")
	}
	return &repo, nil
}

func (i *GitIdentifier) ID() string {
	return "git"
}

// isGitTransport returns true if the provided str is a git transport by inspecting
// the prefix of the string for known protocols used in git.
func isGitTransport(str string) bool {
	return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://") || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@")
}

func getRefAndSubdir(fragment string) (ref string, subdir string) {
	refAndDir := strings.SplitN(fragment, ":", 2)
	ref = "master"
	if len(refAndDir[0]) != 0 {
		ref = refAndDir[0]
	}
	if len(refAndDir) > 1 && len(refAndDir[1]) != 0 {
		subdir = refAndDir[1]
	}
	return
}
500 vendor/github.com/moby/buildkit/source/http/httpsource.go (generated, vendored, new file)
@@ -0,0 +1,500 @@
package http

import (
	"context"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"io"
	"mime"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"strings"
	"time"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/locker"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/metadata"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/util/tracing"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)

type Opt struct {
	CacheAccessor cache.Accessor
	MetadataStore *metadata.Store
	Transport http.RoundTripper
}

type httpSource struct {
	md *metadata.Store
	cache cache.Accessor
	locker *locker.Locker
	transport http.RoundTripper
}

func NewSource(opt Opt) (source.Source, error) {
	transport := opt.Transport
	if transport == nil {
		transport = tracing.DefaultTransport
	}
	hs := &httpSource{
		md: opt.MetadataStore,
		cache: opt.CacheAccessor,
		locker: locker.New(),
		transport: transport,
	}
	return hs, nil
}

func (hs *httpSource) ID() string {
	return source.HttpsScheme
}

type httpSourceHandler struct {
	*httpSource
	src source.HttpIdentifier
	refID string
	cacheKey digest.Digest
	client *http.Client
}

func (hs *httpSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) {
	httpIdentifier, ok := id.(*source.HttpIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid http identifier %v", id)
	}

	sessionID := session.FromContext(ctx)

	return &httpSourceHandler{
		src: *httpIdentifier,
		httpSource: hs,
		client: &http.Client{Transport: newTransport(hs.transport, sm, sessionID)},
	}, nil
}

// urlHash is internal hash the etag is stored by that doesn't leak outside
// this package.
func (hs *httpSourceHandler) urlHash() (digest.Digest, error) {
	dt, err := json.Marshal(struct {
		Filename string
		Perm, UID, GID int
	}{
		Filename: getFileName(hs.src.URL, hs.src.Filename, nil),
		Perm: hs.src.Perm,
		UID: hs.src.UID,
		GID: hs.src.GID,
	})
	if err != nil {
		return "", err
	}
	return digest.FromBytes(dt), nil
}

func (hs *httpSourceHandler) formatCacheKey(filename string, dgst digest.Digest, lastModTime string) digest.Digest {
	dt, err := json.Marshal(struct {
		Filename string
		Perm, UID, GID int
		Checksum digest.Digest
		LastModTime string `json:",omitempty"`
	}{
		Filename: filename,
		Perm: hs.src.Perm,
		UID: hs.src.UID,
		GID: hs.src.GID,
		Checksum: dgst,
		LastModTime: lastModTime,
	})
	if err != nil {
		return dgst
	}
	return digest.FromBytes(dt)
}

func (hs *httpSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) {
	if hs.src.Checksum != "" {
		hs.cacheKey = hs.src.Checksum
		return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, nil), hs.src.Checksum, "").String(), true, nil
	}

	uh, err := hs.urlHash()
	if err != nil {
		return "", false, nil
	}

	// look up metadata(previously stored headers) for that URL
	sis, err := hs.md.Search(uh.String())
	if err != nil {
		return "", false, errors.Wrapf(err, "failed to search metadata for %s", uh)
	}

	req, err := http.NewRequest("GET", hs.src.URL, nil)
	if err != nil {
		return "", false, err
	}
	req = req.WithContext(ctx)
	m := map[string]*metadata.StorageItem{}

	// If we request a single ETag in 'If-None-Match', some servers omit the
	// unambiguous ETag in their response.
	// See: https://github.com/moby/buildkit/issues/905
	var onlyETag string

	if len(sis) > 0 {
		for _, si := range sis {
			// if metaDigest := getMetaDigest(si); metaDigest == hs.formatCacheKey("") {
			if etag := getETag(si); etag != "" {
				if dgst := getChecksum(si); dgst != "" {
					m[etag] = si
				}
			}
			// }
		}
		if len(m) > 0 {
			etags := make([]string, 0, len(m))
			for t := range m {
				etags = append(etags, t)
			}
			req.Header.Add("If-None-Match", strings.Join(etags, ", "))

			if len(etags) == 1 {
				onlyETag = etags[0]
			}
		}
	}

	// Some servers seem to have trouble supporting If-None-Match properly even
	// though they return ETag-s. So first, optionally try a HEAD request with
	// manual ETag value comparison.
	if len(m) > 0 {
		req.Method = "HEAD"
		resp, err := hs.client.Do(req)
		if err == nil {
			if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotModified {
				respETag := resp.Header.Get("ETag")

				// If a 304 is returned without an ETag and we had only sent one ETag,
				// the response refers to the ETag we asked about.
				if respETag == "" && onlyETag != "" && resp.StatusCode == http.StatusNotModified {
					respETag = onlyETag
				}
				si, ok := m[respETag]
				if ok {
					hs.refID = si.ID()
					dgst := getChecksum(si)
					if dgst != "" {
						modTime := getModTime(si)
						resp.Body.Close()
						return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), true, nil
					}
				}
			}
			resp.Body.Close()
		}
		req.Method = "GET"
	}

	resp, err := hs.client.Do(req)
	if err != nil {
		return "", false, err
	}
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return "", false, errors.Errorf("invalid response status %d", resp.StatusCode)
	}
	if resp.StatusCode == http.StatusNotModified {
		respETag := resp.Header.Get("ETag")
		if respETag == "" && onlyETag != "" {
			respETag = onlyETag

			// Set the missing ETag header on the response so that it's available
			// to .save()
			resp.Header.Set("ETag", onlyETag)
		}
		si, ok := m[respETag]
		if !ok {
			return "", false, errors.Errorf("invalid not-modified ETag: %v", respETag)
		}
		hs.refID = si.ID()
		dgst := getChecksum(si)
		if dgst == "" {
			return "", false, errors.Errorf("invalid metadata change")
		}
		modTime := getModTime(si)
		resp.Body.Close()
		return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), true, nil
	}

	ref, dgst, err := hs.save(ctx, resp)
	if err != nil {
		return "", false, err
	}
	ref.Release(context.TODO())

	hs.cacheKey = dgst

	return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, resp.Header.Get("Last-Modified")).String(), true, nil
}

func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response) (ref cache.ImmutableRef, dgst digest.Digest, retErr error) {
	newRef, err := hs.cache.New(ctx, nil, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("http url %s", hs.src.URL)))
	if err != nil {
		return nil, "", err
	}

	releaseRef := func() {
		newRef.Release(context.TODO())
	}

	defer func() {
		if retErr != nil && newRef != nil {
			releaseRef()
		}
	}()

	mount, err := newRef.Mount(ctx, false)
	if err != nil {
		return nil, "", err
	}

	lm := snapshot.LocalMounter(mount)
	dir, err := lm.Mount()
	if err != nil {
		return nil, "", err
	}

	defer func() {
		if retErr != nil && lm != nil {
			lm.Unmount()
		}
	}()
	perm := 0600
	if hs.src.Perm != 0 {
		perm = hs.src.Perm
	}
	fp := filepath.Join(dir, getFileName(hs.src.URL, hs.src.Filename, resp))

	f, err := os.OpenFile(fp, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm))
	if err != nil {
		return nil, "", err
	}
	defer func() {
		if f != nil {
			f.Close()
		}
	}()

	h := sha256.New()

	if _, err := io.Copy(io.MultiWriter(f, h), resp.Body); err != nil {
		return nil, "", err
	}

	if err := f.Close(); err != nil {
		return nil, "", err
	}
	f = nil

	uid := hs.src.UID
	gid := hs.src.GID
	if idmap := mount.IdentityMapping(); idmap != nil {
		identity, err := idmap.ToHost(idtools.Identity{
			UID: int(uid),
			GID: int(gid),
		})
		if err != nil {
			return nil, "", err
		}
		uid = identity.UID
		gid = identity.GID
	}

	if gid != 0 || uid != 0 {
		if err := os.Chown(fp, uid, gid); err != nil {
			return nil, "", err
		}
	}

	mTime := time.Unix(0, 0)
	lastMod := resp.Header.Get("Last-Modified")
	if lastMod != "" {
		if parsedMTime, err := http.ParseTime(lastMod); err == nil {
			mTime = parsedMTime
		}
	}

	if err := os.Chtimes(fp, mTime, mTime); err != nil {
		return nil, "", err
	}

	lm.Unmount()
	lm = nil

	ref, err = newRef.Commit(ctx)
	if err != nil {
		return nil, "", err
	}
	newRef = nil

	hs.refID = ref.ID()
	dgst = digest.NewDigest(digest.SHA256, h)

	if respETag := resp.Header.Get("ETag"); respETag != "" {
		setETag(ref.Metadata(), respETag)
		uh, err := hs.urlHash()
		if err != nil {
			return nil, "", err
		}
		setChecksum(ref.Metadata(), uh.String(), dgst)
		if err := ref.Metadata().Commit(); err != nil {
			return nil, "", err
		}
	}

	if modTime := resp.Header.Get("Last-Modified"); modTime != "" {
		setModTime(ref.Metadata(), modTime)
	}

	return ref, dgst, nil
}

func (hs *httpSourceHandler) Snapshot(ctx context.Context) (cache.ImmutableRef, error) {
	if hs.refID != "" {
		ref, err := hs.cache.Get(ctx, hs.refID)
		if err == nil {
			return ref, nil
		}
	}

	req, err := http.NewRequest("GET", hs.src.URL, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx)

	resp, err := hs.client.Do(req)
	if err != nil {
		return nil, err
	}

	ref, dgst, err := hs.save(ctx, resp)
	if err != nil {
		return nil, err
	}
	if dgst != hs.cacheKey {
		ref.Release(context.TODO())
		return nil, errors.Errorf("digest mismatch %s: %s", dgst, hs.cacheKey)
	}

	return ref, nil
}

const keyETag = "etag"
const keyChecksum = "http.checksum"
const keyModTime = "http.modtime"

func setETag(si *metadata.StorageItem, s string) error {
	v, err := metadata.NewValue(s)
	if err != nil {
		return errors.Wrap(err, "failed to create etag value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyETag, v)
	})
	return nil
}

func getETag(si *metadata.StorageItem) string {
	v := si.Get(keyETag)
	if v == nil {
		return ""
	}
	var etag string
	if err := v.Unmarshal(&etag); err != nil {
		return ""
	}
	return etag
}

func setModTime(si *metadata.StorageItem, s string) error {
	v, err := metadata.NewValue(s)
	if err != nil {
		return errors.Wrap(err, "failed to create modtime value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyModTime, v)
	})
	return nil
}

func getModTime(si *metadata.StorageItem) string {
	v := si.Get(keyModTime)
	if v == nil {
		return ""
	}
	var modTime string
	if err := v.Unmarshal(&modTime); err != nil {
		return ""
	}
	return modTime
}

func setChecksum(si *metadata.StorageItem, url string, d digest.Digest) error {
	v, err := metadata.NewValue(d)
	if err != nil {
		return errors.Wrap(err, "failed to create checksum value")
	}
	v.Index = url
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyChecksum, v)
	})
	return nil
}

func getChecksum(si *metadata.StorageItem) digest.Digest {
	v := si.Get(keyChecksum)
	if v == nil {
		return ""
	}
	var dgstStr string
	if err := v.Unmarshal(&dgstStr); err != nil {
		return ""
	}
	dgst, err := digest.Parse(dgstStr)
	if err != nil {
		return ""
	}
	return dgst
}

func getFileName(urlStr, manualFilename string, resp *http.Response) string {
	if manualFilename != "" {
		return manualFilename
	}
	if resp != nil {
		if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" {
			if _, params, err := mime.ParseMediaType(contentDisposition); err == nil {
				if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") {
					if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" {
						return filename
					}
				}
			}
		}
	}
	u, err := url.Parse(urlStr)
	if err == nil {
		if base := path.Base(u.Path); base != "." && base != "/" {
			return base
		}
	}
	return "download"
}
60 vendor/github.com/moby/buildkit/source/http/transport.go (generated, vendored, new file)
@@ -0,0 +1,60 @@
package http

import (
	"context"
	"io"
	"net/http"
	"time"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/upload"
	"github.com/pkg/errors"
)

func newTransport(rt http.RoundTripper, sm *session.Manager, id string) http.RoundTripper {
	return &sessionHandler{rt: rt, sm: sm, id: id}
}

type sessionHandler struct {
	sm *session.Manager
	rt http.RoundTripper
	id string
}

func (h *sessionHandler) RoundTrip(req *http.Request) (*http.Response, error) {
	if req.URL.Host != "buildkit-session" {
		return h.rt.RoundTrip(req)
	}

	if req.Method != "GET" {
		return nil, errors.Errorf("invalid request")
	}

	timeoutCtx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
	defer cancel()

	caller, err := h.sm.Get(timeoutCtx, h.id)
	if err != nil {
		return nil, err
	}

	up, err := upload.New(context.TODO(), caller, req.URL)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()
	go func() {
		_, err := up.WriteTo(pw)
		pw.CloseWithError(err)
	}()

	resp := &http.Response{
		Status: "200 OK",
		StatusCode: 200,
		Body: pr,
		ContentLength: -1,
	}

	return resp, nil
}
275 vendor/github.com/moby/buildkit/source/identifier.go (generated, vendored, new file)
@@ -0,0 +1,275 @@
package source

import (
	"encoding/json"
	"strconv"
	"strings"

	"github.com/containerd/containerd/reference"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/solver/pb"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

var (
	errInvalid = errors.New("invalid")
	errNotFound = errors.New("not found")
)

type ResolveMode int

const (
	ResolveModeDefault ResolveMode = iota
	ResolveModeForcePull
	ResolveModePreferLocal
)

const (
	DockerImageScheme = "docker-image"
	GitScheme = "git"
	LocalScheme = "local"
	HttpScheme = "http"
	HttpsScheme = "https"
)

type Identifier interface {
	ID() string // until sources are in process this string comparison could be avoided
}

func FromString(s string) (Identifier, error) {
	// TODO: improve this
	parts := strings.SplitN(s, "://", 2)
	if len(parts) != 2 {
		return nil, errors.Wrapf(errInvalid, "failed to parse %s", s)
	}

	switch parts[0] {
	case DockerImageScheme:
		return NewImageIdentifier(parts[1])
	case GitScheme:
		return NewGitIdentifier(parts[1])
	case LocalScheme:
		return NewLocalIdentifier(parts[1])
	case HttpsScheme:
		return NewHttpIdentifier(parts[1], true)
	case HttpScheme:
		return NewHttpIdentifier(parts[1], false)
	default:
		return nil, errors.Wrapf(errNotFound, "unknown schema %s", parts[0])
	}
}

func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) {
	id, err := FromString(op.Source.Identifier)
	if err != nil {
		return nil, err
	}

	if id, ok := id.(*ImageIdentifier); ok {
		if platform != nil {
			id.Platform = &specs.Platform{
				OS: platform.OS,
				Architecture: platform.Architecture,
				Variant: platform.Variant,
				OSVersion: platform.OSVersion,
				OSFeatures: platform.OSFeatures,
			}
		}
		for k, v := range op.Source.Attrs {
			switch k {
			case pb.AttrImageResolveMode:
				rm, err := ParseImageResolveMode(v)
				if err != nil {
					return nil, err
				}
				id.ResolveMode = rm
			case pb.AttrImageRecordType:
				rt, err := parseImageRecordType(v)
				if err != nil {
					return nil, err
				}
				id.RecordType = rt
			}
		}
	}
	if id, ok := id.(*GitIdentifier); ok {
		for k, v := range op.Source.Attrs {
			switch k {
			case pb.AttrKeepGitDir:
				if v == "true" {
					id.KeepGitDir = true
				}
			case pb.AttrFullRemoteURL:
				id.Remote = v
			}
		}
	}
	if id, ok := id.(*LocalIdentifier); ok {
		for k, v := range op.Source.Attrs {
			switch k {
			case pb.AttrLocalSessionID:
				id.SessionID = v
				if p := strings.SplitN(v, ":", 2); len(p) == 2 {
					id.Name = p[0] + "-" + id.Name
					id.SessionID = p[1]
				}
			case pb.AttrIncludePatterns:
				var patterns []string
				if err := json.Unmarshal([]byte(v), &patterns); err != nil {
					return nil, err
				}
				id.IncludePatterns = patterns
			case pb.AttrExcludePatterns:
				var patterns []string
				if err := json.Unmarshal([]byte(v), &patterns); err != nil {
					return nil, err
				}
				id.ExcludePatterns = patterns
			case pb.AttrFollowPaths:
				var paths []string
				if err := json.Unmarshal([]byte(v), &paths); err != nil {
					return nil, err
				}
				id.FollowPaths = paths
			case pb.AttrSharedKeyHint:
				id.SharedKeyHint = v
			}
		}
	}
	if id, ok := id.(*HttpIdentifier); ok {
		for k, v := range op.Source.Attrs {
			switch k {
			case pb.AttrHTTPChecksum:
				dgst, err := digest.Parse(v)
				if err != nil {
					return nil, err
				}
				id.Checksum = dgst
			case pb.AttrHTTPFilename:
				id.Filename = v
			case pb.AttrHTTPPerm:
				i, err := strconv.ParseInt(v, 0, 64)
				if err != nil {
					return nil, err
				}
				id.Perm = int(i)
			case pb.AttrHTTPUID:
				i, err := strconv.ParseInt(v, 0, 64)
				if err != nil {
					return nil, err
				}
				id.UID = int(i)
			case pb.AttrHTTPGID:
				i, err := strconv.ParseInt(v, 0, 64)
				if err != nil {
					return nil, err
				}
				id.GID = int(i)
			}
		}
	}
	return id, nil
}

type ImageIdentifier struct {
	Reference reference.Spec
	Platform *specs.Platform
	ResolveMode ResolveMode
	RecordType client.UsageRecordType
}

func NewImageIdentifier(str string) (*ImageIdentifier, error) {
	ref, err := reference.Parse(str)
	if err != nil {
		return nil, errors.WithStack(err)
	}

	if ref.Object == "" {
		return nil, errors.WithStack(reference.ErrObjectRequired)
	}
	return &ImageIdentifier{Reference: ref}, nil
}

func (_ *ImageIdentifier) ID() string {
	return DockerImageScheme
}

type LocalIdentifier struct {
	Name string
	SessionID string
	IncludePatterns []string
	ExcludePatterns []string
	FollowPaths []string
	SharedKeyHint string
}

func NewLocalIdentifier(str string) (*LocalIdentifier, error) {
	return &LocalIdentifier{Name: str}, nil
}

func (*LocalIdentifier) ID() string {
	return LocalScheme
}

func NewHttpIdentifier(str string, tls bool) (*HttpIdentifier, error) {
	proto := "https://"
	if !tls {
		proto = "http://"
	}
	return &HttpIdentifier{TLS: tls, URL: proto + str}, nil
}

type HttpIdentifier struct {
	TLS bool
	URL string
	Checksum digest.Digest
	Filename string
	Perm int
	UID int
	GID int
}

func (_ *HttpIdentifier) ID() string {
	return HttpsScheme
}

func (r ResolveMode) String() string {
	switch r {
	case ResolveModeDefault:
		return pb.AttrImageResolveModeDefault
	case ResolveModeForcePull:
		return pb.AttrImageResolveModeForcePull
	case ResolveModePreferLocal:
		return pb.AttrImageResolveModePreferLocal
	default:
		return ""
	}
}

func ParseImageResolveMode(v string) (ResolveMode, error) {
	switch v {
	case pb.AttrImageResolveModeDefault, "":
		return ResolveModeDefault, nil
	case pb.AttrImageResolveModeForcePull:
		return ResolveModeForcePull, nil
	case pb.AttrImageResolveModePreferLocal:
		return ResolveModePreferLocal, nil
	default:
		return 0, errors.Errorf("invalid resolvemode: %s", v)
	}
}

func parseImageRecordType(v string) (client.UsageRecordType, error) {
	switch client.UsageRecordType(v) {
	case "", client.UsageRecordTypeRegular:
		return client.UsageRecordTypeRegular, nil
	case client.UsageRecordTypeInternal:
		return client.UsageRecordTypeInternal, nil
	case client.UsageRecordTypeFrontend:
		return client.UsageRecordTypeFrontend, nil
	default:
		return "", errors.Errorf("invalid record type %s", v)
	}
}
279 vendor/github.com/moby/buildkit/source/local/local.go (generated, vendored, new file)
@@ -0,0 +1,279 @@
package local

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/docker/docker/pkg/idtools"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/contenthash"
	"github.com/moby/buildkit/cache/metadata"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/source"
	"github.com/moby/buildkit/util/progress"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/tonistiigi/fsutil"
	fstypes "github.com/tonistiigi/fsutil/types"
	bolt "go.etcd.io/bbolt"
	"golang.org/x/time/rate"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

const keySharedKey = "local.sharedKey"

type Opt struct {
	CacheAccessor cache.Accessor
	MetadataStore *metadata.Store
}

func NewSource(opt Opt) (source.Source, error) {
	ls := &localSource{
		cm: opt.CacheAccessor,
		md: opt.MetadataStore,
	}
	return ls, nil
}

type localSource struct {
	cm cache.Accessor
	md *metadata.Store
}

func (ls *localSource) ID() string {
	return source.LocalScheme
}

func (ls *localSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager) (source.SourceInstance, error) {
	localIdentifier, ok := id.(*source.LocalIdentifier)
	if !ok {
		return nil, errors.Errorf("invalid local identifier %v", id)
	}

	return &localSourceHandler{
		src: *localIdentifier,
		sm: sm,
		localSource: ls,
	}, nil
}

type localSourceHandler struct {
	src source.LocalIdentifier
	sm *session.Manager
	*localSource
}

func (ls *localSourceHandler) CacheKey(ctx context.Context, index int) (string, bool, error) {
	sessionID := ls.src.SessionID

	if sessionID == "" {
		id := session.FromContext(ctx)
		if id == "" {
			return "", false, errors.New("could not access local files without session")
		}
		sessionID = id
	}
	dt, err := json.Marshal(struct {
		SessionID string
		IncludePatterns []string
		ExcludePatterns []string
		FollowPaths []string
	}{SessionID: sessionID, IncludePatterns: ls.src.IncludePatterns, ExcludePatterns: ls.src.ExcludePatterns, FollowPaths: ls.src.FollowPaths})
	if err != nil {
		return "", false, err
	}
	return "session:" + ls.src.Name + ":" + digest.FromBytes(dt).String(), true, nil
}

func (ls *localSourceHandler) Snapshot(ctx context.Context) (out cache.ImmutableRef, retErr error) {

	id := session.FromContext(ctx)
	if id == "" {
		return nil, errors.New("could not access local files without session")
	}

	timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()

	caller, err := ls.sm.Get(timeoutCtx, id)
	if err != nil {
		return nil, err
	}

	sharedKey := keySharedKey + ":" + ls.src.Name + ":" + ls.src.SharedKeyHint + ":" + caller.SharedKey() // TODO: replace caller.SharedKey() with source based hint from client(absolute-path+nodeid)

	var mutable cache.MutableRef
	sis, err := ls.md.Search(sharedKey)
	if err != nil {
		return nil, err
	}
	for _, si := range sis {
		if m, err := ls.cm.GetMutable(ctx, si.ID()); err == nil {
			logrus.Debugf("reusing ref for local: %s", m.ID())
			mutable = m
			break
		}
	}

	if mutable == nil {
		m, err := ls.cm.New(ctx, nil, cache.CachePolicyRetain, cache.WithRecordType(client.UsageRecordTypeLocalSource), cache.WithDescription(fmt.Sprintf("local source for %s", ls.src.Name)))
		if err != nil {
			return nil, err
		}
		mutable = m
		logrus.Debugf("new ref for local: %s", mutable.ID())
	}

	defer func() {
		if retErr != nil && mutable != nil {
			// on error remove the record as checksum update is in undefined state
			cache.CachePolicyDefault(mutable)
			if err := mutable.Metadata().Commit(); err != nil {
				logrus.Errorf("failed to reset mutable cachepolicy: %v", err)
			}
			contenthash.ClearCacheContext(mutable.Metadata())
			go mutable.Release(context.TODO())
		}
	}()

	mount, err := mutable.Mount(ctx, false)
	if err != nil {
		return nil, err
	}

	lm := snapshot.LocalMounter(mount)

	dest, err := lm.Mount()
	if err != nil {
		return nil, err
	}

	defer func() {
		if retErr != nil && lm != nil {
			lm.Unmount()
		}
	}()

	cc, err := contenthash.GetCacheContext(ctx, mutable.Metadata(), mount.IdentityMapping())
	if err != nil {
		return nil, err
	}

	opt := filesync.FSSendRequestOpt{
		Name: ls.src.Name,
		IncludePatterns: ls.src.IncludePatterns,
		ExcludePatterns: ls.src.ExcludePatterns,
		FollowPaths: ls.src.FollowPaths,
		OverrideExcludes: false,
		DestDir: dest,
		CacheUpdater: &cacheUpdater{cc, mount.IdentityMapping()},
		ProgressCb: newProgressHandler(ctx, "transferring "+ls.src.Name+":"),
	}

	if idmap := mount.IdentityMapping(); idmap != nil {
		opt.Filter = func(p string, stat *fstypes.Stat) bool {
			identity, err := idmap.ToHost(idtools.Identity{
				UID: int(stat.Uid),
				GID: int(stat.Gid),
			})
			if err != nil {
				return false
			}
			stat.Uid = uint32(identity.UID)
			stat.Gid = uint32(identity.GID)
			return true
		}
	}

	if err := filesync.FSSync(ctx, caller, opt); err != nil {
		if status.Code(err) == codes.NotFound {
			return nil, errors.Errorf("local source %s not enabled from the client", ls.src.Name)
		}
		return nil, err
	}

	if err := lm.Unmount(); err != nil {
		return nil, err
	}
	lm = nil

	if err := contenthash.SetCacheContext(ctx, mutable.Metadata(), cc); err != nil {
		return nil, err
	}

	// skip storing snapshot by the shared key if it already exists
	skipStoreSharedKey := false
	si, _ := ls.md.Get(mutable.ID())
	if v := si.Get(keySharedKey); v != nil {
		var str string
		if err := v.Unmarshal(&str); err != nil {
			return nil, err
		}
		skipStoreSharedKey = str == sharedKey
	}
	if !skipStoreSharedKey {
		v, err := metadata.NewValue(sharedKey)
		if err != nil {
			return nil, err
		}
		v.Index = sharedKey
		if err := si.Update(func(b *bolt.Bucket) error {
			return si.SetValue(b, sharedKey, v)
		}); err != nil {
			return nil, err
		}
		logrus.Debugf("saved %s as %s", mutable.ID(), sharedKey)
	}

	snap, err := mutable.Commit(ctx)
	if err != nil {
		return nil, err
	}

	mutable = nil // avoid deferred cleanup

	return snap, nil
}

func newProgressHandler(ctx context.Context, id string) func(int, bool) {
	limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
	pw, _, _ := progress.FromContext(ctx)
	now := time.Now()
	st := progress.Status{
		Started: &now,
		Action: "transferring",
	}
	pw.Write(id, st)
	return func(s int, last bool) {
		if last || limiter.Allow() {
			st.Current = s
			if last {
				now := time.Now()
				st.Completed = &now
			}
			pw.Write(id, st)
			if last {
				pw.Close()
			}
		}
	}
}

type cacheUpdater struct {
	contenthash.CacheContext
	idmap *idtools.IdentityMapping
}

func (cu *cacheUpdater) MarkSupported(bool) {
}

func (cu *cacheUpdater) ContentHasher() fsutil.ContentHasher {
	return contenthash.NewFromStat
}
49 vendor/github.com/moby/buildkit/source/manager.go (generated, vendored, new file)
@@ -0,0 +1,49 @@
package source

import (
	"context"
	"sync"

	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/session"
	"github.com/pkg/errors"
)

type Source interface {
	ID() string
	Resolve(ctx context.Context, id Identifier, sm *session.Manager) (SourceInstance, error)
}

type SourceInstance interface {
	CacheKey(ctx context.Context, index int) (string, bool, error)
	Snapshot(ctx context.Context) (cache.ImmutableRef, error)
}

type Manager struct {
	mu sync.Mutex
	sources map[string]Source
}

func NewManager() (*Manager, error) {
	return &Manager{
		sources: make(map[string]Source),
	}, nil
}

func (sm *Manager) Register(src Source) {
	sm.mu.Lock()
	sm.sources[src.ID()] = src
	sm.mu.Unlock()
}

func (sm *Manager) Resolve(ctx context.Context, id Identifier, sessM *session.Manager) (SourceInstance, error) {
	sm.mu.Lock()
	src, ok := sm.sources[id.ID()]
	sm.mu.Unlock()

	if !ok {
		return nil, errors.Errorf("no handler for %s", id.ID())
	}

	return src.Resolve(ctx, id, sessM)
}