Mirror of https://github.com/mudler/luet.git (synced 2025-09-11 03:59:35 +00:00)
Support priv/unpriv image extraction
Optionally add back privileged extraction, which can be enabled with LUET_PRIVILEGED_EXTRACT=true.

Signed-off-by: Ettore Di Giacinto <mudler@sabayon.org>
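Since the switch is a plain environment variable, the gating logic can be illustrated in a few lines of Go. This is a hypothetical sketch of how a caller might choose between the two extraction paths; the names below are illustrative and are not taken from the luet sources in this commit:

	package main

	import (
		"fmt"
		"os"
		"strconv"
	)

	// privilegedExtractionEnabled reports whether LUET_PRIVILEGED_EXTRACT
	// is set to a true-ish value ("true", "1", ...).
	func privilegedExtractionEnabled() bool {
		v, err := strconv.ParseBool(os.Getenv("LUET_PRIVILEGED_EXTRACT"))
		return err == nil && v
	}

	func main() {
		if privilegedExtractionEnabled() {
			fmt.Println("extracting image with privileged code path")
		} else {
			fmt.Println("extracting image with unprivileged code path")
		}
	}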
vendor/github.com/moby/buildkit/cache/blobs/blobs.go (194 lines, generated, vendored, new file)
@@ -0,0 +1,194 @@
package blobs

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/mount"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/winlayers"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
)

var g flightcontrol.Group

const containerdUncompressed = "containerd.io/uncompressed"

type DiffPair struct {
	DiffID  digest.Digest
	Blobsum digest.Digest
}

type CompareWithParent interface {
	CompareWithParent(ctx context.Context, ref string, opts ...diff.Opt) (ocispec.Descriptor, error)
}

var ErrNoBlobs = errors.Errorf("no blobs for snapshot")

// GetDiffPairs returns the DiffID/Blobsum pairs for a given reference and saves them.
// Caller must hold a lease when calling this function.
func GetDiffPairs(ctx context.Context, contentStore content.Store, differ diff.Comparer, ref cache.ImmutableRef, createBlobs bool, compression CompressionType) ([]DiffPair, error) {
	if ref == nil {
		return nil, nil
	}

	if _, ok := leases.FromContext(ctx); !ok {
		return nil, errors.Errorf("missing lease requirement for GetDiffPairs")
	}

	if err := ref.Finalize(ctx, true); err != nil {
		return nil, err
	}

	if isTypeWindows(ref) {
		ctx = winlayers.UseWindowsLayerMode(ctx)
	}

	return getDiffPairs(ctx, contentStore, differ, ref, createBlobs, compression)
}

func getDiffPairs(ctx context.Context, contentStore content.Store, differ diff.Comparer, ref cache.ImmutableRef, createBlobs bool, compression CompressionType) ([]DiffPair, error) {
	if ref == nil {
		return nil, nil
	}

	baseCtx := ctx
	eg, ctx := errgroup.WithContext(ctx)
	var diffPairs []DiffPair
	var currentDescr ocispec.Descriptor
	parent := ref.Parent()
	if parent != nil {
		defer parent.Release(context.TODO())
		eg.Go(func() error {
			dp, err := getDiffPairs(ctx, contentStore, differ, parent, createBlobs, compression)
			if err != nil {
				return err
			}
			diffPairs = dp
			return nil
		})
	}
	eg.Go(func() error {
		dp, err := g.Do(ctx, ref.ID(), func(ctx context.Context) (interface{}, error) {
			refInfo := ref.Info()
			if refInfo.Blob != "" {
				return nil, nil
			} else if !createBlobs {
				return nil, errors.WithStack(ErrNoBlobs)
			}

			var mediaType string
			var descr ocispec.Descriptor
			var err error

			switch compression {
			case Uncompressed:
				mediaType = ocispec.MediaTypeImageLayer
			case Gzip:
				mediaType = ocispec.MediaTypeImageLayerGzip
			default:
				return nil, errors.Errorf("unknown layer compression type")
			}

			if pc, ok := differ.(CompareWithParent); ok {
				descr, err = pc.CompareWithParent(ctx, ref.ID(), diff.WithMediaType(mediaType))
				if err != nil {
					return nil, err
				}
			}
			if descr.Digest == "" {
				// reference needs to be committed
				parent := ref.Parent()
				var lower []mount.Mount
				var release func() error
				if parent != nil {
					defer parent.Release(context.TODO())
					m, err := parent.Mount(ctx, true)
					if err != nil {
						return nil, err
					}
					lower, release, err = m.Mount()
					if err != nil {
						return nil, err
					}
					if release != nil {
						defer release()
					}
				}
				m, err := ref.Mount(ctx, true)
				if err != nil {
					return nil, err
				}
				upper, release, err := m.Mount()
				if err != nil {
					return nil, err
				}
				if release != nil {
					defer release()
				}
				descr, err = differ.Compare(ctx, lower, upper,
					diff.WithMediaType(mediaType),
					diff.WithReference(ref.ID()),
				)
				if err != nil {
					return nil, err
				}
			}

			if descr.Annotations == nil {
				descr.Annotations = map[string]string{}
			}

			info, err := contentStore.Info(ctx, descr.Digest)
			if err != nil {
				return nil, err
			}

			if diffID, ok := info.Labels[containerdUncompressed]; ok {
				descr.Annotations[containerdUncompressed] = diffID
			} else if compression == Uncompressed {
				descr.Annotations[containerdUncompressed] = descr.Digest.String()
			} else {
				return nil, errors.Errorf("unknown layer compression type")
			}
			return descr, nil

		})
		if err != nil {
			return err
		}

		if dp != nil {
			currentDescr = dp.(ocispec.Descriptor)
		}
		return nil
	})
	err := eg.Wait()
	if err != nil {
		return nil, err
	}
	if currentDescr.Digest != "" {
		if err := ref.SetBlob(baseCtx, currentDescr); err != nil {
			return nil, err
		}
	}
	refInfo := ref.Info()
	return append(diffPairs, DiffPair{DiffID: refInfo.DiffID, Blobsum: refInfo.Blob}), nil
}

func isTypeWindows(ref cache.ImmutableRef) bool {
	if cache.GetLayerType(ref) == "windows" {
		return true
	}
	if parent := ref.Parent(); parent != nil {
		defer parent.Release(context.TODO())
		return isTypeWindows(parent)
	}
	return false
}
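A note for readers of GetDiffPairs above: it fails fast unless a containerd lease is already attached to the context, so the expected calling pattern creates one first. A minimal sketch, assuming buildkit's util/leaseutil helper and an already-constructed lease manager lm are available (the helper's exact signature is an assumption here; cs, d, and ref stand for a content store, differ, and immutable ref):

	ctx, done, err := leaseutil.WithLease(ctx, lm) // assumed helper; attaches a lease to ctx
	if err != nil {
		return err
	}
	defer done(context.TODO())

	pairs, err := blobs.GetDiffPairs(ctx, cs, d, ref, true, blobs.DefaultCompression)
	if err != nil {
		return err
	}
	for _, p := range pairs {
		fmt.Printf("diffID=%s blobsum=%s\n", p.DiffID, p.Blobsum)
	}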
vendor/github.com/moby/buildkit/cache/blobs/compression.go (122 lines, generated, vendored, new file)
@@ -0,0 +1,122 @@
package blobs

import (
	"bytes"
	"context"
	"io"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/moby/buildkit/cache"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// CompressionType represents compression type for blob data.
type CompressionType int

const (
	// Uncompressed indicates no compression.
	Uncompressed CompressionType = iota

	// Gzip is used for blob data.
	Gzip

	// UnknownCompression means not supported yet.
	UnknownCompression CompressionType = -1
)

var DefaultCompression = Gzip

func (ct CompressionType) String() string {
	switch ct {
	case Uncompressed:
		return "uncompressed"
	case Gzip:
		return "gzip"
	default:
		return "unknown"
	}
}

// DetectLayerMediaType returns the media type from existing blob data.
func DetectLayerMediaType(ctx context.Context, cs content.Store, id digest.Digest, oci bool) (string, error) {
	ra, err := cs.ReaderAt(ctx, ocispec.Descriptor{Digest: id})
	if err != nil {
		return "", err
	}
	defer ra.Close()

	ct, err := detectCompressionType(content.NewReader(ra))
	if err != nil {
		return "", err
	}

	switch ct {
	case Uncompressed:
		if oci {
			return ocispec.MediaTypeImageLayer, nil
		} else {
			return images.MediaTypeDockerSchema2Layer, nil
		}
	case Gzip:
		if oci {
			return ocispec.MediaTypeImageLayerGzip, nil
		} else {
			return images.MediaTypeDockerSchema2LayerGzip, nil
		}
	default:
		return "", errors.Errorf("failed to detect layer %v compression type", id)
	}
}

// detectCompressionType detects compression type from real blob data.
func detectCompressionType(cr io.Reader) (CompressionType, error) {
	var buf [10]byte
	var n int
	var err error

	if n, err = cr.Read(buf[:]); err != nil && err != io.EOF {
		// Note: we'll ignore any io.EOF error because there are some
		// odd cases where the layer.tar file will be empty (zero bytes)
		// and we'll just treat it as a non-compressed stream and that
		// means just create an empty layer.
		//
		// See issue docker/docker#18170
		return UnknownCompression, err
	}

	for c, m := range map[CompressionType][]byte{
		Gzip: {0x1F, 0x8B, 0x08},
	} {
		if n < len(m) {
			continue
		}
		if bytes.Equal(m, buf[:len(m)]) {
			return c, nil
		}
	}
	return Uncompressed, nil
}

// GetMediaTypeForLayers retrieves media type for layer from ref information.
func GetMediaTypeForLayers(diffPairs []DiffPair, ref cache.ImmutableRef) []string {
	tref := ref

	layerTypes := make([]string, 0, len(diffPairs))
	for _, dp := range diffPairs {
		if tref == nil {
			return nil
		}

		info := tref.Info()
		if !(info.DiffID == dp.DiffID && info.Blob == dp.Blobsum) {
			return nil
		}

		layerTypes = append(layerTypes, info.MediaType)
		tref = tref.Parent()
	}
	return layerTypes
}
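The compression detector above relies on the fact that every gzip stream begins with the magic bytes 0x1F 0x8B followed by the deflate method byte 0x08. A self-contained demonstration of that invariant (a standalone re-implementation of the check for illustration, not the vendored function itself):

	package main

	import (
		"bytes"
		"compress/gzip"
		"fmt"
	)

	func main() {
		// Produce a real gzip stream and confirm it carries the magic prefix.
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		_, _ = zw.Write([]byte("hello"))
		_ = zw.Close()

		magic := []byte{0x1F, 0x8B, 0x08}
		fmt.Println(bytes.HasPrefix(buf.Bytes(), magic))              // true
		fmt.Println(bytes.HasPrefix([]byte("plain tar data"), magic)) // false
	}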
vendor/github.com/moby/buildkit/cache/contenthash/checksum.go (909 lines, generated, vendored, new file)
@@ -0,0 +1,909 @@
package contenthash

import (
	"bytes"
	"context"
	"crypto/sha256"
	"io"
	"os"
	"path"
	"path/filepath"
	"sync"

	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/locker"
	iradix "github.com/hashicorp/go-immutable-radix"
	"github.com/hashicorp/golang-lru/simplelru"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/metadata"
	"github.com/moby/buildkit/snapshot"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/tonistiigi/fsutil"
	fstypes "github.com/tonistiigi/fsutil/types"
)

var errNotFound = errors.Errorf("not found")

var defaultManager *cacheManager
var defaultManagerOnce sync.Once

const keyContentHash = "buildkit.contenthash.v0"

func getDefaultManager() *cacheManager {
	defaultManagerOnce.Do(func() {
		lru, _ := simplelru.NewLRU(20, nil) // error is impossible on positive size
		defaultManager = &cacheManager{lru: lru, locker: locker.New()}
	})
	return defaultManager
}

// Layout in the radix tree: Every path is saved by cleaned absolute unix path.
// Directories have 2 records, one contains digest for directory header, other
// the recursive digest for directory contents. "/dir/" is the record for
// header, "/dir" is for contents. For the root node "" (empty string) is the
// key for root, "/" for the root header

func Checksum(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool) (digest.Digest, error) {
	return getDefaultManager().Checksum(ctx, ref, path, followLinks)
}

func ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool) (digest.Digest, error) {
	return getDefaultManager().ChecksumWildcard(ctx, ref, path, followLinks)
}

func GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) {
	return getDefaultManager().GetCacheContext(ctx, md, idmap)
}

func SetCacheContext(ctx context.Context, md *metadata.StorageItem, cc CacheContext) error {
	return getDefaultManager().SetCacheContext(ctx, md, cc)
}

func ClearCacheContext(md *metadata.StorageItem) {
	getDefaultManager().clearCacheContext(md.ID())
}

type CacheContext interface {
	Checksum(ctx context.Context, ref cache.Mountable, p string, followLinks bool) (digest.Digest, error)
	ChecksumWildcard(ctx context.Context, ref cache.Mountable, p string, followLinks bool) (digest.Digest, error)
	HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error
}

type Hashed interface {
	Digest() digest.Digest
}

type Wildcard struct {
	Path   string
	Record *CacheRecord
}

type cacheManager struct {
	locker *locker.Locker
	lru    *simplelru.LRU
	lruMu  sync.Mutex
}

func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) {
	cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping())
	if err != nil {
		return "", nil
	}
	return cc.Checksum(ctx, ref, p, followLinks)
}

func (cm *cacheManager) ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) {
	cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping())
	if err != nil {
		return "", nil
	}
	return cc.ChecksumWildcard(ctx, ref, p, followLinks)
}

func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) {
	cm.locker.Lock(md.ID())
	cm.lruMu.Lock()
	v, ok := cm.lru.Get(md.ID())
	cm.lruMu.Unlock()
	if ok {
		cm.locker.Unlock(md.ID())
		v.(*cacheContext).linkMap = map[string][][]byte{}
		return v.(*cacheContext), nil
	}
	cc, err := newCacheContext(md, idmap)
	if err != nil {
		cm.locker.Unlock(md.ID())
		return nil, err
	}
	cm.lruMu.Lock()
	cm.lru.Add(md.ID(), cc)
	cm.lruMu.Unlock()
	cm.locker.Unlock(md.ID())
	return cc, nil
}

func (cm *cacheManager) SetCacheContext(ctx context.Context, md *metadata.StorageItem, cci CacheContext) error {
	cc, ok := cci.(*cacheContext)
	if !ok {
		return errors.Errorf("invalid cachecontext: %T", cc)
	}
	if md.ID() != cc.md.ID() {
		cc = &cacheContext{
			md:       md,
			tree:     cci.(*cacheContext).tree,
			dirtyMap: map[string]struct{}{},
			linkMap:  map[string][][]byte{},
		}
	} else {
		if err := cc.save(); err != nil {
			return err
		}
	}
	cm.lruMu.Lock()
	cm.lru.Add(md.ID(), cc)
	cm.lruMu.Unlock()
	return nil
}

func (cm *cacheManager) clearCacheContext(id string) {
	cm.lruMu.Lock()
	cm.lru.Remove(id)
	cm.lruMu.Unlock()
}

type cacheContext struct {
	mu    sync.RWMutex
	md    *metadata.StorageItem
	tree  *iradix.Tree
	dirty bool // needs to be persisted to disk

	// used in HandleChange
	txn      *iradix.Txn
	node     *iradix.Node
	dirtyMap map[string]struct{}
	linkMap  map[string][][]byte
	idmap    *idtools.IdentityMapping
}

type mount struct {
	mountable cache.Mountable
	mountPath string
	unmount   func() error
}

func (m *mount) mount(ctx context.Context) (string, error) {
	if m.mountPath != "" {
		return m.mountPath, nil
	}
	mounts, err := m.mountable.Mount(ctx, true)
	if err != nil {
		return "", err
	}

	lm := snapshot.LocalMounter(mounts)

	mp, err := lm.Mount()
	if err != nil {
		return "", err
	}

	m.mountPath = mp
	m.unmount = lm.Unmount
	return mp, nil
}

func (m *mount) clean() error {
	if m.mountPath != "" {
		if err := m.unmount(); err != nil {
			return err
		}
		m.mountPath = ""
	}
	return nil
}

func newCacheContext(md *metadata.StorageItem, idmap *idtools.IdentityMapping) (*cacheContext, error) {
	cc := &cacheContext{
		md:       md,
		tree:     iradix.New(),
		dirtyMap: map[string]struct{}{},
		linkMap:  map[string][][]byte{},
		idmap:    idmap,
	}
	if err := cc.load(); err != nil {
		return nil, err
	}
	return cc, nil
}

func (cc *cacheContext) load() error {
	dt, err := cc.md.GetExternal(keyContentHash)
	if err != nil {
		return nil
	}

	var l CacheRecords
	if err := l.Unmarshal(dt); err != nil {
		return err
	}

	txn := cc.tree.Txn()
	for _, p := range l.Paths {
		txn.Insert([]byte(p.Path), p.Record)
	}
	cc.tree = txn.Commit()
	return nil
}

func (cc *cacheContext) save() error {
	cc.mu.Lock()
	defer cc.mu.Unlock()

	if cc.txn != nil {
		cc.commitActiveTransaction()
	}

	var l CacheRecords
	node := cc.tree.Root()
	node.Walk(func(k []byte, v interface{}) bool {
		l.Paths = append(l.Paths, &CacheRecordWithPath{
			Path:   string(k),
			Record: v.(*CacheRecord),
		})
		return false
	})

	dt, err := l.Marshal()
	if err != nil {
		return err
	}

	return cc.md.SetExternal(keyContentHash, dt)
}

// HandleChange notifies the source about a modification operation
func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
	p = path.Join("/", filepath.ToSlash(p))
	if p == "/" {
		p = ""
	}
	k := convertPathToKey([]byte(p))

	deleteDir := func(cr *CacheRecord) {
		if cr.Type == CacheRecordTypeDir {
			cc.node.WalkPrefix(append(k, 0), func(k []byte, v interface{}) bool {
				cc.txn.Delete(k)
				return false
			})
		}
	}

	cc.mu.Lock()
	defer cc.mu.Unlock()
	if cc.txn == nil {
		cc.txn = cc.tree.Txn()
		cc.node = cc.tree.Root()

		// root is not called by HandleChange. need to fake it
		if _, ok := cc.node.Get([]byte{0}); !ok {
			cc.txn.Insert([]byte{0}, &CacheRecord{
				Type:   CacheRecordTypeDirHeader,
				Digest: digest.FromBytes(nil),
			})
			cc.txn.Insert([]byte(""), &CacheRecord{
				Type: CacheRecordTypeDir,
			})
		}
	}

	if kind == fsutil.ChangeKindDelete {
		v, ok := cc.txn.Delete(k)
		if ok {
			deleteDir(v.(*CacheRecord))
		}
		d := path.Dir(p)
		if d == "/" {
			d = ""
		}
		cc.dirtyMap[d] = struct{}{}
		return
	}

	stat, ok := fi.Sys().(*fstypes.Stat)
	if !ok {
		return errors.Errorf("%s invalid change without stat information", p)
	}

	h, ok := fi.(Hashed)
	if !ok {
		return errors.Errorf("invalid fileinfo: %s", p)
	}

	v, ok := cc.node.Get(k)
	if ok {
		deleteDir(v.(*CacheRecord))
	}

	cr := &CacheRecord{
		Type: CacheRecordTypeFile,
	}
	if fi.Mode()&os.ModeSymlink != 0 {
		cr.Type = CacheRecordTypeSymlink
		cr.Linkname = filepath.ToSlash(stat.Linkname)
	}
	if fi.IsDir() {
		cr.Type = CacheRecordTypeDirHeader
		cr2 := &CacheRecord{
			Type: CacheRecordTypeDir,
		}
		cc.txn.Insert(k, cr2)
		k = append(k, 0)
		p += "/"
	}
	cr.Digest = h.Digest()

	// if we receive a hardlink just use the digest of the source
	// note that the source may be called later because data writing is async
	if fi.Mode()&os.ModeSymlink == 0 && stat.Linkname != "" {
		ln := path.Join("/", filepath.ToSlash(stat.Linkname))
		v, ok := cc.txn.Get(convertPathToKey([]byte(ln)))
		if ok {
			cp := *v.(*CacheRecord)
			cr = &cp
		}
		cc.linkMap[ln] = append(cc.linkMap[ln], k)
	}

	cc.txn.Insert(k, cr)
	if !fi.IsDir() {
		if links, ok := cc.linkMap[p]; ok {
			for _, l := range links {
				pp := convertKeyToPath(l)
				cc.txn.Insert(l, cr)
				d := path.Dir(string(pp))
				if d == "/" {
					d = ""
				}
				cc.dirtyMap[d] = struct{}{}
			}
			delete(cc.linkMap, p)
		}
	}

	d := path.Dir(p)
	if d == "/" {
		d = ""
	}
	cc.dirtyMap[d] = struct{}{}

	return nil
}

func (cc *cacheContext) ChecksumWildcard(ctx context.Context, mountable cache.Mountable, p string, followLinks bool) (digest.Digest, error) {
	m := &mount{mountable: mountable}
	defer m.clean()

	wildcards, err := cc.wildcards(ctx, m, p)
	if err != nil {
		return "", nil
	}

	if followLinks {
		for i, w := range wildcards {
			if w.Record.Type == CacheRecordTypeSymlink {
				dgst, err := cc.checksumFollow(ctx, m, w.Path, followLinks)
				if err != nil {
					return "", err
				}
				wildcards[i].Record = &CacheRecord{Digest: dgst}
			}
		}
	}
	if len(wildcards) == 0 {
		return digest.FromBytes([]byte{}), nil
	}

	if len(wildcards) > 1 {
		digester := digest.Canonical.Digester()
		for i, w := range wildcards {
			if i != 0 {
				digester.Hash().Write([]byte{0})
			}
			digester.Hash().Write([]byte(w.Record.Digest))
		}
		return digester.Digest(), nil
	} else {
		return wildcards[0].Record.Digest, nil
	}
}

func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string, followLinks bool) (digest.Digest, error) {
	m := &mount{mountable: mountable}
	defer m.clean()

	return cc.checksumFollow(ctx, m, p, followLinks)
}

func (cc *cacheContext) checksumFollow(ctx context.Context, m *mount, p string, follow bool) (digest.Digest, error) {
	const maxSymlinkLimit = 255
	i := 0
	for {
		if i > maxSymlinkLimit {
			return "", errors.Errorf("too many symlinks: %s", p)
		}
		cr, err := cc.checksumNoFollow(ctx, m, p)
		if err != nil {
			return "", err
		}
		if cr.Type == CacheRecordTypeSymlink && follow {
			link := cr.Linkname
			if !path.IsAbs(cr.Linkname) {
				link = path.Join(path.Dir(p), link)
			}
			i++
			p = link
		} else {
			return cr.Digest, nil
		}
	}
}

func (cc *cacheContext) wildcards(ctx context.Context, m *mount, p string) ([]*Wildcard, error) {
	cc.mu.Lock()
	defer cc.mu.Unlock()

	if cc.txn != nil {
		cc.commitActiveTransaction()
	}

	root := cc.tree.Root()
	scan, err := cc.needsScan(root, "")
	if err != nil {
		return nil, err
	}
	if scan {
		if err := cc.scanPath(ctx, m, ""); err != nil {
			return nil, err
		}
	}

	defer func() {
		if cc.dirty {
			go cc.save()
			cc.dirty = false
		}
	}()

	p = path.Join("/", filepath.ToSlash(p))
	if p == "/" {
		p = ""
	}

	wildcards := make([]*Wildcard, 0, 2)

	txn := cc.tree.Txn()
	root = txn.Root()
	var updated bool

	iter := root.Seek([]byte{})
	for {
		k, _, ok := iter.Next()
		if !ok {
			break
		}
		if len(k) > 0 && k[len(k)-1] == byte(0) {
			continue
		}
		fn := convertKeyToPath(k)
		b, err := path.Match(p, string(fn))
		if err != nil {
			return nil, err
		}
		if !b {
			continue
		}

		cr, upt, err := cc.checksum(ctx, root, txn, m, k, false)
		if err != nil {
			return nil, err
		}
		if upt {
			updated = true
		}

		wildcards = append(wildcards, &Wildcard{Path: string(fn), Record: cr})

		if cr.Type == CacheRecordTypeDir {
			iter = root.Seek(append(k, 0, 0xff))
		}
	}

	cc.tree = txn.Commit()
	cc.dirty = updated

	return wildcards, nil
}

func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
	p = path.Join("/", filepath.ToSlash(p))
	if p == "/" {
		p = ""
	}

	cc.mu.RLock()
	if cc.txn == nil {
		root := cc.tree.Root()
		cc.mu.RUnlock()
		v, ok := root.Get(convertPathToKey([]byte(p)))
		if ok {
			cr := v.(*CacheRecord)
			if cr.Digest != "" {
				return cr, nil
			}
		}
	} else {
		cc.mu.RUnlock()
	}

	cc.mu.Lock()
	defer cc.mu.Unlock()

	if cc.txn != nil {
		cc.commitActiveTransaction()
	}

	defer func() {
		if cc.dirty {
			go cc.save()
			cc.dirty = false
		}
	}()

	return cc.lazyChecksum(ctx, m, p)
}

func (cc *cacheContext) commitActiveTransaction() {
	for d := range cc.dirtyMap {
		addParentToMap(d, cc.dirtyMap)
	}
	for d := range cc.dirtyMap {
		k := convertPathToKey([]byte(d))
		if _, ok := cc.txn.Get(k); ok {
			cc.txn.Insert(k, &CacheRecord{Type: CacheRecordTypeDir})
		}
	}
	cc.tree = cc.txn.Commit()
	cc.node = nil
	cc.dirtyMap = map[string]struct{}{}
	cc.txn = nil
}

func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
	root := cc.tree.Root()
	scan, err := cc.needsScan(root, p)
	if err != nil {
		return nil, err
	}
	if scan {
		if err := cc.scanPath(ctx, m, p); err != nil {
			return nil, err
		}
	}
	k := convertPathToKey([]byte(p))
	txn := cc.tree.Txn()
	root = txn.Root()
	cr, updated, err := cc.checksum(ctx, root, txn, m, k, true)
	if err != nil {
		return nil, err
	}
	cc.tree = txn.Commit()
	cc.dirty = updated
	return cr, err
}

func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte, follow bool) (*CacheRecord, bool, error) {
	origk := k
	k, cr, err := getFollowLinks(root, k, follow)
	if err != nil {
		return nil, false, err
	}
	if cr == nil {
		return nil, false, errors.Wrapf(errNotFound, "%q not found", convertKeyToPath(origk))
	}
	if cr.Digest != "" {
		return cr, false, nil
	}
	var dgst digest.Digest

	switch cr.Type {
	case CacheRecordTypeDir:
		h := sha256.New()
		next := append(k, 0)
		iter := root.Seek(next)
		subk := next
		ok := true
		for {
			if !ok || !bytes.HasPrefix(subk, next) {
				break
			}
			h.Write(bytes.TrimPrefix(subk, k))

			subcr, _, err := cc.checksum(ctx, root, txn, m, subk, true)
			if err != nil {
				return nil, false, err
			}

			h.Write([]byte(subcr.Digest))

			if subcr.Type == CacheRecordTypeDir { // skip subfiles
				next := append(subk, 0, 0xff)
				iter = root.Seek(next)
			}
			subk, _, ok = iter.Next()
		}
		dgst = digest.NewDigest(digest.SHA256, h)

	default:
		p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0})))

		target, err := m.mount(ctx)
		if err != nil {
			return nil, false, err
		}

		// no FollowSymlinkInScope because invalid paths should not be inserted
		fp := filepath.Join(target, filepath.FromSlash(p))

		fi, err := os.Lstat(fp)
		if err != nil {
			return nil, false, err
		}

		dgst, err = prepareDigest(fp, p, fi)
		if err != nil {
			return nil, false, err
		}
	}

	cr2 := &CacheRecord{
		Digest:   dgst,
		Type:     cr.Type,
		Linkname: cr.Linkname,
	}

	txn.Insert(k, cr2)

	return cr2, true, nil
}

// needsScan returns false if path is in the tree or a parent path is in tree
// and subpath is missing
func (cc *cacheContext) needsScan(root *iradix.Node, p string) (bool, error) {
	var linksWalked int
	return cc.needsScanFollow(root, p, &linksWalked)
}

func (cc *cacheContext) needsScanFollow(root *iradix.Node, p string, linksWalked *int) (bool, error) {
	if p == "/" {
		p = ""
	}
	if v, ok := root.Get(convertPathToKey([]byte(p))); !ok {
		if p == "" {
			return true, nil
		}
		return cc.needsScanFollow(root, path.Clean(path.Dir(p)), linksWalked)
	} else {
		cr := v.(*CacheRecord)
		if cr.Type == CacheRecordTypeSymlink {
			if *linksWalked > 255 {
				return false, errTooManyLinks
			}
			*linksWalked++
			link := path.Clean(cr.Linkname)
			if !path.IsAbs(cr.Linkname) {
				link = path.Join("/", path.Dir(p), link)
			}
			return cc.needsScanFollow(root, link, linksWalked)
		}
	}
	return false, nil
}

func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retErr error) {
	p = path.Join("/", p)
	d, _ := path.Split(p)

	mp, err := m.mount(ctx)
	if err != nil {
		return err
	}

	n := cc.tree.Root()
	txn := cc.tree.Txn()

	parentPath, err := rootPath(mp, filepath.FromSlash(d), func(p, link string) error {
		cr := &CacheRecord{
			Type:     CacheRecordTypeSymlink,
			Linkname: filepath.ToSlash(link),
		}
		k := []byte(filepath.Join("/", filepath.ToSlash(p)))
		k = convertPathToKey(k)
		txn.Insert(k, cr)
		return nil
	})
	if err != nil {
		return err
	}

	err = filepath.Walk(parentPath, func(path string, fi os.FileInfo, err error) error {
		if err != nil {
			return errors.Wrapf(err, "failed to walk %s", path)
		}
		rel, err := filepath.Rel(mp, path)
		if err != nil {
			return err
		}
		k := []byte(filepath.Join("/", filepath.ToSlash(rel)))
		if string(k) == "/" {
			k = []byte{}
		}
		k = convertPathToKey(k)
		if _, ok := n.Get(k); !ok {
			cr := &CacheRecord{
				Type: CacheRecordTypeFile,
			}
			if fi.Mode()&os.ModeSymlink != 0 {
				cr.Type = CacheRecordTypeSymlink
				link, err := os.Readlink(path)
				if err != nil {
					return err
				}
				cr.Linkname = filepath.ToSlash(link)
			}
			if fi.IsDir() {
				cr.Type = CacheRecordTypeDirHeader
				cr2 := &CacheRecord{
					Type: CacheRecordTypeDir,
				}
				txn.Insert(k, cr2)
				k = append(k, 0)
			}
			txn.Insert(k, cr)
		}
		return nil
	})
	if err != nil {
		return err
	}

	cc.tree = txn.Commit()
	return nil
}

func getFollowLinks(root *iradix.Node, k []byte, follow bool) ([]byte, *CacheRecord, error) {
	var linksWalked int
	return getFollowLinksWalk(root, k, follow, &linksWalked)
}

func getFollowLinksWalk(root *iradix.Node, k []byte, follow bool, linksWalked *int) ([]byte, *CacheRecord, error) {
	v, ok := root.Get(k)
	if ok {
		return k, v.(*CacheRecord), nil
	}
	if !follow || len(k) == 0 {
		return k, nil, nil
	}

	dir, file := splitKey(k)

	k, parent, err := getFollowLinksWalk(root, dir, follow, linksWalked)
	if err != nil {
		return nil, nil, err
	}
	if parent != nil {
		if parent.Type == CacheRecordTypeSymlink {
			*linksWalked++
			if *linksWalked > 255 {
				return nil, nil, errors.Errorf("too many links")
			}
			dirPath := path.Clean(string(convertKeyToPath(dir)))
			if dirPath == "." || dirPath == "/" {
				dirPath = ""
			}
			link := path.Clean(parent.Linkname)
			if !path.IsAbs(link) {
				link = path.Join("/", path.Join(path.Dir(dirPath), link))
			}
			return getFollowLinksWalk(root, append(convertPathToKey([]byte(link)), file...), follow, linksWalked)
		}
	}
	k = append(k, file...)
	v, ok = root.Get(k)
	if ok {
		return k, v.(*CacheRecord), nil
	}
	return k, nil, nil
}

func prepareDigest(fp, p string, fi os.FileInfo) (digest.Digest, error) {
	h, err := NewFileHash(fp, fi)
	if err != nil {
		return "", errors.Wrapf(err, "failed to create hash for %s", p)
	}
	if fi.Mode().IsRegular() && fi.Size() > 0 {
		// TODO: would be nice to put the contents to separate hash first
		// so it can be cached for hardlinks
		f, err := os.Open(fp)
		if err != nil {
			return "", errors.Wrapf(err, "failed to open %s", p)
		}
		defer f.Close()
		if _, err := poolsCopy(h, f); err != nil {
			return "", errors.Wrapf(err, "failed to copy file data for %s", p)
		}
	}
	return digest.NewDigest(digest.SHA256, h), nil
}

func addParentToMap(d string, m map[string]struct{}) {
	if d == "" {
		return
	}
	d = path.Dir(d)
	if d == "/" {
		d = ""
	}
	m[d] = struct{}{}
	addParentToMap(d, m)
}

func ensureOriginMetadata(md *metadata.StorageItem) *metadata.StorageItem {
	v := md.Get("cache.equalMutable") // TODO: const
	if v == nil {
		return md
	}
	var mutable string
	if err := v.Unmarshal(&mutable); err != nil {
		return md
	}
	si, ok := md.Storage().Get(mutable)
	if ok {
		return si
	}
	return md
}

var pool32K = sync.Pool{
	New: func() interface{} { return make([]byte, 32*1024) }, // 32K
}

func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) {
	buf := pool32K.Get().([]byte)
	written, err = io.CopyBuffer(dst, src, buf)
	pool32K.Put(buf)
	return
}

func convertPathToKey(p []byte) []byte {
	return bytes.Replace([]byte(p), []byte("/"), []byte{0}, -1)
}

func convertKeyToPath(p []byte) []byte {
	return bytes.Replace([]byte(p), []byte{0}, []byte("/"), -1)
}

func splitKey(k []byte) ([]byte, []byte) {
	foundBytes := false
	i := len(k) - 1
	for {
		if i <= 0 || foundBytes && k[i] == 0 {
			break
		}
		if k[i] != 0 {
			foundBytes = true
		}
		i--
	}
	return append([]byte{}, k[:i]...), k[i:]
}
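One detail worth pulling out of the file above: the radix-tree keys are paths with every "/" replaced by a zero byte (convertPathToKey), which keeps all entries under a directory contiguous in key order. A standalone round-trip illustration (the two helpers are re-implemented here for demonstration only):

	package main

	import (
		"bytes"
		"fmt"
	)

	func pathToKey(p string) []byte {
		return bytes.ReplaceAll([]byte(p), []byte("/"), []byte{0})
	}

	func keyToPath(k []byte) string {
		return string(bytes.ReplaceAll(k, []byte{0}, []byte("/")))
	}

	func main() {
		k := pathToKey("/dir/file")
		fmt.Printf("%q\n", k)     // "\x00dir\x00file"
		fmt.Println(keyToPath(k)) // /dir/file
	}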
vendor/github.com/moby/buildkit/cache/contenthash/checksum.pb.go (864 lines, generated, vendored, new file)
@@ -0,0 +1,864 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: checksum.proto

package contenthash

import (
	fmt "fmt"
	_ "github.com/gogo/protobuf/gogoproto"
	proto "github.com/gogo/protobuf/proto"
	github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
	io "io"
	math "math"
	math_bits "math/bits"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

type CacheRecordType int32

const (
	CacheRecordTypeFile      CacheRecordType = 0
	CacheRecordTypeDir       CacheRecordType = 1
	CacheRecordTypeDirHeader CacheRecordType = 2
	CacheRecordTypeSymlink   CacheRecordType = 3
)

var CacheRecordType_name = map[int32]string{
	0: "FILE",
	1: "DIR",
	2: "DIR_HEADER",
	3: "SYMLINK",
}

var CacheRecordType_value = map[string]int32{
	"FILE":       0,
	"DIR":        1,
	"DIR_HEADER": 2,
	"SYMLINK":    3,
}

func (x CacheRecordType) String() string {
	return proto.EnumName(CacheRecordType_name, int32(x))
}

func (CacheRecordType) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_843938c28b799986, []int{0}
}

type CacheRecord struct {
	Digest   github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
	Type     CacheRecordType                            `protobuf:"varint,2,opt,name=type,proto3,enum=contenthash.CacheRecordType" json:"type,omitempty"`
	Linkname string                                     `protobuf:"bytes,3,opt,name=linkname,proto3" json:"linkname,omitempty"`
}

func (m *CacheRecord) Reset()         { *m = CacheRecord{} }
func (m *CacheRecord) String() string { return proto.CompactTextString(m) }
func (*CacheRecord) ProtoMessage()    {}
func (*CacheRecord) Descriptor() ([]byte, []int) {
	return fileDescriptor_843938c28b799986, []int{0}
}
func (m *CacheRecord) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CacheRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CacheRecord.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *CacheRecord) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CacheRecord.Merge(m, src)
}
func (m *CacheRecord) XXX_Size() int {
	return m.Size()
}
func (m *CacheRecord) XXX_DiscardUnknown() {
	xxx_messageInfo_CacheRecord.DiscardUnknown(m)
}

var xxx_messageInfo_CacheRecord proto.InternalMessageInfo

func (m *CacheRecord) GetType() CacheRecordType {
	if m != nil {
		return m.Type
	}
	return CacheRecordTypeFile
}

func (m *CacheRecord) GetLinkname() string {
	if m != nil {
		return m.Linkname
	}
	return ""
}

type CacheRecordWithPath struct {
	Path   string       `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
	Record *CacheRecord `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"`
}

func (m *CacheRecordWithPath) Reset()         { *m = CacheRecordWithPath{} }
func (m *CacheRecordWithPath) String() string { return proto.CompactTextString(m) }
func (*CacheRecordWithPath) ProtoMessage()    {}
func (*CacheRecordWithPath) Descriptor() ([]byte, []int) {
	return fileDescriptor_843938c28b799986, []int{1}
}
func (m *CacheRecordWithPath) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CacheRecordWithPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CacheRecordWithPath.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *CacheRecordWithPath) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CacheRecordWithPath.Merge(m, src)
}
func (m *CacheRecordWithPath) XXX_Size() int {
	return m.Size()
}
func (m *CacheRecordWithPath) XXX_DiscardUnknown() {
	xxx_messageInfo_CacheRecordWithPath.DiscardUnknown(m)
}

var xxx_messageInfo_CacheRecordWithPath proto.InternalMessageInfo

func (m *CacheRecordWithPath) GetPath() string {
	if m != nil {
		return m.Path
	}
	return ""
}

func (m *CacheRecordWithPath) GetRecord() *CacheRecord {
	if m != nil {
		return m.Record
	}
	return nil
}

type CacheRecords struct {
	Paths []*CacheRecordWithPath `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
}

func (m *CacheRecords) Reset()         { *m = CacheRecords{} }
func (m *CacheRecords) String() string { return proto.CompactTextString(m) }
func (*CacheRecords) ProtoMessage()    {}
func (*CacheRecords) Descriptor() ([]byte, []int) {
	return fileDescriptor_843938c28b799986, []int{2}
}
func (m *CacheRecords) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *CacheRecords) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_CacheRecords.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *CacheRecords) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CacheRecords.Merge(m, src)
}
func (m *CacheRecords) XXX_Size() int {
	return m.Size()
}
func (m *CacheRecords) XXX_DiscardUnknown() {
	xxx_messageInfo_CacheRecords.DiscardUnknown(m)
}

var xxx_messageInfo_CacheRecords proto.InternalMessageInfo

func (m *CacheRecords) GetPaths() []*CacheRecordWithPath {
	if m != nil {
		return m.Paths
	}
	return nil
}

func init() {
	proto.RegisterEnum("contenthash.CacheRecordType", CacheRecordType_name, CacheRecordType_value)
	proto.RegisterType((*CacheRecord)(nil), "contenthash.CacheRecord")
	proto.RegisterType((*CacheRecordWithPath)(nil), "contenthash.CacheRecordWithPath")
	proto.RegisterType((*CacheRecords)(nil), "contenthash.CacheRecords")
}

func init() { proto.RegisterFile("checksum.proto", fileDescriptor_843938c28b799986) }

var fileDescriptor_843938c28b799986 = []byte{
	// 426 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x6a, 0x13, 0x41,
	0x18, 0xc7, 0x77, 0x9a, 0x18, 0xf5, 0x8b, 0xd4, 0x30, 0x85, 0x76, 0x19, 0xca, 0x64, 0xcc, 0xc5,
	0x50, 0xec, 0xa6, 0x44, 0xf0, 0x6e, 0xdd, 0x84, 0x46, 0xab, 0xc8, 0x54, 0x10, 0xf1, 0x20, 0x9b,
	0xcd, 0xb8, 0xb3, 0xb4, 0xd9, 0x59, 0x76, 0x27, 0x87, 0xbc, 0x81, 0xec, 0xc9, 0x17, 0xd8, 0x93,
	0x82, 0xef, 0xe0, 0x5d, 0xe8, 0xb1, 0x47, 0xf1, 0x50, 0x24, 0x79, 0x11, 0xd9, 0xd9, 0x2a, 0xcb,
	0x4a, 0x4e, 0xf3, 0x7d, 0x33, 0xbf, 0xef, 0xff, 0xff, 0xcf, 0x30, 0xb0, 0xed, 0x4b, 0xe1, 0x9f,
	0xa7, 0x8b, 0xb9, 0x13, 0x27, 0x4a, 0x2b, 0xdc, 0xf6, 0x55, 0xa4, 0x45, 0xa4, 0xa5, 0x97, 0x4a,
	0x72, 0x18, 0x84, 0x5a, 0x2e, 0xa6, 0x8e, 0xaf, 0xe6, 0x83, 0x40, 0x05, 0x6a, 0x60, 0x98, 0xe9,
	0xe2, 0xa3, 0xe9, 0x4c, 0x63, 0xaa, 0x72, 0xb6, 0xf7, 0x0d, 0x41, 0xfb, 0x99, 0xe7, 0x4b, 0xc1,
	0x85, 0xaf, 0x92, 0x19, 0x7e, 0x0e, 0xad, 0x59, 0x18, 0x88, 0x54, 0xdb, 0x88, 0xa1, 0xfe, 0xdd,
	0xe3, 0xe1, 0xe5, 0x75, 0xd7, 0xfa, 0x75, 0xdd, 0x3d, 0xa8, 0xc8, 0xaa, 0x58, 0x44, 0x85, 0xa5,
	0x17, 0x46, 0x22, 0x49, 0x07, 0x81, 0x3a, 0x2c, 0x47, 0x1c, 0xd7, 0x2c, 0xfc, 0x46, 0x01, 0x1f,
	0x41, 0x53, 0x2f, 0x63, 0x61, 0x6f, 0x31, 0xd4, 0xdf, 0x1e, 0xee, 0x3b, 0x95, 0x98, 0x4e, 0xc5,
	0xf3, 0xcd, 0x32, 0x16, 0xdc, 0x90, 0x98, 0xc0, 0x9d, 0x8b, 0x30, 0x3a, 0x8f, 0xbc, 0xb9, 0xb0,
	0x1b, 0x85, 0x3f, 0xff, 0xd7, 0xf7, 0xde, 0xc3, 0x4e, 0x65, 0xe8, 0x6d, 0xa8, 0xe5, 0x6b, 0x4f,
	0x4b, 0x8c, 0xa1, 0x19, 0x7b, 0x5a, 0x96, 0x71, 0xb9, 0xa9, 0xf1, 0x11, 0xb4, 0x12, 0x43, 0x19,
	0xeb, 0xf6, 0xd0, 0xde, 0x64, 0xcd, 0x6f, 0xb8, 0xde, 0x18, 0xee, 0x55, 0xb6, 0x53, 0xfc, 0x04,
	0x6e, 0x15, 0x4a, 0xa9, 0x8d, 0x58, 0xa3, 0xdf, 0x1e, 0xb2, 0x4d, 0x02, 0x7f, 0x63, 0xf0, 0x12,
	0x3f, 0xf8, 0x81, 0xe0, 0x7e, 0xed, 0x6a, 0xf8, 0x01, 0x34, 0xc7, 0x93, 0xd3, 0x51, 0xc7, 0x22,
	0x7b, 0x59, 0xce, 0x76, 0x6a, 0xc7, 0xe3, 0xf0, 0x42, 0xe0, 0x2e, 0x34, 0xdc, 0x09, 0xef, 0x20,
	0xb2, 0x9b, 0xe5, 0x0c, 0xd7, 0x08, 0x37, 0x4c, 0xf0, 0x23, 0x00, 0x77, 0xc2, 0x3f, 0x9c, 0x8c,
	0x9e, 0xba, 0x23, 0xde, 0xd9, 0x22, 0xfb, 0x59, 0xce, 0xec, 0xff, 0xb9, 0x13, 0xe1, 0xcd, 0x44,
	0x82, 0x1f, 0xc2, 0xed, 0xb3, 0x77, 0x2f, 0x4f, 0x27, 0xaf, 0x5e, 0x74, 0x1a, 0x84, 0x64, 0x39,
	0xdb, 0xad, 0xa1, 0x67, 0xcb, 0x79, 0xf1, 0xae, 0x64, 0xef, 0xd3, 0x17, 0x6a, 0x7d, 0xff, 0x4a,
	0xeb, 0x99, 0x8f, 0xed, 0xcb, 0x15, 0x45, 0x57, 0x2b, 0x8a, 0x7e, 0xaf, 0x28, 0xfa, 0xbc, 0xa6,
	0xd6, 0xd5, 0x9a, 0x5a, 0x3f, 0xd7, 0xd4, 0x9a, 0xb6, 0xcc, 0xbf, 0x79, 0xfc, 0x27, 0x00, 0x00,
	0xff, 0xff, 0xfd, 0xd7, 0xd8, 0x37, 0x85, 0x02, 0x00, 0x00,
}

func (m *CacheRecord) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *CacheRecord) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *CacheRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Linkname) > 0 {
		i -= len(m.Linkname)
		copy(dAtA[i:], m.Linkname)
		i = encodeVarintChecksum(dAtA, i, uint64(len(m.Linkname)))
		i--
		dAtA[i] = 0x1a
	}
	if m.Type != 0 {
		i = encodeVarintChecksum(dAtA, i, uint64(m.Type))
		i--
		dAtA[i] = 0x10
	}
	if len(m.Digest) > 0 {
		i -= len(m.Digest)
		copy(dAtA[i:], m.Digest)
		i = encodeVarintChecksum(dAtA, i, uint64(len(m.Digest)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

func (m *CacheRecordWithPath) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *CacheRecordWithPath) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *CacheRecordWithPath) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.Record != nil {
		{
			size, err := m.Record.MarshalToSizedBuffer(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarintChecksum(dAtA, i, uint64(size))
		}
		i--
		dAtA[i] = 0x12
	}
	if len(m.Path) > 0 {
		i -= len(m.Path)
		copy(dAtA[i:], m.Path)
		i = encodeVarintChecksum(dAtA, i, uint64(len(m.Path)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

func (m *CacheRecords) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *CacheRecords) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

func (m *CacheRecords) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if len(m.Paths) > 0 {
		for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
			{
				size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i])
				if err != nil {
					return 0, err
				}
				i -= size
				i = encodeVarintChecksum(dAtA, i, uint64(size))
			}
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}

func encodeVarintChecksum(dAtA []byte, offset int, v uint64) int {
	offset -= sovChecksum(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
func (m *CacheRecord) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Digest)
	if l > 0 {
		n += 1 + l + sovChecksum(uint64(l))
	}
	if m.Type != 0 {
		n += 1 + sovChecksum(uint64(m.Type))
	}
	l = len(m.Linkname)
	if l > 0 {
		n += 1 + l + sovChecksum(uint64(l))
	}
	return n
}

func (m *CacheRecordWithPath) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.Path)
	if l > 0 {
		n += 1 + l + sovChecksum(uint64(l))
	}
	if m.Record != nil {
		l = m.Record.Size()
		n += 1 + l + sovChecksum(uint64(l))
	}
	return n
}

func (m *CacheRecords) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Paths) > 0 {
		for _, e := range m.Paths {
			l = e.Size()
			n += 1 + l + sovChecksum(uint64(l))
		}
	}
	return n
}

func sovChecksum(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}
func sozChecksum(x uint64) (n int) {
	return sovChecksum(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *CacheRecord) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowChecksum
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CacheRecord: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CacheRecord: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowChecksum
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthChecksum
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthChecksum
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
			}
			m.Type = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowChecksum
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.Type |= CacheRecordType(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		case 3:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowChecksum
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthChecksum
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthChecksum
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Linkname = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipChecksum(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthChecksum
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthChecksum
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *CacheRecordWithPath) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowChecksum
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CacheRecordWithPath: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CacheRecordWithPath: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowChecksum
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthChecksum
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthChecksum
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Path = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowChecksum
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthChecksum
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthChecksum
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Record == nil {
				m.Record = &CacheRecord{}
			}
			if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipChecksum(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthChecksum
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthChecksum
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func (m *CacheRecords) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowChecksum
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CacheRecords: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CacheRecords: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowChecksum
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthChecksum
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLengthChecksum
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Paths = append(m.Paths, &CacheRecordWithPath{})
			if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipChecksum(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthChecksum
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthChecksum
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipChecksum(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowChecksum
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowChecksum
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowChecksum
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthChecksum
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupChecksum
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthChecksum
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthChecksum = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowChecksum = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupChecksum = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
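All of the generated Unmarshal methods above lean on the same base-128 varint loop: each byte contributes its low 7 bits, and a set high bit means another byte follows. A minimal standalone sketch of that decoding step (a hypothetical helper, not part of the generated file):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint decodes a protobuf base-128 varint from buf,
// returning the value and the number of bytes consumed.
func decodeVarint(buf []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(buf); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		b := buf[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: this was the last byte
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected end of buffer")
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C (44) plus 0x02 shifted left 7 (256).
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}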
30
vendor/github.com/moby/buildkit/cache/contenthash/checksum.proto
generated
vendored
Normal file
@@ -0,0 +1,30 @@
syntax = "proto3";

package contenthash;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

enum CacheRecordType {
	option (gogoproto.goproto_enum_prefix) = false;
	option (gogoproto.enum_customname) = "CacheRecordType";

	FILE = 0 [(gogoproto.enumvalue_customname) = "CacheRecordTypeFile"];
	DIR = 1 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDir"];
	DIR_HEADER = 2 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDirHeader"];
	SYMLINK = 3 [(gogoproto.enumvalue_customname) = "CacheRecordTypeSymlink"];
}

message CacheRecord {
	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
	CacheRecordType type = 2;
	string linkname = 3;
}

message CacheRecordWithPath {
	string path = 1;
	CacheRecord record = 2;
}

message CacheRecords {
	repeated CacheRecordWithPath paths = 1;
}
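Given the enum and message customnames above, gogofaster generates Go types with Marshal/Unmarshal methods. Round-tripping a CacheRecords message would look roughly like this sketch (illustrative only; the example values are made up):

package main

import (
	"fmt"

	"github.com/moby/buildkit/cache/contenthash"
)

func main() {
	recs := &contenthash.CacheRecords{
		Paths: []*contenthash.CacheRecordWithPath{
			{
				Path:   "/etc/hosts", // hypothetical path
				Record: &contenthash.CacheRecord{Type: contenthash.CacheRecordTypeFile},
			},
		},
	}

	dt, err := recs.Marshal() // generated by gogofaster
	if err != nil {
		panic(err)
	}

	var out contenthash.CacheRecords
	if err := out.Unmarshal(dt); err != nil {
		panic(err)
	}
	fmt.Println(out.Paths[0].Path) // /etc/hosts
}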
101
vendor/github.com/moby/buildkit/cache/contenthash/filehash.go
generated
vendored
Normal file
@@ -0,0 +1,101 @@
package contenthash

import (
	"archive/tar"
	"crypto/sha256"
	"hash"
	"os"
	"path/filepath"
	"time"

	fstypes "github.com/tonistiigi/fsutil/types"
)

// NewFileHash returns new hash that is used for the builder cache keys
func NewFileHash(path string, fi os.FileInfo) (hash.Hash, error) {
	var link string
	if fi.Mode()&os.ModeSymlink != 0 {
		var err error
		link, err = os.Readlink(path)
		if err != nil {
			return nil, err
		}
	}

	stat := &fstypes.Stat{
		Mode:     uint32(fi.Mode()),
		Size_:    fi.Size(),
		ModTime:  fi.ModTime().UnixNano(),
		Linkname: link,
	}

	if fi.Mode()&os.ModeSymlink != 0 {
		stat.Mode = stat.Mode | 0777
	}

	if err := setUnixOpt(path, fi, stat); err != nil {
		return nil, err
	}
	return NewFromStat(stat)
}

func NewFromStat(stat *fstypes.Stat) (hash.Hash, error) {
	// Clear the socket bit since archive/tar.FileInfoHeader does not handle it
	stat.Mode &^= uint32(os.ModeSocket)

	fi := &statInfo{stat}
	hdr, err := tar.FileInfoHeader(fi, stat.Linkname)
	if err != nil {
		return nil, err
	}
	hdr.Name = "" // note: empty name is different from current hash in docker build. Name is added on recursive directory scan instead
	hdr.Mode = int64(chmodWindowsTarEntry(os.FileMode(hdr.Mode)))
	hdr.Devmajor = stat.Devmajor
	hdr.Devminor = stat.Devminor

	if len(stat.Xattrs) > 0 {
		hdr.Xattrs = make(map[string]string, len(stat.Xattrs))
		for k, v := range stat.Xattrs {
			hdr.Xattrs[k] = string(v)
		}
	}
	// fmt.Printf("hdr: %#v\n", hdr)
	tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()}
	tsh.Reset() // initialize header
	return tsh, nil
}

type tarsumHash struct {
	hash.Hash
	hdr *tar.Header
}

// Reset resets the Hash to its initial state.
func (tsh *tarsumHash) Reset() {
	// comply with hash.Hash and reset to the state hash had before any writes
	tsh.Hash.Reset()
	WriteV1TarsumHeaders(tsh.hdr, tsh.Hash)
}

type statInfo struct {
	*fstypes.Stat
}

func (s *statInfo) Name() string {
	return filepath.Base(s.Stat.Path)
}
func (s *statInfo) Size() int64 {
	return s.Stat.Size_
}
func (s *statInfo) Mode() os.FileMode {
	return os.FileMode(s.Stat.Mode)
}
func (s *statInfo) ModTime() time.Time {
	return time.Unix(s.Stat.ModTime/1e9, s.Stat.ModTime%1e9)
}
func (s *statInfo) IsDir() bool {
	return s.Mode().IsDir()
}
func (s *statInfo) Sys() interface{} {
	return s.Stat
}
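NewFileHash seeds a sha256 hash with the file's tar-header fields; callers then feed the file contents into the same hash. One plausible way to use it (a sketch under that assumption, not code from this changeset):

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/moby/buildkit/cache/contenthash"
)

// hashFile derives a content-based key for a single file: header
// fields seed the hash via NewFileHash, then the file bytes follow.
func hashFile(path string) (string, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return "", err
	}
	h, err := contenthash.NewFileHash(path, fi)
	if err != nil {
		return "", err
	}
	if fi.Mode().IsRegular() {
		f, err := os.Open(path)
		if err != nil {
			return "", err
		}
		defer f.Close()
		if _, err := io.Copy(h, f); err != nil {
			return "", err
		}
	}
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}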
47
vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go
generated
vendored
Normal file
@@ -0,0 +1,47 @@
// +build !windows

package contenthash

import (
	"os"
	"syscall"

	"github.com/containerd/continuity/sysx"
	fstypes "github.com/tonistiigi/fsutil/types"

	"golang.org/x/sys/unix"
)

func chmodWindowsTarEntry(perm os.FileMode) os.FileMode {
	return perm
}

func setUnixOpt(path string, fi os.FileInfo, stat *fstypes.Stat) error {
	s := fi.Sys().(*syscall.Stat_t)

	stat.Uid = s.Uid
	stat.Gid = s.Gid

	if !fi.IsDir() {
		if s.Mode&syscall.S_IFBLK != 0 ||
			s.Mode&syscall.S_IFCHR != 0 {
			stat.Devmajor = int64(unix.Major(uint64(s.Rdev)))
			stat.Devminor = int64(unix.Minor(uint64(s.Rdev)))
		}
	}

	attrs, err := sysx.LListxattr(path)
	if err != nil {
		return err
	}
	if len(attrs) > 0 {
		stat.Xattrs = map[string][]byte{}
		for _, attr := range attrs {
			v, err := sysx.LGetxattr(path, attr)
			if err == nil {
				stat.Xattrs[attr] = v
			}
		}
	}
	return nil
}
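setUnixOpt pairs LListxattr (list attribute names without following symlinks) with LGetxattr (read each value), silently skipping attributes that fail to read. A standalone sketch of the same pattern, assuming the containerd/continuity sysx API behaves as used above:

// +build !windows

package main

import (
	"fmt"

	"github.com/containerd/continuity/sysx"
)

// dumpXattrs lists and reads the extended attributes of a path
// without following symlinks, mirroring the loop in setUnixOpt.
func dumpXattrs(path string) error {
	attrs, err := sysx.LListxattr(path)
	if err != nil {
		return err
	}
	for _, attr := range attrs {
		v, err := sysx.LGetxattr(path, attr)
		if err != nil {
			continue // skip attributes that disappear or are unreadable
		}
		fmt.Printf("%s=%q\n", attr, v)
	}
	return nil
}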
23
vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go
generated
vendored
Normal file
@@ -0,0 +1,23 @@
// +build windows

package contenthash

import (
	"os"

	fstypes "github.com/tonistiigi/fsutil/types"
)

// chmodWindowsTarEntry is used to adjust the file permissions used in the tar
// header based on the platform on which the archival is done.
func chmodWindowsTarEntry(perm os.FileMode) os.FileMode {
	perm &= 0755
	// Add the x bit: make everything +x from windows
	perm |= 0111

	return perm
}

func setUnixOpt(path string, fi os.FileInfo, stat *fstypes.Stat) error {
	return nil
}
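A worked example of the mask-then-set arithmetic above, using only the standard library: 0644 masked with 0755 stays 0644, and OR-ing in 0111 yields 0755.

package main

import (
	"fmt"
	"os"
)

func main() {
	// chmodWindowsTarEntry-style normalization: clamp to 0755, then force +x.
	perm := os.FileMode(0644)
	perm &= 0755 // 0644 & 0755 = 0644 (write bits for group/other already clear)
	perm |= 0111 // 0644 | 0111 = 0755
	fmt.Printf("%o\n", perm) // 755
}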
3
vendor/github.com/moby/buildkit/cache/contenthash/generate.go
generated
vendored
Normal file
@@ -0,0 +1,3 @@
package contenthash

//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. checksum.proto
107
vendor/github.com/moby/buildkit/cache/contenthash/path.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
package contenthash

import (
	"errors"
	"os"
	"path/filepath"
)

var (
	errTooManyLinks = errors.New("too many links")
)

type onSymlinkFunc func(string, string) error

// rootPath joins a path with a root, evaluating and bounding any
// symlink to the root directory.
// This is containerd/continuity/fs RootPath implementation with a callback on
// resolving the symlink.
func rootPath(root, path string, cb onSymlinkFunc) (string, error) {
	if path == "" {
		return root, nil
	}
	var linksWalked int // to protect against cycles
	for {
		i := linksWalked
		newpath, err := walkLinks(root, path, &linksWalked, cb)
		if err != nil {
			return "", err
		}
		path = newpath
		if i == linksWalked {
			newpath = filepath.Join("/", newpath)
			if path == newpath {
				return filepath.Join(root, newpath), nil
			}
			path = newpath
		}
	}
}

func walkLink(root, path string, linksWalked *int, cb onSymlinkFunc) (newpath string, islink bool, err error) {
	if *linksWalked > 255 {
		return "", false, errTooManyLinks
	}

	path = filepath.Join("/", path)
	if path == "/" {
		return path, false, nil
	}
	realPath := filepath.Join(root, path)

	fi, err := os.Lstat(realPath)
	if err != nil {
		// If path does not yet exist, treat as non-symlink
		if os.IsNotExist(err) {
			return path, false, nil
		}
		return "", false, err
	}
	if fi.Mode()&os.ModeSymlink == 0 {
		return path, false, nil
	}
	newpath, err = os.Readlink(realPath)
	if err != nil {
		return "", false, err
	}
	if cb != nil {
		if err := cb(path, newpath); err != nil {
			return "", false, err
		}
	}
	*linksWalked++
	return newpath, true, nil
}

func walkLinks(root, path string, linksWalked *int, cb onSymlinkFunc) (string, error) {
	switch dir, file := filepath.Split(path); {
	case dir == "":
		newpath, _, err := walkLink(root, file, linksWalked, cb)
		return newpath, err
	case file == "":
		if os.IsPathSeparator(dir[len(dir)-1]) {
			if dir == "/" {
				return dir, nil
			}
			return walkLinks(root, dir[:len(dir)-1], linksWalked, cb)
		}
		newpath, _, err := walkLink(root, dir, linksWalked, cb)
		return newpath, err
	default:
		newdir, err := walkLinks(root, dir, linksWalked, cb)
		if err != nil {
			return "", err
		}
		newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked, cb)
		if err != nil {
			return "", err
		}
		if !islink {
			return newpath, nil
		}
		if filepath.IsAbs(newpath) {
			return newpath, nil
		}
		return filepath.Join(newdir, newpath), nil
	}
}
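Since rootPath is unexported, a caller necessarily lives in this package. A sketch of an in-package invocation (the paths and the hop-recording callback are made up for illustration): resolution stays bounded under the root even for absolute symlink targets, and the callback can veto resolution by returning an error.

package contenthash

// exampleResolve resolves a path under a root while recording each
// symlink hop. Illustrative only; not part of the vendored file.
func exampleResolve() (string, error) {
	var hops [][2]string
	return rootPath("/var/lib/root", "data/current/config.json",
		func(linkPath, target string) error {
			hops = append(hops, [2]string{linkPath, target})
			return nil
		})
}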
63
vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
package contenthash

import (
	"archive/tar"
	"io"
	"sort"
	"strconv"
	"strings"
)

// WriteV1TarsumHeaders writes a tar header to a writer in V1 tarsum format.
func WriteV1TarsumHeaders(h *tar.Header, w io.Writer) {
	for _, elem := range v1TarHeaderSelect(h) {
		w.Write([]byte(elem[0] + elem[1]))
	}
}

// Functions below are from docker legacy tarsum implementation.
// There is no valid technical reason to continue using them.

func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
	return [][2]string{
		{"name", h.Name},
		{"mode", strconv.FormatInt(h.Mode, 10)},
		{"uid", strconv.Itoa(h.Uid)},
		{"gid", strconv.Itoa(h.Gid)},
		{"size", strconv.FormatInt(h.Size, 10)},
		{"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)},
		{"typeflag", string([]byte{h.Typeflag})},
		{"linkname", h.Linkname},
		{"uname", h.Uname},
		{"gname", h.Gname},
		{"devmajor", strconv.FormatInt(h.Devmajor, 10)},
		{"devminor", strconv.FormatInt(h.Devminor, 10)},
	}
}

func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) {
	// Get extended attributes.
	xAttrKeys := make([]string, len(h.Xattrs))
	for k := range h.Xattrs {
		if k == "security.capability" || !strings.HasPrefix(k, "security.") && !strings.HasPrefix(k, "system.") {
			xAttrKeys = append(xAttrKeys, k)
		}
	}
	sort.Strings(xAttrKeys)

	// Make the slice with enough capacity to hold the 11 basic headers
	// we want from the v0 selector plus however many xattrs we have.
	orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys))

	// Copy all headers from v0 excluding the 'mtime' header (the 5th element).
	v0headers := v0TarHeaderSelect(h)
	orderedHeaders = append(orderedHeaders, v0headers[0:5]...)
	orderedHeaders = append(orderedHeaders, v0headers[6:]...)

	// Finally, append the sorted xattrs.
	for _, k := range xAttrKeys {
		orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]})
	}

	return
}
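WriteV1TarsumHeaders is how tarsumHash.Reset seeds a digest before any file content is written. A sketch of using it directly with a synthetic header (the header values are made up; only the stdlib and the exported function above are assumed):

package main

import (
	"archive/tar"
	"crypto/sha256"
	"fmt"
	"time"

	"github.com/moby/buildkit/cache/contenthash"
)

func main() {
	// Seed a digest with the v1 tarsum header fields of a synthetic entry.
	hdr := &tar.Header{
		Name:    "", // matches NewFromStat: names are added on directory scan
		Mode:    0644,
		Size:    42,
		ModTime: time.Unix(0, 0), // mtime is dropped by the v1 selector anyway
	}
	h := sha256.New()
	contenthash.WriteV1TarsumHeaders(hdr, h)
	// File content would be written to h next; Sum finalizes the key.
	fmt.Printf("%x\n", h.Sum(nil))
}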
1092
vendor/github.com/moby/buildkit/cache/manager.go
generated
vendored
Normal file
File diff suppressed because it is too large
501
vendor/github.com/moby/buildkit/cache/metadata.go
generated
vendored
Normal file
@@ -0,0 +1,501 @@
package cache

import (
	"time"

	"github.com/moby/buildkit/cache/metadata"
	"github.com/moby/buildkit/client"
	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)

const sizeUnknown int64 = -1
const keySize = "snapshot.size"
const keyEqualMutable = "cache.equalMutable"
const keyCachePolicy = "cache.cachePolicy"
const keyDescription = "cache.description"
const keyCreatedAt = "cache.createdAt"
const keyLastUsedAt = "cache.lastUsedAt"
const keyUsageCount = "cache.usageCount"
const keyLayerType = "cache.layerType"
const keyRecordType = "cache.recordType"
const keyCommitted = "snapshot.committed"
const keyParent = "cache.parent"
const keyDiffID = "cache.diffID"
const keyChainID = "cache.chainID"
const keyBlobChainID = "cache.blobChainID"
const keyBlob = "cache.blob"
const keySnapshot = "cache.snapshot"
const keyBlobOnly = "cache.blobonly"
const keyMediaType = "cache.mediatype"

const keyDeleted = "cache.deleted"

func queueDiffID(si *metadata.StorageItem, str string) error {
	if str == "" {
		return nil
	}
	v, err := metadata.NewValue(str)
	if err != nil {
		return errors.Wrap(err, "failed to create diffID value")
	}
	si.Update(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyDiffID, v)
	})
	return nil
}

func getMediaType(si *metadata.StorageItem) string {
	v := si.Get(keyMediaType)
	if v == nil {
		return si.ID()
	}
	var str string
	if err := v.Unmarshal(&str); err != nil {
		return ""
	}
	return str
}

func queueMediaType(si *metadata.StorageItem, str string) error {
	if str == "" {
		return nil
	}
	v, err := metadata.NewValue(str)
	if err != nil {
		return errors.Wrap(err, "failed to create mediaType value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyMediaType, v)
	})
	return nil
}

func getSnapshotID(si *metadata.StorageItem) string {
	v := si.Get(keySnapshot)
	if v == nil {
		return si.ID()
	}
	var str string
	if err := v.Unmarshal(&str); err != nil {
		return ""
	}
	return str
}

func queueSnapshotID(si *metadata.StorageItem, str string) error {
	if str == "" {
		return nil
	}
	v, err := metadata.NewValue(str)
	if err != nil {
		return errors.Wrap(err, "failed to create snapshotID value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keySnapshot, v)
	})
	return nil
}

func getDiffID(si *metadata.StorageItem) string {
	v := si.Get(keyDiffID)
	if v == nil {
		return ""
	}
	var str string
	if err := v.Unmarshal(&str); err != nil {
		return ""
	}
	return str
}

func queueChainID(si *metadata.StorageItem, str string) error {
	if str == "" {
		return nil
	}
	v, err := metadata.NewValue(str)
	if err != nil {
		return errors.Wrap(err, "failed to create chainID value")
	}
	v.Index = "chainid:" + str
	si.Update(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyChainID, v)
	})
	return nil
}

func getBlobChainID(si *metadata.StorageItem) string {
	v := si.Get(keyBlobChainID)
	if v == nil {
		return ""
	}
	var str string
	if err := v.Unmarshal(&str); err != nil {
		return ""
	}
	return str
}

func queueBlobChainID(si *metadata.StorageItem, str string) error {
	if str == "" {
		return nil
	}
	v, err := metadata.NewValue(str)
	if err != nil {
		return errors.Wrap(err, "failed to create blobChainID value")
	}
	v.Index = "blobchainid:" + str
	si.Update(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyBlobChainID, v)
	})
	return nil
}

func getChainID(si *metadata.StorageItem) string {
	v := si.Get(keyChainID)
	if v == nil {
		return ""
	}
	var str string
	if err := v.Unmarshal(&str); err != nil {
		return ""
	}
	return str
}

func queueBlob(si *metadata.StorageItem, str string) error {
	if str == "" {
		return nil
	}
	v, err := metadata.NewValue(str)
	if err != nil {
		return errors.Wrap(err, "failed to create blob value")
	}
	si.Update(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyBlob, v)
	})
	return nil
}

func getBlob(si *metadata.StorageItem) string {
	v := si.Get(keyBlob)
	if v == nil {
		return ""
	}
	var str string
	if err := v.Unmarshal(&str); err != nil {
		return ""
	}
	return str
}

func queueBlobOnly(si *metadata.StorageItem, b bool) error {
	v, err := metadata.NewValue(b)
	if err != nil {
		return errors.Wrap(err, "failed to create blobonly value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyBlobOnly, v)
	})
	return nil
}

func getBlobOnly(si *metadata.StorageItem) bool {
	v := si.Get(keyBlobOnly)
	if v == nil {
		return false
	}
	var blobOnly bool
	if err := v.Unmarshal(&blobOnly); err != nil {
		return false
	}
	return blobOnly
}

func setDeleted(si *metadata.StorageItem) error {
	v, err := metadata.NewValue(true)
	if err != nil {
		return errors.Wrap(err, "failed to create deleted value")
	}
	si.Update(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyDeleted, v)
	})
	return nil
}

func getDeleted(si *metadata.StorageItem) bool {
	v := si.Get(keyDeleted)
	if v == nil {
		return false
	}
	var deleted bool
	if err := v.Unmarshal(&deleted); err != nil {
		return false
	}
	return deleted
}

func queueCommitted(si *metadata.StorageItem) error {
	v, err := metadata.NewValue(true)
	if err != nil {
		return errors.Wrap(err, "failed to create committed value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyCommitted, v)
	})
	return nil
}

func getCommitted(si *metadata.StorageItem) bool {
	v := si.Get(keyCommitted)
	if v == nil {
		return false
	}
	var committed bool
	if err := v.Unmarshal(&committed); err != nil {
		return false
	}
	return committed
}

func queueParent(si *metadata.StorageItem, parent string) error {
	if parent == "" {
		return nil
	}
	v, err := metadata.NewValue(parent)
	if err != nil {
		return errors.Wrap(err, "failed to create parent value")
	}
	si.Update(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyParent, v)
	})
	return nil
}

func getParent(si *metadata.StorageItem) string {
	v := si.Get(keyParent)
	if v == nil {
		return ""
	}
	var parent string
	if err := v.Unmarshal(&parent); err != nil {
		return ""
	}
	return parent
}

func setSize(si *metadata.StorageItem, s int64) error {
	v, err := metadata.NewValue(s)
	if err != nil {
		return errors.Wrap(err, "failed to create size value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keySize, v)
	})
	return nil
}

func getSize(si *metadata.StorageItem) int64 {
	v := si.Get(keySize)
	if v == nil {
		return sizeUnknown
	}
	var size int64
	if err := v.Unmarshal(&size); err != nil {
		return sizeUnknown
	}
	return size
}

func getEqualMutable(si *metadata.StorageItem) string {
	v := si.Get(keyEqualMutable)
	if v == nil {
		return ""
	}
	var str string
	if err := v.Unmarshal(&str); err != nil {
		return ""
	}
	return str
}

func setEqualMutable(si *metadata.StorageItem, s string) error {
	v, err := metadata.NewValue(s)
	if err != nil {
		return errors.Wrapf(err, "failed to create %s meta value", keyEqualMutable)
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyEqualMutable, v)
	})
	return nil
}

func clearEqualMutable(si *metadata.StorageItem) error {
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyEqualMutable, nil)
	})
	return nil
}

func queueCachePolicy(si *metadata.StorageItem, p cachePolicy) error {
	v, err := metadata.NewValue(p)
	if err != nil {
		return errors.Wrap(err, "failed to create cachePolicy value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyCachePolicy, v)
	})
	return nil
}

func getCachePolicy(si *metadata.StorageItem) cachePolicy {
	v := si.Get(keyCachePolicy)
	if v == nil {
		return cachePolicyDefault
	}
	var p cachePolicy
	if err := v.Unmarshal(&p); err != nil {
		return cachePolicyDefault
	}
	return p
}

func queueDescription(si *metadata.StorageItem, descr string) error {
	v, err := metadata.NewValue(descr)
	if err != nil {
		return errors.Wrap(err, "failed to create description value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyDescription, v)
	})
	return nil
}

func GetDescription(si *metadata.StorageItem) string {
	v := si.Get(keyDescription)
	if v == nil {
		return ""
	}
	var str string
	if err := v.Unmarshal(&str); err != nil {
		return ""
	}
	return str
}

func queueCreatedAt(si *metadata.StorageItem, tm time.Time) error {
	v, err := metadata.NewValue(tm.UnixNano())
	if err != nil {
		return errors.Wrap(err, "failed to create createdAt value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyCreatedAt, v)
	})
	return nil
}

func GetCreatedAt(si *metadata.StorageItem) time.Time {
	v := si.Get(keyCreatedAt)
	if v == nil {
		return time.Time{}
	}
	var tm int64
	if err := v.Unmarshal(&tm); err != nil {
		return time.Time{}
	}
	return time.Unix(tm/1e9, tm%1e9)
}

func getLastUsed(si *metadata.StorageItem) (int, *time.Time) {
	v := si.Get(keyUsageCount)
	if v == nil {
		return 0, nil
	}
	var usageCount int
	if err := v.Unmarshal(&usageCount); err != nil {
		return 0, nil
	}
	v = si.Get(keyLastUsedAt)
	if v == nil {
		return usageCount, nil
	}
	var lastUsedTs int64
	if err := v.Unmarshal(&lastUsedTs); err != nil || lastUsedTs == 0 {
		return usageCount, nil
	}
	tm := time.Unix(lastUsedTs/1e9, lastUsedTs%1e9)
	return usageCount, &tm
}

func updateLastUsed(si *metadata.StorageItem) error {
	count, _ := getLastUsed(si)
	count++

	v, err := metadata.NewValue(count)
	if err != nil {
		return errors.Wrap(err, "failed to create usageCount value")
	}
	v2, err := metadata.NewValue(time.Now().UnixNano())
	if err != nil {
		return errors.Wrap(err, "failed to create lastUsedAt value")
	}
	return si.Update(func(b *bolt.Bucket) error {
		if err := si.SetValue(b, keyUsageCount, v); err != nil {
			return err
		}
		return si.SetValue(b, keyLastUsedAt, v2)
	})
}

func SetLayerType(m withMetadata, value string) error {
	v, err := metadata.NewValue(value)
	if err != nil {
		return errors.Wrap(err, "failed to create layertype value")
	}
	m.Metadata().Queue(func(b *bolt.Bucket) error {
		return m.Metadata().SetValue(b, keyLayerType, v)
	})
	return m.Metadata().Commit()
}

func GetLayerType(m withMetadata) string {
	v := m.Metadata().Get(keyLayerType)
	if v == nil {
		return ""
	}
	var str string
	if err := v.Unmarshal(&str); err != nil {
		return ""
	}
	return str
}

func GetRecordType(m withMetadata) client.UsageRecordType {
	v := m.Metadata().Get(keyRecordType)
	if v == nil {
		return ""
	}
	var str string
	if err := v.Unmarshal(&str); err != nil {
		return ""
	}
	return client.UsageRecordType(str)
}

func SetRecordType(m withMetadata, value client.UsageRecordType) error {
	if err := queueRecordType(m.Metadata(), value); err != nil {
		return err
	}
	return m.Metadata().Commit()
}

func queueRecordType(si *metadata.StorageItem, value client.UsageRecordType) error {
	v, err := metadata.NewValue(value)
	if err != nil {
		return errors.Wrap(err, "failed to create recordtype value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, keyRecordType, v)
	})
	return nil
}
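Every queueXxx helper above has the same shape: wrap the value as JSON with metadata.NewValue, defer the write with Queue, and rely on a later Commit to flush all queued writes in one bolt transaction. A sketch of that shape for a hypothetical "cache.example" key (not a key the file defines):

package cache

import (
	"github.com/moby/buildkit/cache/metadata"
	"github.com/pkg/errors"
	bolt "go.etcd.io/bbolt"
)

// queueExample shows the shared pattern of the helpers above for a
// hypothetical "cache.example" key: wrap the value, queue the write,
// and let a later si.Commit() flush it in a single transaction.
func queueExample(si *metadata.StorageItem, s string) error {
	v, err := metadata.NewValue(s)
	if err != nil {
		return errors.Wrap(err, "failed to create example value")
	}
	si.Queue(func(b *bolt.Bucket) error {
		return si.SetValue(b, "cache.example", v)
	})
	return nil
}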
394
vendor/github.com/moby/buildkit/cache/metadata/metadata.go
generated
vendored
Normal file
@@ -0,0 +1,394 @@
package metadata

import (
	"bytes"
	"encoding/json"
	"strings"
	"sync"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	bolt "go.etcd.io/bbolt"
)

const (
	mainBucket     = "_main"
	indexBucket    = "_index"
	externalBucket = "_external"
)

var errNotFound = errors.Errorf("not found")

type Store struct {
	db *bolt.DB
}

func NewStore(dbPath string) (*Store, error) {
	db, err := bolt.Open(dbPath, 0600, nil)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to open database file %s", dbPath)
	}
	return &Store{db: db}, nil
}

func (s *Store) DB() *bolt.DB {
	return s.db
}

func (s *Store) All() ([]*StorageItem, error) {
	var out []*StorageItem
	err := s.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(mainBucket))
		if b == nil {
			return nil
		}
		return b.ForEach(func(key, _ []byte) error {
			b := b.Bucket(key)
			if b == nil {
				return nil
			}
			si, err := newStorageItem(string(key), b, s)
			if err != nil {
				return err
			}
			out = append(out, si)
			return nil
		})
	})
	return out, errors.WithStack(err)
}

func (s *Store) Probe(index string) (bool, error) {
	var exists bool
	err := s.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(indexBucket))
		if b == nil {
			return nil
		}
		main := tx.Bucket([]byte(mainBucket))
		if main == nil {
			return nil
		}
		search := []byte(indexKey(index, ""))
		c := b.Cursor()
		k, _ := c.Seek(search)
		if k != nil && bytes.HasPrefix(k, search) {
			exists = true
		}
		return nil
	})
	return exists, errors.WithStack(err)
}

func (s *Store) Search(index string) ([]*StorageItem, error) {
	var out []*StorageItem
	err := s.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(indexBucket))
		if b == nil {
			return nil
		}
		main := tx.Bucket([]byte(mainBucket))
		if main == nil {
			return nil
		}
		index = indexKey(index, "")
		c := b.Cursor()
		k, _ := c.Seek([]byte(index))
		for {
			if k != nil && strings.HasPrefix(string(k), index) {
				itemID := strings.TrimPrefix(string(k), index)
				k, _ = c.Next()
				b := main.Bucket([]byte(itemID))
				if b == nil {
					logrus.Errorf("index pointing to missing record %s", itemID)
					continue
				}
				si, err := newStorageItem(itemID, b, s)
				if err != nil {
					return err
				}
				out = append(out, si)
			} else {
				break
			}
		}
		return nil
	})
	return out, errors.WithStack(err)
}

func (s *Store) View(id string, fn func(b *bolt.Bucket) error) error {
	return s.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(mainBucket))
		if b == nil {
			return errors.WithStack(errNotFound)
		}
		b = b.Bucket([]byte(id))
		if b == nil {
			return errors.WithStack(errNotFound)
		}
		return fn(b)
	})
}

func (s *Store) Clear(id string) error {
	return errors.WithStack(s.db.Update(func(tx *bolt.Tx) error {
		external := tx.Bucket([]byte(externalBucket))
		if external != nil {
			external.DeleteBucket([]byte(id))
		}
		main := tx.Bucket([]byte(mainBucket))
		if main == nil {
			return nil
		}
		b := main.Bucket([]byte(id))
		if b == nil {
			return nil
		}
		si, err := newStorageItem(id, b, s)
		if err != nil {
			return err
		}
		if indexes := si.Indexes(); len(indexes) > 0 {
			b := tx.Bucket([]byte(indexBucket))
			if b != nil {
				for _, index := range indexes {
					if err := b.Delete([]byte(indexKey(index, id))); err != nil {
						return err
					}
				}
			}
		}
		return main.DeleteBucket([]byte(id))
	}))
}

func (s *Store) Update(id string, fn func(b *bolt.Bucket) error) error {
	return errors.WithStack(s.db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte(mainBucket))
		if err != nil {
			return errors.WithStack(err)
		}
		b, err = b.CreateBucketIfNotExists([]byte(id))
		if err != nil {
			return errors.WithStack(err)
		}
		return fn(b)
	}))
}

func (s *Store) Get(id string) (*StorageItem, bool) {
	empty := func() *StorageItem {
		si, _ := newStorageItem(id, nil, s)
		return si
	}
	tx, err := s.db.Begin(false)
	if err != nil {
		return empty(), false
	}
	defer tx.Rollback()
	b := tx.Bucket([]byte(mainBucket))
	if b == nil {
		return empty(), false
	}
	b = b.Bucket([]byte(id))
	if b == nil {
		return empty(), false
	}
	si, _ := newStorageItem(id, b, s)
	return si, true
}

func (s *Store) Close() error {
	return errors.WithStack(s.db.Close())
}

type StorageItem struct {
	id      string
	values  map[string]*Value
	queue   []func(*bolt.Bucket) error
	storage *Store
	mu      sync.RWMutex
}

func newStorageItem(id string, b *bolt.Bucket, s *Store) (*StorageItem, error) {
	si := &StorageItem{
		id:      id,
		storage: s,
		values:  make(map[string]*Value),
	}
	if b != nil {
		if err := b.ForEach(func(k, v []byte) error {
			var sv Value
			if len(v) > 0 {
				if err := json.Unmarshal(v, &sv); err != nil {
					return errors.WithStack(err)
				}
				si.values[string(k)] = &sv
			}
			return nil
		}); err != nil {
			return si, errors.WithStack(err)
		}
	}
	return si, nil
}

func (s *StorageItem) Storage() *Store { // TODO: used in local source. how to remove this?
	return s.storage
}

func (s *StorageItem) ID() string {
	return s.id
}

func (s *StorageItem) View(fn func(b *bolt.Bucket) error) error {
	return s.storage.View(s.id, fn)
}

func (s *StorageItem) Update(fn func(b *bolt.Bucket) error) error {
	return s.storage.Update(s.id, fn)
}

func (s *StorageItem) Metadata() *StorageItem {
	return s
}

func (s *StorageItem) Keys() []string {
	keys := make([]string, 0, len(s.values))
	for k := range s.values {
		keys = append(keys, k)
	}
	return keys
}

func (s *StorageItem) Get(k string) *Value {
	s.mu.RLock()
	v := s.values[k]
	s.mu.RUnlock()
	return v
}

func (s *StorageItem) GetExternal(k string) ([]byte, error) {
	var dt []byte
	err := s.storage.db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte(externalBucket))
		if b == nil {
			return errors.WithStack(errNotFound)
		}
		b = b.Bucket([]byte(s.id))
		if b == nil {
			return errors.WithStack(errNotFound)
		}
		dt = b.Get([]byte(k))
		if dt == nil {
			return errors.WithStack(errNotFound)
		}
		return nil
	})
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return dt, nil
}

func (s *StorageItem) SetExternal(k string, dt []byte) error {
	return errors.WithStack(s.storage.db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte(externalBucket))
		if err != nil {
			return errors.WithStack(err)
		}
		b, err = b.CreateBucketIfNotExists([]byte(s.id))
		if err != nil {
			return errors.WithStack(err)
		}
		return b.Put([]byte(k), dt)
	}))
}

func (s *StorageItem) Queue(fn func(b *bolt.Bucket) error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.queue = append(s.queue, fn)
}

func (s *StorageItem) Commit() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return errors.WithStack(s.Update(func(b *bolt.Bucket) error {
		for _, fn := range s.queue {
			if err := fn(b); err != nil {
				return errors.WithStack(err)
			}
		}
		s.queue = s.queue[:0]
		return nil
	}))
}

func (s *StorageItem) Indexes() (out []string) {
	for _, v := range s.values {
		if v.Index != "" {
			out = append(out, v.Index)
		}
	}
	return
}

func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error {
	if v == nil {
		if old, ok := s.values[key]; ok {
			if old.Index != "" {
				b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket))
				if err != nil {
					return errors.WithStack(err)
				}
				b.Delete([]byte(indexKey(old.Index, s.ID()))) // ignore error
			}
		}
		if err := b.Put([]byte(key), nil); err != nil {
			return err
		}
		delete(s.values, key)
		return nil
	}
	dt, err := json.Marshal(v)
	if err != nil {
		return errors.WithStack(err)
	}
	if err := b.Put([]byte(key), dt); err != nil {
		return errors.WithStack(err)
	}
	if v.Index != "" {
		b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket))
		if err != nil {
			return errors.WithStack(err)
		}
		if err := b.Put([]byte(indexKey(v.Index, s.ID())), []byte{}); err != nil {
			return errors.WithStack(err)
		}
	}
	s.values[key] = v
	return nil
}

type Value struct {
	Value json.RawMessage `json:"value,omitempty"`
	Index string          `json:"index,omitempty"`
}

func NewValue(v interface{}) (*Value, error) {
	dt, err := json.Marshal(v)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return &Value{Value: json.RawMessage(dt)}, nil
}

func (v *Value) Unmarshal(target interface{}) error {
	return errors.WithStack(json.Unmarshal(v.Value, target))
}

func indexKey(index, target string) string {
	return index + "::" + target
}
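Tying the Store API together: records live in per-ID buckets under "_main", and any Value with a non-empty Index also gets a "index::id" entry under "_index" that Search matches by prefix. A sketch of that flow, using only the functions defined above (the db path, item ID, and index string are made up):

package main

import (
	"fmt"

	"github.com/moby/buildkit/cache/metadata"
	bolt "go.etcd.io/bbolt"
)

func main() {
	store, err := metadata.NewStore("/tmp/metadata.db") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer store.Close()

	// Ensure the record bucket exists, then write through a StorageItem.
	if err := store.Update("item1", func(b *bolt.Bucket) error { return nil }); err != nil {
		panic(err)
	}
	si, _ := store.Get("item1")
	v, err := metadata.NewValue("hello")
	if err != nil {
		panic(err)
	}
	v.Index = "greeting:hello" // indexed values become searchable
	si.Queue(func(b *bolt.Bucket) error { return si.SetValue(b, "k", v) })
	if err := si.Commit(); err != nil {
		panic(err)
	}

	// Search matches the "greeting:hello::<id>" index entries by prefix.
	items, _ := store.Search("greeting:hello")
	fmt.Println(len(items)) // 1
}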
257
vendor/github.com/moby/buildkit/cache/migrate_v2.go
generated
vendored
Normal file
@@ -0,0 +1,257 @@
package cache

import (
	"context"
	"io"
	"os"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/snapshots"
	"github.com/moby/buildkit/cache/metadata"
	"github.com/moby/buildkit/snapshot"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

func migrateChainID(si *metadata.StorageItem, all map[string]*metadata.StorageItem) (digest.Digest, digest.Digest, error) {
	diffID := digest.Digest(getDiffID(si))
	if diffID == "" {
		return "", "", nil
	}
	blobID := digest.Digest(getBlob(si))
	if blobID == "" {
		return "", "", nil
	}
	chainID := digest.Digest(getChainID(si))
	blobChainID := digest.Digest(getBlobChainID(si))

	if chainID != "" && blobChainID != "" {
		return chainID, blobChainID, nil
	}

	chainID = diffID
	blobChainID = digest.FromBytes([]byte(blobID + " " + diffID))

	parent := getParent(si)
	if parent != "" {
		pChainID, pBlobChainID, err := migrateChainID(all[parent], all)
		if err != nil {
			return "", "", err
		}
		chainID = digest.FromBytes([]byte(pChainID + " " + chainID))
		blobChainID = digest.FromBytes([]byte(pBlobChainID + " " + blobChainID))
	}

	queueChainID(si, chainID.String())
	queueBlobChainID(si, blobChainID.String())

	return chainID, blobChainID, si.Commit()
}

func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapshot.Snapshotter, lm leases.Manager) error {
	_, err := os.Stat(to)
	if err != nil {
		if !os.IsNotExist(errors.Cause(err)) {
			return errors.WithStack(err)
		}
	} else {
		return nil
	}

	_, err = os.Stat(from)
	if err != nil {
		if !os.IsNotExist(errors.Cause(err)) {
			return errors.WithStack(err)
		}
		return nil
	}
	tmpPath := to + ".tmp"
	tmpFile, err := os.Create(tmpPath)
	if err != nil {
		return errors.WithStack(err)
	}
	src, err := os.Open(from)
	if err != nil {
		tmpFile.Close()
		return errors.WithStack(err)
	}
	if _, err = io.Copy(tmpFile, src); err != nil {
		tmpFile.Close()
		src.Close()
		return errors.Wrapf(err, "failed to copy db for migration")
	}
	src.Close()
	tmpFile.Close()

	md, err := metadata.NewStore(tmpPath)
	if err != nil {
		return err
	}

	items, err := md.All()
	if err != nil {
		return err
	}

	byID := map[string]*metadata.StorageItem{}
	for _, item := range items {
		byID[item.ID()] = item
	}

	// add committed, parent, snapshot
	for id, item := range byID {
		em := getEqualMutable(item)
		if em == "" {
			info, err := s.Stat(ctx, id)
			if err != nil {
				return err
			}
			if info.Kind == snapshots.KindCommitted {
				queueCommitted(item)
			}
			if info.Parent != "" {
				queueParent(item, info.Parent)
			}
		} else {
			queueCommitted(item)
		}
		queueSnapshotID(item, id)
		item.Commit()
	}

	for _, item := range byID {
		em := getEqualMutable(item)
		if em != "" {
			if getParent(item) == "" {
				queueParent(item, getParent(byID[em]))
				item.Commit()
			}
		}
	}

	type diffPair struct {
		Blobsum string
		DiffID  string
	}
	// move diffID, blobsum to new location
	for _, item := range byID {
		v := item.Get("blobmapping.blob")
		if v == nil {
			continue
		}
		var blob diffPair
		if err := v.Unmarshal(&blob); err != nil {
			return errors.WithStack(err)
		}
		if _, err := cs.Info(ctx, digest.Digest(blob.Blobsum)); err != nil {
			continue
		}
		queueDiffID(item, blob.DiffID)
		queueBlob(item, blob.Blobsum)
		queueMediaType(item, images.MediaTypeDockerSchema2LayerGzip)
		if err := item.Commit(); err != nil {
			return err
		}
	}

	// calculate new chainid/blobsumid
	for _, item := range byID {
		if _, _, err := migrateChainID(item, byID); err != nil {
			return err
		}
	}

	ctx = context.TODO() // no cancellation allowed past this point

	// add new leases
	for _, item := range byID {
		l, err := lm.Create(ctx, func(l *leases.Lease) error {
			l.ID = item.ID()
			l.Labels = map[string]string{
				"containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano),
			}
			return nil
		})
		if err != nil {
			// if we are running the migration twice
			if errdefs.IsAlreadyExists(err) {
				continue
			}
			return errors.Wrap(err, "failed to create lease")
		}

		if err := lm.AddResource(ctx, l, leases.Resource{
			ID:   getSnapshotID(item),
			Type: "snapshots/" + s.Name(),
		}); err != nil {
			return errors.Wrapf(err, "failed to add snapshot %s to lease", item.ID())
		}

		if blobID := getBlob(item); blobID != "" {
			if err := lm.AddResource(ctx, l, leases.Resource{
				ID:   blobID,
				Type: "content",
			}); err != nil {
				return errors.Wrapf(err, "failed to add blob %s to lease", item.ID())
			}
		}
	}

	// remove old root labels
	for _, item := range byID {
		if _, err := s.Update(ctx, snapshots.Info{
			Name: getSnapshotID(item),
		}, "labels.containerd.io/gc.root"); err != nil {
			if !errdefs.IsNotFound(errors.Cause(err)) {
				return err
			}
		}

		if blob := getBlob(item); blob != "" {
			if _, err := cs.Update(ctx, content.Info{
				Digest: digest.Digest(blob),
			}, "labels.containerd.io/gc.root"); err != nil {
				return err
			}
		}
	}

	// previous implementation can leak views, just clean up all views
	err = s.Walk(ctx, func(ctx context.Context, info snapshots.Info) error {
		if info.Kind == snapshots.KindView {
			if _, err := s.Update(ctx, snapshots.Info{
				Name: info.Name,
			}, "labels.containerd.io/gc.root"); err != nil {
				if !errdefs.IsNotFound(errors.Cause(err)) {
					return err
				}
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	// switch to new DB
	if err := md.Close(); err != nil {
		return err
	}

	if err := os.Rename(tmpPath, to); err != nil {
		return err
	}

	for _, item := range byID {
		logrus.Infof("migrated %s parent:%q snapshot:%v committed:%v blob:%v diffid:%v chainID:%v blobChainID:%v",
			item.ID(), getParent(item), getSnapshotID(item), getCommitted(item), getBlob(item), getDiffID(item), getChainID(item), getBlobChainID(item))
	}

	return nil
}
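The folding step in migrateChainID is worth isolating: a layer's chain ID is its own diffID combined with the parent's chain ID via digest-over-concatenation. A sketch of that one step with made-up digests (only the go-digest library is assumed):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Mirror of migrateChainID's folding step, with fabricated inputs:
	// the child's chain ID is FromBytes(parentChainID + " " + diffID).
	diffID := digest.FromString("layer-2-uncompressed")
	parentChainID := digest.FromString("layer-1-uncompressed")
	chainID := digest.FromBytes([]byte(parentChainID + " " + diffID))
	fmt.Println(chainID)
}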
693
vendor/github.com/moby/buildkit/cache/refs.go
generated
vendored
Normal file
@@ -0,0 +1,693 @@
package cache

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/leases"
	"github.com/containerd/containerd/mount"
	"github.com/containerd/containerd/snapshots"
	"github.com/docker/docker/pkg/idtools"
	"github.com/moby/buildkit/cache/metadata"
	"github.com/moby/buildkit/identity"
	"github.com/moby/buildkit/snapshot"
	"github.com/moby/buildkit/util/flightcontrol"
	"github.com/moby/buildkit/util/leaseutil"
	"github.com/opencontainers/go-digest"
	imagespecidentity "github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// Ref is a reference to cacheable objects.
type Ref interface {
	Mountable
	ID() string
	Release(context.Context) error
	Size(ctx context.Context) (int64, error)
	Metadata() *metadata.StorageItem
	IdentityMapping() *idtools.IdentityMapping
}

type ImmutableRef interface {
	Ref
	Parent() ImmutableRef
	Finalize(ctx context.Context, commit bool) error // Make sure reference is flushed to driver
	Clone() ImmutableRef

	Info() RefInfo
	SetBlob(ctx context.Context, desc ocispec.Descriptor) error
	Extract(ctx context.Context) error // +progress
}

type RefInfo struct {
	SnapshotID  string
	ChainID     digest.Digest
	BlobChainID digest.Digest
	DiffID      digest.Digest
	Blob        digest.Digest
	MediaType   string
	Extracted   bool
}

type MutableRef interface {
	Ref
	Commit(context.Context) (ImmutableRef, error)
}

type Mountable interface {
	Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error)
}

type ref interface {
	updateLastUsed() bool
}

type cacheRecord struct {
	cm *cacheManager
	mu *sync.Mutex // the mutex is shared by records sharing data

	mutable bool
	refs    map[ref]struct{}
	parent  *immutableRef
	md      *metadata.StorageItem

	// dead means record is marked as deleted
	dead bool

	view      string
	viewMount snapshot.Mountable

	sizeG flightcontrol.Group

	// these are filled if multiple refs point to same data
	equalMutable   *mutableRef
	equalImmutable *immutableRef

	parentChainCache []digest.Digest
}

// hold ref lock before calling
func (cr *cacheRecord) ref(triggerLastUsed bool) *immutableRef {
	ref := &immutableRef{cacheRecord: cr, triggerLastUsed: triggerLastUsed}
	cr.refs[ref] = struct{}{}
	return ref
}

// hold ref lock before calling
func (cr *cacheRecord) mref(triggerLastUsed bool) *mutableRef {
	ref := &mutableRef{cacheRecord: cr, triggerLastUsed: triggerLastUsed}
	cr.refs[ref] = struct{}{}
	return ref
}

func (cr *cacheRecord) parentChain() []digest.Digest {
	if cr.parentChainCache != nil {
		return cr.parentChainCache
	}
	blob := getBlob(cr.md)
	if blob == "" {
		return nil
	}

	var parent []digest.Digest
	if cr.parent != nil {
		parent = cr.parent.parentChain()
	}
	pcc := make([]digest.Digest, len(parent)+1)
	copy(pcc, parent)
	pcc[len(parent)] = digest.Digest(blob)
	cr.parentChainCache = pcc
	return pcc
}

// hold ref lock before calling
func (cr *cacheRecord) isDead() bool {
	return cr.dead || (cr.equalImmutable != nil && cr.equalImmutable.dead) || (cr.equalMutable != nil && cr.equalMutable.dead)
}

func (cr *cacheRecord) IdentityMapping() *idtools.IdentityMapping {
	return cr.cm.IdentityMapping()
}

func (cr *cacheRecord) Size(ctx context.Context) (int64, error) {
	// this expects that usage() is implemented lazily
	s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) {
		cr.mu.Lock()
		s := getSize(cr.md)
		if s != sizeUnknown {
			cr.mu.Unlock()
			return s, nil
		}
		driverID := getSnapshotID(cr.md)
		if cr.equalMutable != nil {
			driverID = getSnapshotID(cr.equalMutable.md)
		}
		cr.mu.Unlock()
		var usage snapshots.Usage
		if !getBlobOnly(cr.md) {
			var err error
			usage, err = cr.cm.ManagerOpt.Snapshotter.Usage(ctx, driverID)
			if err != nil {
				cr.mu.Lock()
				isDead := cr.isDead()
				cr.mu.Unlock()
				if isDead {
					return int64(0), nil
				}
				if !errdefs.IsNotFound(err) {
					return s, errors.Wrapf(err, "failed to get usage for %s", cr.ID())
				}
			}
		}
		if dgst := getBlob(cr.md); dgst != "" {
			info, err := cr.cm.ContentStore.Info(ctx, digest.Digest(dgst))
			if err == nil {
				usage.Size += info.Size
			}
		}
		cr.mu.Lock()
		setSize(cr.md, usage.Size)
		if err := cr.md.Commit(); err != nil {
			cr.mu.Unlock()
			return s, err
		}
		cr.mu.Unlock()
		return usage.Size, nil
	})
	if err != nil {
		return 0, err
	}
	return s.(int64), nil
}

func (cr *cacheRecord) Parent() ImmutableRef {
	if p := cr.parentRef(true); p != nil { // avoid returning typed nil pointer
		return p
	}
	return nil
}

func (cr *cacheRecord) parentRef(hidden bool) *immutableRef {
	p := cr.parent
	if p == nil {
		return nil
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	return p.ref(hidden)
}

func (cr *cacheRecord) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	cr.mu.Lock()
	defer cr.mu.Unlock()

	if cr.mutable {
		m, err := cr.cm.Snapshotter.Mounts(ctx, getSnapshotID(cr.md))
		if err != nil {
			return nil, errors.Wrapf(err, "failed to mount %s", cr.ID())
		}
		if readonly {
			m = setReadonly(m)
		}
		return m, nil
	}

	if cr.equalMutable != nil && readonly {
		m, err := cr.cm.Snapshotter.Mounts(ctx, getSnapshotID(cr.equalMutable.md))
		if err != nil {
			return nil, errors.Wrapf(err, "failed to mount %s", cr.equalMutable.ID())
		}
		return setReadonly(m), nil
	}

	if err := cr.finalize(ctx, true); err != nil {
		return nil, err
	}
	if cr.viewMount == nil { // TODO: handle this better
		view := identity.NewID()
		l, err := cr.cm.LeaseManager.Create(ctx, func(l *leases.Lease) error {
			l.ID = view
			l.Labels = map[string]string{
				"containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano),
			}
			return nil
		}, leaseutil.MakeTemporary)
		if err != nil {
			return nil, err
		}
		ctx = leases.WithLease(ctx, l.ID)
		m, err := cr.cm.Snapshotter.View(ctx, view, getSnapshotID(cr.md))
		if err != nil {
			cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: l.ID})
			return nil, errors.Wrapf(err, "failed to mount %s", cr.ID())
		}
		cr.view = view
		cr.viewMount = m
	}
	return cr.viewMount, nil
}

// call when holding the manager lock
func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error {
	delete(cr.cm.records, cr.ID())
	if cr.parent != nil {
		cr.parent.mu.Lock()
		err := cr.parent.release(ctx)
		cr.parent.mu.Unlock()
		if err != nil {
			return err
		}
	}
	if removeSnapshot {
		if err := cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: cr.ID()}); err != nil {
			return errors.Wrapf(err, "failed to remove %s", cr.ID())
		}
	}
	if err := cr.cm.md.Clear(cr.ID()); err != nil {
		return err
	}
	return nil
}

func (cr *cacheRecord) ID() string {
	return cr.md.ID()
}

type immutableRef struct {
	*cacheRecord
	triggerLastUsed bool
}

type mutableRef struct {
	*cacheRecord
	triggerLastUsed bool
}

func (sr *immutableRef) Clone() ImmutableRef {
	sr.mu.Lock()
	ref := sr.ref(false)
	sr.mu.Unlock()
	return ref
}

func (sr *immutableRef) Info() RefInfo {
	return RefInfo{
		ChainID:     digest.Digest(getChainID(sr.md)),
		DiffID:      digest.Digest(getDiffID(sr.md)),
		Blob:        digest.Digest(getBlob(sr.md)),
		MediaType:   getMediaType(sr.md),
		BlobChainID: digest.Digest(getBlobChainID(sr.md)),
		SnapshotID:  getSnapshotID(sr.md),
		Extracted:   !getBlobOnly(sr.md),
	}
}

func (sr *immutableRef) Extract(ctx context.Context) error {
	_, err := sr.sizeG.Do(ctx, sr.ID()+"-extract", func(ctx context.Context) (interface{}, error) {
		snapshotID := getSnapshotID(sr.md)
		if _, err := sr.cm.Snapshotter.Stat(ctx, snapshotID); err == nil {
			queueBlobOnly(sr.md, false)
			return nil, sr.md.Commit()
		}

		parentID := ""
|
||||
if sr.parent != nil {
|
||||
if err := sr.parent.Extract(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parentID = getSnapshotID(sr.parent.md)
|
||||
}
|
||||
info := sr.Info()
|
||||
key := fmt.Sprintf("extract-%s %s", identity.NewID(), info.ChainID)
|
||||
|
||||
err := sr.cm.Snapshotter.Prepare(ctx, key, parentID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mountable, err := sr.cm.Snapshotter.Mounts(ctx, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mounts, unmount, err := mountable.Mount()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = sr.cm.Applier.Apply(ctx, ocispec.Descriptor{
|
||||
Digest: info.Blob,
|
||||
MediaType: info.MediaType,
|
||||
}, mounts)
|
||||
if err != nil {
|
||||
unmount()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := unmount(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := sr.cm.Snapshotter.Commit(ctx, getSnapshotID(sr.md), key); err != nil {
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
queueBlobOnly(sr.md, false)
|
||||
if err := sr.md.Commit(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return nil, nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// SetBlob associates a blob with the cache record.
// A lease must be held for the blob when calling this function.
// The caller should call Info() afterwards to learn which values were actually set.
func (sr *immutableRef) SetBlob(ctx context.Context, desc ocispec.Descriptor) error {
	diffID, err := diffIDFromDescriptor(desc)
	if err != nil {
		return err
	}
	if _, err := sr.cm.ContentStore.Info(ctx, desc.Digest); err != nil {
		return err
	}

	sr.mu.Lock()
	defer sr.mu.Unlock()

	if getChainID(sr.md) != "" {
		return nil
	}

	if err := sr.finalize(ctx, true); err != nil {
		return err
	}

	p := sr.parent
	var parentChainID digest.Digest
	var parentBlobChainID digest.Digest
	if p != nil {
		pInfo := p.Info()
		if pInfo.ChainID == "" || pInfo.BlobChainID == "" {
			return errors.Errorf("failed to set blob for reference with non-addressable parent")
		}
		parentChainID = pInfo.ChainID
		parentBlobChainID = pInfo.BlobChainID
	}

	if err := sr.cm.LeaseManager.AddResource(ctx, leases.Lease{ID: sr.ID()}, leases.Resource{
		ID:   desc.Digest.String(),
		Type: "content",
	}); err != nil {
		return err
	}

	queueDiffID(sr.md, diffID.String())
	queueBlob(sr.md, desc.Digest.String())
	chainID := diffID
	blobChainID := imagespecidentity.ChainID([]digest.Digest{desc.Digest, diffID})
	if parentChainID != "" {
		chainID = imagespecidentity.ChainID([]digest.Digest{parentChainID, chainID})
		blobChainID = imagespecidentity.ChainID([]digest.Digest{parentBlobChainID, blobChainID})
	}
	queueChainID(sr.md, chainID.String())
	queueBlobChainID(sr.md, blobChainID.String())
	queueMediaType(sr.md, desc.MediaType)
	if err := sr.md.Commit(); err != nil {
		return err
	}
	return nil
}

func (sr *immutableRef) Release(ctx context.Context) error {
	sr.cm.mu.Lock()
	defer sr.cm.mu.Unlock()

	sr.mu.Lock()
	defer sr.mu.Unlock()

	return sr.release(ctx)
}

func (sr *immutableRef) updateLastUsed() bool {
	return sr.triggerLastUsed
}

func (sr *immutableRef) updateLastUsedNow() bool {
	if !sr.triggerLastUsed {
		return false
	}
	for r := range sr.refs {
		if r.updateLastUsed() {
			return false
		}
	}
	return true
}

func (sr *immutableRef) release(ctx context.Context) error {
	delete(sr.refs, sr)

	if sr.updateLastUsedNow() {
		updateLastUsed(sr.md)
		if sr.equalMutable != nil {
			sr.equalMutable.triggerLastUsed = true
		}
	}

	if len(sr.refs) == 0 {
		if sr.viewMount != nil { // TODO: release viewMount earlier if possible
			if err := sr.cm.LeaseManager.Delete(ctx, leases.Lease{ID: sr.view}); err != nil {
				return errors.Wrapf(err, "failed to remove view lease %s", sr.view)
			}
			sr.view = ""
			sr.viewMount = nil
		}

		if sr.equalMutable != nil {
			sr.equalMutable.release(ctx)
		}
	}

	return nil
}

func (sr *immutableRef) Finalize(ctx context.Context, b bool) error {
	sr.mu.Lock()
	defer sr.mu.Unlock()

	return sr.finalize(ctx, b)
}

func (cr *cacheRecord) Metadata() *metadata.StorageItem {
	return cr.md
}

func (cr *cacheRecord) finalize(ctx context.Context, commit bool) error {
	mutable := cr.equalMutable
	if mutable == nil {
		return nil
	}
	if !commit {
		if HasCachePolicyRetain(mutable) {
			CachePolicyRetain(mutable)
			return mutable.Metadata().Commit()
		}
		return nil
	}

	_, err := cr.cm.ManagerOpt.LeaseManager.Create(ctx, func(l *leases.Lease) error {
		l.ID = cr.ID()
		l.Labels = map[string]string{
			"containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano),
		}
		return nil
	})
	if err != nil {
		if !errdefs.IsAlreadyExists(err) { // migrator adds leases for everything
			return errors.Wrap(err, "failed to create lease")
		}
	}

	if err := cr.cm.ManagerOpt.LeaseManager.AddResource(ctx, leases.Lease{ID: cr.ID()}, leases.Resource{
		ID:   cr.ID(),
		Type: "snapshots/" + cr.cm.ManagerOpt.Snapshotter.Name(),
	}); err != nil {
		cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: cr.ID()})
		return errors.Wrapf(err, "failed to add snapshot %s to lease", cr.ID())
	}

	err = cr.cm.Snapshotter.Commit(ctx, cr.ID(), mutable.ID())
	if err != nil {
		cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: cr.ID()})
		return errors.Wrapf(err, "failed to commit %s", mutable.ID())
	}
	mutable.dead = true
	go func() {
		cr.cm.mu.Lock()
		defer cr.cm.mu.Unlock()
		if err := mutable.remove(context.TODO(), true); err != nil {
			logrus.Error(err)
		}
	}()

	cr.equalMutable = nil
	clearEqualMutable(cr.md)
	return cr.md.Commit()
}

func (sr *mutableRef) updateLastUsed() bool {
	return sr.triggerLastUsed
}

func (sr *mutableRef) commit(ctx context.Context) (*immutableRef, error) {
	if !sr.mutable || len(sr.refs) == 0 {
		return nil, errors.Wrapf(errInvalid, "invalid mutable ref %p", sr)
	}

	id := identity.NewID()
	md, _ := sr.cm.md.Get(id)
	rec := &cacheRecord{
		mu:           sr.mu,
		cm:           sr.cm,
		parent:       sr.parentRef(false),
		equalMutable: sr,
		refs:         make(map[ref]struct{}),
		md:           md,
	}

	if descr := GetDescription(sr.md); descr != "" {
		if err := queueDescription(md, descr); err != nil {
			return nil, err
		}
	}

	parentID := ""
	if rec.parent != nil {
		parentID = rec.parent.ID()
	}
	if err := initializeMetadata(rec, parentID); err != nil {
		return nil, err
	}

	sr.cm.records[id] = rec

	if err := sr.md.Commit(); err != nil {
		return nil, err
	}

	queueCommitted(md)
	setSize(md, sizeUnknown)
	setEqualMutable(md, sr.ID())
	if err := md.Commit(); err != nil {
		return nil, err
	}

	ref := rec.ref(true)
	sr.equalImmutable = ref
	return ref, nil
}

func (sr *mutableRef) updatesLastUsed() bool {
	return sr.triggerLastUsed
}

func (sr *mutableRef) Commit(ctx context.Context) (ImmutableRef, error) {
	sr.cm.mu.Lock()
	defer sr.cm.mu.Unlock()

	sr.mu.Lock()
	defer sr.mu.Unlock()

	return sr.commit(ctx)
}

func (sr *mutableRef) Release(ctx context.Context) error {
	sr.cm.mu.Lock()
	defer sr.cm.mu.Unlock()

	sr.mu.Lock()
	defer sr.mu.Unlock()

	return sr.release(ctx)
}

func (sr *mutableRef) release(ctx context.Context) error {
	delete(sr.refs, sr)
	if getCachePolicy(sr.md) != cachePolicyRetain {
		if sr.equalImmutable != nil {
			if getCachePolicy(sr.equalImmutable.md) == cachePolicyRetain {
				if sr.updateLastUsed() {
					updateLastUsed(sr.md)
					sr.triggerLastUsed = false
				}
				return nil
			}
			if err := sr.equalImmutable.remove(ctx, false); err != nil {
				return err
			}
		}
		return sr.remove(ctx, true)
	} else {
		if sr.updateLastUsed() {
			updateLastUsed(sr.md)
			sr.triggerLastUsed = false
		}
	}
	return nil
}

func setReadonly(mounts snapshot.Mountable) snapshot.Mountable {
	return &readOnlyMounter{mounts}
}

type readOnlyMounter struct {
	snapshot.Mountable
}

func (m *readOnlyMounter) Mount() ([]mount.Mount, func() error, error) {
	mounts, release, err := m.Mountable.Mount()
	if err != nil {
		return nil, nil, err
	}
	for i, m := range mounts {
		if m.Type == "overlay" {
			mounts[i].Options = readonlyOverlay(m.Options)
			continue
		}
		opts := make([]string, 0, len(m.Options))
		for _, opt := range m.Options {
			if opt != "rw" {
				opts = append(opts, opt)
			}
		}
		opts = append(opts, "ro")
		mounts[i].Options = opts
	}
	return mounts, release, nil
}

func readonlyOverlay(opt []string) []string {
	out := make([]string, 0, len(opt))
	upper := ""
	for _, o := range opt {
		if strings.HasPrefix(o, "upperdir=") {
			upper = strings.TrimPrefix(o, "upperdir=")
		} else if !strings.HasPrefix(o, "workdir=") {
			out = append(out, o)
		}
	}
	if upper != "" {
		for i, o := range out {
			if strings.HasPrefix(o, "lowerdir=") {
				out[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=")
			}
		}
	}
	return out
}
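// NOTE (editor's illustrative sketch, not part of the vendored file):
// readonlyOverlay drops the "workdir=" option and folds the writable
// upperdir into the read-only lower stack, so the contents of the previous
// upper layer remain visible after the mount becomes read-only. For example:
//
//	in := []string{"workdir=/w", "upperdir=/u", "lowerdir=/l2:/l1"}
//	out := readonlyOverlay(in)
//	// out == []string{"lowerdir=/u:/l2:/l1"}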
142
vendor/github.com/moby/buildkit/cache/remotecache/export.go
generated
vendored
Normal file
@@ -0,0 +1,142 @@
package remotecache

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	v1 "github.com/moby/buildkit/cache/remotecache/v1"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/util/contentutil"
	"github.com/moby/buildkit/util/progress"
	digest "github.com/opencontainers/go-digest"
	specs "github.com/opencontainers/image-spec/specs-go"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

type ResolveCacheExporterFunc func(ctx context.Context, attrs map[string]string) (Exporter, error)

func oneOffProgress(ctx context.Context, id string) func(err error) error {
	pw, _, _ := progress.FromContext(ctx)
	now := time.Now()
	st := progress.Status{
		Started: &now,
	}
	pw.Write(id, st)
	return func(err error) error {
		now := time.Now()
		st.Completed = &now
		pw.Write(id, st)
		pw.Close()
		return err
	}
}

type Exporter interface {
	solver.CacheExporterTarget
	// Finalize finalizes and returns metadata that is returned to the client,
	// e.g. ExporterResponseManifestDesc.
	Finalize(ctx context.Context) (map[string]string, error)
}

const (
	// ExporterResponseManifestDesc is a key for the map returned from Exporter.Finalize.
	// The map value is a JSON string of an OCI descriptor of a manifest.
	ExporterResponseManifestDesc = "cache.manifest"
)

type contentCacheExporter struct {
	solver.CacheExporterTarget
	chains   *v1.CacheChains
	ingester content.Ingester
}

func NewExporter(ingester content.Ingester) Exporter {
	cc := v1.NewCacheChains()
	return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester}
}

func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string, error) {
	return export(ctx, ce.ingester, ce.chains)
}

func export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) (map[string]string, error) {
	res := make(map[string]string)
	config, descs, err := cc.Marshal()
	if err != nil {
		return nil, err
	}

	// own type because oci type can't be pushed and docker type doesn't have annotations
	type manifestList struct {
		specs.Versioned

		MediaType string `json:"mediaType,omitempty"`

		// Manifests references platform specific manifests.
		Manifests []ocispec.Descriptor `json:"manifests"`
	}

	var mfst manifestList
	mfst.SchemaVersion = 2
	mfst.MediaType = images.MediaTypeDockerSchema2ManifestList

	for _, l := range config.Layers {
		dgstPair, ok := descs[l.Blob]
		if !ok {
			return nil, errors.Errorf("missing blob %s", l.Blob)
		}
		layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob))
		if err := contentutil.Copy(ctx, ingester, dgstPair.Provider, dgstPair.Descriptor); err != nil {
			return nil, layerDone(errors.Wrap(err, "error writing layer blob"))
		}
		layerDone(nil)
		mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor)
	}

	dt, err := json.Marshal(config)
	if err != nil {
		return nil, err
	}
	dgst := digest.FromBytes(dt)
	desc := ocispec.Descriptor{
		Digest:    dgst,
		Size:      int64(len(dt)),
		MediaType: v1.CacheConfigMediaTypeV0,
	}
	configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst))
	if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
		return nil, configDone(errors.Wrap(err, "error writing config blob"))
	}
	configDone(nil)

	mfst.Manifests = append(mfst.Manifests, desc)

	dt, err = json.Marshal(mfst)
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal manifest")
	}
	dgst = digest.FromBytes(dt)

	desc = ocispec.Descriptor{
		Digest:    dgst,
		Size:      int64(len(dt)),
		MediaType: mfst.MediaType,
	}
	mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst))
	if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
		return nil, mfstDone(errors.Wrap(err, "error writing manifest blob"))
	}
	descJSON, err := json.Marshal(desc)
	if err != nil {
		return nil, err
	}
	res[ExporterResponseManifestDesc] = string(descJSON)
	mfstDone(nil)
	return res, nil
}
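// NOTE (editor's illustrative note, not part of the vendored file): the
// exported artifact is a Docker manifest list whose "manifests" array holds
// one descriptor per layer blob, with the cache config descriptor
// (application/vnd.buildkit.cacheconfig.v0) appended last; the importer in
// import.go below locates the config by that media type.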
299
vendor/github.com/moby/buildkit/cache/remotecache/import.go
generated
vendored
Normal file
@@ -0,0 +1,299 @@
package remotecache

import (
	"context"
	"encoding/json"
	"io"
	"sync"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	v1 "github.com/moby/buildkit/cache/remotecache/v1"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/util/imageutil"
	"github.com/moby/buildkit/worker"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
)

// ResolveCacheImporterFunc returns importer and descriptor.
type ResolveCacheImporterFunc func(ctx context.Context, attrs map[string]string) (Importer, ocispec.Descriptor, error)

type Importer interface {
	Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error)
}

type DistributionSourceLabelSetter interface {
	SetDistributionSourceLabel(context.Context, digest.Digest) error
	SetDistributionSourceAnnotation(desc ocispec.Descriptor) ocispec.Descriptor
}

func NewImporter(provider content.Provider) Importer {
	return &contentCacheImporter{provider: provider}
}

type contentCacheImporter struct {
	provider content.Provider
}

func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
	dt, err := readBlob(ctx, ci.provider, desc)
	if err != nil {
		return nil, err
	}

	var mfst ocispec.Index
	if err := json.Unmarshal(dt, &mfst); err != nil {
		return nil, err
	}

	allLayers := v1.DescriptorProvider{}

	var configDesc ocispec.Descriptor

	for _, m := range mfst.Manifests {
		if m.MediaType == v1.CacheConfigMediaTypeV0 {
			configDesc = m
			continue
		}
		allLayers[m.Digest] = v1.DescriptorProviderPair{
			Descriptor: m,
			Provider:   ci.provider,
		}
	}

	if dsls, ok := ci.provider.(DistributionSourceLabelSetter); ok {
		for dgst, l := range allLayers {
			err := dsls.SetDistributionSourceLabel(ctx, dgst)
			_ = err // error ignored because layer may not exist
			l.Descriptor = dsls.SetDistributionSourceAnnotation(l.Descriptor)
			allLayers[dgst] = l
		}
	}

	if configDesc.Digest == "" {
		return ci.importInlineCache(ctx, dt, id, w)
	}

	dt, err = readBlob(ctx, ci.provider, configDesc)
	if err != nil {
		return nil, err
	}

	cc := v1.NewCacheChains()
	if err := v1.Parse(dt, allLayers, cc); err != nil {
		return nil, err
	}

	keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
	if err != nil {
		return nil, err
	}
	return solver.NewCacheManager(id, keysStorage, resultStorage), nil
}

func readBlob(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]byte, error) {
	maxBlobSize := int64(1 << 20)
	if desc.Size > maxBlobSize {
		return nil, errors.Errorf("blob %s is too large (%d > %d)", desc.Digest, desc.Size, maxBlobSize)
	}
	dt, err := content.ReadBlob(ctx, provider, desc)
	if err != nil {
		// NOTE: even if err == EOF, we might have got expected dt here.
		// For instance, http.Response.Body is known to return non-zero bytes with EOF.
		if err == io.EOF {
			if dtDigest := desc.Digest.Algorithm().FromBytes(dt); dtDigest != desc.Digest {
				err = errors.Wrapf(err, "got EOF, expected %s (%d bytes), got %s (%d bytes)",
					desc.Digest, desc.Size, dtDigest, len(dt))
			} else {
				err = nil
			}
		}
	}
	return dt, errors.WithStack(err)
}

func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte, id string, w worker.Worker) (solver.CacheManager, error) {
	m := map[digest.Digest][]byte{}

	if err := ci.allDistributionManifests(ctx, dt, m); err != nil {
		return nil, err
	}

	var mu sync.Mutex
	var cMap = map[digest.Digest]*v1.CacheChains{}

	eg, ctx := errgroup.WithContext(ctx)
	for dgst, dt := range m {
		func(dgst digest.Digest, dt []byte) {
			eg.Go(func() error {
				var m ocispec.Manifest

				if err := json.Unmarshal(dt, &m); err != nil {
					return errors.WithStack(err)
				}

				if m.Config.Digest == "" || len(m.Layers) == 0 {
					return nil
				}

				if dsls, ok := ci.provider.(DistributionSourceLabelSetter); ok {
					for i, l := range m.Layers {
						err := dsls.SetDistributionSourceLabel(ctx, l.Digest)
						_ = err // error ignored because layer may not exist
						m.Layers[i] = dsls.SetDistributionSourceAnnotation(l)
					}
				}

				p, err := content.ReadBlob(ctx, ci.provider, m.Config)
				if err != nil {
					return errors.WithStack(err)
				}

				var img image

				if err := json.Unmarshal(p, &img); err != nil {
					return errors.WithStack(err)
				}

				if len(img.Rootfs.DiffIDs) != len(m.Layers) {
					logrus.Warnf("invalid image with mismatching manifest and config")
					return nil
				}

				if img.Cache == nil {
					return nil
				}

				var config v1.CacheConfig
				if err := json.Unmarshal(img.Cache, &config.Records); err != nil {
					return errors.WithStack(err)
				}

				createdDates, createdMsg, err := parseCreatedLayerInfo(img)
				if err != nil {
					return err
				}

				layers := v1.DescriptorProvider{}
				for i, m := range m.Layers {
					if m.Annotations == nil {
						m.Annotations = map[string]string{}
					}
					if createdAt := createdDates[i]; createdAt != "" {
						m.Annotations["buildkit/createdat"] = createdAt
					}
					if createdBy := createdMsg[i]; createdBy != "" {
						m.Annotations["buildkit/description"] = createdBy
					}
					m.Annotations["containerd.io/uncompressed"] = img.Rootfs.DiffIDs[i].String()
					layers[m.Digest] = v1.DescriptorProviderPair{
						Descriptor: m,
						Provider:   ci.provider,
					}
					config.Layers = append(config.Layers, v1.CacheLayer{
						Blob:        m.Digest,
						ParentIndex: i - 1,
					})
				}

				dt, err = json.Marshal(config)
				if err != nil {
					return errors.WithStack(err)
				}
				cc := v1.NewCacheChains()
				if err := v1.ParseConfig(config, layers, cc); err != nil {
					return err
				}
				mu.Lock()
				cMap[dgst] = cc
				mu.Unlock()
				return nil
			})
		}(dgst, dt)
	}

	if err := eg.Wait(); err != nil {
		return nil, err
	}

	cms := make([]solver.CacheManager, 0, len(cMap))

	for _, cc := range cMap {
		keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
		if err != nil {
			return nil, err
		}
		cms = append(cms, solver.NewCacheManager(id, keysStorage, resultStorage))
	}

	return solver.NewCombinedCacheManager(cms, nil), nil
}

func (ci *contentCacheImporter) allDistributionManifests(ctx context.Context, dt []byte, m map[digest.Digest][]byte) error {
	mt, err := imageutil.DetectManifestBlobMediaType(dt)
	if err != nil {
		return err
	}

	switch mt {
	case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
		m[digest.FromBytes(dt)] = dt
	case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
		var index ocispec.Index
		if err := json.Unmarshal(dt, &index); err != nil {
			return errors.WithStack(err)
		}

		for _, d := range index.Manifests {
			if _, ok := m[d.Digest]; ok {
				continue
			}
			p, err := content.ReadBlob(ctx, ci.provider, d)
			if err != nil {
				return errors.WithStack(err)
			}
			if err := ci.allDistributionManifests(ctx, p, m); err != nil {
				return err
			}
		}
	}

	return nil
}

type image struct {
	Rootfs struct {
		DiffIDs []digest.Digest `json:"diff_ids"`
	} `json:"rootfs"`
	Cache   []byte `json:"moby.buildkit.cache.v0"`
	History []struct {
		Created    *time.Time `json:"created,omitempty"`
		CreatedBy  string     `json:"created_by,omitempty"`
		EmptyLayer bool       `json:"empty_layer,omitempty"`
	} `json:"history,omitempty"`
}

func parseCreatedLayerInfo(img image) ([]string, []string, error) {
	dates := make([]string, 0, len(img.Rootfs.DiffIDs))
	createdBy := make([]string, 0, len(img.Rootfs.DiffIDs))
	for _, h := range img.History {
		if !h.EmptyLayer {
			str := ""
			if h.Created != nil {
				dt, err := h.Created.MarshalText()
				if err != nil {
					return nil, nil, err
				}
				str = string(dt)
			}
			dates = append(dates, str)
			createdBy = append(createdBy, h.CreatedBy)
		}
	}
	return dates, createdBy, nil
}
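// NOTE (editor's illustrative note, not part of the vendored file): history
// entries with empty_layer=true contribute no layer blob, so filtering on
// !h.EmptyLayer keeps the returned date/created-by slices index-aligned with
// img.Rootfs.DiffIDs, which importInlineCache relies on when annotating the
// layer descriptors above.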
297
vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go
generated
vendored
Normal file
@@ -0,0 +1,297 @@
package cacheimport

import (
	"context"
	"time"

	"github.com/moby/buildkit/identity"
	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/worker"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

func NewCacheKeyStorage(cc *CacheChains, w worker.Worker) (solver.CacheKeyStorage, solver.CacheResultStorage, error) {
	storage := &cacheKeyStorage{
		byID:     map[string]*itemWithOutgoingLinks{},
		byItem:   map[*item]string{},
		byResult: map[string]map[string]struct{}{},
	}

	for _, it := range cc.items {
		if _, err := addItemToStorage(storage, it); err != nil {
			return nil, nil, err
		}
	}

	results := &cacheResultStorage{
		w:        w,
		byID:     storage.byID,
		byItem:   storage.byItem,
		byResult: storage.byResult,
	}

	return storage, results, nil
}

func addItemToStorage(k *cacheKeyStorage, it *item) (*itemWithOutgoingLinks, error) {
	if id, ok := k.byItem[it]; ok {
		if id == "" {
			return nil, errors.Errorf("invalid loop")
		}
		return k.byID[id], nil
	}

	var id string
	if len(it.links) == 0 {
		id = it.dgst.String()
	} else {
		id = identity.NewID()
	}

	k.byItem[it] = ""

	for i, m := range it.links {
		for l := range m {
			src, err := addItemToStorage(k, l.src)
			if err != nil {
				return nil, err
			}
			cl := nlink{
				input:    i,
				dgst:     it.dgst,
				selector: l.selector,
			}
			src.links[cl] = append(src.links[cl], id)
		}
	}

	k.byItem[it] = id

	itl := &itemWithOutgoingLinks{
		item:  it,
		links: map[nlink][]string{},
	}

	k.byID[id] = itl

	if res := it.result; res != nil {
		resultID := remoteID(res)
		ids, ok := k.byResult[resultID]
		if !ok {
			ids = map[string]struct{}{}
			k.byResult[resultID] = ids
		}
		ids[id] = struct{}{}
	}
	return itl, nil
}

type cacheKeyStorage struct {
	byID     map[string]*itemWithOutgoingLinks
	byItem   map[*item]string
	byResult map[string]map[string]struct{}
}

type itemWithOutgoingLinks struct {
	*item
	links map[nlink][]string
}

func (cs *cacheKeyStorage) Exists(id string) bool {
	_, ok := cs.byID[id]
	return ok
}

func (cs *cacheKeyStorage) Walk(func(id string) error) error {
	return nil
}

func (cs *cacheKeyStorage) WalkResults(id string, fn func(solver.CacheResult) error) error {
	it, ok := cs.byID[id]
	if !ok {
		return nil
	}
	if res := it.result; res != nil {
		return fn(solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime})
	}
	return nil
}

func (cs *cacheKeyStorage) Load(id string, resultID string) (solver.CacheResult, error) {
	it, ok := cs.byID[id]
	if !ok {
		return solver.CacheResult{}, nil
	}
	if res := it.result; res != nil {
		return solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime}, nil
	}
	return solver.CacheResult{}, nil
}

func (cs *cacheKeyStorage) AddResult(id string, res solver.CacheResult) error {
	return nil
}

func (cs *cacheKeyStorage) Release(resultID string) error {
	return nil
}
func (cs *cacheKeyStorage) AddLink(id string, link solver.CacheInfoLink, target string) error {
	return nil
}
func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error {
	it, ok := cs.byID[id]
	if !ok {
		return nil
	}
	for _, id := range it.links[nlink{
		dgst:     outputKey(link.Digest, int(link.Output)),
		input:    int(link.Input),
		selector: link.Selector.String(),
	}] {
		if err := fn(id); err != nil {
			return err
		}
	}
	return nil
}

func (cs *cacheKeyStorage) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error {
	for k, it := range cs.byID {
		for nl, ids := range it.links {
			for _, id2 := range ids {
				if id == id2 {
					if err := fn(k, solver.CacheInfoLink{
						Input:    solver.Index(nl.input),
						Selector: digest.Digest(nl.selector),
						Digest:   nl.dgst,
					}); err != nil {
						return err
					}
				}
			}
		}
	}
	return nil
}

func (cs *cacheKeyStorage) WalkIDsByResult(id string, fn func(id string) error) error {
	ids := cs.byResult[id]
	for id := range ids {
		if err := fn(id); err != nil {
			return err
		}
	}
	return nil
}

func (cs *cacheKeyStorage) HasLink(id string, link solver.CacheInfoLink, target string) bool {
	l := nlink{
		dgst:     outputKey(link.Digest, int(link.Output)),
		input:    int(link.Input),
		selector: link.Selector.String(),
	}
	if it, ok := cs.byID[id]; ok {
		for _, id := range it.links[l] {
			if id == target {
				return true
			}
		}
	}
	return false
}

type cacheResultStorage struct {
	w        worker.Worker
	byID     map[string]*itemWithOutgoingLinks
	byResult map[string]map[string]struct{}
	byItem   map[*item]string
}

func (cs *cacheResultStorage) Save(res solver.Result, createdAt time.Time) (solver.CacheResult, error) {
	return solver.CacheResult{}, errors.Errorf("importer is immutable")
}

func (cs *cacheResultStorage) LoadWithParents(ctx context.Context, res solver.CacheResult) (map[string]solver.Result, error) {
	v := cs.byResultID(res.ID)
	if v == nil || v.result == nil {
		return nil, errors.WithStack(solver.ErrNotFound)
	}

	m := map[string]solver.Result{}

	visited := make(map[*item]struct{})
	if err := v.walkAllResults(func(i *item) error {
		if i.result == nil {
			return nil
		}
		id, ok := cs.byItem[i]
		if !ok {
			return nil
		}
		if isSubRemote(*i.result, *v.result) {
			ref, err := cs.w.FromRemote(ctx, i.result)
			if err != nil {
				return err
			}
			m[id] = worker.NewWorkerRefResult(ref, cs.w)
		}
		return nil
	}, visited); err != nil {
		for _, v := range m {
			v.Release(context.TODO())
		}
		return nil, err
	}

	return m, nil
}

func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) {
	item := cs.byResultID(res.ID)
	if item == nil || item.result == nil {
		return nil, errors.WithStack(solver.ErrNotFound)
	}

	ref, err := cs.w.FromRemote(ctx, item.result)
	if err != nil {
		return nil, errors.Wrap(err, "failed to load result from remote")
	}
	return worker.NewWorkerRefResult(ref, cs.w), nil
}

func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) {
	if r := cs.byResultID(res.ID); r != nil && r.result != nil {
		return r.result, nil
	}
	return nil, errors.WithStack(solver.ErrNotFound)
}

func (cs *cacheResultStorage) Exists(id string) bool {
	return cs.byResultID(id) != nil
}

func (cs *cacheResultStorage) byResultID(resultID string) *itemWithOutgoingLinks {
	m, ok := cs.byResult[resultID]
	if !ok || len(m) == 0 {
		return nil
	}

	for id := range m {
		it, ok := cs.byID[id]
		if ok {
			return it
		}
	}

	return nil
}

// unique ID per remote. this ID is not stable.
func remoteID(r *solver.Remote) string {
	dgstr := digest.Canonical.Digester()
	for _, desc := range r.Descriptors {
		dgstr.Hash().Write([]byte(desc.Digest))
	}
	return dgstr.Digest().String()
}
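// NOTE (editor's illustrative note, not part of the vendored file): these
// remote IDs are what cacheKeyStorage stores in byResult and returns from
// WalkResults/Load as solver.CacheResult.ID, so cacheResultStorage.byResultID
// can map a result ID back to its item without any extra index.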
158
vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go
generated
vendored
Normal file
@@ -0,0 +1,158 @@
package cacheimport

import (
	"strings"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/moby/buildkit/solver"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func NewCacheChains() *CacheChains {
	return &CacheChains{visited: map[interface{}]struct{}{}}
}

type CacheChains struct {
	items   []*item
	visited map[interface{}]struct{}
}

func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord {
	if strings.HasPrefix(dgst.String(), "random:") {
		return &nopRecord{}
	}
	it := &item{c: c, dgst: dgst}
	c.items = append(c.items, it)
	return it
}

func (c *CacheChains) Visit(v interface{}) {
	c.visited[v] = struct{}{}
}

func (c *CacheChains) Visited(v interface{}) bool {
	_, ok := c.visited[v]
	return ok
}

func (c *CacheChains) normalize() error {
	st := &normalizeState{
		added: map[*item]*item{},
		links: map[*item]map[nlink]map[digest.Digest]struct{}{},
		byKey: map[digest.Digest]*item{},
	}

	for _, it := range c.items {
		_, err := normalizeItem(it, st)
		if err != nil {
			return err
		}
	}

	items := make([]*item, 0, len(st.byKey))
	for _, it := range st.byKey {
		items = append(items, it)
	}
	c.items = items
	return nil
}

func (c *CacheChains) Marshal() (*CacheConfig, DescriptorProvider, error) {
	if err := c.normalize(); err != nil {
		return nil, nil, err
	}

	st := &marshalState{
		chainsByID:    map[string]int{},
		descriptors:   DescriptorProvider{},
		recordsByItem: map[*item]int{},
	}

	for _, it := range c.items {
		if err := marshalItem(it, st); err != nil {
			return nil, nil, err
		}
	}

	cc := CacheConfig{
		Layers:  st.layers,
		Records: st.records,
	}
	sortConfig(&cc)

	return &cc, st.descriptors, nil
}

type DescriptorProvider map[digest.Digest]DescriptorProviderPair

type DescriptorProviderPair struct {
	Descriptor ocispec.Descriptor
	Provider   content.Provider
}

type item struct {
	c    *CacheChains
	dgst digest.Digest

	result     *solver.Remote
	resultTime time.Time

	links []map[link]struct{}
}

type link struct {
	src      *item
	selector string
}

func (c *item) AddResult(createdAt time.Time, result *solver.Remote) {
	c.resultTime = createdAt
	c.result = result
}

func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
	src, ok := rec.(*item)
	if !ok {
		return
	}

	for {
		if index < len(c.links) {
			break
		}
		c.links = append(c.links, map[link]struct{}{})
	}

	c.links[index][link{src: src, selector: selector}] = struct{}{}
}

func (c *item) walkAllResults(fn func(i *item) error, visited map[*item]struct{}) error {
	if _, ok := visited[c]; ok {
		return nil
	}
	visited[c] = struct{}{}
	if err := fn(c); err != nil {
		return err
	}
	for _, links := range c.links {
		for l := range links {
			if err := l.src.walkAllResults(fn, visited); err != nil {
				return err
			}
		}
	}
	return nil
}

type nopRecord struct {
}

func (c *nopRecord) AddResult(createdAt time.Time, result *solver.Remote) {
}

func (c *nopRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
}

var _ solver.CacheExporterTarget = &CacheChains{}
50
vendor/github.com/moby/buildkit/cache/remotecache/v1/doc.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
package cacheimport

// Distributable build cache
//
// Main manifest is OCI image index
// https://github.com/opencontainers/image-spec/blob/master/image-index.md .
// Manifests array contains descriptors to the cache layers and one instance of
// build cache config with media type application/vnd.buildkit.cacheconfig.v0 .
// The cache layer descriptors need to have an annotation with uncompressed digest
// to allow deduplication on extraction and optionally "buildkit/createdat"
// annotation to support maintaining original timestamps.
//
// Cache config file layout:
//
// {
//   "layers": [
//     {
//       "blob": "sha256:deadbeef",   <- digest of layer blob in index
//       "parent": -1                 <- index of parent layer, -1 if no parent
//     },
//     {
//       "blob": "sha256:deadbeef",
//       "parent": 0
//     }
//   ],
//
//   "records": [
//     {
//       "digest": "sha256:deadbeef"  <- base digest for the record
//     },
//     {
//       "digest": "sha256:deadbeef",
//       "output": 1,                 <- optional output index
//       "layers": [                  <- optional array of layer chains
//         {
//           "createdat": "",
//           "layer": 1               <- index to the layer
//         }
//       ],
//       "inputs": [                  <- dependent records
//         [                          <- index of the dependency (0)
//           {
//             "selector": "sel",     <- optional selector
//             "link": 0              <- index to the dependent record
//           }
//         ]
//       ]
//     }
//   ]
// }
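// NOTE (editor's illustrative sketch, not part of the vendored file): the Go
// types for this layout are defined in spec.go later in this diff; marshaling
// a minimal config reproduces the document above. Assuming encoding/json:
//
//	cfg := CacheConfig{
//		Layers: []CacheLayer{
//			{Blob: "sha256:aaa...", ParentIndex: -1},
//			{Blob: "sha256:bbb...", ParentIndex: 0},
//		},
//		Records: []CacheRecord{{Digest: "sha256:ccc..."}},
//	}
//	dt, _ := json.Marshal(cfg)
//	// dt == {"layers":[{"blob":"sha256:aaa...","parent":-1},
//	//        {"blob":"sha256:bbb..."}],"records":[{"digest":"sha256:ccc..."}]}
//	// note "parent":0 is dropped by the omitempty tag, which is one reason
//	// the document above always shows "parent" explicitly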
110
vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
package cacheimport

import (
	"encoding/json"

	"github.com/moby/buildkit/solver"
	"github.com/moby/buildkit/util/contentutil"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

func Parse(configJSON []byte, provider DescriptorProvider, t solver.CacheExporterTarget) error {
	var config CacheConfig
	if err := json.Unmarshal(configJSON, &config); err != nil {
		return errors.WithStack(err)
	}

	return ParseConfig(config, provider, t)
}

func ParseConfig(config CacheConfig, provider DescriptorProvider, t solver.CacheExporterTarget) error {
	cache := map[int]solver.CacheExporterRecord{}

	for i := range config.Records {
		if _, err := parseRecord(config, i, provider, t, cache); err != nil {
			return err
		}
	}
	return nil
}

func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.CacheExporterTarget, cache map[int]solver.CacheExporterRecord) (solver.CacheExporterRecord, error) {
	if r, ok := cache[idx]; ok {
		if r == nil {
			return nil, errors.Errorf("invalid looping record")
		}
		return r, nil
	}

	if idx < 0 || idx >= len(cc.Records) {
		return nil, errors.Errorf("invalid record ID: %d", idx)
	}
	rec := cc.Records[idx]

	r := t.Add(rec.Digest)
	cache[idx] = nil
	for i, inputs := range rec.Inputs {
		for _, inp := range inputs {
			src, err := parseRecord(cc, inp.LinkIndex, provider, t, cache)
			if err != nil {
				return nil, err
			}
			r.LinkFrom(src, i, inp.Selector)
		}
	}

	for _, res := range rec.Results {
		visited := map[int]struct{}{}
		remote, err := getRemoteChain(cc.Layers, res.LayerIndex, provider, visited)
		if err != nil {
			return nil, err
		}
		if remote != nil {
			r.AddResult(res.CreatedAt, remote)
		}
	}

	cache[idx] = r
	return r, nil
}

func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, visited map[int]struct{}) (*solver.Remote, error) {
	if _, ok := visited[idx]; ok {
		return nil, errors.Errorf("invalid looping layer")
	}
	visited[idx] = struct{}{}

	if idx < 0 || idx >= len(layers) {
		return nil, errors.Errorf("invalid layer index %d", idx)
	}

	l := layers[idx]

	descPair, ok := provider[l.Blob]
	if !ok {
		return nil, nil
	}

	var r *solver.Remote
	if l.ParentIndex != -1 {
		var err error
		r, err = getRemoteChain(layers, l.ParentIndex, provider, visited)
		if err != nil {
			return nil, err
		}
		if r == nil {
			return nil, nil
		}
		r.Descriptors = append(r.Descriptors, descPair.Descriptor)
		mp := contentutil.NewMultiProvider(r.Provider)
		mp.Add(descPair.Descriptor.Digest, descPair.Provider)
		r.Provider = mp
		return r, nil
	}
	return &solver.Remote{
		Descriptors: []ocispec.Descriptor{descPair.Descriptor},
		Provider:    descPair.Provider,
	}, nil
}
35
vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
package cacheimport

import (
	"time"

	digest "github.com/opencontainers/go-digest"
)

const CacheConfigMediaTypeV0 = "application/vnd.buildkit.cacheconfig.v0"

type CacheConfig struct {
	Layers  []CacheLayer  `json:"layers,omitempty"`
	Records []CacheRecord `json:"records,omitempty"`
}

type CacheLayer struct {
	Blob        digest.Digest `json:"blob,omitempty"`
	ParentIndex int           `json:"parent,omitempty"`
}

type CacheRecord struct {
	Results []CacheResult  `json:"layers,omitempty"`
	Digest  digest.Digest  `json:"digest,omitempty"`
	Inputs  [][]CacheInput `json:"inputs,omitempty"`
}

type CacheResult struct {
	LayerIndex int       `json:"layer"`
	CreatedAt  time.Time `json:"createdAt,omitempty"`
}

type CacheInput struct {
	Selector  string `json:"selector,omitempty"`
	LinkIndex int    `json:"link"`
}
322
vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go
generated
vendored
Normal file
@@ -0,0 +1,322 @@
|
||||
package cacheimport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/moby/buildkit/solver"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// sortConfig sorts the config structure to make sure it is deterministic
|
||||
func sortConfig(cc *CacheConfig) {
|
||||
type indexedLayer struct {
|
||||
oldIndex int
|
||||
newIndex int
|
||||
l CacheLayer
|
||||
}
|
||||
|
||||
unsortedLayers := make([]*indexedLayer, len(cc.Layers))
|
||||
sortedLayers := make([]*indexedLayer, len(cc.Layers))
|
||||
|
||||
for i, l := range cc.Layers {
|
||||
il := &indexedLayer{oldIndex: i, l: l}
|
||||
unsortedLayers[i] = il
|
||||
sortedLayers[i] = il
|
||||
}
|
||||
sort.Slice(sortedLayers, func(i, j int) bool {
|
||||
li := sortedLayers[i].l
|
||||
lj := sortedLayers[j].l
|
||||
if li.Blob == lj.Blob {
|
||||
return li.ParentIndex < lj.ParentIndex
|
||||
}
|
||||
return li.Blob < lj.Blob
|
||||
})
|
||||
for i, l := range sortedLayers {
|
||||
l.newIndex = i
|
||||
}
|
||||
|
||||
layers := make([]CacheLayer, len(sortedLayers))
|
||||
for i, l := range sortedLayers {
|
||||
if pID := l.l.ParentIndex; pID != -1 {
|
||||
l.l.ParentIndex = unsortedLayers[pID].newIndex
|
||||
}
|
||||
layers[i] = l.l
|
||||
}
|
||||
|
||||
type indexedRecord struct {
|
||||
oldIndex int
|
||||
newIndex int
|
||||
r CacheRecord
|
||||
}
|
||||
|
||||
unsortedRecords := make([]*indexedRecord, len(cc.Records))
|
||||
sortedRecords := make([]*indexedRecord, len(cc.Records))
|
||||
|
||||
for i, r := range cc.Records {
|
||||
ir := &indexedRecord{oldIndex: i, r: r}
|
||||
unsortedRecords[i] = ir
|
||||
sortedRecords[i] = ir
|
||||
}
|
||||
sort.Slice(sortedRecords, func(i, j int) bool {
|
||||
ri := sortedRecords[i].r
|
||||
rj := sortedRecords[j].r
|
||||
if ri.Digest != rj.Digest {
|
||||
return ri.Digest < rj.Digest
|
||||
}
|
||||
if len(ri.Inputs) != len(rj.Inputs) {
|
||||
return len(ri.Inputs) < len(rj.Inputs)
|
||||
}
|
||||
for i, inputs := range ri.Inputs {
|
||||
if len(ri.Inputs[i]) != len(rj.Inputs[i]) {
|
||||
return len(ri.Inputs[i]) < len(rj.Inputs[i])
|
||||
}
|
||||
for j := range inputs {
|
||||
if ri.Inputs[i][j].Selector != rj.Inputs[i][j].Selector {
|
||||
return ri.Inputs[i][j].Selector < rj.Inputs[i][j].Selector
|
||||
}
|
||||
inputDigesti := cc.Records[ri.Inputs[i][j].LinkIndex].Digest
|
||||
inputDigestj := cc.Records[rj.Inputs[i][j].LinkIndex].Digest
|
||||
if inputDigesti != inputDigestj {
|
||||
return inputDigesti < inputDigestj
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
for i, l := range sortedRecords {
|
||||
l.newIndex = i
|
||||
}
|
||||
|
||||
records := make([]CacheRecord, len(sortedRecords))
|
||||
for i, r := range sortedRecords {
|
||||
for j := range r.r.Results {
|
||||
r.r.Results[j].LayerIndex = unsortedLayers[r.r.Results[j].LayerIndex].newIndex
|
||||
}
|
||||
for j, inputs := range r.r.Inputs {
|
||||
for k := range inputs {
|
||||
r.r.Inputs[j][k].LinkIndex = unsortedRecords[r.r.Inputs[j][k].LinkIndex].newIndex
|
||||
}
|
||||
sort.Slice(inputs, func(i, j int) bool {
|
||||
return inputs[i].LinkIndex < inputs[j].LinkIndex
|
||||
})
|
||||
}
|
||||
records[i] = r.r
|
||||
}
|
||||
|
||||
cc.Layers = layers
|
||||
cc.Records = records
|
||||
}
|
||||
|
||||
func outputKey(dgst digest.Digest, idx int) digest.Digest {
|
||||
return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, idx)))
|
||||
}
|
||||
|
||||
type nlink struct {
|
||||
dgst digest.Digest
|
||||
input int
|
||||
selector string
|
||||
}
|
||||
type normalizeState struct {
|
||||
added map[*item]*item
|
||||
links map[*item]map[nlink]map[digest.Digest]struct{}
|
||||
byKey map[digest.Digest]*item
|
||||
next int
|
||||
}
|
||||
|
||||
func normalizeItem(it *item, state *normalizeState) (*item, error) {
|
||||
if it2, ok := state.added[it]; ok {
|
||||
return it2, nil
|
||||
}
|
||||
|
||||
if len(it.links) == 0 {
|
||||
id := it.dgst
|
||||
if it2, ok := state.byKey[id]; ok {
|
||||
state.added[it] = it2
|
||||
return it2, nil
|
||||
}
|
||||
state.byKey[id] = it
|
||||
state.added[it] = it
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
matches := map[digest.Digest]struct{}{}
|
||||
|
||||
// check if there is already a matching record
|
||||
for i, m := range it.links {
|
||||
if len(m) == 0 {
|
||||
return nil, errors.Errorf("invalid incomplete links")
|
||||
}
|
||||
for l := range m {
|
||||
nl := nlink{dgst: it.dgst, input: i, selector: l.selector}
|
||||
it2, err := normalizeItem(l.src, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
links := state.links[it2][nl]
|
||||
if i == 0 {
|
||||
for id := range links {
|
||||
matches[id] = struct{}{}
|
||||
}
|
||||
} else {
|
||||
for id := range matches {
|
||||
if _, ok := links[id]; !ok {
|
||||
delete(matches, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var id digest.Digest
|
||||
|
||||
links := it.links
|
||||
|
||||
if len(matches) > 0 {
|
||||
for m := range matches {
|
||||
if id == "" || id > m {
|
||||
id = m
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// keep tmp IDs deterministic
|
||||
state.next++
|
||||
id = digest.FromBytes([]byte(fmt.Sprintf("%d", state.next)))
|
||||
state.byKey[id] = it
|
||||
it.links = make([]map[link]struct{}, len(it.links))
|
||||
for i := range it.links {
|
||||
it.links[i] = map[link]struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
it2 := state.byKey[id]
|
||||
state.added[it] = it2
|
||||
|
||||
for i, m := range links {
|
||||
for l := range m {
|
||||
subIt, err := normalizeItem(l.src, state)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
it2.links[i][link{src: subIt, selector: l.selector}] = struct{}{}
|
||||
|
||||
nl := nlink{dgst: it.dgst, input: i, selector: l.selector}
|
||||
if _, ok := state.links[subIt]; !ok {
|
||||
state.links[subIt] = map[nlink]map[digest.Digest]struct{}{}
|
||||
}
|
||||
if _, ok := state.links[subIt][nl]; !ok {
|
||||
state.links[subIt][nl] = map[digest.Digest]struct{}{}
|
||||
}
|
||||
state.links[subIt][nl][id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return it2, nil
|
||||
}
|
||||
|
||||
type marshalState struct {
|
||||
layers []CacheLayer
|
||||
chainsByID map[string]int
|
||||
descriptors DescriptorProvider
|
||||
|
||||
records []CacheRecord
|
||||
recordsByItem map[*item]int
|
||||
}
|
||||
|
||||
func marshalRemote(r *solver.Remote, state *marshalState) string {
|
||||
if len(r.Descriptors) == 0 {
|
||||
return ""
|
||||
}
|
||||
type Remote struct {
|
||||
Descriptors []ocispec.Descriptor
|
||||
Provider content.Provider
|
||||
}
|
||||
var parentID string
|
||||
if len(r.Descriptors) > 1 {
|
||||
r2 := &solver.Remote{
|
||||
Descriptors: r.Descriptors[:len(r.Descriptors)-1],
|
||||
Provider: r.Provider,
|
||||
}
|
||||
parentID = marshalRemote(r2, state)
|
||||
}
|
||||
desc := r.Descriptors[len(r.Descriptors)-1]
|
||||
|
||||
state.descriptors[desc.Digest] = DescriptorProviderPair{
|
||||
Descriptor: desc,
|
||||
Provider: r.Provider,
|
||||
}
|
||||
|
||||
id := desc.Digest.String() + parentID
|
||||
|
||||
if _, ok := state.chainsByID[id]; ok {
|
||||
return id
|
||||
}
|
||||
|
||||
state.chainsByID[id] = len(state.layers)
|
||||
l := CacheLayer{
|
||||
Blob: desc.Digest,
|
||||
ParentIndex: -1,
|
||||
}
|
||||
if parentID != "" {
|
||||
l.ParentIndex = state.chainsByID[parentID]
|
||||
}
|
||||
state.layers = append(state.layers, l)
|
||||
return id
|
||||
}
|
||||
|
||||
// marshalItem appends a CacheRecord for it (and, recursively, for all of its
// inputs) to state, wiring inputs up by record index.
func marshalItem(it *item, state *marshalState) error {
	if _, ok := state.recordsByItem[it]; ok {
		return nil
	}

	rec := CacheRecord{
		Digest: it.dgst,
		Inputs: make([][]CacheInput, len(it.links)),
	}

	for i, m := range it.links {
		for l := range m {
			if err := marshalItem(l.src, state); err != nil {
				return err
			}
			idx, ok := state.recordsByItem[l.src]
			if !ok {
				return errors.Errorf("invalid source record: %v", l.src)
			}
			rec.Inputs[i] = append(rec.Inputs[i], CacheInput{
				Selector:  l.selector,
				LinkIndex: idx,
			})
		}
	}

	if it.result != nil {
		id := marshalRemote(it.result, state)
		if id != "" {
			idx, ok := state.chainsByID[id]
			if !ok {
				return errors.Errorf("parent chainid not found")
			}
			rec.Results = append(rec.Results, CacheResult{LayerIndex: idx, CreatedAt: it.resultTime})
		}
	}

	state.recordsByItem[it] = len(state.records)
	state.records = append(state.records, rec)
	return nil
}
// isSubRemote reports whether sub's layer chain is a prefix of main's.
func isSubRemote(sub, main solver.Remote) bool {
	if len(sub.Descriptors) > len(main.Descriptors) {
		return false
	}
	for i := range sub.Descriptors {
		if sub.Descriptors[i].Digest != main.Descriptors[i].Digest {
			return false
		}
	}
	return true
}
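In other words, one remote is a sub-remote of another when its layer digests form a positional prefix of the other's. A minimal standalone sketch of that check (simplified Remote stand-in with string digests, not the buildkit types):

package main

import "fmt"

// remote is a simplified stand-in for solver.Remote: just the ordered digests.
type remote struct{ digests []string }

// isPrefix mirrors isSubRemote: sub must be no longer than main, and every
// digest must match at the same position.
func isPrefix(sub, main remote) bool {
	if len(sub.digests) > len(main.digests) {
		return false
	}
	for i, d := range sub.digests {
		if d != main.digests[i] {
			return false
		}
	}
	return true
}

func main() {
	base := remote{digests: []string{"sha256:aaa", "sha256:bbb"}}
	full := remote{digests: []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}}
	fmt.Println(isPrefix(base, full)) // true: base's layers are a prefix of full's
	fmt.Println(isPrefix(full, base)) // false: full has more layers than base
}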
139
vendor/github.com/moby/buildkit/cache/util/fsutil.go
generated
vendored
Normal file
@@ -0,0 +1,139 @@
package util

import (
	"context"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/containerd/continuity/fs"
	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/snapshot"
	"github.com/pkg/errors"
	"github.com/tonistiigi/fsutil"
	fstypes "github.com/tonistiigi/fsutil/types"
)
// ReadRequest names a file inside a snapshot and, optionally, a byte range of
// it to read.
type ReadRequest struct {
	Filename string
	Range    *FileRange
}

type FileRange struct {
	Offset int
	Length int
}
// withMount mounts ref read-only, runs cb against the mounted root, and
// ensures the mount is released on both the success and error paths.
func withMount(ctx context.Context, ref cache.ImmutableRef, cb func(string) error) error {
	mount, err := ref.Mount(ctx, true)
	if err != nil {
		return err
	}

	lm := snapshot.LocalMounter(mount)

	root, err := lm.Mount()
	if err != nil {
		return err
	}

	defer func() {
		if lm != nil {
			lm.Unmount()
		}
	}()

	if err := cb(root); err != nil {
		return err
	}

	// Unmount explicitly so any error is surfaced to the caller, then disarm
	// the deferred cleanup above.
	if err := lm.Unmount(); err != nil {
		return err
	}
	lm = nil
	return nil
}
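withMount uses a common Go idiom: a deferred cleanup guarded by a sentinel that is nil-ed once the explicit cleanup has run, so the resource is released exactly once whether the callback fails or succeeds. A standalone sketch of the idiom (hypothetical resource type, stdlib only; not from the diff):

package main

import "fmt"

type resource struct{ name string }

func (r *resource) close() { fmt.Println("closed", r.name) }

func withResource(cb func(*resource) error) error {
	r := &resource{name: "mount"}
	// Deferred guard: fires only if we bail out before the explicit close.
	defer func() {
		if r != nil {
			r.close()
		}
	}()
	if err := cb(r); err != nil {
		return err // the deferred guard releases r
	}
	// Close explicitly so a close error could be returned to the caller,
	// then disarm the guard.
	r.close()
	r = nil
	return nil
}

func main() {
	_ = withResource(func(r *resource) error {
		fmt.Println("using", r.name)
		return nil
	})
}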
// ReadFile returns the contents (or the requested byte range) of a file from
// the snapshot referenced by ref.
func ReadFile(ctx context.Context, ref cache.ImmutableRef, req ReadRequest) ([]byte, error) {
	var dt []byte

	err := withMount(ctx, ref, func(root string) error {
		fp, err := fs.RootPath(root, req.Filename)
		if err != nil {
			return errors.WithStack(err)
		}

		if req.Range == nil {
			dt, err = ioutil.ReadFile(fp)
			if err != nil {
				return errors.WithStack(err)
			}
		} else {
			f, err := os.Open(fp)
			if err != nil {
				return errors.WithStack(err)
			}
			dt, err = ioutil.ReadAll(io.NewSectionReader(f, int64(req.Range.Offset), int64(req.Range.Length)))
			f.Close()
			if err != nil {
				return errors.WithStack(err)
			}
		}
		return nil
	})
	return dt, err
}
type ReadDirRequest struct {
	Path           string
	IncludePattern string
}
// ReadDir lists the entries directly under req.Path in the snapshot. It does
// not recurse: directories are recorded and then skipped.
func ReadDir(ctx context.Context, ref cache.ImmutableRef, req ReadDirRequest) ([]*fstypes.Stat, error) {
	var (
		rd []*fstypes.Stat
		wo fsutil.WalkOpt
	)
	if req.IncludePattern != "" {
		wo.IncludePatterns = append(wo.IncludePatterns, req.IncludePattern)
	}
	err := withMount(ctx, ref, func(root string) error {
		fp, err := fs.RootPath(root, req.Path)
		if err != nil {
			return errors.WithStack(err)
		}
		return fsutil.Walk(ctx, fp, &wo, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return errors.Wrapf(err, "walking %q", root)
			}
			stat, ok := info.Sys().(*fstypes.Stat)
			if !ok {
				// This "can't happen(tm)".
				return errors.Errorf("expected a *fsutil.Stat but got %T", info.Sys())
			}
			rd = append(rd, stat)

			if info.IsDir() {
				return filepath.SkipDir
			}
			return nil
		})
	})
	return rd, err
}
// StatFile returns the stat info for a single path in the snapshot.
func StatFile(ctx context.Context, ref cache.ImmutableRef, path string) (*fstypes.Stat, error) {
	var st *fstypes.Stat
	err := withMount(ctx, ref, func(root string) error {
		fp, err := fs.RootPath(root, path)
		if err != nil {
			return errors.WithStack(err)
		}
		if st, err = fsutil.Stat(fp); err != nil {
			return errors.WithStack(err)
		}
		return nil
	})
	return st, err
}
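A hedged usage sketch, not part of the diff: how a caller might combine ReadFile, ReadDir, and StatFile. It compiles only inside a module that vendors buildkit; acquiring `ref` is elided, and the file paths are made up for illustration.

package inspect

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/cache"
	cacheutil "github.com/moby/buildkit/cache/util"
)

func inspectSnapshot(ctx context.Context, ref cache.ImmutableRef) error {
	// Ranged read: the first 512 bytes of a file inside the snapshot.
	head, err := cacheutil.ReadFile(ctx, ref, cacheutil.ReadRequest{
		Filename: "etc/os-release",
		Range:    &cacheutil.FileRange{Offset: 0, Length: 512},
	})
	if err != nil {
		return err
	}
	fmt.Printf("read %d bytes\n", len(head))

	// Non-recursive listing of the snapshot root.
	entries, err := cacheutil.ReadDir(ctx, ref, cacheutil.ReadDirRequest{Path: "/"})
	if err != nil {
		return err
	}
	for _, st := range entries {
		fmt.Println(st.Path)
	}

	// Metadata for a single path.
	st, err := cacheutil.StatFile(ctx, ref, "bin/sh")
	if err != nil {
		return err
	}
	fmt.Printf("%s mode=%o\n", st.Path, st.Mode)
	return nil
}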