Update gomod and vendor

Ettore Di Giacinto
2021-01-19 18:29:09 +01:00
parent dbd37afced
commit 7b25a54653
930 changed files with 183699 additions and 4609 deletions

View File

@@ -0,0 +1,142 @@
package remotecache
import (
"bytes"
"context"
"encoding/json"
"fmt"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
type ResolveCacheExporterFunc func(ctx context.Context, attrs map[string]string) (Exporter, error)
func oneOffProgress(ctx context.Context, id string) func(err error) error {
pw, _, _ := progress.FromContext(ctx)
now := time.Now()
st := progress.Status{
Started: &now,
}
pw.Write(id, st)
return func(err error) error {
now := time.Now()
st.Completed = &now
pw.Write(id, st)
pw.Close()
return err
}
}
type Exporter interface {
solver.CacheExporterTarget
// Finalize finalizes and returns metadata that is returned to the client,
// e.g. ExporterResponseManifestDesc.
Finalize(ctx context.Context) (map[string]string, error)
}
const (
// ExporterResponseManifestDesc is a key for the map returned from Exporter.Finalize.
// The map value is a JSON string of an OCI descriptor of a manifest.
ExporterResponseManifestDesc = "cache.manifest"
)
type contentCacheExporter struct {
solver.CacheExporterTarget
chains *v1.CacheChains
ingester content.Ingester
}
func NewExporter(ingester content.Ingester) Exporter {
cc := v1.NewCacheChains()
return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester}
}
func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string, error) {
return export(ctx, ce.ingester, ce.chains)
}
func export(ctx context.Context, ingester content.Ingester, cc *v1.CacheChains) (map[string]string, error) {
res := make(map[string]string)
config, descs, err := cc.Marshal()
if err != nil {
return nil, err
}
// use our own type because the OCI type can't be pushed and the Docker type doesn't have annotations
type manifestList struct {
specs.Versioned
MediaType string `json:"mediaType,omitempty"`
// Manifests references platform specific manifests.
Manifests []ocispec.Descriptor `json:"manifests"`
}
var mfst manifestList
mfst.SchemaVersion = 2
mfst.MediaType = images.MediaTypeDockerSchema2ManifestList
for _, l := range config.Layers {
dgstPair, ok := descs[l.Blob]
if !ok {
return nil, errors.Errorf("missing blob %s", l.Blob)
}
layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob))
if err := contentutil.Copy(ctx, ingester, dgstPair.Provider, dgstPair.Descriptor); err != nil {
return nil, layerDone(errors.Wrap(err, "error writing layer blob"))
}
layerDone(nil)
mfst.Manifests = append(mfst.Manifests, dgstPair.Descriptor)
}
dt, err := json.Marshal(config)
if err != nil {
return nil, err
}
dgst := digest.FromBytes(dt)
desc := ocispec.Descriptor{
Digest: dgst,
Size: int64(len(dt)),
MediaType: v1.CacheConfigMediaTypeV0,
}
configDone := oneOffProgress(ctx, fmt.Sprintf("writing config %s", dgst))
if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
return nil, configDone(errors.Wrap(err, "error writing config blob"))
}
configDone(nil)
mfst.Manifests = append(mfst.Manifests, desc)
dt, err = json.Marshal(mfst)
if err != nil {
return nil, errors.Wrap(err, "failed to marshal manifest")
}
dgst = digest.FromBytes(dt)
desc = ocispec.Descriptor{
Digest: dgst,
Size: int64(len(dt)),
MediaType: mfst.MediaType,
}
mfstDone := oneOffProgress(ctx, fmt.Sprintf("writing manifest %s", dgst))
if err := content.WriteBlob(ctx, ingester, dgst.String(), bytes.NewReader(dt), desc); err != nil {
return nil, mfstDone(errors.Wrap(err, "error writing manifest blob"))
}
descJSON, err := json.Marshal(desc)
if err != nil {
return nil, err
}
res[ExporterResponseManifestDesc] = string(descJSON)
mfstDone(nil)
return res, nil
}
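
The export code above hands the manifest descriptor back to the client as a JSON string under the cache.manifest key. As a minimal sketch (not part of the vendored file; the helper name is hypothetical, while the key and payload come from export above), a caller could decode it like this:

// manifestDescFromResponse is a hypothetical helper showing how a caller can
// recover the OCI descriptor of the exported cache manifest from the map
// returned by Exporter.Finalize.
func manifestDescFromResponse(resp map[string]string) (ocispec.Descriptor, error) {
	raw, ok := resp[ExporterResponseManifestDesc]
	if !ok {
		return ocispec.Descriptor{}, errors.Errorf("response has no %s entry", ExporterResponseManifestDesc)
	}
	var desc ocispec.Descriptor
	if err := json.Unmarshal([]byte(raw), &desc); err != nil {
		return ocispec.Descriptor{}, errors.Wrap(err, "invalid manifest descriptor")
	}
	return desc, nil
}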

View File

@@ -0,0 +1,299 @@
package remotecache
import (
"context"
"encoding/json"
"io"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
// ResolveCacheImporterFunc returns an importer and a descriptor.
type ResolveCacheImporterFunc func(ctx context.Context, attrs map[string]string) (Importer, ocispec.Descriptor, error)
type Importer interface {
Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error)
}
type DistributionSourceLabelSetter interface {
SetDistributionSourceLabel(context.Context, digest.Digest) error
SetDistributionSourceAnnotation(desc ocispec.Descriptor) ocispec.Descriptor
}
func NewImporter(provider content.Provider) Importer {
return &contentCacheImporter{provider: provider}
}
type contentCacheImporter struct {
provider content.Provider
}
func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) {
dt, err := readBlob(ctx, ci.provider, desc)
if err != nil {
return nil, err
}
var mfst ocispec.Index
if err := json.Unmarshal(dt, &mfst); err != nil {
return nil, err
}
allLayers := v1.DescriptorProvider{}
var configDesc ocispec.Descriptor
for _, m := range mfst.Manifests {
if m.MediaType == v1.CacheConfigMediaTypeV0 {
configDesc = m
continue
}
allLayers[m.Digest] = v1.DescriptorProviderPair{
Descriptor: m,
Provider: ci.provider,
}
}
if dsls, ok := ci.provider.(DistributionSourceLabelSetter); ok {
for dgst, l := range allLayers {
err := dsls.SetDistributionSourceLabel(ctx, dgst)
_ = err // error ignored because layer may not exist
l.Descriptor = dsls.SetDistributionSourceAnnotation(l.Descriptor)
allLayers[dgst] = l
}
}
if configDesc.Digest == "" {
return ci.importInlineCache(ctx, dt, id, w)
}
dt, err = readBlob(ctx, ci.provider, configDesc)
if err != nil {
return nil, err
}
cc := v1.NewCacheChains()
if err := v1.Parse(dt, allLayers, cc); err != nil {
return nil, err
}
keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
if err != nil {
return nil, err
}
return solver.NewCacheManager(id, keysStorage, resultStorage), nil
}
func readBlob(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]byte, error) {
maxBlobSize := int64(1 << 20)
if desc.Size > maxBlobSize {
return nil, errors.Errorf("blob %s is too large (%d > %d)", desc.Digest, desc.Size, maxBlobSize)
}
dt, err := content.ReadBlob(ctx, provider, desc)
if err != nil {
// NOTE: even if err == EOF, we might have gotten the expected dt here.
// For instance, http.Response.Body is known to return non-zero bytes with EOF.
if err == io.EOF {
if dtDigest := desc.Digest.Algorithm().FromBytes(dt); dtDigest != desc.Digest {
err = errors.Wrapf(err, "got EOF, expected %s (%d bytes), got %s (%d bytes)",
desc.Digest, desc.Size, dtDigest, len(dt))
} else {
err = nil
}
}
}
return dt, errors.WithStack(err)
}
func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte, id string, w worker.Worker) (solver.CacheManager, error) {
m := map[digest.Digest][]byte{}
if err := ci.allDistributionManifests(ctx, dt, m); err != nil {
return nil, err
}
var mu sync.Mutex
var cMap = map[digest.Digest]*v1.CacheChains{}
eg, ctx := errgroup.WithContext(ctx)
for dgst, dt := range m {
func(dgst digest.Digest, dt []byte) {
eg.Go(func() error {
var m ocispec.Manifest
if err := json.Unmarshal(dt, &m); err != nil {
return errors.WithStack(err)
}
if m.Config.Digest == "" || len(m.Layers) == 0 {
return nil
}
if dsls, ok := ci.provider.(DistributionSourceLabelSetter); ok {
for i, l := range m.Layers {
err := dsls.SetDistributionSourceLabel(ctx, l.Digest)
_ = err // error ignored because layer may not exist
m.Layers[i] = dsls.SetDistributionSourceAnnotation(l)
}
}
p, err := content.ReadBlob(ctx, ci.provider, m.Config)
if err != nil {
return errors.WithStack(err)
}
var img image
if err := json.Unmarshal(p, &img); err != nil {
return errors.WithStack(err)
}
if len(img.Rootfs.DiffIDs) != len(m.Layers) {
logrus.Warnf("invalid image with mismatching manifest and config")
return nil
}
if img.Cache == nil {
return nil
}
var config v1.CacheConfig
if err := json.Unmarshal(img.Cache, &config.Records); err != nil {
return errors.WithStack(err)
}
createdDates, createdMsg, err := parseCreatedLayerInfo(img)
if err != nil {
return err
}
layers := v1.DescriptorProvider{}
for i, m := range m.Layers {
if m.Annotations == nil {
m.Annotations = map[string]string{}
}
if createdAt := createdDates[i]; createdAt != "" {
m.Annotations["buildkit/createdat"] = createdAt
}
if createdBy := createdMsg[i]; createdBy != "" {
m.Annotations["buildkit/description"] = createdBy
}
m.Annotations["containerd.io/uncompressed"] = img.Rootfs.DiffIDs[i].String()
layers[m.Digest] = v1.DescriptorProviderPair{
Descriptor: m,
Provider: ci.provider,
}
config.Layers = append(config.Layers, v1.CacheLayer{
Blob: m.Digest,
ParentIndex: i - 1,
})
}
dt, err = json.Marshal(config)
if err != nil {
return errors.WithStack(err)
}
cc := v1.NewCacheChains()
if err := v1.ParseConfig(config, layers, cc); err != nil {
return err
}
mu.Lock()
cMap[dgst] = cc
mu.Unlock()
return nil
})
}(dgst, dt)
}
if err := eg.Wait(); err != nil {
return nil, err
}
cms := make([]solver.CacheManager, 0, len(cMap))
for _, cc := range cMap {
keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w)
if err != nil {
return nil, err
}
cms = append(cms, solver.NewCacheManager(id, keysStorage, resultStorage))
}
return solver.NewCombinedCacheManager(cms, nil), nil
}
func (ci *contentCacheImporter) allDistributionManifests(ctx context.Context, dt []byte, m map[digest.Digest][]byte) error {
mt, err := imageutil.DetectManifestBlobMediaType(dt)
if err != nil {
return err
}
switch mt {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
m[digest.FromBytes(dt)] = dt
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
var index ocispec.Index
if err := json.Unmarshal(dt, &index); err != nil {
return errors.WithStack(err)
}
for _, d := range index.Manifests {
if _, ok := m[d.Digest]; ok {
continue
}
p, err := content.ReadBlob(ctx, ci.provider, d)
if err != nil {
return errors.WithStack(err)
}
if err := ci.allDistributionManifests(ctx, p, m); err != nil {
return err
}
}
}
return nil
}
type image struct {
Rootfs struct {
DiffIDs []digest.Digest `json:"diff_ids"`
} `json:"rootfs"`
Cache []byte `json:"moby.buildkit.cache.v0"`
History []struct {
Created *time.Time `json:"created,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
EmptyLayer bool `json:"empty_layer,omitempty"`
} `json:"history,omitempty"`
}
func parseCreatedLayerInfo(img image) ([]string, []string, error) {
dates := make([]string, 0, len(img.Rootfs.DiffIDs))
createdBy := make([]string, 0, len(img.Rootfs.DiffIDs))
for _, h := range img.History {
if !h.EmptyLayer {
str := ""
if h.Created != nil {
dt, err := h.Created.MarshalText()
if err != nil {
return nil, nil, err
}
str = string(dt)
}
dates = append(dates, str)
createdBy = append(createdBy, h.CreatedBy)
}
}
return dates, createdBy, nil
}
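
A short sketch (hypothetical data, same package) of how parseCreatedLayerInfo lines history up with layers: entries marked empty_layer are skipped, so the returned slices match rootfs.diff_ids rather than the full history.

// exampleParseCreatedLayerInfo is a hypothetical illustration of
// parseCreatedLayerInfo on a made-up image config.
func exampleParseCreatedLayerInfo() {
	var img image
	// hypothetical image config: three history entries, one of them an empty layer
	_ = json.Unmarshal([]byte(`{
		"rootfs": {"diff_ids": ["sha256:aaa", "sha256:bbb"]},
		"history": [
			{"created": "2021-01-19T18:29:09Z", "created_by": "ADD rootfs.tar /"},
			{"created_by": "ENV PATH=/usr/bin", "empty_layer": true},
			{"created": "2021-01-19T18:29:09Z", "created_by": "RUN make install"}
		]
	}`), &img)
	dates, createdBy, _ := parseCreatedLayerInfo(img)
	// dates:     ["2021-01-19T18:29:09Z", "2021-01-19T18:29:09Z"]
	// createdBy: ["ADD rootfs.tar /", "RUN make install"]
	_, _ = dates, createdBy
}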

View File

@@ -0,0 +1,106 @@
package registry
import (
"context"
"encoding/json"
"github.com/moby/buildkit/cache/remotecache"
v1 "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/solver"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc {
return func(ctx context.Context, _ map[string]string) (remotecache.Exporter, error) {
return NewExporter(), nil
}
}
func NewExporter() remotecache.Exporter {
cc := v1.NewCacheChains()
return &exporter{CacheExporterTarget: cc, chains: cc}
}
type exporter struct {
solver.CacheExporterTarget
chains *v1.CacheChains
}
func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) {
return nil, nil
}
func (ce *exporter) reset() {
cc := v1.NewCacheChains()
ce.CacheExporterTarget = cc
ce.chains = cc
}
func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) {
config, descs, err := ce.chains.Marshal()
if err != nil {
return nil, err
}
descs2 := map[digest.Digest]v1.DescriptorProviderPair{}
for _, k := range layers {
if v, ok := descs[k]; ok {
descs2[k] = v
continue
}
// fallback for uncompressed digests
for _, v := range descs {
if uc := v.Descriptor.Annotations["containerd.io/uncompressed"]; uc == string(k) {
descs2[v.Descriptor.Digest] = v
}
}
}
cc := v1.NewCacheChains()
if err := v1.ParseConfig(*config, descs2, cc); err != nil {
return nil, err
}
cfg, _, err := cc.Marshal()
if err != nil {
return nil, err
}
if len(cfg.Layers) == 0 {
logrus.Warn("failed to match any cache with layers")
return nil, nil
}
cache := map[int]int{}
// reorder layers based on the order in the image
for i, r := range cfg.Records {
for j, rr := range r.Results {
n := getSortedLayerIndex(rr.LayerIndex, cfg.Layers, cache)
rr.LayerIndex = n
r.Results[j] = rr
cfg.Records[i] = r
}
}
dt, err := json.Marshal(cfg.Records)
if err != nil {
return nil, err
}
ce.reset()
return dt, nil
}
func getSortedLayerIndex(idx int, layers []v1.CacheLayer, cache map[int]int) int {
if idx == -1 {
return -1
}
l := layers[idx]
if i, ok := cache[idx]; ok {
return i
}
cache[idx] = getSortedLayerIndex(l.ParentIndex, layers, cache) + 1
return cache[idx]
}
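
A worked illustration (hypothetical digests, same package) of the reindexing above: getSortedLayerIndex assigns each layer its parent's sorted index plus one, memoizing results in the cache map.

// exampleGetSortedLayerIndex sketches the reordering for a parent chain
// aaa <- bbb <- ccc that is stored out of order.
func exampleGetSortedLayerIndex() {
	layers := []v1.CacheLayer{
		{Blob: "sha256:ccc", ParentIndex: 1},  // child of "bbb"
		{Blob: "sha256:bbb", ParentIndex: 2},  // child of "aaa"
		{Blob: "sha256:aaa", ParentIndex: -1}, // root layer
	}
	cache := map[int]int{}
	_ = getSortedLayerIndex(0, layers, cache) // 2: two ancestors below it in the chain
	_ = getSortedLayerIndex(2, layers, cache) // 0: the root sorts first
}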

View File

@@ -0,0 +1,83 @@
package local
import (
"context"
"time"
"github.com/containerd/containerd/content"
"github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/session"
sessioncontent "github.com/moby/buildkit/session/content"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
const (
attrDigest = "digest"
attrSrc = "src"
attrDest = "dest"
contentStoreIDPrefix = "local:"
)
// ResolveCacheExporterFunc for "local" cache exporter.
func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExporterFunc {
return func(ctx context.Context, attrs map[string]string) (remotecache.Exporter, error) {
store := attrs[attrDest]
if store == "" {
return nil, errors.New("local cache exporter requires dest")
}
csID := contentStoreIDPrefix + store
cs, err := getContentStore(ctx, sm, csID)
if err != nil {
return nil, err
}
return remotecache.NewExporter(cs), nil
}
}
// ResolveCacheImporterFunc for "local" cache importer.
func ResolveCacheImporterFunc(sm *session.Manager) remotecache.ResolveCacheImporterFunc {
return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) {
dgstStr := attrs[attrDigest]
if dgstStr == "" {
return nil, specs.Descriptor{}, errors.New("local cache importer requires explicit digest")
}
dgst := digest.Digest(dgstStr)
store := attrs[attrSrc]
if store == "" {
return nil, specs.Descriptor{}, errors.New("local cache importer requires src")
}
csID := contentStoreIDPrefix + store
cs, err := getContentStore(ctx, sm, csID)
if err != nil {
return nil, specs.Descriptor{}, err
}
info, err := cs.Info(ctx, dgst)
if err != nil {
return nil, specs.Descriptor{}, err
}
desc := specs.Descriptor{
// MediaType is typically MediaTypeDockerSchema2ManifestList,
// but we leave it empty until we get correct support for local index.json
Digest: dgst,
Size: info.Size,
}
return remotecache.NewImporter(cs), desc, nil
}
}
func getContentStore(ctx context.Context, sm *session.Manager, storeID string) (content.Store, error) {
sessionID := session.FromContext(ctx)
if sessionID == "" {
return nil, errors.New("local cache exporter/importer requires session")
}
timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
caller, err := sm.Get(timeoutCtx, sessionID)
if err != nil {
return nil, err
}
return sessioncontent.NewCallerStore(caller, storeID), nil
}
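
For reference, a sketch of the attribute maps these resolver funcs consume. The keys come from the constants above; the path and digest values are placeholders, and on the client they would typically originate from cache options such as type=local,dest=... (the option syntax is an assumption, not taken from this file).

// exampleLocalCacheAttrs returns hypothetical attribute maps as the
// resolver funcs above would receive them.
func exampleLocalCacheAttrs() (exporterAttrs, importerAttrs map[string]string) {
	exporterAttrs = map[string]string{
		attrDest: "/tmp/buildkit-cache", // content store directory to write to
	}
	importerAttrs = map[string]string{
		attrSrc:    "/tmp/buildkit-cache",
		attrDigest: "sha256:4270d9c25b4b2dbbfd37eb3c1d5b27cded4e02a13d4bdb821b0d08a5d6b48333", // placeholder manifest digest within that store
	}
	return exporterAttrs, importerAttrs
}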

View File

@@ -0,0 +1,96 @@
package registry
import (
"context"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes/docker"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/cache/remotecache"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/resolver"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
func canonicalizeRef(rawRef string) (string, error) {
if rawRef == "" {
return "", errors.New("missing ref")
}
parsed, err := reference.ParseNormalizedNamed(rawRef)
if err != nil {
return "", err
}
return reference.TagNameOnly(parsed).String(), nil
}
const (
attrRef = "ref"
)
func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) remotecache.ResolveCacheExporterFunc {
return func(ctx context.Context, attrs map[string]string) (remotecache.Exporter, error) {
ref, err := canonicalizeRef(attrs[attrRef])
if err != nil {
return nil, err
}
remote := resolver.New(ctx, hosts, sm)
pusher, err := remote.Pusher(ctx, ref)
if err != nil {
return nil, err
}
return remotecache.NewExporter(contentutil.FromPusher(pusher)), nil
}
}
func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docker.RegistryHosts) remotecache.ResolveCacheImporterFunc {
return func(ctx context.Context, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) {
ref, err := canonicalizeRef(attrs[attrRef])
if err != nil {
return nil, specs.Descriptor{}, err
}
remote := resolver.New(ctx, hosts, sm)
xref, desc, err := remote.Resolve(ctx, ref)
if err != nil {
return nil, specs.Descriptor{}, err
}
fetcher, err := remote.Fetcher(ctx, xref)
if err != nil {
return nil, specs.Descriptor{}, err
}
src := &withDistributionSourceLabel{
Provider: contentutil.FromFetcher(fetcher),
ref: ref,
source: cs,
}
return remotecache.NewImporter(src), desc, nil
}
}
type withDistributionSourceLabel struct {
content.Provider
ref string
source content.Manager
}
var _ remotecache.DistributionSourceLabelSetter = &withDistributionSourceLabel{}
func (dsl *withDistributionSourceLabel) SetDistributionSourceLabel(ctx context.Context, dgst digest.Digest) error {
hf, err := docker.AppendDistributionSourceLabel(dsl.source, dsl.ref)
if err != nil {
return err
}
_, err = hf(ctx, ocispec.Descriptor{Digest: dgst})
return err
}
func (dsl *withDistributionSourceLabel) SetDistributionSourceAnnotation(desc ocispec.Descriptor) ocispec.Descriptor {
if desc.Annotations == nil {
desc.Annotations = map[string]string{}
}
desc.Annotations["containerd.io/distribution.source.ref"] = dsl.ref
return desc
}

View File

@@ -0,0 +1,297 @@
package cacheimport
import (
"context"
"time"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/worker"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
func NewCacheKeyStorage(cc *CacheChains, w worker.Worker) (solver.CacheKeyStorage, solver.CacheResultStorage, error) {
storage := &cacheKeyStorage{
byID: map[string]*itemWithOutgoingLinks{},
byItem: map[*item]string{},
byResult: map[string]map[string]struct{}{},
}
for _, it := range cc.items {
if _, err := addItemToStorage(storage, it); err != nil {
return nil, nil, err
}
}
results := &cacheResultStorage{
w: w,
byID: storage.byID,
byItem: storage.byItem,
byResult: storage.byResult,
}
return storage, results, nil
}
func addItemToStorage(k *cacheKeyStorage, it *item) (*itemWithOutgoingLinks, error) {
if id, ok := k.byItem[it]; ok {
if id == "" {
return nil, errors.Errorf("invalid loop")
}
return k.byID[id], nil
}
var id string
if len(it.links) == 0 {
id = it.dgst.String()
} else {
id = identity.NewID()
}
k.byItem[it] = ""
for i, m := range it.links {
for l := range m {
src, err := addItemToStorage(k, l.src)
if err != nil {
return nil, err
}
cl := nlink{
input: i,
dgst: it.dgst,
selector: l.selector,
}
src.links[cl] = append(src.links[cl], id)
}
}
k.byItem[it] = id
itl := &itemWithOutgoingLinks{
item: it,
links: map[nlink][]string{},
}
k.byID[id] = itl
if res := it.result; res != nil {
resultID := remoteID(res)
ids, ok := k.byResult[resultID]
if !ok {
ids = map[string]struct{}{}
k.byResult[resultID] = ids
}
ids[id] = struct{}{}
}
return itl, nil
}
type cacheKeyStorage struct {
byID map[string]*itemWithOutgoingLinks
byItem map[*item]string
byResult map[string]map[string]struct{}
}
type itemWithOutgoingLinks struct {
*item
links map[nlink][]string
}
func (cs *cacheKeyStorage) Exists(id string) bool {
_, ok := cs.byID[id]
return ok
}
func (cs *cacheKeyStorage) Walk(func(id string) error) error {
return nil
}
func (cs *cacheKeyStorage) WalkResults(id string, fn func(solver.CacheResult) error) error {
it, ok := cs.byID[id]
if !ok {
return nil
}
if res := it.result; res != nil {
return fn(solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime})
}
return nil
}
func (cs *cacheKeyStorage) Load(id string, resultID string) (solver.CacheResult, error) {
it, ok := cs.byID[id]
if !ok {
return solver.CacheResult{}, nil
}
if res := it.result; res != nil {
return solver.CacheResult{ID: remoteID(res), CreatedAt: it.resultTime}, nil
}
return solver.CacheResult{}, nil
}
func (cs *cacheKeyStorage) AddResult(id string, res solver.CacheResult) error {
return nil
}
func (cs *cacheKeyStorage) Release(resultID string) error {
return nil
}
func (cs *cacheKeyStorage) AddLink(id string, link solver.CacheInfoLink, target string) error {
return nil
}
func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error {
it, ok := cs.byID[id]
if !ok {
return nil
}
for _, id := range it.links[nlink{
dgst: outputKey(link.Digest, int(link.Output)),
input: int(link.Input),
selector: link.Selector.String(),
}] {
if err := fn(id); err != nil {
return err
}
}
return nil
}
func (cs *cacheKeyStorage) WalkBacklinks(id string, fn func(id string, link solver.CacheInfoLink) error) error {
for k, it := range cs.byID {
for nl, ids := range it.links {
for _, id2 := range ids {
if id == id2 {
if err := fn(k, solver.CacheInfoLink{
Input: solver.Index(nl.input),
Selector: digest.Digest(nl.selector),
Digest: nl.dgst,
}); err != nil {
return err
}
}
}
}
}
return nil
}
func (cs *cacheKeyStorage) WalkIDsByResult(id string, fn func(id string) error) error {
ids := cs.byResult[id]
for id := range ids {
if err := fn(id); err != nil {
return err
}
}
return nil
}
func (cs *cacheKeyStorage) HasLink(id string, link solver.CacheInfoLink, target string) bool {
l := nlink{
dgst: outputKey(link.Digest, int(link.Output)),
input: int(link.Input),
selector: link.Selector.String(),
}
if it, ok := cs.byID[id]; ok {
for _, id := range it.links[l] {
if id == target {
return true
}
}
}
return false
}
type cacheResultStorage struct {
w worker.Worker
byID map[string]*itemWithOutgoingLinks
byResult map[string]map[string]struct{}
byItem map[*item]string
}
func (cs *cacheResultStorage) Save(res solver.Result, createdAt time.Time) (solver.CacheResult, error) {
return solver.CacheResult{}, errors.Errorf("importer is immutable")
}
func (cs *cacheResultStorage) LoadWithParents(ctx context.Context, res solver.CacheResult) (map[string]solver.Result, error) {
v := cs.byResultID(res.ID)
if v == nil || v.result == nil {
return nil, errors.WithStack(solver.ErrNotFound)
}
m := map[string]solver.Result{}
visited := make(map[*item]struct{})
if err := v.walkAllResults(func(i *item) error {
if i.result == nil {
return nil
}
id, ok := cs.byItem[i]
if !ok {
return nil
}
if isSubRemote(*i.result, *v.result) {
ref, err := cs.w.FromRemote(ctx, i.result)
if err != nil {
return err
}
m[id] = worker.NewWorkerRefResult(ref, cs.w)
}
return nil
}, visited); err != nil {
for _, v := range m {
v.Release(context.TODO())
}
return nil, err
}
return m, nil
}
func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) (solver.Result, error) {
item := cs.byResultID(res.ID)
if item == nil || item.result == nil {
return nil, errors.WithStack(solver.ErrNotFound)
}
ref, err := cs.w.FromRemote(ctx, item.result)
if err != nil {
return nil, errors.Wrap(err, "failed to load result from remote")
}
return worker.NewWorkerRefResult(ref, cs.w), nil
}
func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult) (*solver.Remote, error) {
if r := cs.byResultID(res.ID); r != nil && r.result != nil {
return r.result, nil
}
return nil, errors.WithStack(solver.ErrNotFound)
}
func (cs *cacheResultStorage) Exists(id string) bool {
return cs.byResultID(id) != nil
}
func (cs *cacheResultStorage) byResultID(resultID string) *itemWithOutgoingLinks {
m, ok := cs.byResult[resultID]
if !ok || len(m) == 0 {
return nil
}
for id := range m {
it, ok := cs.byID[id]
if ok {
return it
}
}
return nil
}
// remoteID computes a unique ID per remote. This ID is not stable.
func remoteID(r *solver.Remote) string {
dgstr := digest.Canonical.Digester()
for _, desc := range r.Descriptors {
dgstr.Hash().Write([]byte(desc.Digest))
}
return dgstr.Digest().String()
}

View File

@@ -0,0 +1,158 @@
package cacheimport
import (
"strings"
"time"
"github.com/containerd/containerd/content"
"github.com/moby/buildkit/solver"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
func NewCacheChains() *CacheChains {
return &CacheChains{visited: map[interface{}]struct{}{}}
}
type CacheChains struct {
items []*item
visited map[interface{}]struct{}
}
func (c *CacheChains) Add(dgst digest.Digest) solver.CacheExporterRecord {
if strings.HasPrefix(dgst.String(), "random:") {
return &nopRecord{}
}
it := &item{c: c, dgst: dgst}
c.items = append(c.items, it)
return it
}
func (c *CacheChains) Visit(v interface{}) {
c.visited[v] = struct{}{}
}
func (c *CacheChains) Visited(v interface{}) bool {
_, ok := c.visited[v]
return ok
}
func (c *CacheChains) normalize() error {
st := &normalizeState{
added: map[*item]*item{},
links: map[*item]map[nlink]map[digest.Digest]struct{}{},
byKey: map[digest.Digest]*item{},
}
for _, it := range c.items {
_, err := normalizeItem(it, st)
if err != nil {
return err
}
}
items := make([]*item, 0, len(st.byKey))
for _, it := range st.byKey {
items = append(items, it)
}
c.items = items
return nil
}
func (c *CacheChains) Marshal() (*CacheConfig, DescriptorProvider, error) {
if err := c.normalize(); err != nil {
return nil, nil, err
}
st := &marshalState{
chainsByID: map[string]int{},
descriptors: DescriptorProvider{},
recordsByItem: map[*item]int{},
}
for _, it := range c.items {
if err := marshalItem(it, st); err != nil {
return nil, nil, err
}
}
cc := CacheConfig{
Layers: st.layers,
Records: st.records,
}
sortConfig(&cc)
return &cc, st.descriptors, nil
}
type DescriptorProvider map[digest.Digest]DescriptorProviderPair
type DescriptorProviderPair struct {
Descriptor ocispec.Descriptor
Provider content.Provider
}
type item struct {
c *CacheChains
dgst digest.Digest
result *solver.Remote
resultTime time.Time
links []map[link]struct{}
}
type link struct {
src *item
selector string
}
func (c *item) AddResult(createdAt time.Time, result *solver.Remote) {
c.resultTime = createdAt
c.result = result
}
func (c *item) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
src, ok := rec.(*item)
if !ok {
return
}
	for index >= len(c.links) {
		c.links = append(c.links, map[link]struct{}{})
	}
c.links[index][link{src: src, selector: selector}] = struct{}{}
}
func (c *item) walkAllResults(fn func(i *item) error, visited map[*item]struct{}) error {
if _, ok := visited[c]; ok {
return nil
}
visited[c] = struct{}{}
if err := fn(c); err != nil {
return err
}
for _, links := range c.links {
for l := range links {
if err := l.src.walkAllResults(fn, visited); err != nil {
return err
}
}
}
return nil
}
type nopRecord struct {
}
func (c *nopRecord) AddResult(createdAt time.Time, result *solver.Remote) {
}
func (c *nopRecord) LinkFrom(rec solver.CacheExporterRecord, index int, selector string) {
}
var _ solver.CacheExporterTarget = &CacheChains{}

View File

@@ -0,0 +1,50 @@
package cacheimport
// Distributable build cache
//
// The main manifest is an OCI image index
// https://github.com/opencontainers/image-spec/blob/master/image-index.md .
// The manifests array contains descriptors to the cache layers and one instance of
// build cache config with media type application/vnd.buildkit.cacheconfig.v0 .
// The cache layer descriptors need to have an annotation with the uncompressed digest
// to allow deduplication on extraction, and optionally a "buildkit/createdat"
// annotation to support maintaining original timestamps.
//
// Cache config file layout:
//
//{
// "layers": [
// {
// "blob": "sha256:deadbeef", <- digest of layer blob in index
// "parent": -1 <- index of parent layer, -1 if no parent
// },
// {
// "blob": "sha256:deadbeef",
// "parent": 0
// }
// ],
//
// "records": [
// {
// "digest": "sha256:deadbeef", <- base digest for the record
// },
// {
// "digest": "sha256:deadbeef",
// "output": 1, <- optional output index
// "layers": [ <- optional array of layer chains
// {
// "createdat": "",
// "layer": 1, <- index of the layer
// }
// ],
// "inputs": [ <- dependent records
// [ <- index of the dependency (0)
// {
// "selector": "sel", <- optional selector
// "link": 0, <- index of the dependent record
// }
// ]
// ]
// }
// ]
// }
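
To make the layout above concrete, a minimal sketch (placeholder digests) of the equivalent structure built with this package's Go types from spec.go:

// buildExampleConfig is a hypothetical sketch constructing the two-layer
// cache config from the comment above.
func buildExampleConfig() CacheConfig {
	return CacheConfig{
		Layers: []CacheLayer{
			{Blob: "sha256:aaa", ParentIndex: -1}, // base layer, no parent
			{Blob: "sha256:bbb", ParentIndex: 0},  // child of layer 0
		},
		Records: []CacheRecord{
			{Digest: "sha256:base"}, // record without results
			{
				Digest:  "sha256:step",
				Results: []CacheResult{{LayerIndex: 1}},                    // layer chain ending at layer 1
				Inputs:  [][]CacheInput{{{Selector: "sel", LinkIndex: 0}}}, // depends on record 0
			},
		},
	}
}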

View File

@@ -0,0 +1,110 @@
package cacheimport
import (
"encoding/json"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/contentutil"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
func Parse(configJSON []byte, provider DescriptorProvider, t solver.CacheExporterTarget) error {
var config CacheConfig
if err := json.Unmarshal(configJSON, &config); err != nil {
return errors.WithStack(err)
}
return ParseConfig(config, provider, t)
}
func ParseConfig(config CacheConfig, provider DescriptorProvider, t solver.CacheExporterTarget) error {
cache := map[int]solver.CacheExporterRecord{}
for i := range config.Records {
if _, err := parseRecord(config, i, provider, t, cache); err != nil {
return err
}
}
return nil
}
func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver.CacheExporterTarget, cache map[int]solver.CacheExporterRecord) (solver.CacheExporterRecord, error) {
if r, ok := cache[idx]; ok {
if r == nil {
return nil, errors.Errorf("invalid looping record")
}
return r, nil
}
if idx < 0 || idx >= len(cc.Records) {
return nil, errors.Errorf("invalid record ID: %d", idx)
}
rec := cc.Records[idx]
r := t.Add(rec.Digest)
cache[idx] = nil
for i, inputs := range rec.Inputs {
for _, inp := range inputs {
src, err := parseRecord(cc, inp.LinkIndex, provider, t, cache)
if err != nil {
return nil, err
}
r.LinkFrom(src, i, inp.Selector)
}
}
for _, res := range rec.Results {
visited := map[int]struct{}{}
remote, err := getRemoteChain(cc.Layers, res.LayerIndex, provider, visited)
if err != nil {
return nil, err
}
if remote != nil {
r.AddResult(res.CreatedAt, remote)
}
}
cache[idx] = r
return r, nil
}
func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, visited map[int]struct{}) (*solver.Remote, error) {
if _, ok := visited[idx]; ok {
return nil, errors.Errorf("invalid looping layer")
}
visited[idx] = struct{}{}
if idx < 0 || idx >= len(layers) {
return nil, errors.Errorf("invalid layer index %d", idx)
}
l := layers[idx]
descPair, ok := provider[l.Blob]
if !ok {
return nil, nil
}
var r *solver.Remote
if l.ParentIndex != -1 {
var err error
r, err = getRemoteChain(layers, l.ParentIndex, provider, visited)
if err != nil {
return nil, err
}
if r == nil {
return nil, nil
}
r.Descriptors = append(r.Descriptors, descPair.Descriptor)
mp := contentutil.NewMultiProvider(r.Provider)
mp.Add(descPair.Descriptor.Digest, descPair.Provider)
r.Provider = mp
return r, nil
}
return &solver.Remote{
Descriptors: []ocispec.Descriptor{descPair.Descriptor},
Provider: descPair.Provider,
}, nil
}

View File

@@ -0,0 +1,35 @@
package cacheimport
import (
"time"
digest "github.com/opencontainers/go-digest"
)
const CacheConfigMediaTypeV0 = "application/vnd.buildkit.cacheconfig.v0"
type CacheConfig struct {
Layers []CacheLayer `json:"layers,omitempty"`
Records []CacheRecord `json:"records,omitempty"`
}
type CacheLayer struct {
Blob digest.Digest `json:"blob,omitempty"`
ParentIndex int `json:"parent,omitempty"`
}
type CacheRecord struct {
Results []CacheResult `json:"layers,omitempty"`
Digest digest.Digest `json:"digest,omitempty"`
Inputs [][]CacheInput `json:"inputs,omitempty"`
}
type CacheResult struct {
LayerIndex int `json:"layer"`
CreatedAt time.Time `json:"createdAt,omitempty"`
}
type CacheInput struct {
Selector string `json:"selector,omitempty"`
LinkIndex int `json:"link"`
}

View File

@@ -0,0 +1,322 @@
package cacheimport
import (
"fmt"
"sort"
"github.com/containerd/containerd/content"
"github.com/moby/buildkit/solver"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// sortConfig sorts the config structure to make sure it is deterministic
func sortConfig(cc *CacheConfig) {
type indexedLayer struct {
oldIndex int
newIndex int
l CacheLayer
}
unsortedLayers := make([]*indexedLayer, len(cc.Layers))
sortedLayers := make([]*indexedLayer, len(cc.Layers))
for i, l := range cc.Layers {
il := &indexedLayer{oldIndex: i, l: l}
unsortedLayers[i] = il
sortedLayers[i] = il
}
sort.Slice(sortedLayers, func(i, j int) bool {
li := sortedLayers[i].l
lj := sortedLayers[j].l
if li.Blob == lj.Blob {
return li.ParentIndex < lj.ParentIndex
}
return li.Blob < lj.Blob
})
for i, l := range sortedLayers {
l.newIndex = i
}
layers := make([]CacheLayer, len(sortedLayers))
for i, l := range sortedLayers {
if pID := l.l.ParentIndex; pID != -1 {
l.l.ParentIndex = unsortedLayers[pID].newIndex
}
layers[i] = l.l
}
type indexedRecord struct {
oldIndex int
newIndex int
r CacheRecord
}
unsortedRecords := make([]*indexedRecord, len(cc.Records))
sortedRecords := make([]*indexedRecord, len(cc.Records))
for i, r := range cc.Records {
ir := &indexedRecord{oldIndex: i, r: r}
unsortedRecords[i] = ir
sortedRecords[i] = ir
}
sort.Slice(sortedRecords, func(i, j int) bool {
ri := sortedRecords[i].r
rj := sortedRecords[j].r
if ri.Digest != rj.Digest {
return ri.Digest < rj.Digest
}
if len(ri.Inputs) != len(rj.Inputs) {
return len(ri.Inputs) < len(rj.Inputs)
}
for i, inputs := range ri.Inputs {
if len(ri.Inputs[i]) != len(rj.Inputs[i]) {
return len(ri.Inputs[i]) < len(rj.Inputs[i])
}
for j := range inputs {
if ri.Inputs[i][j].Selector != rj.Inputs[i][j].Selector {
return ri.Inputs[i][j].Selector < rj.Inputs[i][j].Selector
}
inputDigesti := cc.Records[ri.Inputs[i][j].LinkIndex].Digest
inputDigestj := cc.Records[rj.Inputs[i][j].LinkIndex].Digest
if inputDigesti != inputDigestj {
return inputDigesti < inputDigestj
}
}
}
return false
})
for i, l := range sortedRecords {
l.newIndex = i
}
records := make([]CacheRecord, len(sortedRecords))
for i, r := range sortedRecords {
for j := range r.r.Results {
r.r.Results[j].LayerIndex = unsortedLayers[r.r.Results[j].LayerIndex].newIndex
}
for j, inputs := range r.r.Inputs {
for k := range inputs {
r.r.Inputs[j][k].LinkIndex = unsortedRecords[r.r.Inputs[j][k].LinkIndex].newIndex
}
sort.Slice(inputs, func(i, j int) bool {
return inputs[i].LinkIndex < inputs[j].LinkIndex
})
}
records[i] = r.r
}
cc.Layers = layers
cc.Records = records
}
func outputKey(dgst digest.Digest, idx int) digest.Digest {
return digest.FromBytes([]byte(fmt.Sprintf("%s@%d", dgst, idx)))
}
type nlink struct {
dgst digest.Digest
input int
selector string
}
type normalizeState struct {
added map[*item]*item
links map[*item]map[nlink]map[digest.Digest]struct{}
byKey map[digest.Digest]*item
next int
}
func normalizeItem(it *item, state *normalizeState) (*item, error) {
if it2, ok := state.added[it]; ok {
return it2, nil
}
if len(it.links) == 0 {
id := it.dgst
if it2, ok := state.byKey[id]; ok {
state.added[it] = it2
return it2, nil
}
state.byKey[id] = it
state.added[it] = it
return it, nil
}
matches := map[digest.Digest]struct{}{}
// check if there is already a matching record
for i, m := range it.links {
if len(m) == 0 {
return nil, errors.Errorf("invalid incomplete links")
}
for l := range m {
nl := nlink{dgst: it.dgst, input: i, selector: l.selector}
it2, err := normalizeItem(l.src, state)
if err != nil {
return nil, err
}
links := state.links[it2][nl]
if i == 0 {
for id := range links {
matches[id] = struct{}{}
}
} else {
for id := range matches {
if _, ok := links[id]; !ok {
delete(matches, id)
}
}
}
}
}
var id digest.Digest
links := it.links
if len(matches) > 0 {
for m := range matches {
if id == "" || id > m {
id = m
}
}
} else {
// keep tmp IDs deterministic
state.next++
id = digest.FromBytes([]byte(fmt.Sprintf("%d", state.next)))
state.byKey[id] = it
it.links = make([]map[link]struct{}, len(it.links))
for i := range it.links {
it.links[i] = map[link]struct{}{}
}
}
it2 := state.byKey[id]
state.added[it] = it2
for i, m := range links {
for l := range m {
subIt, err := normalizeItem(l.src, state)
if err != nil {
return nil, err
}
it2.links[i][link{src: subIt, selector: l.selector}] = struct{}{}
nl := nlink{dgst: it.dgst, input: i, selector: l.selector}
if _, ok := state.links[subIt]; !ok {
state.links[subIt] = map[nlink]map[digest.Digest]struct{}{}
}
if _, ok := state.links[subIt][nl]; !ok {
state.links[subIt][nl] = map[digest.Digest]struct{}{}
}
state.links[subIt][nl][id] = struct{}{}
}
}
return it2, nil
}
type marshalState struct {
layers []CacheLayer
chainsByID map[string]int
descriptors DescriptorProvider
records []CacheRecord
recordsByItem map[*item]int
}
func marshalRemote(r *solver.Remote, state *marshalState) string {
if len(r.Descriptors) == 0 {
return ""
}
type Remote struct {
Descriptors []ocispec.Descriptor
Provider content.Provider
}
var parentID string
if len(r.Descriptors) > 1 {
r2 := &solver.Remote{
Descriptors: r.Descriptors[:len(r.Descriptors)-1],
Provider: r.Provider,
}
parentID = marshalRemote(r2, state)
}
desc := r.Descriptors[len(r.Descriptors)-1]
state.descriptors[desc.Digest] = DescriptorProviderPair{
Descriptor: desc,
Provider: r.Provider,
}
id := desc.Digest.String() + parentID
if _, ok := state.chainsByID[id]; ok {
return id
}
state.chainsByID[id] = len(state.layers)
l := CacheLayer{
Blob: desc.Digest,
ParentIndex: -1,
}
if parentID != "" {
l.ParentIndex = state.chainsByID[parentID]
}
state.layers = append(state.layers, l)
return id
}
func marshalItem(it *item, state *marshalState) error {
if _, ok := state.recordsByItem[it]; ok {
return nil
}
rec := CacheRecord{
Digest: it.dgst,
Inputs: make([][]CacheInput, len(it.links)),
}
for i, m := range it.links {
for l := range m {
if err := marshalItem(l.src, state); err != nil {
return err
}
idx, ok := state.recordsByItem[l.src]
if !ok {
return errors.Errorf("invalid source record: %v", l.src)
}
rec.Inputs[i] = append(rec.Inputs[i], CacheInput{
Selector: l.selector,
LinkIndex: idx,
})
}
}
if it.result != nil {
id := marshalRemote(it.result, state)
if id != "" {
idx, ok := state.chainsByID[id]
if !ok {
return errors.Errorf("parent chainid not found")
}
rec.Results = append(rec.Results, CacheResult{LayerIndex: idx, CreatedAt: it.resultTime})
}
}
state.recordsByItem[it] = len(state.records)
state.records = append(state.records, rec)
return nil
}
func isSubRemote(sub, main solver.Remote) bool {
if len(sub.Descriptors) > len(main.Descriptors) {
return false
}
for i := range sub.Descriptors {
if sub.Descriptors[i].Digest != main.Descriptors[i].Digest {
return false
}
}
return true
}