Update go.mod and vendor

This commit is contained in:
Ettore Di Giacinto
2021-06-04 11:15:10 +02:00
parent c3b552103f
commit 0658020c60
649 changed files with 18 additions and 156476 deletions

View File

@@ -1,162 +0,0 @@
package apicaps
import (
"fmt"
"sort"
"strings"
pb "github.com/moby/buildkit/util/apicaps/pb"
"github.com/pkg/errors"
)
// PBCap is an alias for the protobuf wire representation of a capability.
type PBCap = pb.APICap

// ExportedProduct is the name of the product using this package.
// Users vendoring this library may override it to provide better versioning hints
// for their users (or set it with a flag to buildkitd).
var ExportedProduct string

// CapStatus defines the stability properties of a capability
type CapStatus int

const (
	// CapStatusStable refers to a capability that should never be changed in
	// backwards incompatible manner unless there is a serious security issue.
	CapStatusStable CapStatus = iota
	// CapStatusExperimental refers to a capability that may be removed in the future.
	// If incompatible changes are made the previous ID is disabled and new is added.
	CapStatusExperimental
	// CapStatusPrerelease is same as CapStatusExperimental that can be used for new
	// features before they move to stable.
	CapStatusPrerelease
)

// CapID is type for capability identifier
type CapID string
// Cap describes an API feature
// Cap describes an API feature
type Cap struct {
	ID     CapID
	Name   string // readable name, may contain spaces but keep in one sentence
	Status CapStatus
	Enabled    bool
	Deprecated bool
	// SupportedHint maps a product name (see ExportedProduct) to the version
	// of that product in which support for this capability was added.
	SupportedHint map[string]string
	// DisabledReason is a reason key for detection code (see caps.proto).
	DisabledReason string
	// DisabledReasonMsg is a human-readable message shown to the user.
	DisabledReasonMsg string
	// DisabledAlternative is an identifier that an updated client could catch.
	DisabledAlternative string
}
// CapList is a collection of capability definitions
type CapList struct {
m map[CapID]Cap
}
// Init initializes definition for a new capability.
// Not safe to be called concurrently with other methods.
// Init registers the given capability definitions, indexing them by ID.
// The backing map is created on first use.
// Not safe to be called concurrently with other methods.
func (l *CapList) Init(cc ...Cap) {
	if l.m == nil {
		l.m = make(map[CapID]Cap, len(cc))
	}
	for i := range cc {
		l.m[cc[i].ID] = cc[i]
	}
}
// All reports the configuration of all known capabilities
func (l *CapList) All() []pb.APICap {
out := make([]pb.APICap, 0, len(l.m))
for _, c := range l.m {
out = append(out, pb.APICap{
ID: string(c.ID),
Enabled: c.Enabled,
Deprecated: c.Deprecated,
DisabledReason: c.DisabledReason,
DisabledReasonMsg: c.DisabledReasonMsg,
DisabledAlternative: c.DisabledAlternative,
})
}
sort.Slice(out, func(i, j int) bool {
return out[i].ID < out[j].ID
})
return out
}
// CapSet returns a CapSet for an capability configuration
func (l *CapList) CapSet(caps []pb.APICap) CapSet {
m := make(map[string]*pb.APICap, len(caps))
for _, c := range caps {
if c.ID != "" {
c := c // capture loop iterator
m[c.ID] = &c
}
}
return CapSet{
list: l,
set: m,
}
}
// CapSet is a configuration for detecting supported capabilities
type CapSet struct {
list *CapList
set map[string]*pb.APICap
}
// Supports returns an error if capability is not supported
func (s *CapSet) Supports(id CapID) error {
err := &CapError{ID: id}
c, ok := s.list.m[id]
if !ok {
return errors.WithStack(err)
}
err.Definition = &c
state, ok := s.set[string(id)]
if !ok {
return errors.WithStack(err)
}
err.State = state
if !state.Enabled {
return errors.WithStack(err)
}
return nil
}
// CapError is an error for unsupported capability
type CapError struct {
ID CapID
Definition *Cap
State *pb.APICap
}
// Error formats a human-readable description of the unsupported capability,
// including an upgrade hint (when State is nil) or the server-side disable
// reason (when the capability exists but is disabled).
func (e CapError) Error() string {
	if e.Definition == nil {
		return fmt.Sprintf("unknown API capability %s", e.ID)
	}
	typ := ""
	switch e.Definition.Status {
	case CapStatusExperimental:
		typ = "experimental "
	case CapStatusPrerelease:
		typ = "prerelease "
	}
	name := ""
	if e.Definition.Name != "" {
		name = "(" + e.Definition.Name + ")"
	}
	msg := &strings.Builder{}
	fmt.Fprintf(msg, "requested %sfeature %s %s", typ, e.ID, name)
	if e.State == nil {
		fmt.Fprint(msg, " is not supported by build server")
		if hint, ok := e.Definition.SupportedHint[ExportedProduct]; ok {
			fmt.Fprintf(msg, " (added in %s)", hint)
		}
		fmt.Fprintf(msg, ", please update %s", ExportedProduct)
		return msg.String()
	}
	fmt.Fprint(msg, " has been disabled on the build server")
	if e.State.DisabledReasonMsg != "" {
		fmt.Fprintf(msg, ": %s", e.State.DisabledReasonMsg)
	}
	return msg.String()
}

View File

@@ -1,570 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: caps.proto
package moby_buildkit_v1_apicaps
import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// APICap defines a capability supported by the service
// APICap defines a capability supported by the service
type APICap struct {
	ID                   string   `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
	Enabled              bool     `protobuf:"varint,2,opt,name=Enabled,proto3" json:"Enabled,omitempty"`
	Deprecated           bool     `protobuf:"varint,3,opt,name=Deprecated,proto3" json:"Deprecated,omitempty"`
	DisabledReason       string   `protobuf:"bytes,4,opt,name=DisabledReason,proto3" json:"DisabledReason,omitempty"`
	DisabledReasonMsg    string   `protobuf:"bytes,5,opt,name=DisabledReasonMsg,proto3" json:"DisabledReasonMsg,omitempty"`
	DisabledAlternative  string   `protobuf:"bytes,6,opt,name=DisabledAlternative,proto3" json:"DisabledAlternative,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"` // unknown fields preserved across a decode/encode round trip
	XXX_sizecache        int32    `json:"-"`
}

// Standard gogo/protobuf message boilerplate below (generated, do not edit).

func (m *APICap) Reset()         { *m = APICap{} }
func (m *APICap) String() string { return proto.CompactTextString(m) }
func (*APICap) ProtoMessage()    {}
func (*APICap) Descriptor() ([]byte, []int) {
	return fileDescriptor_e19c39d9fcb89b83, []int{0}
}
func (m *APICap) XXX_Unmarshal(b []byte) error {
	return m.Unmarshal(b)
}
func (m *APICap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	if deterministic {
		return xxx_messageInfo_APICap.Marshal(b, m, deterministic)
	} else {
		b = b[:cap(b)]
		n, err := m.MarshalToSizedBuffer(b)
		if err != nil {
			return nil, err
		}
		return b[:n], nil
	}
}
func (m *APICap) XXX_Merge(src proto.Message) {
	xxx_messageInfo_APICap.Merge(m, src)
}
func (m *APICap) XXX_Size() int {
	return m.Size()
}
func (m *APICap) XXX_DiscardUnknown() {
	xxx_messageInfo_APICap.DiscardUnknown(m)
}

var xxx_messageInfo_APICap proto.InternalMessageInfo
// Nil-safe getters (generated): each returns the field's zero value when the
// receiver is nil, so callers need not nil-check intermediate messages.

func (m *APICap) GetID() string {
	if m != nil {
		return m.ID
	}
	return ""
}

func (m *APICap) GetEnabled() bool {
	if m != nil {
		return m.Enabled
	}
	return false
}

func (m *APICap) GetDeprecated() bool {
	if m != nil {
		return m.Deprecated
	}
	return false
}

func (m *APICap) GetDisabledReason() string {
	if m != nil {
		return m.DisabledReason
	}
	return ""
}

func (m *APICap) GetDisabledReasonMsg() string {
	if m != nil {
		return m.DisabledReasonMsg
	}
	return ""
}

func (m *APICap) GetDisabledAlternative() string {
	if m != nil {
		return m.DisabledAlternative
	}
	return ""
}
// Register the APICap message type and the raw caps.proto file descriptor
// with the gogo/protobuf runtime (generated).
func init() {
	proto.RegisterType((*APICap)(nil), "moby.buildkit.v1.apicaps.APICap")
}

func init() { proto.RegisterFile("caps.proto", fileDescriptor_e19c39d9fcb89b83) }
var fileDescriptor_e19c39d9fcb89b83 = []byte{
// 236 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x4e, 0x2c, 0x28,
0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0x2a, 0xcd,
0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0xd4, 0x4b, 0x2c, 0xc8, 0x04, 0xc9, 0x4b, 0xe9,
0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb,
0x83, 0x35, 0x24, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0x31, 0x48, 0xe9, 0x16, 0x23,
0x17, 0x9b, 0x63, 0x80, 0xa7, 0x73, 0x62, 0x81, 0x10, 0x1f, 0x17, 0x93, 0xa7, 0x8b, 0x04, 0xa3,
0x02, 0xa3, 0x06, 0x67, 0x10, 0x93, 0xa7, 0x8b, 0x90, 0x04, 0x17, 0xbb, 0x6b, 0x5e, 0x62, 0x52,
0x4e, 0x6a, 0x8a, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x8c, 0x2b, 0x24, 0xc7, 0xc5, 0xe5,
0x92, 0x5a, 0x50, 0x94, 0x9a, 0x9c, 0x58, 0x92, 0x9a, 0x22, 0xc1, 0x0c, 0x96, 0x44, 0x12, 0x11,
0x52, 0xe3, 0xe2, 0x73, 0xc9, 0x2c, 0x06, 0xab, 0x0d, 0x4a, 0x4d, 0x2c, 0xce, 0xcf, 0x93, 0x60,
0x01, 0x9b, 0x8a, 0x26, 0x2a, 0xa4, 0xc3, 0x25, 0x88, 0x2a, 0xe2, 0x5b, 0x9c, 0x2e, 0xc1, 0x0a,
0x56, 0x8a, 0x29, 0x21, 0x64, 0xc0, 0x25, 0x0c, 0x13, 0x74, 0xcc, 0x29, 0x49, 0x2d, 0xca, 0x4b,
0x2c, 0xc9, 0x2c, 0x4b, 0x95, 0x60, 0x03, 0xab, 0xc7, 0x26, 0xe5, 0xc4, 0x73, 0xe2, 0x91, 0x1c,
0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x26, 0xb1, 0x81, 0x7d, 0x6c, 0x0c, 0x08,
0x00, 0x00, 0xff, 0xff, 0x02, 0x2d, 0x9e, 0x91, 0x48, 0x01, 0x00, 0x00,
}
// Marshal encodes the message into a freshly allocated buffer sized by Size.
func (m *APICap) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBuffer(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *APICap) MarshalTo(dAtA []byte) (int, error) {
	size := m.Size()
	return m.MarshalToSizedBuffer(dAtA[:size])
}

// MarshalToSizedBuffer writes the message backwards, from the end of dAtA
// toward the front, in descending field-number order (generated fast path).
// Each literal written after encodeVarintCaps is the field's wire tag, e.g.
// 0x32 = field 6 wiretype 2 (bytes), 0x18 = field 3 wiretype 0 (varint).
func (m *APICap) MarshalToSizedBuffer(dAtA []byte) (int, error) {
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.XXX_unrecognized != nil {
		i -= len(m.XXX_unrecognized)
		copy(dAtA[i:], m.XXX_unrecognized)
	}
	if len(m.DisabledAlternative) > 0 {
		i -= len(m.DisabledAlternative)
		copy(dAtA[i:], m.DisabledAlternative)
		i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledAlternative)))
		i--
		dAtA[i] = 0x32
	}
	if len(m.DisabledReasonMsg) > 0 {
		i -= len(m.DisabledReasonMsg)
		copy(dAtA[i:], m.DisabledReasonMsg)
		i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReasonMsg)))
		i--
		dAtA[i] = 0x2a
	}
	if len(m.DisabledReason) > 0 {
		i -= len(m.DisabledReason)
		copy(dAtA[i:], m.DisabledReason)
		i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReason)))
		i--
		dAtA[i] = 0x22
	}
	if m.Deprecated {
		i--
		if m.Deprecated {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x18
	}
	if m.Enabled {
		i--
		if m.Enabled {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x10
	}
	if len(m.ID) > 0 {
		i -= len(m.ID)
		copy(dAtA[i:], m.ID)
		i = encodeVarintCaps(dAtA, i, uint64(len(m.ID)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}

// encodeVarintCaps writes v as a protobuf varint ending just before offset
// and returns the new (lower) offset where the varint begins.
func encodeVarintCaps(dAtA []byte, offset int, v uint64) int {
	offset -= sovCaps(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
// Size returns the exact encoded byte length of the message.
// Each set field costs 1 tag byte + payload (+ a length varint for strings).
func (m *APICap) Size() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.ID)
	if l > 0 {
		n += 1 + l + sovCaps(uint64(l))
	}
	if m.Enabled {
		n += 2
	}
	if m.Deprecated {
		n += 2
	}
	l = len(m.DisabledReason)
	if l > 0 {
		n += 1 + l + sovCaps(uint64(l))
	}
	l = len(m.DisabledReasonMsg)
	if l > 0 {
		n += 1 + l + sovCaps(uint64(l))
	}
	l = len(m.DisabledAlternative)
	if l > 0 {
		n += 1 + l + sovCaps(uint64(l))
	}
	if m.XXX_unrecognized != nil {
		n += len(m.XXX_unrecognized)
	}
	return n
}

// sovCaps returns the number of bytes needed to varint-encode x.
func sovCaps(x uint64) (n int) {
	return (math_bits.Len64(x|1) + 6) / 7
}

// sozCaps returns the varint size of x after zig-zag encoding (sint fields).
func sozCaps(x uint64) (n int) {
	return sovCaps(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
// Unmarshal decodes the protobuf wire format in dAtA into m (generated fast
// path). Unknown fields are appended verbatim to XXX_unrecognized so they
// survive re-encoding. The inner shift loops decode varints byte by byte.
func (m *APICap) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Decode the field tag (field number + wire type) as a varint.
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowCaps
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: APICap: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: APICap: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// ID (string, wire type 2: length-delimited)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCaps
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthCaps
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthCaps
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ID = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Enabled (bool, wire type 0: varint)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCaps
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Enabled = bool(v != 0)
		case 3:
			// Deprecated (bool, wire type 0: varint)
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Deprecated", wireType)
			}
			var v int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCaps
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Deprecated = bool(v != 0)
		case 4:
			// DisabledReason (string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DisabledReason", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCaps
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthCaps
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthCaps
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DisabledReason = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 5:
			// DisabledReasonMsg (string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DisabledReasonMsg", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCaps
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthCaps
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthCaps
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DisabledReasonMsg = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 6:
			// DisabledAlternative (string)
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field DisabledAlternative", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowCaps
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= uint64(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthCaps
			}
			postIndex := iNdEx + intStringLen
			if postIndex < 0 {
				return ErrInvalidLengthCaps
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.DisabledAlternative = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: skip it and stash the raw bytes for round-tripping.
			iNdEx = preIndex
			skippy, err := skipCaps(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthCaps
			}
			if (iNdEx + skippy) < 0 {
				return ErrInvalidLengthCaps
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// skipCaps returns the number of bytes occupied by the field that starts at
// the beginning of dAtA, tracking group nesting depth so that (deprecated)
// start-group/end-group pairs are skipped as a unit (generated).
func skipCaps(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0
	for iNdEx < l {
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowCaps
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// varint: consume bytes until the continuation bit clears
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowCaps
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// fixed64
			iNdEx += 8
		case 2:
			// length-delimited: varint length, then payload
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowCaps
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthCaps
			}
			iNdEx += length
		case 3:
			// start group
			depth++
		case 4:
			// end group
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupCaps
			}
			depth--
		case 5:
			// fixed32
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		if iNdEx < 0 {
			return 0, ErrInvalidLengthCaps
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}

// Sentinel errors returned by the generated decode helpers above.
var (
	ErrInvalidLengthCaps        = fmt.Errorf("proto: negative length found during unmarshaling")
	ErrIntOverflowCaps          = fmt.Errorf("proto: integer overflow")
	ErrUnexpectedEndOfGroupCaps = fmt.Errorf("proto: unexpected end of group")
)

View File

@@ -1,19 +0,0 @@
syntax = "proto3";

package moby.buildkit.v1.apicaps;

import "github.com/gogo/protobuf/gogoproto/gogo.proto";

// Generate gogo fast-path Size/Marshal/Unmarshal methods for all messages.
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;

// APICap defines a capability supported by the service
message APICap {
	string ID = 1;
	bool Enabled = 2;
	bool Deprecated = 3; // Unused. May be used for warnings in the future
	string DisabledReason = 4; // Reason key for detection code
	string DisabledReasonMsg = 5; // Message to the user
	string DisabledAlternative = 6; // Identifier that updated client could catch.
}

View File

@@ -1,3 +0,0 @@
package moby_buildkit_v1_apicaps

// Regenerate caps.pb.go from caps.proto with the vendored gogo protoc plugin:
//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. caps.proto

View File

@@ -1,41 +0,0 @@
package appcontext
import (
"context"
"os"
"os/signal"
"sync"
"github.com/sirupsen/logrus"
)
// appContextCache holds the singleton context; appContextOnce guards its
// one-time construction.
var appContextCache context.Context
var appContextOnce sync.Once

// Context returns a static context that reacts to termination signals of the
// running process. Useful in CLI tools. The first signal cancels the context;
// after three signals the process is forcibly terminated.
func Context() context.Context {
	appContextOnce.Do(func() {
		ctx, cancel := context.WithCancel(context.Background())
		appContextCache = ctx

		signals := make(chan os.Signal, 2048)
		signal.Notify(signals, terminationSignals...)

		go func() {
			const exitLimit = 3
			received := 0
			for range signals {
				cancel()
				received++
				if received >= exitLimit {
					logrus.Errorf("got %d SIGTERM/SIGINTs, forcing shutdown", received)
					os.Exit(1)
				}
			}
		}()
	})
	return appContextCache
}

View File

@@ -1,11 +0,0 @@
// +build !windows

package appcontext

import (
	"os"

	"golang.org/x/sys/unix"
)

// terminationSignals are the signals that cancel the app context on Unix:
// SIGTERM and SIGINT.
var terminationSignals = []os.Signal{unix.SIGTERM, unix.SIGINT}

View File

@@ -1,7 +0,0 @@
package appcontext

import (
	"os"
)

// terminationSignals are the signals that cancel the app context on Windows;
// only os.Interrupt is available here (no SIGTERM).
var terminationSignals = []os.Signal{os.Interrupt}

View File

@@ -1,69 +0,0 @@
// +build !windows
package appdefaults
import (
"os"
"path/filepath"
"strings"
)
// Default system-wide locations for the buildkitd socket, state and config.
const (
	Address   = "unix:///run/buildkit/buildkitd.sock"
	Root      = "/var/lib/buildkit"
	ConfigDir = "/etc/buildkit"
)

// UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock,
// falling back to the system Address when XDG_RUNTIME_DIR is unset.
func UserAddress() string {
	// pam_systemd sets XDG_RUNTIME_DIR but not other dirs.
	if xdg := os.Getenv("XDG_RUNTIME_DIR"); xdg != "" {
		runDir := strings.Split(xdg, ":")[0]
		return "unix://" + filepath.Join(runDir, "buildkit", "buildkitd.sock")
	}
	return Address
}
// EnsureUserAddressDir sets sticky bit on XDG_RUNTIME_DIR if XDG_RUNTIME_DIR is set.
// See https://github.com/opencontainers/runc/issues/1694
// EnsureUserAddressDir creates $XDG_RUNTIME_DIR/buildkit (mode 0700) and sets
// the sticky bit on it, if XDG_RUNTIME_DIR is set; otherwise it is a no-op.
// See https://github.com/opencontainers/runc/issues/1694
func EnsureUserAddressDir() error {
	xdg := os.Getenv("XDG_RUNTIME_DIR")
	if xdg == "" {
		return nil
	}
	dir := filepath.Join(strings.Split(xdg, ":")[0], "buildkit")
	if err := os.MkdirAll(dir, 0700); err != nil {
		return err
	}
	return os.Chmod(dir, 0700|os.ModeSticky)
}
// UserRoot typically returns /home/$USER/.local/share/buildkit
func UserRoot() string {
// pam_systemd sets XDG_RUNTIME_DIR but not other dirs.
xdgDataHome := os.Getenv("XDG_DATA_HOME")
if xdgDataHome != "" {
dirs := strings.Split(xdgDataHome, ":")
return filepath.Join(dirs[0], "buildkit")
}
home := os.Getenv("HOME")
if home != "" {
return filepath.Join(home, ".local", "share", "buildkit")
}
return Root
}
// UserConfigDir returns dir for storing config. /home/$USER/.config/buildkit/
func UserConfigDir() string {
xdgConfigHome := os.Getenv("XDG_CONFIG_HOME")
if xdgConfigHome != "" {
return filepath.Join(xdgConfigHome, "buildkit")
}
home := os.Getenv("HOME")
if home != "" {
return filepath.Join(home, ".config", "buildkit")
}
return ConfigDir
}

View File

@@ -1,31 +0,0 @@
package appdefaults
import (
"os"
"path/filepath"
)
const (
	// Address is the default named-pipe address of buildkitd on Windows.
	Address = "npipe:////./pipe/buildkitd"
)

var (
	// Root is the default build-state directory, under %ProgramData%.
	Root = filepath.Join(os.Getenv("ProgramData"), "buildkitd", ".buildstate")
	// ConfigDir is the default configuration directory, under %ProgramData%.
	ConfigDir = filepath.Join(os.Getenv("ProgramData"), "buildkitd")
)
// UserAddress returns the default Address; there is no per-user pipe here.
func UserAddress() string {
	return Address
}

// EnsureUserAddressDir is a no-op on Windows.
func EnsureUserAddressDir() error {
	return nil
}

// UserRoot returns the default Root; there is no per-user root on Windows.
func UserRoot() string {
	return Root
}

// UserConfigDir returns the default ConfigDir.
func UserConfigDir() string {
	return ConfigDir
}

View File

@@ -1,8 +0,0 @@
// +build !386
package binfmt_misc
// This file is generated by running make inside the binfmt_misc package.
// Do not edit manually.
const Binary386 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\xd8\x31\x6e\xc2\x30\x14\x06\xe0\xdf\x8d\xdb\x26\x6a\x07\x1f\x20\xaa\x3a\x74\xe8\x64\xb5\x52\xae\x00\x2c\x88\x8d\x03\x80\x14\xc1\x94\x44\x89\x91\x60\x22\x47\x60\xe0\x20\x8c\x8c\x5c\x80\x13\x70\x19\xf4\xe2\x67\x91\x81\x25\xfb\xfb\xa4\x5f\x16\xcf\xe6\x29\xeb\x7b\xfb\xd1\x74\xac\x94\x42\xf0\x82\x08\xdd\xaf\x83\x8e\x33\x00\x7f\xc6\xd7\x33\x7c\x23\xc2\x2f\x74\xb8\x27\xad\x8e\x29\x27\x00\x14\x4d\x35\x03\x7f\x6f\x7c\x0f\x4a\x02\x80\xf2\xca\x75\x7a\x77\xa4\xb4\x3a\xa6\xa4\x00\x52\xfe\x7f\xc8\x27\xbf\x9f\xcc\xe6\xd4\xef\x42\xb5\xc7\x57\x0a\x21\x84\x10\x42\x08\x21\x84\x10\x62\x88\x33\x0d\xd5\xff\xb7\x6b\x0b\xdb\xac\x1b\x57\xbb\xc5\x12\xb6\x28\x5d\x6e\x57\xc5\xc6\x56\x75\x59\xe5\xb5\xdb\xc1\xba\x7c\xeb\x86\xf4\xfd\x00\xf0\xde\xed\x13\x78\xce\xe7\x19\x3f\xd0\x7c\x7e\xf1\x5c\xff\xc6\x3b\x07\x18\xbf\x2b\x08\x54\xef\x8c\x7a\xf5\xc4\x00\x3f\x4f\xde\xdd\x03\x00\x00\xff\xff\x8d\xf7\xd2\x72\xd0\x10\x00\x00"

View File

@@ -1,7 +0,0 @@
// +build !386

package binfmt_misc

// i386Supported probes whether this host can execute linux/386 binaries by
// running the embedded test binary (Binary386) through check.
func i386Supported() error {
	return check(Binary386)
}

View File

@@ -1,7 +0,0 @@
// +build 386

package binfmt_misc

// i386Supported trivially succeeds: the host itself is 386.
func i386Supported() error {
	return nil
}

View File

@@ -1,56 +0,0 @@
# Base stage with cross-binutils for every architecture whose probe binary
# gets embedded into the binfmt_misc package.
FROM debian:buster-slim AS base
RUN apt-get update && apt-get --no-install-recommends install -y \
	binutils-arm-linux-gnueabihf \
	binutils-aarch64-linux-gnu \
	binutils-x86-64-linux-gnu \
	binutils-i686-linux-gnu \
	binutils-riscv64-linux-gnu \
	binutils-s390x-linux-gnu \
	binutils-powerpc64le-linux-gnu
WORKDIR /src

# One stage per architecture: assemble and link a minimal "exit" binary.
FROM base AS exit-amd64
COPY fixtures/exit.amd64.s .
RUN x86_64-linux-gnu-as -o exit.o exit.amd64.s && x86_64-linux-gnu-ld -o exit -s exit.o

FROM base AS exit-386
COPY fixtures/exit.386.s .
RUN i686-linux-gnu-as -o exit.o exit.386.s && i686-linux-gnu-ld -o exit -s exit.o

FROM base AS exit-arm64
COPY fixtures/exit.arm64.s .
RUN aarch64-linux-gnu-as -o exit.o exit.arm64.s && aarch64-linux-gnu-ld -o exit -s exit.o

FROM base AS exit-arm
COPY fixtures/exit.arm.s .
RUN arm-linux-gnueabihf-as -o exit.o exit.arm.s && arm-linux-gnueabihf-ld -o exit -s exit.o

FROM base AS exit-riscv64
COPY fixtures/exit.riscv64.s .
RUN riscv64-linux-gnu-as -o exit.o exit.riscv64.s && riscv64-linux-gnu-ld -o exit -s exit.o

FROM base AS exit-s390x
COPY fixtures/exit.s390x.s .
RUN s390x-linux-gnu-as -o exit.o exit.s390x.s && s390x-linux-gnu-ld -o exit -s exit.o

FROM base AS exit-ppc64le
COPY fixtures/exit.ppc64le.s .
RUN powerpc64le-linux-gnu-as -o exit.o exit.ppc64le.s && powerpc64le-linux-gnu-ld -o exit -s exit.o

# Collect all probe binaries and run generate.go to gzip + embed them as
# the *_binary.go constant files.
FROM golang:1.13-alpine AS generate
WORKDIR /src
COPY --from=exit-amd64 /src/exit amd64
COPY --from=exit-386 /src/exit 386
COPY --from=exit-arm64 /src/exit arm64
COPY --from=exit-arm /src/exit arm
COPY --from=exit-riscv64 /src/exit riscv64
COPY --from=exit-s390x /src/exit s390x
COPY --from=exit-ppc64le /src/exit ppc64le
COPY generate.go .
RUN go run generate.go amd64 386 arm64 arm riscv64 s390x ppc64le && ls -l

# Export only the generated Go sources.
FROM scratch
COPY --from=generate /src/*_binary.go /

View File

@@ -1,4 +0,0 @@
# Regenerate the embedded *_binary.go files via the Dockerfile in this
# directory; results are written back here with a local output.
generate:
	buildctl build --frontend dockerfile.v0 --local context=. --local dockerfile=. --output type=local,dest=.

.PHONY: generate

View File

@@ -1,8 +0,0 @@
// +build !amd64
package binfmt_misc
// This file is generated by running make inside the binfmt_misc package.
// Do not edit manually.
const Binaryamd64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x06\x3b\x06\x30\x4f\xc0\x01\xcc\x77\x80\x8a\x1b\x08\xc0\x95\x30\x38\x30\x58\x30\x30\x33\x38\x30\xb0\x30\x30\x83\xd5\xb2\x30\x20\x03\x07\x14\x9a\x03\x6a\x34\x8c\x66\x80\x9a\x03\xe2\xb2\x22\xf1\x61\xf6\xc1\x68\x1e\xa8\x30\x8c\x86\xa9\x63\x81\xe2\x17\x50\xe1\x17\x50\x7b\x60\xb4\x02\x54\x1c\x46\x73\x30\x20\xf4\x09\x40\xed\x74\xf7\x0b\x05\xd9\x7f\x80\x05\xea\x8e\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\x14\x8c\x82\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\xb8\x03\x8f\xe3\x07\x6c\x40\x94\xe1\x7f\x7e\x56\x06\xbd\xe2\x8c\xe2\x92\xa2\x92\xc4\x24\x06\xbd\xbc\xfc\x92\x54\xbd\xf4\xbc\x52\xbd\x82\xa2\xfc\x82\xd4\xa2\x92\x4a\x06\xbd\x92\xd4\x8a\x12\x8a\xed\xe3\x66\x60\x60\x60\x07\x8f\x33\xa0\xf7\xdf\x51\xfb\xed\x0c\x68\xfd\x77\x18\x90\x83\xf6\xd9\xd9\x18\xd0\xc7\x0d\xd0\xc6\x0b\x18\x10\xe3\x0c\xe8\x7c\x66\x2c\xee\xe2\x81\xea\x57\x21\xa0\x1f\x10\x00\x00\xff\xff\x8a\x1b\xd7\x73\x30\x11\x00\x00"

View File

@@ -1,7 +0,0 @@
// +build !amd64

package binfmt_misc

// amd64Supported probes whether this host can execute linux/amd64 binaries by
// running the embedded test binary (Binaryamd64) through check.
func amd64Supported() error {
	return check(Binaryamd64)
}

View File

@@ -1,7 +0,0 @@
// +build amd64

package binfmt_misc

// amd64Supported trivially succeeds: the host itself is amd64.
func amd64Supported() error {
	return nil
}

View File

@@ -1,8 +0,0 @@
// +build !arm64
package binfmt_misc
// This file is generated by running make inside the binfmt_misc package.
// Do not edit manually.
const Binaryarm64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x86\xed\x0c\x20\x5e\x05\x83\x03\x98\xef\x00\x15\x9f\xc1\x80\x00\x0e\x0c\x16\x0c\x8c\x0c\x0e\x0c\xcc\x0c\x4c\x60\xb5\xac\x0c\x0c\x28\xb2\xc8\x74\x0b\x94\xd7\x02\x97\x87\xd9\xd5\x70\x69\x05\x77\xc3\x25\x46\x06\x86\x2b\x0c\x7a\xc5\x19\xc5\x25\x45\x25\x89\x49\x0c\x7a\x25\xa9\x15\x25\x0c\x54\x00\xdc\x50\x9b\xd8\xa0\x7c\x98\x7f\x2a\xa0\x7c\x1e\x34\xf5\x2c\x68\x7c\x90\x5e\x66\x2c\xe6\xc2\xfc\x21\x88\x45\x3d\x32\x00\x04\x00\x00\xff\xff\xe7\x30\x54\x02\x58\x01\x00\x00"

View File

@@ -1,7 +0,0 @@
// +build !arm64

package binfmt_misc

// arm64Supported probes whether this host can execute linux/arm64 binaries by
// running the embedded test binary (Binaryarm64) through check.
func arm64Supported() error {
	return check(Binaryarm64)
}

View File

@@ -1,7 +0,0 @@
// +build arm64

package binfmt_misc

// arm64Supported trivially succeeds: the host itself is arm64.
func arm64Supported() error {
	return nil
}

View File

@@ -1,8 +0,0 @@
// +build !arm
package binfmt_misc
// This file is generated by running make inside the binfmt_misc package.
// Do not edit manually.
const Binaryarm = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\x8c\x8e\x31\x0e\x82\x40\x14\x44\xdf\x17\x50\x13\x6d\xf4\x04\xda\x51\x6d\xc5\x05\x28\xb4\xd2\xc6\x70\x00\x97\x84\x44\x3a\x02\xdf\xc4\xce\x4b\x78\x00\xee\xc6\x01\xbc\x82\x01\x17\xdd\xc2\xc2\x49\x26\x93\x7d\x3b\xc9\x9f\xfb\xee\xb0\x17\x11\x46\x4d\x88\xe9\x5f\x19\x42\x02\x3c\xde\x30\x4a\xd8\x20\xc4\x84\x04\x7c\xdb\x32\xf8\x0c\x83\xa3\x0f\x6b\x3b\xa9\xda\x0e\x78\xa6\x2b\xc0\x16\x36\x2f\x91\x19\x30\x17\x4c\x73\x69\xb4\x56\x9b\x63\xb4\xb8\x29\x26\x3d\x1d\x8d\x55\xad\xcb\xfc\xaa\x45\xc3\xdf\x5a\xb8\x6b\x53\xb7\x37\x03\x96\xde\x7f\xe8\xb2\x9f\x10\x40\x35\xf2\x7e\xeb\xda\xeb\x89\x97\x81\xc7\x6b\x60\xfb\xa3\xf7\x0a\x00\x00\xff\xff\x73\x8f\xca\xf1\x34\x01\x00\x00"

View File

@@ -1,7 +0,0 @@
// +build !arm

package binfmt_misc

// armSupported probes whether this host can execute linux/arm binaries by
// running the embedded test binary (Binaryarm) through check.
func armSupported() error {
	return check(Binaryarm)
}

View File

@@ -1,7 +0,0 @@
// +build arm

package binfmt_misc

// armSupported trivially succeeds: the host itself is arm.
func armSupported() error {
	return nil
}

View File

@@ -1,42 +0,0 @@
package binfmt_misc
import (
"bytes"
"compress/gzip"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
)
// check reports whether the host can execute the given probe binary.
// bin is a gzip-compressed executable (one of the embedded Binary* constants);
// it is unpacked into a temporary directory and executed chrooted there as
// /check. A nil return means the binary ran successfully, i.e. the matching
// binfmt_misc emulation (or native support) is available.
func check(bin string) error {
	tmpdir, err := ioutil.TempDir("", "qemu-check")
	if err != nil {
		return err
	}
	defer os.RemoveAll(tmpdir)

	pp := filepath.Join(tmpdir, "check")
	r, err := gzip.NewReader(bytes.NewReader([]byte(bin)))
	if err != nil {
		return err
	}
	defer r.Close()

	f, err := os.OpenFile(pp, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700)
	if err != nil {
		return err
	}
	// The file must be fully written and closed before it is executed
	// (otherwise exec can fail with ETXTBSY). Close errors are checked:
	// a failed close can mean the binary was written incompletely.
	if _, err := io.Copy(f, r); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}

	cmd := exec.Command("/check")
	withChroot(cmd, tmpdir)
	return cmd.Run()
}

View File

@@ -1,14 +0,0 @@
// +build !windows

package binfmt_misc

import (
	"os/exec"
	"syscall"
)

// withChroot confines cmd to dir by setting Chroot in its SysProcAttr, so the
// unpacked probe binary is executed as /check inside dir.
func withChroot(cmd *exec.Cmd, dir string) {
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Chroot: dir,
	}
}

View File

@@ -1,10 +0,0 @@
// +build windows

package binfmt_misc

import (
	"os/exec"
)

// withChroot is a no-op on Windows, which has no chroot; cmd runs unconfined.
func withChroot(cmd *exec.Cmd, dir string) {
}

View File

@@ -1,134 +0,0 @@
package binfmt_misc
import (
"strings"
"sync"
"github.com/containerd/containerd/platforms"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
)
// mu guards arr, the cached list of supported platform strings.
var mu sync.Mutex
var arr []string
// SupportedPlatforms returns the platform strings this host can execute,
// starting with the default platform and followed by every emulated platform
// whose probe succeeds. The result is cached unless noCache is set.
func SupportedPlatforms(noCache bool) []string {
	mu.Lock()
	defer mu.Unlock()
	if !noCache && arr != nil {
		return arr
	}
	def := defaultPlatform()
	arr = append([]string{}, def)
	// Probe each non-default platform in a fixed order.
	probes := []struct {
		platform string
		runnable func() error
	}{
		{"linux/amd64", amd64Supported},
		{"linux/arm64", arm64Supported},
		{"linux/riscv64", riscv64Supported},
		{"linux/ppc64le", ppc64leSupported},
		{"linux/s390x", s390xSupported},
		{"linux/386", i386Supported},
	}
	for _, pr := range probes {
		if def != pr.platform && pr.runnable() == nil {
			arr = append(arr, pr.platform)
		}
	}
	if !strings.HasPrefix(def, "linux/arm/") && armSupported() == nil {
		arr = append(arr, "linux/arm/v7", "linux/arm/v6")
	} else if def == "linux/arm/v7" {
		arr = append(arr, "linux/arm/v6")
	}
	return arr
}
// Check reports whether the given platform can be executed on this host.
func Check(pp specs.Platform) bool {
	p := platforms.Format(pp)
	probes := map[string]func() error{
		"linux/amd64":   amd64Supported,
		"linux/arm64":   arm64Supported,
		"linux/riscv64": riscv64Supported,
		"linux/ppc64le": ppc64leSupported,
		"linux/s390x":   s390xSupported,
		"linux/386":     i386Supported,
	}
	if probe, ok := probes[p]; ok && probe() == nil {
		return true
	}
	// Any platform outside linux/arm/* is additionally considered supported
	// when arm emulation is available.
	if !strings.HasPrefix(p, "linux/arm/") && armSupported() == nil {
		return true
	}
	return false
}
// WarnIfUnsupported validates the platforms and shows a warning message for
// each one that fails its probe, so the end user can fix the issue based on
// those warnings; no platform is dropped from the candidates.
func WarnIfUnsupported(pfs []string) {
	def := defaultPlatform()
	probes := map[string]func() error{
		"linux/amd64":   amd64Supported,
		"linux/arm64":   arm64Supported,
		"linux/riscv64": riscv64Supported,
		"linux/ppc64le": ppc64leSupported,
		"linux/s390x":   s390xSupported,
		"linux/386":     i386Supported,
	}
	for _, p := range pfs {
		if p == def {
			continue
		}
		if probe, ok := probes[p]; ok {
			if err := probe(); err != nil {
				printPlatfromWarning(p, err)
			}
		}
		if strings.HasPrefix(p, "linux/arm/v6") || strings.HasPrefix(p, "linux/arm/v7") {
			if err := armSupported(); err != nil {
				printPlatfromWarning(p, err)
			}
		}
	}
}
// defaultPlatform returns the normalized platform string of the current host,
// e.g. "linux/amd64".
func defaultPlatform() string {
	return platforms.Format(platforms.Normalize(platforms.DefaultSpec()))
}
// printPlatfromWarning logs a user-friendly warning explaining why platform p
// failed its emulation probe, classifying common causes by the error text.
// (The misspelled name is kept for compatibility with existing callers.)
func printPlatfromWarning(p string, err error) {
	msg := err.Error()
	switch {
	case strings.Contains(msg, "exec format error"):
		// The probe binary could not be executed at all: binfmt_misc support
		// for this architecture is likely missing from the kernel.
		logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binaries may not be enabled.", p)
	case strings.Contains(msg, "no such file or directory"):
		// The interpreter was not found inside the chroot: the emulator was
		// probably registered without the fix-binary ('F') flag.
		logrus.Warnf("platform %s cannot pass the validation, the '-F' flag might not have been set for 'binfmt_misc'.", p)
	default:
		logrus.Warnf("platform %s cannot pass the validation: %s", p, msg)
	}
}

View File

@@ -1,8 +0,0 @@
// +build !ppc64le
package binfmt_misc
// This file is generated by running make inside the binfmt_misc package.
// Do not edit manually.
const Binaryppc64le = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x06\x51\x06\x10\xaf\x82\x81\x41\x00\xc4\x77\x80\x8a\x2f\x80\xcb\x83\xc4\x2c\x18\x18\x19\x1c\x18\x58\x18\x98\xc1\x6a\x59\x19\x50\x80\x00\x32\xdd\x02\xe5\xb4\xc0\xa5\x19\x61\xa4\x05\x03\x43\x82\x05\x13\x03\x83\x0b\x83\x5e\x71\x46\x71\x49\x51\x49\x62\x12\x83\x5e\x49\x6a\x45\x09\x83\x5e\x6a\x46\x7c\x5a\x51\x62\x6e\x2a\x03\xc5\x80\x1b\x6a\x23\x1b\x94\x0f\xf3\x57\x05\x94\xcf\x83\xa6\x9e\x03\x8d\x2f\x08\xd5\xcf\x84\xf0\x87\x00\xaa\x7f\x50\x01\x0b\x1a\x1f\xa4\x97\x19\x8b\x3a\x98\x7e\x69\x2c\xea\x91\x01\x20\x00\x00\xff\xff\xce\xf7\x15\x75\xa0\x01\x00\x00"

View File

@@ -1,7 +0,0 @@
// +build !ppc64le
package binfmt_misc
// ppc64leSupported reports whether ppc64le binaries can run on this
// (non-ppc64le) host by attempting to execute the embedded probe binary.
func ppc64leSupported() error {
	return check(Binaryppc64le)
}

View File

@@ -1,7 +0,0 @@
// +build ppc64le
package binfmt_misc
// ppc64leSupported always succeeds: this build runs natively on ppc64le.
func ppc64leSupported() error {
	return nil
}

View File

@@ -1,8 +0,0 @@
// +build !riscv64
package binfmt_misc
// This file is generated by running make inside the binfmt_misc package.
// Do not edit manually.
const Binaryriscv64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x86\xcf\x0c\x20\x5e\x05\x03\x44\xcc\x01\x2a\x3e\x03\x4a\xb3\x80\xc5\x2c\x18\x18\x19\x1c\x18\x98\x19\x98\xc0\xaa\x58\x19\x90\x01\x23\x0a\xdd\x02\xe5\xc1\x68\x06\x01\x08\x25\xcc\xca\xc0\x30\x99\xe3\x02\x6b\x31\x88\xa3\x57\x9c\x51\x5c\x52\x54\x92\x98\xc4\xa0\x57\x92\x5a\x51\xc2\x40\x05\xc0\x0d\x75\x01\x1b\x94\x0f\xf3\x4f\x05\x94\xcf\x83\xa6\x9e\x05\x8d\x0f\x52\xcd\x8c\xc5\x5c\x98\x3f\x04\xb1\xa8\x47\x06\x80\x00\x00\x00\xff\xff\x39\x41\xdf\xa1\x58\x01\x00\x00"

View File

@@ -1,7 +0,0 @@
// +build !riscv64
package binfmt_misc
// riscv64Supported reports whether riscv64 binaries can run on this
// (non-riscv64) host by attempting to execute the embedded probe binary.
func riscv64Supported() error {
	return check(Binaryriscv64)
}

View File

@@ -1,7 +0,0 @@
// +build riscv64
package binfmt_misc
// riscv64Supported always succeeds: this build runs natively on riscv64.
func riscv64Supported() error {
	return nil
}

View File

@@ -1,8 +0,0 @@
// +build !s390x
package binfmt_misc
// This file is generated by running make inside the binfmt_misc package.
// Do not edit manually.
const Binarys390x = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x62\x64\x80\x03\x26\x06\x31\x06\x06\x06\xb0\x00\x23\x03\x43\x05\x54\xd4\x01\x4a\xcf\x80\xf2\x2c\x18\x18\x19\x1c\x18\x98\x19\x98\xa0\x6a\x59\x19\x90\x00\x23\x1a\xcd\xc0\xc0\xd0\x80\x4a\x0b\x30\x2c\xd7\x64\x60\xe0\x62\x64\x67\x67\xd0\x2b\xce\x28\x2e\x29\x2a\x49\x4c\x62\xd0\x2b\x49\xad\x28\x61\xa0\x1e\xe0\x46\x72\x02\x1b\x9a\x7f\x60\x34\x07\x9a\x1e\x16\x34\x6f\x30\xe3\x30\x1b\xe6\x1f\x41\x34\x71\xb8\x97\x01\x01\x00\x00\xff\xff\x0c\x76\x9a\xe1\x58\x01\x00\x00"

View File

@@ -1,7 +0,0 @@
// +build !s390x
package binfmt_misc
// s390xSupported reports whether s390x binaries can run on this (non-s390x)
// host by attempting to execute the embedded probe binary.
func s390xSupported() error {
	return check(Binarys390x)
}

View File

@@ -1,7 +0,0 @@
// +build s390x
package binfmt_misc
// s390xSupported always succeeds: this build runs natively on s390x.
func s390xSupported() error {
	return nil
}

View File

@@ -1,40 +0,0 @@
package cond
import (
"sync"
)
// NewStatefulCond returns a stateful version of sync.Cond . This cond will
// never block on `Wait()` if `Signal()` has been called after the `Wait()` last
// returned. This is useful for avoiding to take a lock on `cond.Locker` for
// signalling.
func NewStatefulCond(l sync.Locker) *StatefulCond {
sc := &StatefulCond{main: l}
sc.c = sync.NewCond(&sc.mu)
return sc
}
type StatefulCond struct {
main sync.Locker
mu sync.Mutex
c *sync.Cond
signalled bool
}
func (s *StatefulCond) Wait() {
s.main.Unlock()
s.mu.Lock()
if !s.signalled {
s.c.Wait()
}
s.signalled = false
s.mu.Unlock()
s.main.Lock()
}
func (s *StatefulCond) Signal() {
s.mu.Lock()
s.signalled = true
s.c.Signal()
s.mu.Unlock()
}

View File

@@ -1,156 +0,0 @@
package contentutil
import (
"bytes"
"context"
"io/ioutil"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Buffer is a content provider and ingester that keeps data in memory
type Buffer interface {
	content.Provider
	content.Ingester
}

// NewBuffer returns a new buffer
func NewBuffer() Buffer {
	return &buffer{
		buffers: map[digest.Digest][]byte{},
		refs:    map[string]struct{}{},
	}
}

// buffer is the in-memory implementation backing Buffer.
type buffer struct {
	mu      sync.Mutex               // guards buffers and refs
	buffers map[digest.Digest][]byte // committed content keyed by digest
	// refs tracks writer refs considered locked. NOTE(review): nothing in
	// this file inserts into refs, while Writer checks it and releaseRef
	// deletes from it — the per-ref locking appears to be a no-op as written.
	refs map[string]struct{}
}
// Writer opens an in-memory writer for the given ref. Only one writer per
// ref may be open at a time: a second request for the same ref fails with
// ErrUnavailable until the first writer is closed or committed.
func (b *buffer) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
	var wOpts content.WriterOpts
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil, err
		}
	}
	b.mu.Lock()
	if _, ok := b.refs[wOpts.Ref]; ok {
		// BUG fix: this path previously returned while still holding b.mu,
		// permanently deadlocking every subsequent buffer operation.
		b.mu.Unlock()
		return nil, errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", wOpts.Ref)
	}
	// Register the ref so concurrent Writer calls for the same ref are
	// rejected; releaseRef below removes it when the writer is released.
	// (Previously the ref was never registered, so the check above could
	// never trigger.)
	b.refs[wOpts.Ref] = struct{}{}
	b.mu.Unlock()
	return &bufferedWriter{
		main:     b,
		digester: digest.Canonical.Digester(),
		buffer:   bytes.NewBuffer(nil),
		expected: wOpts.Desc.Digest,
		releaseRef: func() {
			b.mu.Lock()
			delete(b.refs, wOpts.Ref)
			b.mu.Unlock()
		},
	}, nil
}
// ReaderAt returns a content.ReaderAt over previously committed content.
func (b *buffer) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
	r, err := b.getBytesReader(ctx, desc.Digest)
	if err != nil {
		return nil, err
	}
	return &readerAt{Reader: r, Closer: ioutil.NopCloser(r), size: int64(r.Len())}, nil
}

// getBytesReader returns a reader over the stored bytes for dgst, or
// ErrNotFound when the digest has not been committed to this buffer.
func (b *buffer) getBytesReader(ctx context.Context, dgst digest.Digest) (*bytes.Reader, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if dt, ok := b.buffers[dgst]; ok {
		return bytes.NewReader(dt), nil
	}
	return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
}

// addValue stores the committed bytes dt under digest k.
func (b *buffer) addValue(k digest.Digest, dt []byte) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.buffers[k] = dt
}
// bufferedWriter accumulates writes in memory and publishes them to the
// parent buffer on Commit.
type bufferedWriter struct {
	main       *buffer
	ref        string // NOTE(review): never assigned in this file; Status reports it as-is
	offset     int64
	total      int64 // NOTE(review): never assigned in this file
	startedAt  time.Time
	updatedAt  time.Time
	buffer     *bytes.Buffer // nil after Close/Commit
	expected   digest.Digest // digest expected from WriterOpts.Desc, "" if none
	digester   digest.Digester
	releaseRef func() // removes this writer's ref from buffer.refs
}
// Write appends p to the in-memory buffer, feeds the accepted prefix to the
// digester, and advances the offset accounting.
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
	n, err = w.buffer.Write(p)
	w.digester.Hash().Write(p[:n])
	// Advance by the number of bytes actually accepted (n), not len(p):
	// previously len(p) could overstate the offset on a short write, leaving
	// offset out of sync with the digester above, which hashes only p[:n].
	w.offset += int64(n)
	w.updatedAt = time.Now()
	return n, err
}
// Close releases the writer's ref without committing. Safe to call more than
// once; after the first call the writer is inert.
func (w *bufferedWriter) Close() error {
	if w.buffer != nil {
		w.releaseRef()
		w.buffer = nil
	}
	return nil
}

// Status reports the current write progress.
func (w *bufferedWriter) Status() (content.Status, error) {
	return content.Status{
		Ref:       w.ref,
		Offset:    w.offset,
		Total:     w.total,
		StartedAt: w.startedAt,
		UpdatedAt: w.updatedAt,
	}, nil
}

// Digest returns the digest of the bytes written so far.
func (w *bufferedWriter) Digest() digest.Digest {
	return w.digester.Digest()
}

// Commit validates the size and digest expectations, publishes the buffered
// bytes to the parent buffer, and closes the writer.
func (w *bufferedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opt ...content.Opt) error {
	if w.buffer == nil {
		return errors.Errorf("can't commit already committed or closed")
	}
	if s := int64(w.buffer.Len()); size > 0 && size != s {
		return errors.Errorf("unexpected commit size %d, expected %d", s, size)
	}
	dgst := w.digester.Digest()
	if expected != "" && expected != dgst {
		return errors.Errorf("unexpected digest: %v != %v", dgst, expected)
	}
	// The writer-level expectation (from WriterOpts.Desc at creation time) is
	// checked in addition to the Commit argument.
	if w.expected != "" && w.expected != dgst {
		return errors.Errorf("unexpected digest: %v != %v", dgst, w.expected)
	}
	w.main.addValue(dgst, w.buffer.Bytes())
	return w.Close()
}

// Truncate resets the writer for a retry; only size 0 is supported.
func (w *bufferedWriter) Truncate(size int64) error {
	if size != 0 {
		return errors.New("Truncate: unsupported size")
	}
	w.offset = 0
	w.digester.Hash().Reset()
	w.buffer.Reset()
	return nil
}

View File

@@ -1,81 +0,0 @@
package contentutil
import (
"context"
"io"
"sync"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Copy copies the single blob identified by desc from provider into ingester.
func Copy(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispec.Descriptor) error {
	if _, err := remotes.FetchHandler(ingester, &localFetcher{provider})(ctx, desc); err != nil {
		return err
	}
	return nil
}

// localFetcher adapts a content.Provider to the remotes.Fetcher interface.
type localFetcher struct {
	content.Provider
}

// Fetch returns a streaming reader over the blob described by desc.
func (f *localFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
	r, err := f.Provider.ReaderAt(ctx, desc)
	if err != nil {
		return nil, err
	}
	return &rc{ReaderAt: r}, nil
}

// rc adapts a content.ReaderAt to io.Reader by tracking a read offset.
type rc struct {
	content.ReaderAt
	offset int // NOTE(review): int (not int64) limits blobs to ~2GiB on 32-bit builds
}

// Read implements io.Reader on top of ReadAt.
func (r *rc) Read(b []byte) (int, error) {
	n, err := r.ReadAt(b, int64(r.offset))
	r.offset += n
	// Defer EOF to the next call whenever any bytes were returned.
	if n > 0 && err == io.EOF {
		err = nil
	}
	return n, err
}
// CopyChain copies desc and all of its children from provider into ingester.
// Manifests/indexes are collected during the traversal and copied afterwards
// in reverse discovery order, so a manifest is never written before the
// content it references.
func CopyChain(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispec.Descriptor) error {
	var m sync.Mutex
	manifestStack := []ocispec.Descriptor{}

	// filterHandler pulls manifests/indexes out of the dispatch (stopping
	// further handling for them) so they can be copied last.
	filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		switch desc.MediaType {
		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
			images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
			m.Lock()
			manifestStack = append(manifestStack, desc)
			m.Unlock()
			return nil, images.ErrStopHandler
		default:
			return nil, nil
		}
	})
	handlers := []images.Handler{
		images.ChildrenHandler(provider),
		filterHandler,
		remotes.FetchHandler(ingester, &localFetcher{provider}),
	}

	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil {
		return errors.WithStack(err)
	}
	// Copy the collected manifests children-first.
	for i := len(manifestStack) - 1; i >= 0; i-- {
		if err := Copy(ctx, ingester, provider, manifestStack[i]); err != nil {
			return errors.WithStack(err)
		}
	}
	return nil
}

View File

@@ -1,73 +0,0 @@
package contentutil
import (
"context"
"io"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/remotes"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// FromFetcher wraps a remotes.Fetcher as a content.Provider.
func FromFetcher(f remotes.Fetcher) content.Provider {
	return &fetchedProvider{
		f: f,
	}
}

type fetchedProvider struct {
	f remotes.Fetcher
}

// ReaderAt fetches desc and adapts the resulting stream to content.ReaderAt.
func (p *fetchedProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
	rc, err := p.f.Fetch(ctx, desc)
	if err != nil {
		return nil, err
	}
	return &readerAt{Reader: rc, Closer: rc, size: desc.Size}, nil
}

// readerAt adapts a sequential io.Reader to content.ReaderAt. Random access
// only works when the underlying reader also implements io.ReaderAt or
// io.Seeker; otherwise offsets must be requested in order.
type readerAt struct {
	io.Reader
	io.Closer
	size   int64
	offset int64 // current position of the sequential reader
}

func (r *readerAt) ReadAt(b []byte, off int64) (int, error) {
	// Fast path: delegate when the source supports random access natively.
	if ra, ok := r.Reader.(io.ReaderAt); ok {
		return ra.ReadAt(b, off)
	}
	if r.offset != off {
		if seeker, ok := r.Reader.(io.Seeker); ok {
			if _, err := seeker.Seek(off, io.SeekStart); err != nil {
				return 0, err
			}
			r.offset = off
		} else {
			return 0, errors.Errorf("unsupported offset")
		}
	}
	var totalN int
	for len(b) > 0 {
		n, err := r.Reader.Read(b)
		// Swallow EOF when the request was satisfied exactly.
		if err == io.EOF && n == len(b) {
			err = nil
		}
		r.offset += int64(n)
		totalN += n
		b = b[n:]
		if err != nil {
			return totalN, err
		}
	}
	return totalN, nil
}

// Size returns the size recorded in the descriptor at construction time.
func (r *readerAt) Size() int64 {
	return r.size
}

View File

@@ -1,48 +0,0 @@
package contentutil
import (
"context"
"sync"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// NewMultiProvider creates a new mutable provider with a base provider
func NewMultiProvider(base content.Provider) *MultiProvider {
	return &MultiProvider{
		base: base,
		sub:  map[digest.Digest]content.Provider{},
	}
}

// MultiProvider is a provider backed by a mutable map of providers
type MultiProvider struct {
	mu   sync.RWMutex
	base content.Provider // fallback when no per-digest provider is registered; may be nil
	sub  map[digest.Digest]content.Provider
}

// ReaderAt returns a content.ReaderAt
func (mp *MultiProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
	mp.mu.RLock()
	if p, ok := mp.sub[desc.Digest]; ok {
		mp.mu.RUnlock()
		return p.ReaderAt(ctx, desc)
	}
	mp.mu.RUnlock()
	if mp.base == nil {
		return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", desc.Digest)
	}
	return mp.base.ReaderAt(ctx, desc)
}

// Add adds a new child provider for a specific digest
func (mp *MultiProvider) Add(dgst digest.Digest, p content.Provider) {
	mp.mu.Lock()
	defer mp.mu.Unlock()
	mp.sub[dgst] = p
}

View File

@@ -1,122 +0,0 @@
package contentutil
import (
"context"
"runtime"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// FromPusher wraps a remotes.Pusher as a content.Ingester. Concurrent writes
// for the same digest are serialized via a condition variable.
func FromPusher(p remotes.Pusher) content.Ingester {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	return &pushingIngester{
		mu:     &mu,
		c:      c,
		p:      p,
		active: map[digest.Digest]struct{}{},
	}
}

type pushingIngester struct {
	p remotes.Pusher

	mu     *sync.Mutex
	c      *sync.Cond
	active map[digest.Digest]struct{} // digests with an in-flight push
}

// Writer implements content.Ingester. desc.MediaType must be set for manifest blobs.
func (i *pushingIngester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
	var wOpts content.WriterOpts
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			return nil, err
		}
	}
	if wOpts.Ref == "" {
		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
	}
	st := time.Now()
	i.mu.Lock()
	// Wait (up to one hour) for any in-flight push of the same digest to
	// release the slot.
	for {
		if time.Since(st) > time.Hour {
			i.mu.Unlock()
			return nil, errors.Wrapf(errdefs.ErrUnavailable, "ref %v locked", wOpts.Desc.Digest)
		}
		if _, ok := i.active[wOpts.Desc.Digest]; ok {
			i.c.Wait()
		} else {
			break
		}
	}
	i.active[wOpts.Desc.Digest] = struct{}{}
	i.mu.Unlock()
	var once sync.Once
	// release frees the digest slot exactly once and wakes all waiters.
	release := func() {
		once.Do(func() {
			i.mu.Lock()
			delete(i.active, wOpts.Desc.Digest)
			i.c.Broadcast()
			i.mu.Unlock()
		})
	}
	// pusher requires desc.MediaType to determine the PUT URL, especially for manifest blobs.
	contentWriter, err := i.p.Push(ctx, wOpts.Desc)
	if err != nil {
		release()
		return nil, err
	}
	// Safety net: release the slot if the writer is garbage collected without
	// Commit/Close ever being called.
	runtime.SetFinalizer(contentWriter, func(_ content.Writer) {
		release()
	})
	return &writer{
		Writer:           contentWriter,
		contentWriterRef: wOpts.Ref,
		release:          release,
	}, nil
}
// writer decorates the pusher's content.Writer with the caller's ref and a
// release hook for the per-digest slot.
type writer struct {
	content.Writer          // returned from pusher.Push
	contentWriterRef string // ref passed for Writer()
	release          func()
}

// Status reports the underlying status with the caller's ref substituted.
func (w *writer) Status() (content.Status, error) {
	st, err := w.Writer.Status()
	if err != nil {
		return st, err
	}
	if w.contentWriterRef != "" {
		st.Ref = w.contentWriterRef
	}
	return st, nil
}

// Commit delegates to the underlying writer and always releases the slot.
func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	err := w.Writer.Commit(ctx, size, expected, opts...)
	if w.release != nil {
		w.release()
	}
	return err
}

// Close delegates to the underlying writer and always releases the slot.
func (w *writer) Close() error {
	err := w.Writer.Close()
	if w.release != nil {
		w.release()
	}
	return err
}

View File

@@ -1,98 +0,0 @@
package contentutil
import (
"context"
"net/http"
"sync"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/docker/docker/pkg/locker"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// ProviderFromRef resolves ref against its registry and returns the resolved
// descriptor together with a provider for fetching its content.
func ProviderFromRef(ref string) (ocispec.Descriptor, content.Provider, error) {
	remote := docker.NewResolver(docker.ResolverOptions{
		Client: http.DefaultClient,
	})
	name, desc, err := remote.Resolve(context.TODO(), ref)
	if err != nil {
		return ocispec.Descriptor{}, nil, err
	}
	fetcher, err := remote.Fetcher(context.TODO(), name)
	if err != nil {
		return ocispec.Descriptor{}, nil, err
	}
	return desc, FromFetcher(fetcher), nil
}

// IngesterFromRef returns an ingester that pushes written blobs to the
// registry behind ref.
func IngesterFromRef(ref string) (content.Ingester, error) {
	remote := docker.NewResolver(docker.ResolverOptions{
		Client: http.DefaultClient,
	})
	pusher, err := remote.Pusher(context.TODO(), ref)
	if err != nil {
		return nil, err
	}
	return &ingester{
		locker: locker.New(),
		pusher: pusher,
	}, nil
}

// ingester serializes pushes per ref using a keyed locker.
type ingester struct {
	locker *locker.Locker
	pusher remotes.Pusher
}
// Writer locks the ref and opens a push writer for the descriptor.
func (w *ingester) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
	var wo content.WriterOpts
	for _, o := range opts {
		if err := o(&wo); err != nil {
			return nil, err
		}
	}
	if wo.Ref == "" {
		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
	}
	w.locker.Lock(wo.Ref)
	var once sync.Once
	// unlock releases the per-ref lock exactly once.
	unlock := func() {
		once.Do(func() {
			w.locker.Unlock(wo.Ref)
		})
	}
	writer, err := w.pusher.Push(ctx, wo.Desc)
	if err != nil {
		unlock()
		return nil, err
	}
	return &lockedWriter{unlock: unlock, Writer: writer}, nil
}

// lockedWriter releases the per-ref lock when the write completes or is
// abandoned.
type lockedWriter struct {
	unlock func()
	content.Writer
}

func (w *lockedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	err := w.Writer.Commit(ctx, size, expected, opts...)
	// Only release on success; a failed commit keeps the lock until Close.
	if err == nil {
		w.unlock()
	}
	return err
}

func (w *lockedWriter) Close() error {
	err := w.Writer.Close()
	w.unlock()
	return err
}

View File

@@ -1,60 +0,0 @@
package entitlements
import (
"github.com/pkg/errors"
)
// Entitlement names a privileged capability a build may request.
type Entitlement string

const (
	EntitlementSecurityInsecure Entitlement = "security.insecure"
	EntitlementNetworkHost      Entitlement = "network.host"
)

// all is the set of entitlements known to this package.
var all = map[Entitlement]struct{}{
	EntitlementSecurityInsecure: {},
	EntitlementNetworkHost:      {},
}
// Parse returns s as an Entitlement, or an error when s does not name a
// known entitlement.
func Parse(s string) (Entitlement, error) {
	e := Entitlement(s)
	if _, known := all[e]; !known {
		return "", errors.Errorf("unknown entitlement %s", s)
	}
	return e, nil
}
// WhiteList builds a Set from allowed, validating each entry with Parse.
// When supported is non-nil, every allowed entitlement must also appear in
// supported (supported itself is validated by a recursive call).
func WhiteList(allowed, supported []Entitlement) (Set, error) {
	m := map[Entitlement]struct{}{}
	var supm Set
	if supported != nil {
		var err error
		supm, err = WhiteList(supported, nil)
		if err != nil { // should not happen
			return nil, err
		}
	}
	for _, e := range allowed {
		e, err := Parse(string(e))
		if err != nil {
			return nil, err
		}
		if supported != nil {
			if !supm.Allowed(e) {
				return nil, errors.Errorf("granting entitlement %s is not allowed by build daemon configuration", e)
			}
		}
		m[e] = struct{}{}
	}
	return Set(m), nil
}

// Set is a collection of granted entitlements.
type Set map[Entitlement]struct{}

// Allowed reports whether e has been granted.
func (s Set) Allowed(e Entitlement) bool {
	_, ok := s[e]
	return ok
}

View File

@@ -1,341 +0,0 @@
package flightcontrol
import (
"context"
"io"
"runtime"
"sort"
"sync"
"time"
"github.com/moby/buildkit/util/progress"
"github.com/pkg/errors"
)
// flightcontrol is like singleflight but with support for cancellation and
// nested progress reporting
var (
	errRetry        = errors.Errorf("retry")
	errRetryTimeout = errors.Errorf("exceeded retry timeout")
)

type contextKeyT string

// contextKey carries the shared progressState through context values.
var contextKey = contextKeyT("buildkit/util/flightcontrol.progress")

// Group is a flightcontrol synchronization group
type Group struct {
	mu sync.Mutex       // protects m
	m  map[string]*call // lazily initialized
}

// Do executes a context function synchronized by the key
func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (v interface{}, err error) {
	var backoff time.Duration
	for {
		v, err = g.do(ctx, key, fn)
		if err == nil || errors.Cause(err) != errRetry {
			return v, err
		}
		// backoff logic
		// errRetry means we raced with a finished call that was still being
		// cleaned up; back off exponentially and give up once the backoff
		// reaches 3 seconds.
		if backoff >= 3*time.Second {
			err = errors.Wrapf(errRetryTimeout, "flightcontrol")
			return v, err
		}
		runtime.Gosched()
		if backoff > 0 {
			time.Sleep(backoff)
			backoff *= 2
		} else {
			backoff = time.Millisecond
		}
	}
}
// do runs fn under key, joining an already in-flight call when one exists.
// Callers may receive errRetry (via call.wait) when they race with a call
// that has completed but not yet been removed from the map.
func (g *Group) do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (interface{}, error) {
	g.mu.Lock()
	if g.m == nil {
		g.m = make(map[string]*call)
	}
	if c, ok := g.m[key]; ok { // register 2nd waiter
		g.mu.Unlock()
		return c.wait(ctx)
	}
	c := newCall(fn)
	g.m[key] = c
	go func() {
		// cleanup after a caller has returned
		<-c.ready
		g.mu.Lock()
		delete(g.m, key)
		g.mu.Unlock()
		close(c.cleaned)
	}()
	g.mu.Unlock()
	return c.wait(ctx)
}
// call tracks one shared execution of fn and the callers waiting on it.
// It also implements context.Context (see Deadline/Done/Err/Value below) so
// fn observes cancellation only when every joined caller has cancelled.
type call struct {
	mu      sync.Mutex
	result  interface{}
	err     error
	ready   chan struct{} // closed when fn has finished
	cleaned chan struct{} // closed after the call is removed from Group.m

	ctx  *sharedContext
	ctxs []context.Context // contexts of all joined callers

	fn   func(ctx context.Context) (interface{}, error)
	once sync.Once // ensures run executes at most once

	closeProgressWriter func()
	progressState       *progressState
	progressCtx         context.Context
}

func newCall(fn func(ctx context.Context) (interface{}, error)) *call {
	c := &call{
		fn:            fn,
		ready:         make(chan struct{}),
		cleaned:       make(chan struct{}),
		progressState: newProgressState(),
	}
	ctx := newContext(c) // newSharedContext
	pr, pctx, closeProgressWriter := progress.NewContext(context.Background())
	c.progressCtx = pctx
	c.ctx = ctx
	c.closeProgressWriter = closeProgressWriter
	go c.progressState.run(pr) // TODO: remove this, wrap writer instead
	return c
}

// run executes fn exactly once (guarded by call.once) and publishes the
// result before closing ready.
func (c *call) run() {
	defer c.closeProgressWriter()
	ctx, cancel := context.WithCancel(c.ctx)
	defer cancel()
	v, err := c.fn(ctx)
	c.mu.Lock()
	c.result = v
	c.err = err
	c.mu.Unlock()
	close(c.ready)
}
// wait registers this caller on the shared call, starts the call if needed,
// and blocks until the result is ready or the caller's context is done.
func (c *call) wait(ctx context.Context) (v interface{}, err error) {
	c.mu.Lock()
	// detect case where caller has just returned, let it clean up before
	select {
	case <-c.ready: // could return if no error
		c.mu.Unlock()
		<-c.cleaned
		return nil, errRetry
	default:
	}
	pw, ok, ctx := progress.FromContext(ctx)
	if ok {
		c.progressState.add(pw)
	}
	c.ctxs = append(c.ctxs, ctx)
	c.mu.Unlock()
	go c.once.Do(c.run)
	select {
	case <-ctx.Done():
		select {
		case <-c.ctx.Done():
			// if this cancelled the last context, then wait for function to shut down
			// and don't accept any more callers
			<-c.ready
			return c.result, c.err
		default:
			// other callers are still active: detach our progress writer and
			// return this caller's cancellation without stopping fn
			if ok {
				c.progressState.close(pw)
			}
			return nil, ctx.Err()
		}
	case <-c.ready:
		return c.result, c.err // shared not implemented yet
	}
}
// Deadline implements context.Context for call: it reports the deadline of
// the first still-live caller context that has one.
func (c *call) Deadline() (deadline time.Time, ok bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, ctx := range c.ctxs {
		select {
		case <-ctx.Done():
		default:
			dl, ok := ctx.Deadline()
			if ok {
				return dl, ok
			}
		}
	}
	return time.Time{}, false
}

// Done implements context.Context; cancellation state is re-evaluated on
// each request via signal.
func (c *call) Done() <-chan struct{} {
	c.mu.Lock()
	c.ctx.signal()
	c.mu.Unlock()
	return c.ctx.done
}

// Err implements context.Context.
func (c *call) Err() error {
	select {
	case <-c.ctx.Done():
		return c.ctx.err
	default:
		return nil
	}
}

// Value exposes the shared progressState under contextKey and otherwise
// delegates to the progress context first, then the first caller context.
func (c *call) Value(key interface{}) interface{} {
	if key == contextKey {
		return c.progressState
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	ctx := c.progressCtx
	select {
	case <-ctx.Done():
	default:
		if v := ctx.Value(key); v != nil {
			return v
		}
	}
	if len(c.ctxs) > 0 {
		ctx = c.ctxs[0]
		select {
		case <-ctx.Done():
		default:
			if v := ctx.Value(key); v != nil {
				return v
			}
		}
	}
	return nil
}
// sharedContext is the cancellation backbone of a call: it becomes done only
// when every joined caller context is done.
type sharedContext struct {
	*call
	done chan struct{}
	err  error
}

func newContext(c *call) *sharedContext {
	return &sharedContext{call: c, done: make(chan struct{})}
}

// signal closes done once all registered caller contexts are cancelled.
// call with lock (c.mu must be held by the caller).
func (c *sharedContext) signal() {
	select {
	case <-c.done:
	default:
		var err error
		for _, ctx := range c.ctxs {
			select {
			case <-ctx.Done():
				err = ctx.Err()
			default:
				// at least one caller is still active; keep the shared
				// context alive
				return
			}
		}
		c.err = err
		close(c.done)
	}
}
// rawProgressWriter is the subset of progress writers that can receive
// already-materialized progress records.
type rawProgressWriter interface {
	WriteRawProgress(*progress.Progress) error
	Close() error
}

// progressState fans incoming progress out to all attached writers and
// replays retained records to writers that join late.
type progressState struct {
	mu      sync.Mutex
	items   map[string]*progress.Progress // last record per ID, kept for replay
	writers []rawProgressWriter
	done    bool // reader reached EOF; new writers are closed immediately
}

func newProgressState() *progressState {
	return &progressState{
		items: make(map[string]*progress.Progress),
	}
}

// run pumps records from pr to all attached writers until pr is exhausted.
func (ps *progressState) run(pr progress.Reader) {
	for {
		p, err := pr.Read(context.TODO())
		if err != nil {
			if err == io.EOF {
				ps.mu.Lock()
				ps.done = true
				ps.mu.Unlock()
				for _, w := range ps.writers {
					w.Close()
				}
			}
			return
		}
		ps.mu.Lock()
		for _, p := range p {
			for _, w := range ps.writers {
				w.WriteRawProgress(p)
			}
			ps.items[p.ID] = p
		}
		ps.mu.Unlock()
	}
}

// add attaches pw, first replaying retained records in timestamp order.
// Writers that do not implement rawProgressWriter are silently ignored.
func (ps *progressState) add(pw progress.Writer) {
	rw, ok := pw.(rawProgressWriter)
	if !ok {
		return
	}
	ps.mu.Lock()
	plist := make([]*progress.Progress, 0, len(ps.items))
	for _, p := range ps.items {
		plist = append(plist, p)
	}
	sort.Slice(plist, func(i, j int) bool {
		return plist[i].Timestamp.Before(plist[j].Timestamp)
	})
	for _, p := range plist {
		rw.WriteRawProgress(p)
	}
	if ps.done {
		rw.Close()
	} else {
		ps.writers = append(ps.writers, rw)
	}
	ps.mu.Unlock()
}

// close detaches pw and closes it.
func (ps *progressState) close(pw progress.Writer) {
	rw, ok := pw.(rawProgressWriter)
	if !ok {
		return
	}
	ps.mu.Lock()
	for i, w := range ps.writers {
		if w == rw {
			w.Close()
			ps.writers = append(ps.writers[:i], ps.writers[i+1:]...)
			break
		}
	}
	ps.mu.Unlock()
}

View File

@@ -1,196 +0,0 @@
package imageutil
import (
"context"
"encoding/json"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/moby/buildkit/util/leaseutil"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// ContentCache combines ingest and read access to blob content.
type ContentCache interface {
	content.Ingester
	content.Provider
}

// leasesMu guards leasesF, the cleanup funcs for leases created by Config.
var leasesMu sync.Mutex
var leasesF []func(context.Context) error

// CancelCacheLeases releases every lease registered by Config and clears the
// list.
func CancelCacheLeases() {
	leasesMu.Lock()
	for _, f := range leasesF {
		f(context.TODO())
	}
	leasesF = nil
	leasesMu.Unlock()
}
// Config resolves the image config for str, preferring the local content
// cache and falling back to the resolver. It returns the manifest digest and
// the raw config blob.
func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *specs.Platform) (digest.Digest, []byte, error) {
	// TODO: fix buildkit to take interface instead of struct
	var platform platforms.MatchComparer
	if p != nil {
		platform = platforms.Only(*p)
	} else {
		platform = platforms.Default()
	}
	ref, err := reference.Parse(str)
	if err != nil {
		return "", nil, errors.WithStack(err)
	}
	if leaseManager != nil {
		ctx2, done, err := leaseutil.WithLease(ctx, leaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary)
		if err != nil {
			return "", nil, errors.WithStack(err)
		}
		ctx = ctx2
		defer func() {
			// this lease is not deleted to allow other components to access manifest/config from cache. It will be deleted after 5 min deadline or on pruning inactive builder
			leasesMu.Lock()
			leasesF = append(leasesF, done)
			leasesMu.Unlock()
		}()
	}
	desc := specs.Descriptor{
		Digest: ref.Digest(),
	}
	// If the ref pins a digest, try to complete the descriptor from cache.
	if desc.Digest != "" {
		ra, err := cache.ReaderAt(ctx, desc)
		if err == nil {
			desc.Size = ra.Size()
			mt, err := DetectManifestMediaType(ra)
			if err == nil {
				desc.MediaType = mt
			}
		}
	}
	// use resolver if desc is incomplete
	if desc.MediaType == "" {
		_, desc, err = resolver.Resolve(ctx, ref.String())
		if err != nil {
			return "", nil, err
		}
	}
	fetcher, err := resolver.Fetcher(ctx, ref.String())
	if err != nil {
		return "", nil, err
	}
	// Docker schema1 manifests have no separate config blob; synthesize one.
	if desc.MediaType == images.MediaTypeDockerSchema1Manifest {
		return readSchema1Config(ctx, ref.String(), desc, fetcher, cache)
	}
	children := childrenConfigHandler(cache, platform)
	handlers := []images.Handler{
		remotes.FetchHandler(cache, fetcher),
		children,
	}
	if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil {
		return "", nil, err
	}
	config, err := images.Config(ctx, cache, desc, platform)
	if err != nil {
		return "", nil, err
	}
	dt, err := content.ReadBlob(ctx, cache, config)
	if err != nil {
		return "", nil, err
	}
	return desc.Digest, dt, nil
}
// childrenConfigHandler returns a handler that yields the config descriptor
// of a manifest, or the (platform-filtered) manifest descriptors of an index.
func childrenConfigHandler(provider content.Provider, platform platforms.MatchComparer) images.HandlerFunc {
	return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) {
		var descs []specs.Descriptor
		switch desc.MediaType {
		case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest:
			p, err := content.ReadBlob(ctx, provider, desc)
			if err != nil {
				return nil, err
			}
			// TODO(stevvooe): We just assume oci manifest, for now. There may be
			// subtle differences from the docker version.
			var manifest specs.Manifest
			if err := json.Unmarshal(p, &manifest); err != nil {
				return nil, err
			}
			descs = append(descs, manifest.Config)
		case images.MediaTypeDockerSchema2ManifestList, specs.MediaTypeImageIndex:
			p, err := content.ReadBlob(ctx, provider, desc)
			if err != nil {
				return nil, err
			}
			var index specs.Index
			if err := json.Unmarshal(p, &index); err != nil {
				return nil, err
			}
			if platform != nil {
				// keep only manifests matching the requested platform
				for _, d := range index.Manifests {
					if d.Platform == nil || platform.Match(*d.Platform) {
						descs = append(descs, d)
					}
				}
			} else {
				descs = append(descs, index.Manifests...)
			}
		case images.MediaTypeDockerSchema2Config, specs.MediaTypeImageConfig, docker.LegacyConfigMediaType:
			// childless data types.
			return nil, nil
		default:
			return nil, errors.Errorf("encountered unknown type %v; children may not be fetched", desc.MediaType)
		}
		return descs, nil
	}
}
// specs.MediaTypeImageManifest, // TODO: detect schema1/manifest-list

// DetectManifestMediaType reads the full blob behind ra and sniffs its
// manifest media type.
func DetectManifestMediaType(ra content.ReaderAt) (string, error) {
	// TODO: schema1
	dt := make([]byte, ra.Size())
	if _, err := ra.ReadAt(dt, 0); err != nil {
		return "", err
	}
	return DetectManifestBlobMediaType(dt)
}
// DetectManifestBlobMediaType sniffs the media type of a manifest JSON blob.
// An explicit mediaType field wins; otherwise the presence of a config field
// distinguishes a manifest from a manifest list.
func DetectManifestBlobMediaType(dt []byte) (string, error) {
	var mfst struct {
		MediaType string          `json:"mediaType"`
		Config    json.RawMessage `json:"config"`
	}
	if err := json.Unmarshal(dt, &mfst); err != nil {
		return "", err
	}
	switch {
	case mfst.MediaType != "":
		return mfst.MediaType, nil
	case mfst.Config != nil:
		return images.MediaTypeDockerSchema2Manifest, nil
	default:
		return images.MediaTypeDockerSchema2ManifestList, nil
	}
}

View File

@@ -1,87 +0,0 @@
package imageutil
import (
"context"
"encoding/json"
"io/ioutil"
"strings"
"time"
"github.com/containerd/containerd/remotes"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// readSchema1Config fetches a Docker schema1 manifest and synthesizes an
// image config from its v1Compatibility history entries.
// NOTE(review): the ref and cache parameters are unused in this body.
func readSchema1Config(ctx context.Context, ref string, desc specs.Descriptor, fetcher remotes.Fetcher, cache ContentCache) (digest.Digest, []byte, error) {
	rc, err := fetcher.Fetch(ctx, desc)
	if err != nil {
		return "", nil, err
	}
	defer rc.Close()
	dt, err := ioutil.ReadAll(rc)
	if err != nil {
		return "", nil, errors.Wrap(err, "failed to fetch schema1 manifest")
	}
	dt, err = convertSchema1ConfigMeta(dt)
	if err != nil {
		return "", nil, err
	}
	return desc.Digest, dt, nil
}

// convertSchema1ConfigMeta builds an image config JSON from the history
// entries embedded in a schema1 manifest; entries are copied in reverse
// order (see the index arithmetic below).
func convertSchema1ConfigMeta(in []byte) ([]byte, error) {
	type history struct {
		V1Compatibility string `json:"v1Compatibility"`
	}
	var m struct {
		History []history `json:"history"`
	}
	if err := json.Unmarshal(in, &m); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal schema1 manifest")
	}
	if len(m.History) == 0 {
		return nil, errors.Errorf("invalid schema1 manifest")
	}
	// The first history entry carries the full image config.
	var img specs.Image
	if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history")
	}
	img.RootFS = specs.RootFS{
		Type: "layers", // filled in by exporter
	}
	img.History = make([]specs.History, len(m.History))
	for i := range m.History {
		var h v1History
		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil {
			return nil, errors.Wrap(err, "failed to unmarshal history")
		}
		img.History[len(m.History)-i-1] = specs.History{
			Author:     h.Author,
			Comment:    h.Comment,
			Created:    &h.Created,
			CreatedBy:  strings.Join(h.ContainerConfig.Cmd, " "),
			EmptyLayer: (h.ThrowAway != nil && *h.ThrowAway) || (h.Size != nil && *h.Size == 0),
		}
	}
	dt, err := json.MarshalIndent(img, "", " ")
	if err != nil {
		return nil, errors.Wrap(err, "failed to marshal schema1 config")
	}
	return dt, nil
}

// v1History mirrors the subset of a schema1 v1Compatibility blob needed to
// build history entries.
type v1History struct {
	Author          string    `json:"author,omitempty"`
	Created         time.Time `json:"created"`
	Comment         string    `json:"comment,omitempty"`
	ThrowAway       *bool     `json:"throwaway,omitempty"`
	Size            *int      `json:"Size,omitempty"` // used before ThrowAway field
	ContainerConfig struct {
		Cmd []string `json:"Cmd,omitempty"`
	} `json:"container_config,omitempty"`
}

View File

@@ -1,75 +0,0 @@
package leaseutil
import (
"context"
"time"
"github.com/containerd/containerd/leases"
"github.com/containerd/containerd/namespaces"
)
// WithLease returns a context carrying a lease, creating a new lease when the
// context does not already carry one. The returned func deletes the created
// lease; it is a no-op when the context already had a lease.
func WithLease(ctx context.Context, ls leases.Manager, opts ...leases.Opt) (context.Context, func(context.Context) error, error) {
	_, ok := leases.FromContext(ctx)
	if ok {
		return ctx, func(context.Context) error {
			return nil
		}, nil
	}
	// Default: random ID with a 1-hour expiration; opts may override.
	l, err := ls.Create(ctx, append([]leases.Opt{leases.WithRandomID(), leases.WithExpiration(time.Hour)}, opts...)...)
	if err != nil {
		return nil, nil, err
	}
	ctx = leases.WithLease(ctx, l.ID)
	return ctx, func(ctx context.Context) error {
		return ls.Delete(ctx, l)
	}, nil
}

// MakeTemporary is a leases.Opt that labels the lease as temporary with the
// current UTC timestamp.
func MakeTemporary(l *leases.Lease) error {
	if l.Labels == nil {
		l.Labels = map[string]string{}
	}
	l.Labels["buildkit/lease.temporary"] = time.Now().UTC().Format(time.RFC3339Nano)
	return nil
}
// WithNamespace wraps lm so that every operation runs inside namespace ns.
func WithNamespace(lm leases.Manager, ns string) leases.Manager {
	return &nsLM{manager: lm, ns: ns}
}

// nsLM is a leases.Manager that pins a namespace on the context of each call.
type nsLM struct {
	manager leases.Manager
	ns      string
}

func (l *nsLM) Create(ctx context.Context, opts ...leases.Opt) (leases.Lease, error) {
	ctx = namespaces.WithNamespace(ctx, l.ns)
	return l.manager.Create(ctx, opts...)
}

func (l *nsLM) Delete(ctx context.Context, lease leases.Lease, opts ...leases.DeleteOpt) error {
	ctx = namespaces.WithNamespace(ctx, l.ns)
	return l.manager.Delete(ctx, lease, opts...)
}

func (l *nsLM) List(ctx context.Context, filters ...string) ([]leases.Lease, error) {
	ctx = namespaces.WithNamespace(ctx, l.ns)
	return l.manager.List(ctx, filters...)
}

func (l *nsLM) AddResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error {
	ctx = namespaces.WithNamespace(ctx, l.ns)
	return l.manager.AddResource(ctx, lease, resource)
}

func (l *nsLM) DeleteResource(ctx context.Context, lease leases.Lease, resource leases.Resource) error {
	ctx = namespaces.WithNamespace(ctx, l.ns)
	return l.manager.DeleteResource(ctx, lease, resource)
}

func (l *nsLM) ListResources(ctx context.Context, lease leases.Lease) ([]leases.Resource, error) {
	ctx = namespaces.WithNamespace(ctx, l.ns)
	return l.manager.ListResources(ctx, lease)
}

View File

@@ -1,53 +0,0 @@
package logs
import (
"context"
"io"
"os"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/progress"
"github.com/pkg/errors"
)
// NewLogStreams returns write closers for a process's stdout (stream 1) and
// stderr (stream 2); data written to them is forwarded as progress log
// events on ctx, and additionally mirrored to this process's own
// stdout/stderr when printOutput is set.
func NewLogStreams(ctx context.Context, printOutput bool) (io.WriteCloser, io.WriteCloser) {
    return newStreamWriter(ctx, 1, printOutput), newStreamWriter(ctx, 2, printOutput)
}

// newStreamWriter builds a streamWriter bound to the progress writer carried
// by ctx (a no-op writer if ctx has none).
func newStreamWriter(ctx context.Context, stream int, printOutput bool) io.WriteCloser {
    pw, _, _ := progress.FromContext(ctx)
    return &streamWriter{
        pw:          pw,
        stream:      stream,
        printOutput: printOutput,
    }
}

// streamWriter forwards writes to a progress.Writer as VertexLog events.
type streamWriter struct {
    pw          progress.Writer // destination for log progress events
    stream      int             // 1 = stdout, 2 = stderr
    printOutput bool            // also mirror to the local stdio stream
}
// Write records dt as a client.VertexLog event on the progress writer and,
// when printOutput is set, mirrors it to the local stdout/stderr matching
// the stream number.
//
// The data is copied before being handed off because the progress pipeline
// retains it, while the io.Writer contract allows the caller to reuse dt.
func (sw *streamWriter) Write(dt []byte) (int, error) {
    // Previously the error from the progress write was silently dropped, so
    // writing to a closed progress writer reported success. Propagate it.
    if err := sw.pw.Write(identity.NewID(), client.VertexLog{
        Stream: sw.stream,
        Data:   append([]byte{}, dt...),
    }); err != nil {
        return 0, err
    }
    if sw.printOutput {
        switch sw.stream {
        case 1:
            return os.Stdout.Write(dt)
        case 2:
            return os.Stderr.Write(dt)
        default:
            return 0, errors.Errorf("invalid stream %d", sw.stream)
        }
    }
    return len(dt), nil
}

// Close closes the underlying progress writer.
func (sw *streamWriter) Close() error {
    return sw.pw.Close()
}

View File

@@ -1,77 +0,0 @@
package progress
import (
"context"
"io"
"sync"
)
// MultiReader fans a single progress Reader out to any number of derived
// readers created via Reader.
type MultiReader struct {
    mu          sync.Mutex
    main        Reader // the source all derived readers observe
    initialized bool   // whether the pump goroutine (handle) has started
    done        chan struct{}
    writers     map[*progressWriter]func() // per-reader writer -> its closer
}

// NewMultiReader wraps pr so multiple consumers can read its progress.
func NewMultiReader(pr Reader) *MultiReader {
    mr := &MultiReader{
        main:    pr,
        writers: make(map[*progressWriter]func()),
        done:    make(chan struct{}),
    }
    return mr
}

// Reader returns a new Reader that receives every progress item pumped from
// the main reader. The derived reader is detached when ctx is cancelled or
// when the MultiReader finishes. The pump goroutine is started lazily on the
// first call; its error return is discarded (see handle).
func (mr *MultiReader) Reader(ctx context.Context) Reader {
    mr.mu.Lock()
    defer mr.mu.Unlock()
    // Build a fresh progress pipe and grab its writer so handle can feed it.
    pr, ctx, closeWriter := NewContext(ctx)
    pw, _, ctx := FromContext(ctx)
    w := pw.(*progressWriter)
    mr.writers[w] = closeWriter
    go func() {
        select {
        case <-ctx.Done():
        case <-mr.done:
        }
        mr.mu.Lock()
        defer mr.mu.Unlock()
        delete(mr.writers, w)
    }()
    if !mr.initialized {
        go mr.handle()
        mr.initialized = true
    }
    return pr
}
// handle pumps progress items from the main reader into every registered
// writer until the main reader returns io.EOF, at which point all writers
// are closed and their close callbacks invoked. Runs on its own goroutine;
// a non-EOF read error terminates the pump (the returned error is discarded
// by the `go` statement in Reader).
func (mr *MultiReader) handle() error {
    for {
        p, err := mr.main.Read(context.TODO())
        if err != nil {
            if err == io.EOF {
                mr.mu.Lock()
                for w, c := range mr.writers {
                    w.Close()
                    c()
                }
                mr.mu.Unlock()
                return nil
            }
            return err
        }
        mr.mu.Lock()
        // Fan each item out to every currently-attached writer.
        for _, p := range p {
            for w := range mr.writers {
                w.writeRawProgress(p)
            }
        }
        mr.mu.Unlock()
    }
}

View File

@@ -1,105 +0,0 @@
package progress
import (
"sort"
"sync"
"time"
)
// rawProgressWriter is the subset of writer behavior MultiWriter requires:
// accepting fully-formed Progress items and being closable.
type rawProgressWriter interface {
    WriteRawProgress(*Progress) error
    Close() error
}

// MultiWriter broadcasts progress items to a dynamic set of writers and
// replays all previously written items to writers added later.
type MultiWriter struct {
    mu      sync.Mutex
    items   []*Progress // history, replayed to late-joining writers
    writers map[rawProgressWriter]struct{}
    done    bool
    meta    map[string]interface{} // metadata merged into each item
}

// NewMultiWriter constructs an empty MultiWriter, applying any WriterOptions
// (e.g. WithMetadata) to it.
func NewMultiWriter(opts ...WriterOption) *MultiWriter {
    mw := &MultiWriter{
        writers: map[rawProgressWriter]struct{}{},
        meta:    map[string]interface{}{},
    }
    for _, o := range opts {
        o(mw)
    }
    return mw
}
// Add registers pw as a destination. The full history of items written so
// far is replayed to it, in timestamp order, before it starts receiving live
// items. Writers that do not implement rawProgressWriter are ignored.
func (ps *MultiWriter) Add(pw Writer) {
    rw, ok := pw.(rawProgressWriter)
    if !ok {
        return
    }
    ps.mu.Lock()
    // Copy the history so sorting does not disturb ps.items' order.
    plist := make([]*Progress, 0, len(ps.items))
    for _, p := range ps.items {
        plist = append(plist, p)
    }
    sort.Slice(plist, func(i, j int) bool {
        return plist[i].Timestamp.Before(plist[j].Timestamp)
    })
    for _, p := range plist {
        rw.WriteRawProgress(p)
    }
    ps.writers[rw] = struct{}{}
    ps.mu.Unlock()
}

// Delete detaches pw; it receives no further items.
func (ps *MultiWriter) Delete(pw Writer) {
    rw, ok := pw.(rawProgressWriter)
    if !ok {
        return
    }
    ps.mu.Lock()
    delete(ps.writers, rw)
    ps.mu.Unlock()
}

// Write wraps v in a Progress item stamped with the current time and this
// writer's metadata, then broadcasts it.
func (ps *MultiWriter) Write(id string, v interface{}) error {
    p := &Progress{
        ID:        id,
        Timestamp: time.Now(),
        Sys:       v,
        meta:      ps.meta,
    }
    return ps.WriteRawProgress(p)
}
// WriteRawProgress merges this writer's metadata into p (the item's own keys
// win on conflict) and broadcasts it. The merge allocates a fresh map so the
// caller's original meta map is never mutated.
func (ps *MultiWriter) WriteRawProgress(p *Progress) error {
    meta := p.meta
    if len(ps.meta) > 0 {
        meta = map[string]interface{}{}
        for k, v := range p.meta {
            meta[k] = v
        }
        for k, v := range ps.meta {
            if _, ok := meta[k]; !ok {
                meta[k] = v
            }
        }
    }
    p.meta = meta
    return ps.writeRawProgress(p)
}

// writeRawProgress appends p to the replay history and forwards it to every
// registered writer, stopping at the first write error.
func (ps *MultiWriter) writeRawProgress(p *Progress) error {
    ps.mu.Lock()
    defer ps.mu.Unlock()
    ps.items = append(ps.items, p)
    for w := range ps.writers {
        if err := w.WriteRawProgress(p); err != nil {
            return err
        }
    }
    return nil
}

// Close is a no-op; the MultiWriter itself holds no resources.
func (ps *MultiWriter) Close() error {
    return nil
}

View File

@@ -1,256 +0,0 @@
package progress
import (
"context"
"io"
"sort"
"sync"
"time"
"github.com/pkg/errors"
)
// Progress package provides utility functions for using the context to capture
// progress of a running function. All progress items written contain an ID
// that is used to collapse unread messages.
// contextKeyT is a private key type to avoid collisions in context values.
type contextKeyT string

var contextKey = contextKeyT("buildkit/util/progress")

// FromContext returns a progress writer from a context. The second return
// value reports whether ctx actually carried a writer; if not, a no-op
// writer is returned. For a *progressWriter a derived child writer is
// created (inheriting metadata) and stored back into the returned context.
func FromContext(ctx context.Context, opts ...WriterOption) (Writer, bool, context.Context) {
    v := ctx.Value(contextKey)
    pw, ok := v.(*progressWriter)
    if !ok {
        if pw, ok := v.(*MultiWriter); ok {
            return pw, true, ctx
        }
        return &noOpWriter{}, false, ctx
    }
    pw = newWriter(pw)
    for _, o := range opts {
        o(pw)
    }
    ctx = context.WithValue(ctx, contextKey, pw)
    return pw, true, ctx
}

// WriterOption mutates a Writer at construction time (see WithMetadata).
type WriterOption func(Writer)

// NewContext returns a new context and a progress reader that captures all
// progress items written to this context. Last returned parameter is a closer
// function to signal that no new writes will happen to this context.
func NewContext(ctx context.Context) (Reader, context.Context, func()) {
    pr, pw, cancel := pipe()
    ctx = WithProgress(ctx, pw)
    return pr, ctx, cancel
}

// WithProgress stores pw in ctx so FromContext can retrieve it.
func WithProgress(ctx context.Context, pw Writer) context.Context {
    return context.WithValue(ctx, contextKey, pw)
}

// WithMetadata returns a WriterOption attaching a key/value pair that is
// merged into every Progress item the writer emits.
func WithMetadata(key string, val interface{}) WriterOption {
    return func(w Writer) {
        if pw, ok := w.(*progressWriter); ok {
            pw.meta[key] = val
        }
        if pw, ok := w.(*MultiWriter); ok {
            pw.meta[key] = val
        }
    }
}

// Writer emits progress items keyed by an ID that collapses unread updates.
type Writer interface {
    Write(id string, value interface{}) error
    Close() error
}

// Reader yields batches of progress items, blocking until data or EOF.
type Reader interface {
    Read(context.Context) ([]*Progress, error)
}
// Progress is one progress event. Items with the same ID supersede each
// other: a reader only sees the latest unread item per ID.
type Progress struct {
    ID        string
    Timestamp time.Time
    Sys       interface{} // payload; type depends on the producer
    // meta carries writer-attached metadata, exposed via Meta.
    meta map[string]interface{}
}

// Status is a common Sys payload describing a long-running action.
type Status struct {
    Action    string
    Current   int
    Total     int
    Started   *time.Time
    Completed *time.Time
}

// progressReader is the reading end of the pipe created by pipe(). dirty
// holds the latest unread item per ID; cond signals new data or shutdown.
type progressReader struct {
    ctx     context.Context
    cond    *sync.Cond
    mu      sync.Mutex
    writers map[*progressWriter]struct{} // writers still attached
    dirty   map[string]*Progress
}
// Read blocks until at least one unread progress item exists, then returns
// all pending items sorted by timestamp. It returns ctx.Err() if the
// caller's context is cancelled, and io.EOF once the pipe is cancelled and
// no writers remain attached.
func (pr *progressReader) Read(ctx context.Context) ([]*Progress, error) {
    // Wake the cond.Wait below if the caller's context is cancelled, so the
    // loop can observe ctx.Done(). The done channel stops this helper when
    // Read returns normally.
    done := make(chan struct{})
    defer close(done)
    go func() {
        select {
        case <-done:
        case <-ctx.Done():
            pr.mu.Lock()
            pr.cond.Broadcast()
            pr.mu.Unlock()
        }
    }()
    pr.mu.Lock()
    for {
        select {
        case <-ctx.Done():
            pr.mu.Unlock()
            return nil, ctx.Err()
        default:
        }
        dmap := pr.dirty
        if len(dmap) == 0 {
            // Nothing pending: EOF only once the pipe is cancelled AND all
            // writers have closed; otherwise wait for a broadcast.
            select {
            case <-pr.ctx.Done():
                if len(pr.writers) == 0 {
                    pr.mu.Unlock()
                    return nil, io.EOF
                }
            default:
            }
            pr.cond.Wait()
            continue
        }
        // Take ownership of the pending set and start a fresh one.
        pr.dirty = make(map[string]*Progress)
        pr.mu.Unlock()
        out := make([]*Progress, 0, len(dmap))
        for _, p := range dmap {
            out = append(out, p)
        }
        sort.Slice(out, func(i, j int) bool {
            return out[i].Timestamp.Before(out[j].Timestamp)
        })
        return out, nil
    }
}
// append attaches pw to the reader so EOF is withheld until pw closes.
// Attaching is refused once the pipe's context is done.
func (pr *progressReader) append(pw *progressWriter) {
    pr.mu.Lock()
    defer pr.mu.Unlock()
    select {
    case <-pr.ctx.Done():
        return
    default:
        pr.writers[pw] = struct{}{}
    }
}

// pipe builds a connected reader/writer pair plus a cancel func that marks
// the pipe as finished (leading Read to return io.EOF once drained).
func pipe() (*progressReader, *progressWriter, func()) {
    ctx, cancel := context.WithCancel(context.Background())
    pr := &progressReader{
        ctx:     ctx,
        writers: make(map[*progressWriter]struct{}),
        dirty:   make(map[string]*Progress),
    }
    pr.cond = sync.NewCond(&pr.mu)
    // Wake any blocked Read when the pipe is cancelled.
    go func() {
        <-ctx.Done()
        pr.mu.Lock()
        pr.cond.Broadcast()
        pr.mu.Unlock()
    }()
    pw := &progressWriter{
        reader: pr,
    }
    return pr, pw, cancel
}
// newWriter derives a child writer sharing pw's reader, with a copy of pw's
// metadata (so later WithMetadata calls don't affect the parent), and
// registers it with the reader.
func newWriter(pw *progressWriter) *progressWriter {
    meta := make(map[string]interface{})
    for k, v := range pw.meta {
        meta[k] = v
    }
    pw = &progressWriter{
        reader: pw.reader,
        meta:   meta,
    }
    pw.reader.append(pw)
    return pw
}

// progressWriter is the writing end of a progress pipe.
type progressWriter struct {
    done   bool // set by Close; writes after Close fail
    reader *progressReader
    meta   map[string]interface{}
}
// Write publishes v under id, stamped with the current time and this
// writer's metadata. Fails once the writer has been closed.
func (pw *progressWriter) Write(id string, v interface{}) error {
    if pw.done {
        return errors.Errorf("writing %s to closed progress writer", id)
    }
    return pw.writeRawProgress(&Progress{
        ID:        id,
        Timestamp: time.Now(),
        Sys:       v,
        meta:      pw.meta,
    })
}

// WriteRawProgress forwards p, first merging this writer's metadata into it
// (p's own keys win). A fresh map is allocated so p's original meta map is
// not mutated.
func (pw *progressWriter) WriteRawProgress(p *Progress) error {
    meta := p.meta
    if len(pw.meta) > 0 {
        meta = map[string]interface{}{}
        for k, v := range p.meta {
            meta[k] = v
        }
        for k, v := range pw.meta {
            if _, ok := meta[k]; !ok {
                meta[k] = v
            }
        }
    }
    p.meta = meta
    return pw.writeRawProgress(p)
}

// writeRawProgress records p as the latest item for its ID and wakes any
// blocked reader.
func (pw *progressWriter) writeRawProgress(p *Progress) error {
    pw.reader.mu.Lock()
    pw.reader.dirty[p.ID] = p
    pw.reader.cond.Broadcast()
    pw.reader.mu.Unlock()
    return nil
}

// Close detaches the writer from the reader (allowing EOF) and marks it
// done. NOTE(review): done is read by Write and written here without
// synchronization — appears to assume a single goroutine uses each writer;
// confirm before sharing a writer across goroutines.
func (pw *progressWriter) Close() error {
    pw.reader.mu.Lock()
    delete(pw.reader.writers, pw)
    pw.reader.mu.Unlock()
    pw.reader.cond.Broadcast()
    pw.done = true
    return nil
}
// Meta returns the writer-attached metadata value stored under key.
func (p *Progress) Meta(key string) (interface{}, bool) {
    v, ok := p.meta[key]
    return v, ok
}

// noOpWriter is returned by FromContext when the context carries no writer;
// it discards everything.
type noOpWriter struct{}

func (pw *noOpWriter) Write(_ string, _ interface{}) error {
    return nil
}

func (pw *noOpWriter) Close() error {
    return nil
}

View File

@@ -1,417 +0,0 @@
package pull
import (
"context"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/containerd/containerd/remotes/docker/schema1"
"github.com/moby/buildkit/snapshot"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/util/progress"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Puller pulls a single image reference into a content store, resolving it
// at most once (Resolve) and fetching its blobs (Pull).
type Puller struct {
    Snapshotter snapshot.Snapshotter
    ContentStore content.Store
    Applier     diff.Applier
    Src         reference.Spec
    Platform    *ocispec.Platform // nil means the default platform
    // See NewResolver()
    Resolver remotes.Resolver

    // Resolution state, populated once by Resolve.
    resolveOnce sync.Once
    desc        ocispec.Descriptor
    ref         string
    resolveErr  error
}

// Pulled describes the outcome of Pull: the resolved ref/descriptor, the
// image's layer blobs, and the non-layer (metadata) blobs that were fetched.
type Pulled struct {
    Ref           string
    Descriptor    ocispec.Descriptor
    Layers        []ocispec.Descriptor
    MetadataBlobs []ocispec.Descriptor
}
// Resolve determines the canonical ref and descriptor for p.Src, running at
// most once; subsequent calls return the memoized result (including any
// memoized error). If Src pins a digest already present in the content
// store, the remote resolver is skipped entirely.
func (p *Puller) Resolve(ctx context.Context) (string, ocispec.Descriptor, error) {
    p.resolveOnce.Do(func() {
        resolveProgressDone := oneOffProgress(ctx, "resolve "+p.Src.String())

        desc := ocispec.Descriptor{
            Digest: p.Src.Digest(),
        }
        if desc.Digest != "" {
            info, err := p.ContentStore.Info(ctx, desc.Digest)
            if err == nil {
                // Local fast path: fill in size/media type from the store.
                desc.Size = info.Size
                p.ref = p.Src.String()
                ra, err := p.ContentStore.ReaderAt(ctx, desc)
                if err == nil {
                    mt, err := imageutil.DetectManifestMediaType(ra)
                    if err == nil {
                        desc.MediaType = mt
                        p.desc = desc
                        resolveProgressDone(nil)
                        return
                    }
                }
            }
        }

        // Fall back to the remote resolver.
        ref, desc, err := p.Resolver.Resolve(ctx, p.Src.String())
        if err != nil {
            p.resolveErr = err
            resolveProgressDone(err)
            return
        }
        p.desc = desc
        p.ref = ref
        resolveProgressDone(nil)
    })
    return p.ref, p.desc, p.resolveErr
}
// Pull fetches all blobs for the resolved image (for the requested or
// default platform), handling legacy schema1 manifests by converting them,
// and returns the layer descriptors (annotated with uncompressed digests)
// plus the non-layer metadata blobs.
func (p *Puller) Pull(ctx context.Context) (*Pulled, error) {
    if _, _, err := p.Resolve(ctx); err != nil {
        return nil, err
    }

    var platform platforms.MatchComparer
    if p.Platform != nil {
        platform = platforms.Only(*p.Platform)
    } else {
        platform = platforms.Default()
    }

    // Progress reporting for the duration of the fetch.
    ongoing := newJobs(p.ref)
    pctx, stopProgress := context.WithCancel(ctx)
    go showProgress(pctx, ongoing, p.ContentStore)

    fetcher, err := p.Resolver.Fetcher(ctx, p.ref)
    if err != nil {
        stopProgress()
        return nil, err
    }

    // TODO: need a wrapper snapshot interface that combines content
    // and snapshots as 1) buildkit shouldn't have a dependency on contentstore
    // or 2) cachemanager should manage the contentstore
    handlers := []images.Handler{
        images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
            ongoing.add(desc)
            return nil, nil
        }),
    }

    var schema1Converter *schema1.Converter
    if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
        schema1Converter = schema1.NewConverter(p.ContentStore, fetcher)
        handlers = append(handlers, schema1Converter)
    } else {
        // Get all the children for a descriptor
        childrenHandler := images.ChildrenHandler(p.ContentStore)
        // Filter the children by the platform
        childrenHandler = images.FilterPlatforms(childrenHandler, platform)
        // Limit manifests pulled to the best match in an index
        childrenHandler = images.LimitManifests(childrenHandler, platform, 1)

        dslHandler, err := docker.AppendDistributionSourceLabel(p.ContentStore, p.ref)
        if err != nil {
            stopProgress()
            return nil, err
        }
        handlers = append(handlers,
            remotes.FetchHandler(p.ContentStore, fetcher),
            childrenHandler,
            dslHandler,
        )
    }

    if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil {
        stopProgress()
        return nil, err
    }
    stopProgress()

    // unusedBlobs collects blobs fetched for schema1 conversion that the
    // converted image no longer references. NOTE(review): it is computed but
    // not returned from this view of the code.
    var usedBlobs, unusedBlobs []ocispec.Descriptor

    if schema1Converter != nil {
        ongoing.remove(p.desc) // Not left in the content store so this is sufficient.
        p.desc, err = schema1Converter.Convert(ctx)
        if err != nil {
            return nil, err
        }
        ongoing.add(p.desc)

        var mu sync.Mutex // images.Dispatch calls handlers in parallel
        allBlobs := make(map[digest.Digest]ocispec.Descriptor)
        for _, j := range ongoing.added {
            allBlobs[j.Digest] = j.Descriptor
        }

        // Walk the converted image; anything visited is "used".
        handlers := []images.Handler{
            images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
                mu.Lock()
                defer mu.Unlock()
                usedBlobs = append(usedBlobs, desc)
                delete(allBlobs, desc.Digest)
                return nil, nil
            }),
            images.FilterPlatforms(images.ChildrenHandler(p.ContentStore), platform),
        }

        if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil {
            return nil, err
        }

        for _, j := range allBlobs {
            unusedBlobs = append(unusedBlobs, j)
        }
    } else {
        for _, j := range ongoing.added {
            usedBlobs = append(usedBlobs, j.Descriptor)
        }
    }

    // split all pulled data to layers and rest. layers remain roots and are deleted with snapshots. rest will be linked to layers.
    var notLayerBlobs []ocispec.Descriptor
    var layerBlobs []ocispec.Descriptor
    for _, j := range usedBlobs {
        switch j.MediaType {
        case ocispec.MediaTypeImageLayer, images.MediaTypeDockerSchema2Layer, ocispec.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip, images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip:
            layerBlobs = append(layerBlobs, j)
        default:
            notLayerBlobs = append(notLayerBlobs, j)
        }
    }

    layers, err := getLayers(ctx, p.ContentStore, p.desc, platform)
    if err != nil {
        return nil, err
    }

    return &Pulled{
        Ref:           p.ref,
        Descriptor:    p.desc,
        Layers:        layers,
        MetadataBlobs: notLayerBlobs,
    }, nil
}
// getLayers returns the manifest's layer descriptors for desc, each
// annotated with its uncompressed digest ("containerd.io/uncompressed")
// taken from the image config's rootfs diff IDs. Errors if the manifest and
// rootfs disagree on the layer count.
func getLayers(ctx context.Context, provider content.Provider, desc ocispec.Descriptor, platform platforms.MatchComparer) ([]ocispec.Descriptor, error) {
    manifest, err := images.Manifest(ctx, provider, desc, platform)
    if err != nil {
        return nil, errors.WithStack(err)
    }
    image := images.Image{Target: desc}
    diffIDs, err := image.RootFS(ctx, provider, platform)
    if err != nil {
        return nil, errors.Wrap(err, "failed to resolve rootfs")
    }
    if len(diffIDs) != len(manifest.Layers) {
        return nil, errors.Errorf("mismatched image rootfs and manifest layers %+v %+v", diffIDs, manifest.Layers)
    }
    layers := make([]ocispec.Descriptor, len(diffIDs))
    for i := range diffIDs {
        desc := manifest.Layers[i]
        if desc.Annotations == nil {
            desc.Annotations = map[string]string{}
        }
        desc.Annotations["containerd.io/uncompressed"] = diffIDs[i].String()
        layers[i] = desc
    }
    return layers, nil
}
// showProgress polls the content store every 150ms and translates fetch
// status for each known job into progress events on ctx, until ctx is
// cancelled (one final pass runs after cancellation to mark items done).
func showProgress(ctx context.Context, ongoing *jobs, cs content.Store) {
    var (
        ticker   = time.NewTicker(150 * time.Millisecond)
        statuses = map[string]statusInfo{}
        done     bool
    )
    defer ticker.Stop()
    pw, _, ctx := progress.FromContext(ctx)
    defer pw.Close()

    for {
        select {
        case <-ticker.C:
        case <-ctx.Done():
            done = true
        }

        resolved := "resolved"
        if !ongoing.isResolved() {
            resolved = "resolving"
        }
        statuses[ongoing.name] = statusInfo{
            Ref:    ongoing.name,
            Status: resolved,
        }

        // Collect in-flight download statuses from the content store.
        actives := make(map[string]statusInfo)
        if !done {
            active, err := cs.ListStatuses(ctx, "")
            if err != nil {
                // log.G(ctx).WithError(err).Error("active check failed")
                continue
            }
            // update status of active entries!
            for _, active := range active {
                actives[active.Ref] = statusInfo{
                    Ref:       active.Ref,
                    Status:    "downloading",
                    Offset:    active.Offset,
                    Total:     active.Total,
                    StartedAt: active.StartedAt,
                    UpdatedAt: active.UpdatedAt,
                }
            }
        }

        // now, update the items in jobs that are not in active
        for _, j := range ongoing.jobs() {
            refKey := remotes.MakeRefKey(ctx, j.Descriptor)
            if a, ok := actives[refKey]; ok {
                started := j.started
                pw.Write(j.Digest.String(), progress.Status{
                    Action:  a.Status,
                    Total:   int(a.Total),
                    Current: int(a.Offset),
                    Started: &started,
                })
                continue
            }
            if !j.done {
                info, err := cs.Info(context.TODO(), j.Digest)
                if err != nil {
                    if errdefs.IsNotFound(err) {
                        pw.Write(j.Digest.String(), progress.Status{
                            Action: "waiting",
                        })
                        continue
                    }
                } else {
                    j.done = true
                }

                // NOTE(review): if Info failed with a non-NotFound error and
                // done is set, info here is the zero value, so CreatedAt/Size
                // below are zero — confirm whether that is acceptable.
                if done || j.done {
                    started := j.started
                    createdAt := info.CreatedAt
                    pw.Write(j.Digest.String(), progress.Status{
                        Action:    "done",
                        Current:   int(info.Size),
                        Total:     int(info.Size),
                        Completed: &createdAt,
                        Started:   &started,
                    })
                }
            }
        }
        if done {
            return
        }
    }
}
// jobs provides a way of identifying the download keys for a particular task
// encountering during the pull walk.
//
// This is very minimal and will probably be replaced with something more
// featured.
// jobs tracks the blob descriptors encountered during a pull, keyed by
// digest, so progress can be reported per blob.
type jobs struct {
    name     string
    added    map[digest.Digest]*job
    mu       sync.Mutex
    resolved bool
}

// job is one tracked blob plus its progress state.
type job struct {
    ocispec.Descriptor
    done    bool
    started time.Time
}

// newJobs returns an empty job tracker named after the image ref.
func newJobs(name string) *jobs {
    return &jobs{
        name:  name,
        added: make(map[digest.Digest]*job),
    }
}

// add starts tracking desc; duplicates (same digest) are ignored so the
// original start time is kept.
func (j *jobs) add(desc ocispec.Descriptor) {
    j.mu.Lock()
    defer j.mu.Unlock()

    if _, ok := j.added[desc.Digest]; ok {
        return
    }
    j.added[desc.Digest] = &job{
        Descriptor: desc,
        started:    time.Now(),
    }
}

// remove stops tracking desc.
func (j *jobs) remove(desc ocispec.Descriptor) {
    j.mu.Lock()
    defer j.mu.Unlock()
    delete(j.added, desc.Digest)
}

// jobs returns a snapshot slice of the tracked jobs (shared pointers).
func (j *jobs) jobs() []*job {
    j.mu.Lock()
    defer j.mu.Unlock()

    descs := make([]*job, 0, len(j.added))
    for _, j := range j.added {
        descs = append(descs, j)
    }
    return descs
}

// isResolved reports whether resolution has been marked complete.
func (j *jobs) isResolved() bool {
    j.mu.Lock()
    defer j.mu.Unlock()
    return j.resolved
}

// statusInfo mirrors a content-store fetch status for progress display.
type statusInfo struct {
    Ref       string
    Status    string
    Offset    int64
    Total     int64
    StartedAt time.Time
    UpdatedAt time.Time
}

// oneOffProgress emits a "started" progress item for id and returns a
// completion callback that stamps the completion time, closes the writer,
// and passes through the error it is given.
func oneOffProgress(ctx context.Context, id string) func(err error) error {
    pw, _, _ := progress.FromContext(ctx)
    now := time.Now()
    st := progress.Status{
        Started: &now,
    }
    pw.Write(id, st)
    return func(err error) error {
        // TODO: set error on status
        now := time.Now()
        st.Completed = &now
        pw.Write(id, st)
        pw.Close()
        return err
    }
}

View File

@@ -1,178 +0,0 @@
package pull
import (
"context"
"sync"
"sync/atomic"
"time"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
distreference "github.com/docker/distribution/reference"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/source"
"github.com/moby/buildkit/util/resolver"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// cache is the process-wide resolver cache, keyed by repository+session.
var cache *resolverCache

func init() {
    cache = newResolverCache()
}

// NewResolver returns a resolver for ref, reusing a cached one when
// available, optionally wrapped with local image-store fallback per mode.
func NewResolver(ctx context.Context, hosts docker.RegistryHosts, sm *session.Manager, imageStore images.Store, mode source.ResolveMode, ref string) remotes.Resolver {
    if res := cache.Get(ctx, ref); res != nil {
        return withLocal(res, imageStore, mode)
    }
    r := resolver.New(ctx, hosts, sm)
    r = cache.Add(ctx, ref, r)
    return withLocal(r, imageStore, mode)
}

// EnsureManifestRequested triggers a remote Resolve for ref if the
// underlying cached resolver has never performed one (e.g. everything so far
// was satisfied locally). The Resolve result and error are deliberately
// discarded — only the registry side effect matters.
func EnsureManifestRequested(ctx context.Context, res remotes.Resolver, ref string) {
    rr := res
    lr, ok := res.(withLocalResolver)
    if ok {
        if atomic.LoadInt64(&lr.counter) > 0 {
            return
        }
        rr = lr.Resolver
    }
    cr, ok := rr.(*cachedResolver)
    if !ok {
        return
    }
    if atomic.LoadInt64(&cr.counter) == 0 {
        res.Resolve(ctx, ref)
    }
}

// withLocal wraps r with local image-store fallback unless the mode forces
// remote pulls or there is no image store.
func withLocal(r remotes.Resolver, imageStore images.Store, mode source.ResolveMode) remotes.Resolver {
    if imageStore == nil || mode == source.ResolveModeForcePull {
        return r
    }
    return withLocalResolver{Resolver: r, is: imageStore, mode: mode}
}
// A remotes.Resolver which checks the local image store if the real
// resolver cannot find the image, essentially falling back to a local
// image if one is present.
//
// We do not override the Fetcher or Pusher methods:
//
// - Fetcher is called by github.com/containerd/containerd/remotes/:fetch()
// only after it has checked for the content locally, so avoid the
// hassle of interposing a local-fetch proxy and simply pass on the
// request.
// - Pusher wouldn't make sense to push locally, so just forward.
type withLocalResolver struct {
    counter int64 // needs to be 64bit aligned for 32bit systems
    remotes.Resolver
    is   images.Store
    mode source.ResolveMode
}

// Resolve checks the local image store first when mode prefers local,
// counting local hits; otherwise it tries the remote resolver, falling back
// to the local store in default mode when the remote fails.
func (r withLocalResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) {
    if r.mode == source.ResolveModePreferLocal {
        if img, err := r.is.Get(ctx, ref); err == nil {
            atomic.AddInt64(&r.counter, 1)
            return ref, img.Target, nil
        }
    }

    n, desc, err := r.Resolver.Resolve(ctx, ref)
    if err == nil {
        return n, desc, err
    }

    if r.mode == source.ResolveModeDefault {
        if img, err := r.is.Get(ctx, ref); err == nil {
            return ref, img.Target, nil
        }
    }

    // Return the original remote error, not the local-store one.
    return "", ocispec.Descriptor{}, err
}
// resolverCache memoizes resolvers per repository+session with a sliding
// one-minute expiry enforced by clean().
type resolverCache struct {
    mu sync.Mutex
    m  map[string]cachedResolver
}

// cachedResolver wraps a resolver, counting Resolve calls (read by
// EnsureManifestRequested) and carrying an eviction deadline.
type cachedResolver struct {
    counter int64
    timeout time.Time
    remotes.Resolver
}

// Resolve increments the call counter and delegates.
func (cr *cachedResolver) Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) {
    atomic.AddInt64(&cr.counter, 1)
    return cr.Resolver.Resolve(ctx, ref)
}
// Add registers resolver under the repository+session key derived from ref
// and ctx, returning the cached entry. The entry's eviction deadline is
// pushed one minute into the future on every call, including for entries
// that already exist.
func (r *resolverCache) Add(ctx context.Context, ref string, resolver remotes.Resolver) remotes.Resolver {
    r.mu.Lock()
    defer r.mu.Unlock()

    ref = r.repo(ref) + "-" + session.FromContext(ctx)

    cr, ok := r.m[ref]
    cr.timeout = time.Now().Add(time.Minute)
    if !ok {
        cr.Resolver = resolver
    }
    // Store the refreshed entry back unconditionally. Previously the new
    // deadline was only set on the local copy when the entry already
    // existed, so active entries kept their original timeout and were
    // evicted by clean() one minute after first insertion even while still
    // in use.
    r.m[ref] = cr
    return &cr
}
// repo normalizes refStr to its repository name for use as a cache key;
// unparseable refs are used verbatim.
func (r *resolverCache) repo(refStr string) string {
    ref, err := distreference.ParseNormalizedNamed(refStr)
    if err != nil {
        return refStr
    }
    return ref.Name()
}

// Get returns the cached resolver for ref in the current session, or nil.
// Note it returns a pointer to a copy of the stored entry.
func (r *resolverCache) Get(ctx context.Context, ref string) remotes.Resolver {
    r.mu.Lock()
    defer r.mu.Unlock()

    ref = r.repo(ref) + "-" + session.FromContext(ctx)

    cr, ok := r.m[ref]
    if !ok {
        return nil
    }
    return &cr
}

// clean evicts every entry whose deadline has passed as of now.
func (r *resolverCache) clean(now time.Time) {
    r.mu.Lock()
    for k, cr := range r.m {
        if now.After(cr.timeout) {
            delete(r.m, k)
        }
    }
    r.mu.Unlock()
}

// newResolverCache builds a cache and starts a minute-interval cleaner
// goroutine. The ticker is never stopped — acceptable here because the
// cache is a package-level singleton living for the process lifetime.
func newResolverCache() *resolverCache {
    rc := &resolverCache{
        m: map[string]cachedResolver{},
    }
    t := time.NewTicker(time.Minute)
    go func() {
        for {
            rc.clean(<-t.C)
        }
    }()
    return rc
}

View File

@@ -1,285 +0,0 @@
package push
import (
"context"
"encoding/json"
"fmt"
"strings"
"sync"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/docker/distribution/reference"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/util/flightcontrol"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/buildkit/util/progress"
"github.com/moby/buildkit/util/resolver"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// Push uploads the image rooted at dgst from cs to the registry named by
// ref. With byDigest set, ref must be untagged and the image is pushed by
// digest only. Blobs are pushed first (deduplicated, with distribution
// source labels maintained), then manifests innermost-first.
func Push(ctx context.Context, sm *session.Manager, cs content.Store, dgst digest.Digest, ref string, insecure bool, hosts docker.RegistryHosts, byDigest bool) error {
    desc := ocispec.Descriptor{
        Digest: dgst,
    }
    parsed, err := reference.ParseNormalizedNamed(ref)
    if err != nil {
        return err
    }
    if byDigest && !reference.IsNameOnly(parsed) {
        return errors.Errorf("can't push tagged ref %s by digest", parsed.String())
    }
    if byDigest {
        ref = parsed.Name()
    } else {
        // Ensure a tag is present (defaults to "latest").
        ref = reference.TagNameOnly(parsed).String()
    }

    resolver := resolver.New(ctx, hosts, sm)
    pusher, err := resolver.Pusher(ctx, ref)
    if err != nil {
        return err
    }

    var m sync.Mutex
    // Manifests are collected (not pushed) during the blob walk and pushed
    // afterwards, in reverse discovery order so children precede parents.
    manifestStack := []ocispec.Descriptor{}

    filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
        switch desc.MediaType {
        case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
            images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
            m.Lock()
            manifestStack = append(manifestStack, desc)
            m.Unlock()
            return nil, images.ErrStopHandler
        default:
            return nil, nil
        }
    })

    pushHandler := remotes.PushHandler(pusher, cs)
    pushUpdateSourceHandler, err := updateDistributionSourceHandler(cs, pushHandler, ref)
    if err != nil {
        return err
    }

    handlers := append([]images.Handler{},
        images.HandlerFunc(annotateDistributionSourceHandler(cs, childrenHandler(cs))),
        filterHandler,
        dedupeHandler(pushUpdateSourceHandler),
    )

    ra, err := cs.ReaderAt(ctx, desc)
    if err != nil {
        return err
    }

    mtype, err := imageutil.DetectManifestMediaType(ra)
    if err != nil {
        return err
    }

    layersDone := oneOffProgress(ctx, "pushing layers")
    err = images.Dispatch(ctx, images.Handlers(handlers...), nil, ocispec.Descriptor{
        Digest:    dgst,
        Size:      ra.Size(),
        MediaType: mtype,
    })
    layersDone(err)
    if err != nil {
        return err
    }

    mfstDone := oneOffProgress(ctx, fmt.Sprintf("pushing manifest for %s", ref))
    for i := len(manifestStack) - 1; i >= 0; i-- {
        _, err := pushHandler(ctx, manifestStack[i])
        if err != nil {
            mfstDone(err)
            return err
        }
    }
    mfstDone(nil)
    return nil
}
// annotateDistributionSourceHandler wraps f so that, for manifest/index
// descriptors, each child descriptor is annotated with any
// "containerd.io/distribution.source.*" labels recorded on its blob in cs.
func annotateDistributionSourceHandler(cs content.Store, f images.HandlerFunc) func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
    return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
        children, err := f(ctx, desc)
        if err != nil {
            return nil, err
        }

        // only add distribution source for the config or blob data descriptor
        switch desc.MediaType {
        case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
            images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
        default:
            return children, nil
        }

        for i := range children {
            child := children[i]

            info, err := cs.Info(ctx, child.Digest)
            if err != nil {
                return nil, err
            }

            for k, v := range info.Labels {
                if !strings.HasPrefix(k, "containerd.io/distribution.source.") {
                    continue
                }

                if child.Annotations == nil {
                    child.Annotations = map[string]string{}
                }
                child.Annotations[k] = v
            }

            children[i] = child
        }
        return children, nil
    }
}

// oneOffProgress emits a "started" progress item for id and returns a
// completion callback that stamps the completion time, closes the writer,
// and passes through the error it is given.
func oneOffProgress(ctx context.Context, id string) func(err error) error {
    pw, _, _ := progress.FromContext(ctx)
    now := time.Now()
    st := progress.Status{
        Started: &now,
    }
    pw.Write(id, st)
    return func(err error) error {
        // TODO: set error on status
        now := time.Now()
        st.Completed = &now
        pw.Write(id, st)
        pw.Close()
        return err
    }
}
// childrenHandler returns a handler that lists the child descriptors of a
// manifest (config + layers) or of an index (its manifests). Known childless
// blob types return nil; unknown types are logged and treated as childless.
func childrenHandler(provider content.Provider) images.HandlerFunc {
    return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
        var descs []ocispec.Descriptor
        switch desc.MediaType {
        case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
            p, err := content.ReadBlob(ctx, provider, desc)
            if err != nil {
                return nil, err
            }

            // TODO(stevvooe): We just assume oci manifest, for now. There may be
            // subtle differences from the docker version.
            var manifest ocispec.Manifest
            if err := json.Unmarshal(p, &manifest); err != nil {
                return nil, err
            }

            descs = append(descs, manifest.Config)
            descs = append(descs, manifest.Layers...)
        case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
            p, err := content.ReadBlob(ctx, provider, desc)
            if err != nil {
                return nil, err
            }

            var index ocispec.Index
            if err := json.Unmarshal(p, &index); err != nil {
                return nil, err
            }

            for _, m := range index.Manifests {
                if m.Digest != "" {
                    descs = append(descs, m)
                }
            }
        case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
            images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
            ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip:
            // childless data types.
            return nil, nil
        default:
            logrus.Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
        }
        return descs, nil
    }
}
// updateDistributionSourceHandler will update distribution source label after
// pushing layer successfully.
//
// FIXME(fuweid): There is race condition for current design of distribution
// source label if there are pull/push jobs consuming same layer.
// updateDistributionSourceHandler wraps pushF so that, after a layer blob is
// pushed, its distribution source label in cs is updated to include ref.
// Label-update failures are logged, not fatal.
func updateDistributionSourceHandler(cs content.Store, pushF images.HandlerFunc, ref string) (images.HandlerFunc, error) {
    updateF, err := docker.AppendDistributionSourceLabel(cs, ref)
    if err != nil {
        return nil, err
    }

    return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
        var islayer bool

        switch desc.MediaType {
        case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
            ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip:
            islayer = true
        }

        children, err := pushF(ctx, desc)
        if err != nil {
            return nil, err
        }

        // update distribution source to layer
        if islayer {
            if _, err := updateF(ctx, desc); err != nil {
                logrus.Warnf("failed to update distribution source for layer %v: %v", desc.Digest, err)
            }
        }
        return children, nil
    }), nil
}
// dedupeHandler wraps h so each digest is processed at most once: concurrent
// calls for the same digest are coalesced through flightcontrol, and later
// calls return the memoized children.
func dedupeHandler(h images.HandlerFunc) images.HandlerFunc {
    var (
        group flightcontrol.Group
        mu    sync.Mutex
        seen  = map[digest.Digest][]ocispec.Descriptor{}
    )

    return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
        v, err := group.Do(ctx, desc.Digest.String(), func(ctx context.Context) (interface{}, error) {
            // Return the memoized result if this digest was handled before.
            mu.Lock()
            children, ok := seen[desc.Digest]
            mu.Unlock()
            if ok {
                return children, nil
            }

            children, err := h(ctx, desc)
            if err != nil {
                return nil, err
            }

            mu.Lock()
            seen[desc.Digest] = children
            mu.Unlock()
            return children, nil
        })
        if err != nil {
            return nil, err
        }
        if v == nil {
            return nil, nil
        }
        return v.([]ocispec.Descriptor), nil
    })
}

View File

@@ -1,219 +0,0 @@
package resolver
import (
"context"
"crypto/tls"
"crypto/x509"
"io/ioutil"
"net"
"net/http"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/moby/buildkit/cmd/buildkitd/config"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/auth"
"github.com/moby/buildkit/util/tracing"
"github.com/pkg/errors"
)
// fillInsecureOpts configures h's scheme and HTTP client from the registry
// config c: explicit plain-HTTP wins, then insecure TLS (skip verify), and
// localhost defaults to plain HTTP when plain-HTTP is unset.
func fillInsecureOpts(host string, c config.RegistryConfig, h *docker.RegistryHost) error {
    tc, err := loadTLSConfig(c)
    if err != nil {
        return err
    }

    if c.PlainHTTP != nil && *c.PlainHTTP {
        h.Scheme = "http"
    } else if c.Insecure != nil && *c.Insecure {
        tc.InsecureSkipVerify = true
    } else if c.PlainHTTP == nil {
        if ok, _ := docker.MatchLocalhost(host); ok {
            h.Scheme = "http"
        }
    }

    transport := newDefaultTransport()
    transport.TLSClientConfig = tc

    h.Client = &http.Client{
        Transport: tracing.NewTransport(transport),
    }
    return nil
}
// loadTLSConfig builds a tls.Config from c: *.crt files in the configured
// TLS dirs become extra root CAs, *.cert/*.key pairs become client
// certificates, and explicit RootCAs/KeyPairs entries are loaded as well.
// Missing or unreadable TLS dirs are tolerated.
func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) {
    for _, d := range c.TLSConfigDir {
        fs, err := ioutil.ReadDir(d)
        if err != nil && !os.IsNotExist(err) && !os.IsPermission(err) {
            return nil, errors.WithStack(err)
        }
        for _, f := range fs {
            if strings.HasSuffix(f.Name(), ".crt") {
                c.RootCAs = append(c.RootCAs, filepath.Join(d, f.Name()))
            }
            if strings.HasSuffix(f.Name(), ".cert") {
                c.KeyPairs = append(c.KeyPairs, config.TLSKeyPair{
                    Certificate: filepath.Join(d, f.Name()),
                    Key:         filepath.Join(d, strings.TrimSuffix(f.Name(), ".cert")+".key"),
                })
            }
        }
    }

    tc := &tls.Config{}
    if len(c.RootCAs) > 0 {
        systemPool, err := x509.SystemCertPool()
        if err != nil {
            // Windows has no accessible system pool; start from empty there.
            if runtime.GOOS == "windows" {
                systemPool = x509.NewCertPool()
            } else {
                return nil, errors.Wrapf(err, "unable to get system cert pool")
            }
        }
        tc.RootCAs = systemPool
    }

    for _, p := range c.RootCAs {
        dt, err := ioutil.ReadFile(p)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to read %s", p)
        }
        tc.RootCAs.AppendCertsFromPEM(dt)
    }

    for _, kp := range c.KeyPairs {
        cert, err := tls.LoadX509KeyPair(kp.Certificate, kp.Key)
        if err != nil {
            return nil, errors.Wrapf(err, "failed to load keypair for %s", kp.Certificate)
        }
        tc.Certificates = append(tc.Certificates, cert)
    }
    return tc, nil
}
// NewRegistryConfig turns the buildkitd registry configuration into a
// docker.RegistryHosts lookup. For each configured host it returns its
// mirrors (pull/resolve only) followed by the host itself
// (push/pull/resolve), applying insecure/plain-HTTP options per entry.
// Hosts without explicit configuration fall back to containerd's default
// registries, with plain HTTP for localhost registries.
func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts {
	return docker.Registries(
		func(host string) ([]docker.RegistryHost, error) {
			c, ok := m[host]
			if !ok {
				// Not configured here; let the fallback resolver handle it.
				return nil, nil
			}
			var out []docker.RegistryHost
			for _, mirror := range c.Mirrors {
				h := docker.RegistryHost{
					Scheme:       "https",
					Client:       newDefaultClient(),
					Host:         mirror,
					Path:         "/v2",
					Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve,
				}
				// A mirror may itself have a registry config entry.
				if err := fillInsecureOpts(mirror, m[mirror], &h); err != nil {
					return nil, err
				}
				out = append(out, h)
			}
			if host == "docker.io" {
				// Docker Hub's canonical registry endpoint.
				host = "registry-1.docker.io"
			}
			h := docker.RegistryHost{
				Scheme:       "https",
				Client:       newDefaultClient(),
				Host:         host,
				Path:         "/v2",
				Capabilities: docker.HostCapabilityPush | docker.HostCapabilityPull | docker.HostCapabilityResolve,
			}
			if err := fillInsecureOpts(host, c, &h); err != nil {
				return nil, err
			}
			out = append(out, h)
			return out, nil
		},
		docker.ConfigureDefaultRegistries(
			docker.WithClient(newDefaultClient()),
			docker.WithPlainHTTP(docker.MatchLocalhost),
		),
	)
}
// New returns a docker registry resolver whose hosts are wired up with
// per-session credentials taken from the session manager.
func New(ctx context.Context, hosts docker.RegistryHosts, sm *session.Manager) remotes.Resolver {
	opts := docker.ResolverOptions{
		Hosts: hostsWithCredentials(ctx, hosts, sm),
	}
	return docker.NewResolver(opts)
}
// hostsWithCredentials decorates every registry host returned by hosts
// with an authorizer that fetches credentials over the buildkit session
// identified in ctx. If ctx carries no session id, hosts is returned
// unchanged.
func hostsWithCredentials(ctx context.Context, hosts docker.RegistryHosts, sm *session.Manager) docker.RegistryHosts {
	id := session.FromContext(ctx)
	if id == "" {
		return hosts
	}
	return func(domain string) ([]docker.RegistryHost, error) {
		res, err := hosts(domain)
		if err != nil {
			return nil, err
		}
		if len(res) == 0 {
			return nil, nil
		}
		// Bound the wait for the session to become available.
		timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		caller, err := sm.Get(timeoutCtx, id)
		if err != nil {
			return nil, err
		}
		// One shared authorizer (using the first host's HTTP client) is
		// attached to every returned host.
		a := docker.NewDockerAuthorizer(
			docker.WithAuthClient(res[0].Client),
			docker.WithAuthCreds(auth.CredentialsFunc(context.TODO(), caller)),
		)
		for i := range res {
			res[i].Authorizer = a
		}
		return res, nil
	}
}
// newDefaultClient returns an HTTP client backed by the default registry
// transport.
func newDefaultClient() *http.Client {
	c := &http.Client{}
	c.Transport = newDefaultTransport()
	return c
}
// newDefaultTransport is for pull or push client
//
// NOTE: For push, there must disable http2 for https because the flow control
// will limit data transfer. The net/http package doesn't provide http2 tunable
// settings which limits push performance.
//
// REF: https://github.com/golang/go/issues/14077
func newDefaultTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 5 * time.Second,
DisableKeepAlives: true,
TLSNextProto: make(map[string]func(authority string, c *tls.Conn) http.RoundTripper),
}
}

View File

@@ -1,14 +0,0 @@
// +build !windows
package system
// DefaultPathEnv is a unix-style list of directories to search for
// executables. Each directory is separated from the next by a colon
// ':' character.
const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
// is the system drive. This is a no-op on Linux: the path is returned unchanged.
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
	return path, nil
}

View File

@@ -1,37 +0,0 @@
// +build windows
package system
import (
"fmt"
"path/filepath"
"strings"
)
// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
// the container. Docker has no context of what the default path should be.
const DefaultPathEnv = ""
// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
// This is used, for example, when validating a user provided path in docker cp.
// A path carrying a drive letter is only accepted when that drive is the system
// drive (C:), and the letter is stripped. The result is translated to OS
// semantics (/ becomes \) so it can later be concatenated with a Windows
// long-path prefix, which does not support drive letters. Examples:
//	C:  --> Fail
//	C:\ --> \
//	a   --> a
//	/a  --> \a
//	d:\ --> Fail
func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
	hasDriveColon := len(path) >= 2 && string(path[1]) == ":"
	if hasDriveColon && len(path) == 2 {
		// A bare drive ("C:") names no path at all.
		return "", fmt.Errorf("No relative path specified in %q", path)
	}
	if !filepath.IsAbs(path) || len(path) < 2 {
		return filepath.FromSlash(path), nil
	}
	if hasDriveColon && !strings.EqualFold(string(path[0]), "c") {
		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
	}
	return filepath.FromSlash(path[2:]), nil
}

View File

@@ -1,29 +0,0 @@
// +build linux,seccomp
package system
import (
"sync"
"golang.org/x/sys/unix"
)
// seccompSupported caches the result of the one-time probe below.
var seccompSupported bool
var seccompOnce sync.Once

// SeccompSupported reports whether the running kernel supports seccomp
// filters. The prctl probe runs only once; the result is cached.
func SeccompSupported() bool {
	seccompOnce.Do(func() {
		seccompSupported = getSeccompSupported()
	})
	return seccompSupported
}

// getSeccompSupported probes the kernel via prctl(2). PR_GET_SECCOMP not
// failing with EINVAL indicates seccomp is compiled in; PR_SET_SECCOMP with
// SECCOMP_MODE_FILTER and no filter argument not returning EINVAL indicates
// filter support (the call itself fails, so no filter is installed).
func getSeccompSupported() bool {
	if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL {
		// Make sure the kernel has CONFIG_SECCOMP_FILTER.
		if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL {
			return true
		}
	}
	return false
}

View File

@@ -1,7 +0,0 @@
// +build !linux,seccomp
package system
// SeccompSupported always reports false: this build is not a Linux build,
// so seccomp filters are never available.
func SeccompSupported() bool {
	return false
}

View File

@@ -1,7 +0,0 @@
// +build !seccomp
package system
// SeccompSupported always reports false: the binary was built without the
// "seccomp" build tag.
func SeccompSupported() bool {
	return false
}

View File

@@ -1,58 +0,0 @@
package throttle
import (
"sync"
"time"
)
// Throttle wraps a function so that internal function does not get called
// more frequently than the specified duration.
func Throttle(d time.Duration, f func()) func() {
return throttle(d, f, true)
}
// ThrottleAfter wraps a function so that internal function does not get called
// more frequently than the specified duration. The delay is added after function
// has been called.
func ThrottleAfter(d time.Duration, f func()) func() {
return throttle(d, f, false)
}
func throttle(d time.Duration, f func(), wait bool) func() {
var next, running bool
var mu sync.Mutex
return func() {
mu.Lock()
defer mu.Unlock()
next = true
if !running {
running = true
go func() {
for {
mu.Lock()
if next == false {
running = false
mu.Unlock()
return
}
if !wait {
next = false
}
mu.Unlock()
if wait {
time.Sleep(d)
mu.Lock()
next = false
mu.Unlock()
f()
} else {
f()
time.Sleep(d)
}
}
}()
}
}
}

View File

@@ -1,22 +0,0 @@
package tracing
import (
opentracing "github.com/opentracing/opentracing-go"
)
// MultiSpan allows shared tracing to multiple spans.
// TODO: This is a temporary solution and doesn't really support shared tracing yet. Instead the first always wins.
type MultiSpan struct {
	opentracing.Span
}

// NewMultiSpan creates an empty MultiSpan.
func NewMultiSpan() *MultiSpan {
	ms := &MultiSpan{}
	return ms
}

// Add registers a span; only the first span added is retained.
func (ms *MultiSpan) Add(s opentracing.Span) {
	if ms.Span != nil {
		return
	}
	ms.Span = s
}

View File

@@ -1,115 +0,0 @@
package tracing
import (
"context"
"fmt"
"io"
"net/http"
"github.com/opentracing-contrib/go-stdlib/nethttp"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
)
// StartSpan starts a new span as a child of the span in context.
// If there is no span in context then this is a no-op.
// The difference from opentracing.StartSpanFromContext is that this method
// does not depend on the global tracer.
func StartSpan(ctx context.Context, operationName string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) {
	parent := opentracing.SpanFromContext(ctx)
	// Default to a no-op tracer so spanless contexts stay spanless.
	tracer := opentracing.Tracer(&opentracing.NoopTracer{})
	if parent != nil {
		tracer = parent.Tracer()
		opts = append(opts, opentracing.ChildOf(parent.Context()))
	}
	span := tracer.StartSpan(operationName, opts...)
	if parent != nil {
		return span, opentracing.ContextWithSpan(ctx, span)
	}
	return span, ctx
}
// FinishWithError finalizes the span and sets the error if one is passed
func FinishWithError(span opentracing.Span, err error) {
	if err != nil {
		fields := []log.Field{
			log.String("event", "error"),
			log.String("message", err.Error()),
		}
		// Wrapped (pkg/errors-style) errors expose Cause and carry a
		// formatted stack; record it.
		type causer interface {
			Cause() error
		}
		if _, ok := err.(causer); ok {
			fields = append(fields, log.String("stack", fmt.Sprintf("%+v", err)))
		}
		span.LogFields(fields...)
		ext.Error.Set(span, true)
	}
	span.Finish()
}
// ContextWithSpanFromContext sets the tracing span of a context from other
// context if one is not already set. Alternative would be
// context.WithoutCancel() that would copy the context but reset ctx.Done
func ContextWithSpanFromContext(ctx, ctx2 context.Context) context.Context {
	// ctx already carries a span: nothing to do.
	if opentracing.SpanFromContext(ctx) != nil {
		return ctx
	}
	span := opentracing.SpanFromContext(ctx2)
	if span == nil {
		return ctx
	}
	return opentracing.ContextWithSpan(ctx, span)
}
// DefaultTransport is an HTTP transport that traces requests carrying a
// span in their context, layered over http.DefaultTransport.
var DefaultTransport http.RoundTripper = &Transport{
	RoundTripper: &nethttp.Transport{RoundTripper: http.DefaultTransport},
}

// DefaultClient is an HTTP client using DefaultTransport.
var DefaultClient = &http.Client{
	Transport: DefaultTransport,
}

// Transport is an http.RoundTripper that traces outgoing requests when a
// span is present in the request context.
type Transport struct {
	http.RoundTripper
}
// NewTransport wraps rt so that outgoing requests whose context carries a
// span are traced.
func NewTransport(rt http.RoundTripper) http.RoundTripper {
	return &Transport{
		RoundTripper: &nethttp.Transport{RoundTripper: rt},
	}
}
// RoundTrip traces the request if its context contains a span; otherwise
// it behaves exactly like the wrapped transport. The trace is finished
// when the response body is closed (immediately for HEAD requests or on
// transport error).
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
	span := opentracing.SpanFromContext(req.Context())
	if span == nil { // no tracer connected with either request or transport
		return t.RoundTripper.RoundTrip(req)
	}
	req, tracer := nethttp.TraceRequest(span.Tracer(), req)
	resp, err := t.RoundTripper.RoundTrip(req)
	if err != nil {
		tracer.Finish()
		return resp, err
	}
	if req.Method == "HEAD" {
		tracer.Finish()
	} else {
		// Keep the trace open until the caller finishes with the body.
		resp.Body = closeTracker{resp.Body, tracer.Finish}
	}
	return resp, err
}
type closeTracker struct {
io.ReadCloser
finish func()
}
func (c closeTracker) Close() error {
err := c.ReadCloser.Close()
c.finish()
return err
}

View File

@@ -1,187 +0,0 @@
package winlayers
import (
"archive/tar"
"context"
"io"
"io/ioutil"
"runtime"
"strings"
"sync"
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/mount"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// NewFileSystemApplierWithWindows wraps a diff.Applier so that layers in
// Windows layer format (entries under "Files/") can be applied on a
// non-Windows host. On Windows itself the applier is returned unchanged.
func NewFileSystemApplierWithWindows(cs content.Provider, a diff.Applier) diff.Applier {
	if runtime.GOOS == "windows" {
		return a
	}
	return &winApplier{
		cs: cs,
		a:  a,
	}
}

// winApplier rewrites Windows-format layer tars before applying them.
type winApplier struct {
	cs content.Provider
	a  diff.Applier
}
// Apply applies desc to mounts, converting a Windows-format layer on the
// fly: entries under "Files/" are kept with the prefix stripped from both
// names and link targets, everything else is dropped. Outside of Windows
// layer mode it delegates to the wrapped applier. The returned descriptor
// describes the decompressed stream as read from the content store (size
// and digest are computed before filtering).
func (s *winApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) {
	if !hasWindowsLayerMode(ctx) {
		return s.a.Apply(ctx, desc, mounts, opts...)
	}
	compressed, err := images.DiffCompression(ctx, desc.MediaType)
	if err != nil {
		return ocispec.Descriptor{}, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType)
	}
	var ocidesc ocispec.Descriptor
	if err := mount.WithTempMount(ctx, mounts, func(root string) error {
		ra, err := s.cs.ReaderAt(ctx, desc)
		if err != nil {
			return errors.Wrap(err, "failed to get reader from content store")
		}
		defer ra.Close()
		r := content.NewReader(ra)
		if compressed != "" {
			ds, err := compression.DecompressStream(r)
			if err != nil {
				return err
			}
			defer ds.Close()
			r = ds
		}
		// Digest and count the decompressed, unfiltered stream so the
		// returned descriptor matches the stored layer content.
		digester := digest.Canonical.Digester()
		rc := &readCounter{
			r: io.TeeReader(r, digester.Hash()),
		}
		rc2, discard := filter(rc, func(hdr *tar.Header) bool {
			if strings.HasPrefix(hdr.Name, "Files/") {
				hdr.Name = strings.TrimPrefix(hdr.Name, "Files/")
				hdr.Linkname = strings.TrimPrefix(hdr.Linkname, "Files/")
				// TODO: could convert the windows PAX headers to xattr here to reuse
				// the original ones in diff for parent directories and file modifications
				return true
			}
			return false
		})
		if _, err := archive.Apply(ctx, root, rc2); err != nil {
			discard(err)
			return err
		}
		// Read any trailing data so the digest/count cover the full stream.
		if _, err := io.Copy(ioutil.Discard, rc); err != nil {
			discard(err)
			return err
		}
		ocidesc = ocispec.Descriptor{
			MediaType: ocispec.MediaTypeImageLayer,
			Size:      rc.c,
			Digest:    digester.Digest(),
		}
		return nil
	}); err != nil {
		return ocispec.Descriptor{}, err
	}
	return ocidesc, nil
}
type readCounter struct {
r io.Reader
c int64
}
func (rc *readCounter) Read(p []byte) (n int, err error) {
n, err = rc.r.Read(p)
rc.c += int64(n)
return
}
// filter copies the tar stream from in to the returned reader, keeping
// only entries for which f returns true (f may also mutate a header before
// it is re-written). The rewrite runs in a goroutine feeding a pipe; the
// returned discard function aborts the copy, propagating err to both the
// underlying reader and the pipe.
func filter(in io.Reader, f func(*tar.Header) bool) (io.Reader, func(error)) {
	pr, pw := io.Pipe()
	rc := &readCanceler{Reader: in}
	go func() {
		tarReader := tar.NewReader(rc)
		tarWriter := tar.NewWriter(pw)
		// The pipe writer is closed with the copy loop's final error
		// (nil on clean EOF), which readers of pr then observe.
		pw.CloseWithError(func() error {
			for {
				h, err := tarReader.Next()
				if err == io.EOF {
					break
				}
				if err != nil {
					return err
				}
				if f(h) {
					if err := tarWriter.WriteHeader(h); err != nil {
						return err
					}
					if h.Size > 0 {
						if _, err := io.Copy(tarWriter, tarReader); err != nil {
							return err
						}
					}
				} else {
					// Drain the payload of dropped entries.
					if h.Size > 0 {
						if _, err := io.Copy(ioutil.Discard, tarReader); err != nil {
							return err
						}
					}
				}
			}
			return tarWriter.Close()
		}())
	}()
	discard := func(err error) {
		rc.cancel(err)
		pw.CloseWithError(err)
	}
	return pr, discard
}
type readCanceler struct {
mu sync.Mutex
io.Reader
err error
}
func (r *readCanceler) Read(b []byte) (int, error) {
r.mu.Lock()
if r.err != nil {
r.mu.Unlock()
return 0, r.err
}
n, err := r.Reader.Read(b)
r.mu.Unlock()
return n, err
}
func (r *readCanceler) cancel(err error) {
r.mu.Lock()
r.err = err
r.mu.Unlock()
}

View File

@@ -1,19 +0,0 @@
package winlayers
import "context"
type contextKeyT string
var contextKey = contextKeyT("buildkit/winlayers-on")
func UseWindowsLayerMode(ctx context.Context) context.Context {
return context.WithValue(ctx, contextKey, true)
}
func hasWindowsLayerMode(ctx context.Context) bool {
v := ctx.Value(contextKey)
if v == nil {
return false
}
return true
}

View File

@@ -1,274 +0,0 @@
package winlayers
import (
"archive/tar"
"context"
"crypto/rand"
"encoding/base64"
"fmt"
"io"
"time"
"github.com/containerd/containerd/archive"
"github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// PAX record keys used to carry Windows-specific file metadata in tar headers.
const (
	keyFileAttr     = "MSWINDOWS.fileattr"      // Windows file attribute bits
	keySDRaw        = "MSWINDOWS.rawsd"         // base64-encoded raw security descriptor
	keyCreationTime = "LIBARCHIVE.creationtime" // file creation timestamp
)
// NewWalkingDiffWithWindows wraps a diff.Comparer so that, in Windows
// layer mode, produced layer tars are rewritten into Windows layer format
// ("Hives"/"Files" top-level entries plus PAX metadata).
func NewWalkingDiffWithWindows(store content.Store, d diff.Comparer) diff.Comparer {
	return &winDiffer{
		store: store,
		d:     d,
	}
}

// emptyDesc is the zero descriptor returned alongside errors.
var emptyDesc = ocispec.Descriptor{}

// winDiffer rewrites diffs from the wrapped comparer into Windows-format
// layers stored in store.
type winDiffer struct {
	store content.Store
	d     diff.Comparer
}
// Compare creates a diff between the given mounts and uploads the result
// to the content store. In Windows layer mode the diff tar is rewritten
// into Windows layer format (see makeWindowsLayer) and, depending on
// config.MediaType, gzip-compressed; otherwise the call is delegated to
// the wrapped comparer unchanged.
func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) {
	if !hasWindowsLayerMode(ctx) {
		return s.d.Compare(ctx, lower, upper, opts...)
	}
	var config diff.Config
	for _, opt := range opts {
		if err := opt(&config); err != nil {
			return emptyDesc, err
		}
	}
	if config.MediaType == "" {
		config.MediaType = ocispec.MediaTypeImageLayerGzip
	}
	var isCompressed bool
	switch config.MediaType {
	case ocispec.MediaTypeImageLayer:
	case ocispec.MediaTypeImageLayerGzip:
		isCompressed = true
	default:
		return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", config.MediaType)
	}
	var ocidesc ocispec.Descriptor
	if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error {
		return mount.WithTempMount(ctx, upper, func(upperRoot string) error {
			var newReference bool
			if config.Reference == "" {
				newReference = true
				config.Reference = uniqueRef()
			}
			cw, err := s.store.Writer(ctx,
				content.WithRef(config.Reference),
				content.WithDescriptor(ocispec.Descriptor{
					MediaType: config.MediaType, // most contentstore implementations just ignore this
				}))
			if err != nil {
				return errors.Wrap(err, "failed to open writer")
			}
			// On any failure, close the writer and abort the upload —
			// but only delete the ref if we created it ourselves.
			defer func() {
				if err != nil {
					cw.Close()
					if newReference {
						if err := s.store.Abort(ctx, config.Reference); err != nil {
							log.G(ctx).WithField("ref", config.Reference).Warnf("failed to delete diff upload")
						}
					}
				}
			}()
			if !newReference {
				// Reusing a caller-provided ref: restart from scratch.
				if err := cw.Truncate(0); err != nil {
					return err
				}
			}
			if isCompressed {
				// Also digest the uncompressed stream so the standard
				// "containerd.io/uncompressed" label can be attached.
				dgstr := digest.SHA256.Digester()
				compressed, err := compression.CompressStream(cw, compression.Gzip)
				if err != nil {
					return errors.Wrap(err, "failed to get compressed stream")
				}
				var w io.Writer = io.MultiWriter(compressed, dgstr.Hash())
				w, discard, done := makeWindowsLayer(w)
				err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot)
				if err != nil {
					discard(err)
				}
				// Wait for the rewriting goroutine before closing the
				// compressor, so all data has been flushed through.
				<-done
				compressed.Close()
				if err != nil {
					return errors.Wrap(err, "failed to write compressed diff")
				}
				if config.Labels == nil {
					config.Labels = map[string]string{}
				}
				config.Labels["containerd.io/uncompressed"] = dgstr.Digest().String()
			} else {
				w, discard, done := makeWindowsLayer(cw)
				if err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot); err != nil {
					discard(err)
					return errors.Wrap(err, "failed to write diff")
				}
				<-done
			}
			var commitopts []content.Opt
			if config.Labels != nil {
				commitopts = append(commitopts, content.WithLabels(config.Labels))
			}
			dgst := cw.Digest()
			if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil {
				return errors.Wrap(err, "failed to commit")
			}
			info, err := s.store.Info(ctx, dgst)
			if err != nil {
				return errors.Wrap(err, "failed to get info from content store")
			}
			ocidesc = ocispec.Descriptor{
				MediaType: config.MediaType,
				Size:      info.Size,
				Digest:    info.Digest,
			}
			return nil
		})
	}); err != nil {
		return emptyDesc, err
	}
	return ocidesc, nil
}
func uniqueRef() string {
t := time.Now()
var b [3]byte
// Ignore read failures, just decreases uniqueness
rand.Read(b[:])
return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:]))
}
// prepareWinHeader stamps a tar header with the PAX metadata Windows
// layers expect: DOS attribute bits (16 = FILE_ATTRIBUTE_DIRECTORY,
// 32 = FILE_ATTRIBUTE_ARCHIVE), an optional creation time, and PAX format.
func prepareWinHeader(h *tar.Header) {
	if h.PAXRecords == nil {
		h.PAXRecords = map[string]string{}
	}
	switch h.Typeflag {
	case tar.TypeDir:
		h.Mode |= 1 << 14
		h.PAXRecords[keyFileAttr] = "16"
	case tar.TypeReg:
		h.Mode |= 1 << 15
		h.PAXRecords[keyFileAttr] = "32"
	}
	if !h.ModTime.IsZero() {
		h.PAXRecords[keyCreationTime] = fmt.Sprintf("%d.%d", h.ModTime.Unix(), h.ModTime.Nanosecond())
	}
	h.Format = tar.FormatPAX
}
// addSecurityDescriptor attaches a default raw Windows security descriptor
// (base64-encoded, as a PAX record) to directory and regular-file headers.
// The SDDL form of each descriptor is given in the comment above it; the
// payloads must stay byte-identical to remain valid descriptors.
func addSecurityDescriptor(h *tar.Header) {
	if h.Typeflag == tar.TypeDir {
		// O:BAG:SYD:(A;OICI;FA;;;BA)(A;OICI;FA;;;SY)(A;;FA;;;BA)(A;OICIIO;GA;;;CO)(A;OICI;0x1200a9;;;BU)(A;CI;LC;;;BU)(A;CI;DC;;;BU)
		h.PAXRecords[keySDRaw] = "AQAEgBQAAAAkAAAAAAAAADAAAAABAgAAAAAABSAAAAAgAgAAAQEAAAAAAAUSAAAAAgCoAAcAAAAAAxgA/wEfAAECAAAAAAAFIAAAACACAAAAAxQA/wEfAAEBAAAAAAAFEgAAAAAAGAD/AR8AAQIAAAAAAAUgAAAAIAIAAAALFAAAAAAQAQEAAAAAAAMAAAAAAAMYAKkAEgABAgAAAAAABSAAAAAhAgAAAAIYAAQAAAABAgAAAAAABSAAAAAhAgAAAAIYAAIAAAABAgAAAAAABSAAAAAhAgAA"
	}
	if h.Typeflag == tar.TypeReg {
		// O:BAG:SYD:(A;;FA;;;BA)(A;;FA;;;SY)(A;;0x1200a9;;;BU)
		h.PAXRecords[keySDRaw] = "AQAEgBQAAAAkAAAAAAAAADAAAAABAgAAAAAABSAAAAAgAgAAAQEAAAAAAAUSAAAAAgBMAAMAAAAAABgA/wEfAAECAAAAAAAFIAAAACACAAAAABQA/wEfAAEBAAAAAAAFEgAAAAAAGACpABIAAQIAAAAAAAUgAAAAIQIAAA=="
	}
}
// makeWindowsLayer rewrites the tar stream written to the returned writer
// into Windows layer format: it first emits "Hives" and "Files" directory
// entries, then relocates every incoming entry under "Files/" and stamps
// Windows PAX metadata on each header, writing the result to w. The
// rewrite runs in a goroutine; the done channel receives its final error
// exactly once, and the returned function aborts the stream with the
// given error.
func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) {
	pr, pw := io.Pipe()
	done := make(chan error)
	go func() {
		tarReader := tar.NewReader(pr)
		tarWriter := tar.NewWriter(w)
		err := func() error {
			// Synthesize the two fixed top-level directories expected in
			// a Windows layer.
			h := &tar.Header{
				Name:     "Hives",
				Typeflag: tar.TypeDir,
				ModTime:  time.Now(),
			}
			prepareWinHeader(h)
			if err := tarWriter.WriteHeader(h); err != nil {
				return err
			}
			h = &tar.Header{
				Name:     "Files",
				Typeflag: tar.TypeDir,
				ModTime:  time.Now(),
			}
			prepareWinHeader(h)
			if err := tarWriter.WriteHeader(h); err != nil {
				return err
			}
			// Relocate every incoming entry under Files/ and decorate it.
			for {
				h, err := tarReader.Next()
				if err == io.EOF {
					break
				}
				if err != nil {
					return err
				}
				h.Name = "Files/" + h.Name
				if h.Linkname != "" {
					h.Linkname = "Files/" + h.Linkname
				}
				prepareWinHeader(h)
				addSecurityDescriptor(h)
				if err := tarWriter.WriteHeader(h); err != nil {
					return err
				}
				if h.Size > 0 {
					if _, err := io.Copy(tarWriter, tarReader); err != nil {
						return err
					}
				}
			}
			return tarWriter.Close()
		}()
		if err != nil {
			logrus.Errorf("makeWindowsLayer %+v", err)
		}
		// Propagate the result to the pipe and to the caller waiting on done.
		pw.CloseWithError(err)
		done <- err
		return
	}()
	discard := func(err error) {
		pw.CloseWithError(err)
	}
	return pw, discard, done
}