vendor: update vendors for kata-monitor

kata-monitor switched from the containerd client to CRI. Update the dependencies
and vendored code:

    go mod tidy
    go mod vendor

Signed-off-by: Francesco Giudici <fgiudici@redhat.com>

This commit is contained in:
parent c2f03e8993
commit 0b03d97d0b
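For context on what the client switch means in practice, here is a minimal, hypothetical sketch of how a monitoring process can query the runtime through the CRI API (the k8s.io/cri-api module referenced below) instead of the containerd Go client. It is not code from this commit: the socket path, the choice of the v1alpha2 CRI version, and the ListPodSandbox call are assumptions used only to illustrate the dependency change.

```go
// Hypothetical sketch: talk to the runtime over the CRI gRPC API rather than
// the containerd client API. Socket path and CRI version are assumptions.
package main

import (
	"context"
	"fmt"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	pb "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)

func main() {
	const criSocket = "/run/containerd/containerd.sock" // assumed CRI endpoint

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Dial the CRI gRPC endpoint over a unix socket.
	conn, err := grpc.DialContext(ctx, criSocket,
		grpc.WithInsecure(),
		grpc.WithBlock(),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", addr)
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// List pod sandboxes through the CRI RuntimeService.
	client := pb.NewRuntimeServiceClient(conn)
	resp, err := client.ListPodSandbox(ctx, &pb.ListPodSandboxRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, sandbox := range resp.Items {
		fmt.Println(sandbox.Id, sandbox.GetMetadata().GetName())
	}
}
```

The go.mod and go.sum hunks that follow record this dependency shift, and the rest of the diff removes vendored sources that are no longer needed.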
@@ -41,6 +1,7 @@ require (
	github.com/urfave/cli v1.22.2
	github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852
	github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae
	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb
	go.opentelemetry.io/otel v0.15.0
	go.opentelemetry.io/otel/exporters/trace/jaeger v0.15.0
	go.opentelemetry.io/otel/sdk v0.15.0
@@ -49,6 +50,7 @@ require (
	golang.org/x/sys v0.0.0-20210426230700-d19ff857e887
	google.golang.org/grpc v1.33.2
	k8s.io/apimachinery v0.20.6
	k8s.io/cri-api v0.20.6
)

replace (
@@ -169,7 +169,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@@ -256,7 +255,6 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -349,7 +347,6 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/intel-go/cpuid v0.0.0-20210602155658-5747e5cec0d9 h1:x9HFDMDCsaxTvC4X3o0ZN6mw99dT/wYnTItGwhBRmg0=
@@ -370,7 +367,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.11.13 h1:eSvu8Tmq6j2psUJqJrLcWH6K3w5Dwc+qipbaA6eVEN4=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -404,7 +400,6 @@ github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
@@ -437,7 +432,6 @@ github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v1.0.1 h1:G18PGckGdAm3yVQRWDVQ1rLSLntiniKJ0cNRT2Tm5gs=
github.com/opencontainers/runc v1.0.1/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
@@ -451,7 +445,6 @@ github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xA
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -552,6 +545,8 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -901,6 +896,7 @@ k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MA
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
k8s.io/cri-api v0.20.6 h1:iXX0K2pRrbR8yXbZtDK/bSnmg/uSqIFiVJK1x4LUOMc=
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
4  src/runtime/vendor/github.com/Microsoft/go-winio/backuptar/noop.go  (generated, vendored)
@@ -1,4 +0,0 @@
// +build !windows
// This file only exists to allow go get on non-Windows platforms.

package backuptar
68  src/runtime/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go  (generated, vendored)
@@ -1,68 +0,0 @@
package backuptar

import (
	"archive/tar"
	"fmt"
	"strconv"
	"strings"
	"time"
)

// Functions copied from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go
// as we need to manage the LIBARCHIVE.creationtime PAXRecord manually.
// Idea taken from containerd which did the same thing.

// parsePAXTime takes a string of the form %d.%d as described in the PAX
// specification. Note that this implementation allows for negative timestamps,
// which is allowed for by the PAX specification, but not always portable.
func parsePAXTime(s string) (time.Time, error) {
	const maxNanoSecondDigits = 9

	// Split string into seconds and sub-seconds parts.
	ss, sn := s, ""
	if pos := strings.IndexByte(s, '.'); pos >= 0 {
		ss, sn = s[:pos], s[pos+1:]
	}

	// Parse the seconds.
	secs, err := strconv.ParseInt(ss, 10, 64)
	if err != nil {
		return time.Time{}, tar.ErrHeader
	}
	if len(sn) == 0 {
		return time.Unix(secs, 0), nil // No sub-second values
	}

	// Parse the nanoseconds.
	if strings.Trim(sn, "0123456789") != "" {
		return time.Time{}, tar.ErrHeader
	}
	if len(sn) < maxNanoSecondDigits {
		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
	} else {
		sn = sn[:maxNanoSecondDigits] // Right truncate
	}
	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
	if len(ss) > 0 && ss[0] == '-' {
		return time.Unix(secs, -1*nsecs), nil // Negative correction
	}
	return time.Unix(secs, nsecs), nil
}

// formatPAXTime converts ts into a time of the form %d.%d as described in the
// PAX specification. This function is capable of negative timestamps.
func formatPAXTime(ts time.Time) (s string) {
	secs, nsecs := ts.Unix(), ts.Nanosecond()
	if nsecs == 0 {
		return strconv.FormatInt(secs, 10)
	}

	// If seconds is negative, then perform correction.
	sign := ""
	if secs < 0 {
		sign = "-"             // Remember sign
		secs = -(secs + 1)     // Add a second to secs
		nsecs = -(nsecs - 1e9) // Take that second away from nsecs
	}
	return strings.TrimRight(fmt.Sprintf("%s%d.%09d", sign, secs, nsecs), "0")
}
452  src/runtime/vendor/github.com/Microsoft/go-winio/backuptar/tar.go  (generated, vendored)
@@ -1,452 +0,0 @@
// +build windows
|
||||
|
||||
package backuptar
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/Microsoft/go-winio"
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
const (
|
||||
c_ISUID = 04000 // Set uid
|
||||
c_ISGID = 02000 // Set gid
|
||||
c_ISVTX = 01000 // Save text (sticky bit)
|
||||
c_ISDIR = 040000 // Directory
|
||||
c_ISFIFO = 010000 // FIFO
|
||||
c_ISREG = 0100000 // Regular file
|
||||
c_ISLNK = 0120000 // Symbolic link
|
||||
c_ISBLK = 060000 // Block special file
|
||||
c_ISCHR = 020000 // Character special file
|
||||
c_ISSOCK = 0140000 // Socket
|
||||
)
|
||||
|
||||
const (
|
||||
hdrFileAttributes = "MSWINDOWS.fileattr"
|
||||
hdrSecurityDescriptor = "MSWINDOWS.sd"
|
||||
hdrRawSecurityDescriptor = "MSWINDOWS.rawsd"
|
||||
hdrMountPoint = "MSWINDOWS.mountpoint"
|
||||
hdrEaPrefix = "MSWINDOWS.xattr."
|
||||
|
||||
hdrCreationTime = "LIBARCHIVE.creationtime"
|
||||
)
|
||||
|
||||
func writeZeroes(w io.Writer, count int64) error {
|
||||
buf := make([]byte, 8192)
|
||||
c := len(buf)
|
||||
for i := int64(0); i < count; i += int64(c) {
|
||||
if int64(c) > count-i {
|
||||
c = int(count - i)
|
||||
}
|
||||
_, err := w.Write(buf[:c])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error {
|
||||
curOffset := int64(0)
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bhdr.Id != winio.BackupSparseBlock {
|
||||
return fmt.Errorf("unexpected stream %d", bhdr.Id)
|
||||
}
|
||||
|
||||
// archive/tar does not support writing sparse files
|
||||
// so just write zeroes to catch up to the current offset.
|
||||
err = writeZeroes(t, bhdr.Offset-curOffset)
|
||||
if bhdr.Size == 0 {
|
||||
break
|
||||
}
|
||||
n, err := io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
curOffset = bhdr.Offset + n
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// BasicInfoHeader creates a tar header from basic file information.
|
||||
func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header {
|
||||
hdr := &tar.Header{
|
||||
Format: tar.FormatPAX,
|
||||
Name: filepath.ToSlash(name),
|
||||
Size: size,
|
||||
Typeflag: tar.TypeReg,
|
||||
ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()),
|
||||
ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()),
|
||||
AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()),
|
||||
PAXRecords: make(map[string]string),
|
||||
}
|
||||
hdr.PAXRecords[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes)
|
||||
hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds()))
|
||||
|
||||
if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 {
|
||||
hdr.Mode |= c_ISDIR
|
||||
hdr.Size = 0
|
||||
hdr.Typeflag = tar.TypeDir
|
||||
}
|
||||
return hdr
|
||||
}
|
||||
|
||||
// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
|
||||
//
|
||||
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
|
||||
//
|
||||
// The additional Win32 metadata is:
|
||||
//
|
||||
// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value
|
||||
//
|
||||
// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format
|
||||
//
|
||||
// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink)
|
||||
func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error {
|
||||
name = filepath.ToSlash(name)
|
||||
hdr := BasicInfoHeader(name, size, fileInfo)
|
||||
|
||||
// If r can be seeked, then this function is two-pass: pass 1 collects the
|
||||
// tar header data, and pass 2 copies the data stream. If r cannot be
|
||||
// seeked, then some header data (in particular EAs) will be silently lost.
|
||||
var (
|
||||
restartPos int64
|
||||
err error
|
||||
)
|
||||
sr, readTwice := r.(io.Seeker)
|
||||
if readTwice {
|
||||
if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil {
|
||||
readTwice = false
|
||||
}
|
||||
}
|
||||
|
||||
br := winio.NewBackupStreamReader(r)
|
||||
var dataHdr *winio.BackupHeader
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupData:
|
||||
hdr.Mode |= c_ISREG
|
||||
if !readTwice {
|
||||
dataHdr = bhdr
|
||||
}
|
||||
case winio.BackupSecurity:
|
||||
sd, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd)
|
||||
|
||||
case winio.BackupReparseData:
|
||||
hdr.Mode |= c_ISLNK
|
||||
hdr.Typeflag = tar.TypeSymlink
|
||||
reparseBuffer, err := ioutil.ReadAll(br)
|
||||
rp, err := winio.DecodeReparsePoint(reparseBuffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rp.IsMountPoint {
|
||||
hdr.PAXRecords[hdrMountPoint] = "1"
|
||||
}
|
||||
hdr.Linkname = rp.Target
|
||||
|
||||
case winio.BackupEaData:
|
||||
eab, err := ioutil.ReadAll(br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
eas, err := winio.DecodeExtendedAttributes(eab)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, ea := range eas {
|
||||
// Use base64 encoding for the binary value. Note that there
|
||||
// is no way to encode the EA's flags, since their use doesn't
|
||||
// make any sense for persisted EAs.
|
||||
hdr.PAXRecords[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value)
|
||||
}
|
||||
|
||||
case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
// ignore these streams
|
||||
default:
|
||||
return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id)
|
||||
}
|
||||
}
|
||||
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if readTwice {
|
||||
// Get back to the data stream.
|
||||
if _, err = sr.Seek(restartPos, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
for dataHdr == nil {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bhdr.Id == winio.BackupData {
|
||||
dataHdr = bhdr
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if dataHdr != nil {
|
||||
// A data stream was found. Copy the data.
|
||||
if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 {
|
||||
if size != dataHdr.Size {
|
||||
return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size)
|
||||
}
|
||||
_, err = io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = copySparse(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Look for streams after the data stream. The only ones we handle are alternate data streams.
|
||||
// Other streams may have metadata that could be serialized, but the tar header has already
|
||||
// been written. In practice, this means that we don't get EA or TXF metadata.
|
||||
for {
|
||||
bhdr, err := br.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch bhdr.Id {
|
||||
case winio.BackupAlternateData:
|
||||
altName := bhdr.Name
|
||||
if strings.HasSuffix(altName, ":$DATA") {
|
||||
altName = altName[:len(altName)-len(":$DATA")]
|
||||
}
|
||||
if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 {
|
||||
hdr = &tar.Header{
|
||||
Format: hdr.Format,
|
||||
Name: name + altName,
|
||||
Mode: hdr.Mode,
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: bhdr.Size,
|
||||
ModTime: hdr.ModTime,
|
||||
AccessTime: hdr.AccessTime,
|
||||
ChangeTime: hdr.ChangeTime,
|
||||
}
|
||||
err = t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(t, br)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
} else {
|
||||
// Unsupported for now, since the size of the alternate stream is not present
|
||||
// in the backup stream until after the data has been read.
|
||||
return errors.New("tar of sparse alternate data streams is unsupported")
|
||||
}
|
||||
case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData:
|
||||
// ignore these streams
|
||||
default:
|
||||
return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
|
||||
// WriteTarFileFromBackupStream.
|
||||
func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
|
||||
name = hdr.Name
|
||||
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
|
||||
size = hdr.Size
|
||||
}
|
||||
fileInfo = &winio.FileBasicInfo{
|
||||
LastAccessTime: windows.NsecToFiletime(hdr.AccessTime.UnixNano()),
|
||||
LastWriteTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
|
||||
ChangeTime: windows.NsecToFiletime(hdr.ChangeTime.UnixNano()),
|
||||
// Default to ModTime, we'll pull hdrCreationTime below if present
|
||||
CreationTime: windows.NsecToFiletime(hdr.ModTime.UnixNano()),
|
||||
}
|
||||
if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
|
||||
attr, err := strconv.ParseUint(attrStr, 10, 32)
|
||||
if err != nil {
|
||||
return "", 0, nil, err
|
||||
}
|
||||
fileInfo.FileAttributes = uint32(attr)
|
||||
} else {
|
||||
if hdr.Typeflag == tar.TypeDir {
|
||||
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
|
||||
}
|
||||
}
|
||||
if creationTimeStr, ok := hdr.PAXRecords[hdrCreationTime]; ok {
|
||||
creationTime, err := parsePAXTime(creationTimeStr)
|
||||
if err != nil {
|
||||
return "", 0, nil, err
|
||||
}
|
||||
fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
|
||||
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
|
||||
// tar file that was not processed, or io.EOF is there are no more.
|
||||
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
|
||||
bw := winio.NewBackupStreamWriter(w)
|
||||
var sd []byte
|
||||
var err error
|
||||
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
|
||||
// by this library will have raw binary for the security descriptor.
|
||||
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
|
||||
sd, err = winio.SddlToSecurityDescriptor(sddl)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
|
||||
sd, err = base64.StdEncoding.DecodeString(sdraw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if len(sd) != 0 {
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupSecurity,
|
||||
Size: int64(len(sd)),
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(sd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
var eas []winio.ExtendedAttribute
|
||||
for k, v := range hdr.PAXRecords {
|
||||
if !strings.HasPrefix(k, hdrEaPrefix) {
|
||||
continue
|
||||
}
|
||||
data, err := base64.StdEncoding.DecodeString(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eas = append(eas, winio.ExtendedAttribute{
|
||||
Name: k[len(hdrEaPrefix):],
|
||||
Value: data,
|
||||
})
|
||||
}
|
||||
if len(eas) != 0 {
|
||||
eadata, err := winio.EncodeExtendedAttributes(eas)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupEaData,
|
||||
Size: int64(len(eadata)),
|
||||
}
|
||||
err = bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(eadata)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeSymlink {
|
||||
_, isMountPoint := hdr.PAXRecords[hdrMountPoint]
|
||||
rp := winio.ReparsePoint{
|
||||
Target: filepath.FromSlash(hdr.Linkname),
|
||||
IsMountPoint: isMountPoint,
|
||||
}
|
||||
reparse := winio.EncodeReparsePoint(&rp)
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupReparseData,
|
||||
Size: int64(len(reparse)),
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = bw.Write(reparse)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupData,
|
||||
Size: hdr.Size,
|
||||
}
|
||||
err := bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = io.Copy(bw, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Copy all the alternate data streams and return the next non-ADS header.
|
||||
for {
|
||||
ahdr, err := t.Next()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
|
||||
return ahdr, nil
|
||||
}
|
||||
bhdr := winio.BackupHeader{
|
||||
Id: winio.BackupAlternateData,
|
||||
Size: ahdr.Size,
|
||||
Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
|
||||
}
|
||||
err = bw.WriteHeader(&bhdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = io.Copy(bw, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
88  src/runtime/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go  (generated, vendored)
@@ -1,88 +0,0 @@
// Package ociwclayer provides functions for importing and exporting Windows
|
||||
// container layers from and to their OCI tar representation.
|
||||
package ociwclayer
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"io"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/Microsoft/go-winio/backuptar"
|
||||
"github.com/Microsoft/hcsshim"
|
||||
)
|
||||
|
||||
var driverInfo = hcsshim.DriverInfo{}
|
||||
|
||||
// ExportLayerToTar writes an OCI layer tar stream from the provided on-disk layer.
|
||||
// The caller must specify the parent layers, if any, ordered from lowest to
|
||||
// highest layer.
|
||||
//
|
||||
// The layer will be mounted for this process, so the caller should ensure that
|
||||
// it is not currently mounted.
|
||||
func ExportLayerToTar(ctx context.Context, w io.Writer, path string, parentLayerPaths []string) error {
|
||||
err := hcsshim.ActivateLayer(driverInfo, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
_ = hcsshim.DeactivateLayer(driverInfo, path)
|
||||
}()
|
||||
|
||||
// Prepare and unprepare the layer to ensure that it has been initialized.
|
||||
err = hcsshim.PrepareLayer(driverInfo, path, parentLayerPaths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = hcsshim.UnprepareLayer(driverInfo, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := hcsshim.NewLayerReader(driverInfo, path, parentLayerPaths)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = writeTarFromLayer(ctx, r, w)
|
||||
cerr := r.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return cerr
|
||||
}
|
||||
|
||||
func writeTarFromLayer(ctx context.Context, r hcsshim.LayerReader, w io.Writer) error {
|
||||
t := tar.NewWriter(w)
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
name, size, fileInfo, err := r.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if fileInfo == nil {
|
||||
// Write a whiteout file.
|
||||
hdr := &tar.Header{
|
||||
Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), whiteoutPrefix+filepath.Base(name))),
|
||||
}
|
||||
err := t.WriteHeader(hdr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return t.Close()
|
||||
}
|
148  src/runtime/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go  (generated, vendored)
@@ -1,148 +0,0 @@
package ociwclayer
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
winio "github.com/Microsoft/go-winio"
|
||||
"github.com/Microsoft/go-winio/backuptar"
|
||||
"github.com/Microsoft/hcsshim"
|
||||
)
|
||||
|
||||
const whiteoutPrefix = ".wh."
|
||||
|
||||
var (
|
||||
// mutatedFiles is a list of files that are mutated by the import process
|
||||
// and must be backed up and restored.
|
||||
mutatedFiles = map[string]string{
|
||||
"UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak",
|
||||
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak",
|
||||
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
|
||||
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
|
||||
}
|
||||
)
|
||||
|
||||
// ImportLayerFromTar reads a layer from an OCI layer tar stream and extracts it to the
|
||||
// specified path. The caller must specify the parent layers, if any, ordered
|
||||
// from lowest to highest layer.
|
||||
//
|
||||
// The caller must ensure that the thread or process has acquired backup and
|
||||
// restore privileges.
|
||||
//
|
||||
// This function returns the total size of the layer's files, in bytes.
|
||||
func ImportLayerFromTar(ctx context.Context, r io.Reader, path string, parentLayerPaths []string) (int64, error) {
|
||||
err := os.MkdirAll(path, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
w, err := hcsshim.NewLayerWriter(hcsshim.DriverInfo{}, path, parentLayerPaths)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
n, err := writeLayerFromTar(ctx, r, w, path)
|
||||
cerr := w.Close()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if cerr != nil {
|
||||
return 0, cerr
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func writeLayerFromTar(ctx context.Context, r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) {
|
||||
t := tar.NewReader(r)
|
||||
hdr, err := t.Next()
|
||||
totalSize := int64(0)
|
||||
buf := bufio.NewWriter(nil)
|
||||
for err == nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
base := path.Base(hdr.Name)
|
||||
if strings.HasPrefix(base, whiteoutPrefix) {
|
||||
name := path.Join(path.Dir(hdr.Name), base[len(whiteoutPrefix):])
|
||||
err = w.Remove(filepath.FromSlash(name))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
hdr, err = t.Next()
|
||||
} else if hdr.Typeflag == tar.TypeLink {
|
||||
err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
hdr, err = t.Next()
|
||||
} else {
|
||||
var (
|
||||
name string
|
||||
size int64
|
||||
fileInfo *winio.FileBasicInfo
|
||||
)
|
||||
name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
err = w.Add(filepath.FromSlash(name), fileInfo)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root)
|
||||
totalSize += size
|
||||
}
|
||||
}
|
||||
if err != io.EOF {
|
||||
return 0, err
|
||||
}
|
||||
return totalSize, nil
|
||||
}
|
||||
|
||||
// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and
|
||||
// writes it to a backup stream, and also saves any files that will be mutated
|
||||
// by the import layer process to a backup location.
|
||||
func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
|
||||
var bcdBackup *os.File
|
||||
var bcdBackupWriter *winio.BackupFileWriter
|
||||
if backupPath, ok := mutatedFiles[hdr.Name]; ok {
|
||||
bcdBackup, err = os.Create(filepath.Join(root, backupPath))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
cerr := bcdBackup.Close()
|
||||
if err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}()
|
||||
|
||||
bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
|
||||
defer func() {
|
||||
cerr := bcdBackupWriter.Close()
|
||||
if err == nil {
|
||||
err = cerr
|
||||
}
|
||||
}()
|
||||
|
||||
buf.Reset(io.MultiWriter(w, bcdBackupWriter))
|
||||
} else {
|
||||
buf.Reset(w)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
ferr := buf.Flush()
|
||||
if err == nil {
|
||||
err = ferr
|
||||
}
|
||||
}()
|
||||
|
||||
return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr)
|
||||
}
|
1  src/runtime/vendor/github.com/containerd/containerd/.gitattributes  (generated, vendored)
@@ -1 +0,0 @@
*.go text eol=lf
10  src/runtime/vendor/github.com/containerd/containerd/.gitignore  (generated, vendored)
@@ -1,10 +0,0 @@
/bin/
/man/
coverage.txt
profile.out
containerd.test
_site/
releases/*.tar.gz
releases/*.tar.gz.sha256sum
_output/
.vagrant/
27  src/runtime/vendor/github.com/containerd/containerd/.golangci.yml  (generated, vendored)
@@ -1,27 +0,0 @@
linters:
  enable:
    - structcheck
    - varcheck
    - staticcheck
    - unconvert
    - gofmt
    - goimports
    - golint
    - ineffassign
    - vet
    - unused
    - misspell
  disable:
    - errcheck

issues:
  include:
    - EXC0002

run:
  timeout: 3m
  skip-dirs:
    - api
    - design
    - docs
    - docs/man
125  src/runtime/vendor/github.com/containerd/containerd/.mailmap  (generated, vendored)
@@ -1,125 +0,0 @@
Abhinandan Prativadi <abhi@docker.com>
|
||||
Abhinandan Prativadi <abhi@docker.com> <aprativadi@gmail.com>
|
||||
Ace-Tang <aceapril@126.com>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.akihiro@lab.ntt.co.jp>
|
||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> <suda.kyoto@gmail.com>
|
||||
Allen Sun <shlallen1990@gmail.com> <allensun@AllenSundeMacBook-Pro.local>
|
||||
Alexander Morozov <lk4d4math@gmail.com> <lk4d4@docker.com>
|
||||
Antonio Ojea <antonio.ojea.garcia@gmail.com> <aojea@redhat.com>
|
||||
Amit Krishnan <krish.amit@gmail.com> <amit.krishnan@oracle.com>
|
||||
Andrei Vagin <avagin@virtuozzo.com> <avagin@openvz.org>
|
||||
Andrey Kolomentsev <andrey.kolomentsev@gmail.com>
|
||||
Arnaud Porterie <icecrime@gmail.com>
|
||||
Arnaud Porterie <icecrime@gmail.com> <arnaud.porterie@docker.com>
|
||||
Bob Mader <swapdisk@users.noreply.github.com>
|
||||
Boris Popovschi <zyqsempai@mail.ru>
|
||||
Bowen Yan <loneybw@gmail.com>
|
||||
Brent Baude <bbaude@redhat.com>
|
||||
Cao Zhihao <caozhihao@163.com>
|
||||
Cao Zhihao <caozhihao@163.com> <caozhihao.xd@bytedance.com>
|
||||
Carlos Eduardo <me@carlosedp.com> <me@carlosedp.com>
|
||||
chenxiaoyu <weixian.cxy@alibaba-inc.com>
|
||||
Cory Bennett <cbennett@netflix.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com>
|
||||
Cristian Staretu <cristian.staretu@gmail.com> <unclejack@users.noreply.github.com>
|
||||
Daniel Dao <dqminh89@gmail.com>
|
||||
Derek McGowan <derek@mcg.dev> <derek@mcgstyle.net>
|
||||
Edgar Lee <edgarl@netflix.com> <edgar.lee@docker.com>
|
||||
Eric Ernst <eric@amperecomputing.com> <eric.ernst@intel.com>
|
||||
Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen@linux.alibaba.com>
|
||||
Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen.rz@alibaba-linux.com>
|
||||
Eric Ren <renzhen.rz@linux.alibaba.com> <renzhen.rz@alibaba-inc.com>
|
||||
Fahed Dorgaa <fahed.dorgaa@gmail.com>
|
||||
Frank Yang <yyb196@gmail.com>
|
||||
Fupan Li <lifupan@gmail.com>
|
||||
Fupan Li <lifupan@gmail.com> <fupan.lfp@antfin.com>
|
||||
Georgia Panoutsakopoulou <gpanoutsak@gmail.com>
|
||||
Guangming Wang <guangming.wang@daocloud.io>
|
||||
Haiyan Meng <haiyanmeng@google.com>
|
||||
Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
|
||||
Hu Shuai <hus.fnst@cn.fujitsu.com>
|
||||
Hu Shuai <hus.fnst@cn.fujitsu.com> <hushuaiia@qq.com>
|
||||
Iceber Gu <wei.cai-nat@daocloud.io>
|
||||
Jaana Burcu Dogan <burcujdogan@gmail.com> <jbd@golang.org>
|
||||
Jess Valarezo <valarezo.jessica@gmail.com>
|
||||
Jess Valarezo <valarezo.jessica@gmail.com> <jessica.valarezo@docker.com>
|
||||
Jian Liao <jliao@alauda.io>
|
||||
Jian Liao <jliao@alauda.io> <liaojian@Dabllo.local>
|
||||
Ji'an Liu <anthonyliu@zju.edu.cn>
|
||||
Jie Zhang <iamkadisi@163.com>
|
||||
John Howard <github@lowenna.com>
|
||||
John Howard <github@lowenna.com> <john.howard@microsoft.com>
|
||||
John Howard <github@lowenna.com> <jhoward@microsoft.com>
|
||||
John Howard <github@lowenna.com> <jhowardmsft@users.noreply.github.com>
|
||||
Lorenz Brun <lorenz@brun.one> <lorenz@nexantic.com>
|
||||
Luc Perkins <lucperkins@gmail.com>
|
||||
Julien Balestra <julien.balestra@datadoghq.com>
|
||||
Jun Lin Chen <webmaster@mc256.com> <1913688+mc256@users.noreply.github.com>
|
||||
Justin Cormack <justin.cormack@docker.com> <justin@specialbusservice.com>
|
||||
Justin Terry <juterry@microsoft.com>
|
||||
Justin Terry <juterry@microsoft.com> <jterry75@users.noreply.github.com>
|
||||
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
|
||||
Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||
Kevin Xu <cming.xu@gmail.com>
|
||||
Kohei Tokunaga <ktokunaga.mail@gmail.com>
|
||||
Krasi Georgiev <krasi.root@gmail.com> <krasi@vip-consult.solutions>
|
||||
Lantao Liu <lantaol@google.com>
|
||||
Lantao Liu <lantaol@google.com> <taotaotheripper@gmail.com>
|
||||
Li Yuxuan <liyuxuan04@baidu.com> <darfux@163.com>
|
||||
Lifubang <lifubang@aliyun.com> <lifubang@acmcoder.com>
|
||||
Lu Jingxiao <lujingxiao@huawei.com>
|
||||
Maksym Pavlenko <pavlenko.maksym@gmail.com> <makpav@amazon.com>
|
||||
Maksym Pavlenko <pavlenko.maksym@gmail.com> <mxpv@apple.com>
|
||||
Mario Hros <spam@k3a.me>
|
||||
Mario Hros <spam@k3a.me> <root@k3a.me>
|
||||
Mario Macias <mariomac@gmail.com> <mmacias@newrelic.com>
|
||||
Mark Gordon <msg555@gmail.com>
|
||||
Michael Crosby <crosbymichael@gmail.com> <michael@thepasture.io>
|
||||
Michael Katsoulis <michaelkatsoulis88@gmail.com>
|
||||
Mike Brown <brownwm@us.ibm.com> <mikebrow@users.noreply.github.com>
|
||||
Mohammad Asif Siddiqui <mohammad.asif.siddiqui1@huawei.com>
|
||||
Nishchay Kumar <mrawesomenix@gmail.com>
|
||||
Oliver Stenbom <oliver@stenbom.eu> <ostenbom@pivotal.io>
|
||||
Phil Estes <estesp@gmail.com> <estesp@linux.vnet.ibm.com>
|
||||
Phil Estes <estesp@gmail.com> <estesp@amazon.com>
|
||||
Reid Li <reid.li@utexas.edu>
|
||||
Robin Winkelewski <w9ncontact@gmail.com>
|
||||
Ross Boucher <rboucher@gmail.com>
|
||||
Ruediger Maass <ruediger.maass@de.ibm.com>
|
||||
Rui Cao <ruicao@alauda.io> <ruicao@alauda.io>
|
||||
Sakeven Jiang <jc5930@sina.cn>
|
||||
Samuel Karp <me@samuelkarp.com> <skarp@amazon.com>
|
||||
Seth Pellegrino <spellegrino@newrelic.com> <30441101+sethp-nr@users.noreply.github.com>
|
||||
Shaobao Feng <shaobao.feng@huawei.com>
|
||||
Shengbo Song <thomassong@tencent.com>
|
||||
Shengjing Zhu <i@zhsj.me> <zhsj@debian.org>
|
||||
Siddharth Yadav <sedflix@gmail.com>
|
||||
SiYu Zhao <d.chaser.zsy@gmail.com>
|
||||
Stefan Berger <stefanb@us.ibm.com> <stefanb@linux.ibm.com>
|
||||
Stefan Berger <stefanb@us.ibm.com> <stefanb@linux.vnet.ibm.com>
|
||||
Stephen J Day <stevvooe@gmail.com> <stephen.day@getcruise.com>
|
||||
Stephen J Day <stevvooe@gmail.com> <stevvooe@users.noreply.github.com>
|
||||
Stephen J Day <stevvooe@gmail.com> <stephen.day@docker.com>
|
||||
Sudeesh John <sudeesh@linux.vnet.ibm.com>
|
||||
Su Fei <fesu@ebay.com> <fesu@ebay.com>
|
||||
Su Xiaolin <linxxnil@126.com>
|
||||
Ted Yu <yuzhihong@gmail.com>
|
||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||
Wade Lee <weidonglee27@gmail.com>
|
||||
Wade Lee <weidonglee27@gmail.com> <weidonglee29@gmail.com>
|
||||
Wade Lee <weidonglee27@gmail.com> <21621232@zju.edu.cn>
|
||||
Wei Fu <fuweid89@gmail.com>
|
||||
Wei Fu <fuweid89@gmail.com> <fhfuwei@163.com>
|
||||
Xiaodong Zhang <a4012017@sina.com>
|
||||
Xuean Yan <yan.xuean@zte.com.cn>
|
||||
Yue Zhang <zy675793960@yeah.net>
|
||||
Yuxing Liu <starnop@163.com>
|
||||
Zhang Wei <zhangwei555@huawei.com>
|
||||
zhangyadong <zhangyadong.0808@bytedance.com>
|
||||
Zhenguang Zhu <zhengguang.zhu@daocloud.io>
|
||||
Zhiyu Li <payall4u@qq.com>
|
||||
Zhiyu Li <payall4u@qq.com> <404977848@qq.com>
|
||||
Zhongming Chang<zhongming.chang@daocloud.io>
|
||||
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
||||
Zhoulin Xie <zhoulin.xie@daocloud.io> <42261994+JoeWrightss@users.noreply.github.com>
|
||||
张潇 <xiaozhang0210@hotmail.com>
|
35  src/runtime/vendor/github.com/containerd/containerd/.zuul.yaml  (generated, vendored)
@@ -1,35 +0,0 @@
- project:
    name: containerd/containerd
    merge-mode: merge
    check:
      jobs:
        - containerd-build-arm64
        - containerd-test-arm64
        - containerd-integration-test-arm64

- job:
    name: containerd-build-arm64
    parent: init-test
    description: |
      Containerd build in openlab cluster.
    run: .zuul/playbooks/containerd-build/run.yaml
    nodeset: ubuntu-xenial-arm64-openlab
    voting: false

- job:
    name: containerd-test-arm64
    parent: init-test
    description: |
      Containerd unit tests in openlab cluster.
    run: .zuul/playbooks/containerd-build/unit-test.yaml
    nodeset: ubuntu-xenial-arm64-openlab
    voting: false

- job:
    name: containerd-integration-test-arm64
    parent: init-test
    description: |
      Containerd unit tests in openlab cluster.
    run: .zuul/playbooks/containerd-build/integration-test.yaml
    nodeset: ubuntu-xenial-arm64-openlab
    voting: false
48  src/runtime/vendor/github.com/containerd/containerd/ADOPTERS.md  (generated, vendored)
@@ -1,48 +0,0 @@
## containerd Adopters
|
||||
|
||||
A non-exhaustive list of containerd adopters is provided below.
|
||||
|
||||
**_Docker/Moby engine_** - Containerd began life prior to its CNCF adoption as a lower-layer
|
||||
runtime manager for `runc` processes below the Docker engine. Continuing today, containerd
|
||||
has extremely broad production usage as a component of the [Docker engine](https://github.com/docker/docker-ce)
|
||||
stack. Note that this includes any use of the open source [Moby engine project](https://github.com/moby/moby);
|
||||
including the Balena project listed below.
|
||||
|
||||
**_[IBM Cloud Kubernetes Service (IKS)](https://www.ibm.com/cloud/container-service)_** - offers containerd as the CRI runtime for v1.11 and higher versions.
|
||||
|
||||
**_[IBM Cloud Private (ICP)](https://www.ibm.com/cloud/private)_** - IBM's on-premises cloud offering has containerd as a "tech preview" CRI runtime for the Kubernetes offered within this product for the past two releases, and plans to fully migrate to containerd in a future release.
|
||||
|
||||
**_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - offers containerd as the CRI runtime in **beta** for recent versions of Kubernetes.
|
||||
|
||||
**_[AWS Fargate](https://aws.amazon.com/fargate)_** - uses containerd + Firecracker (noted below) as the runtime and isolation technology for containers run in the Fargate platform. Fargate is a serverless, container-native compute offering from Amazon Web Services.
|
||||
|
||||
**_Cloud Foundry_** - The [Guardian container manager](https://github.com/cloudfoundry/guardian) for CF has been using OCI runC directly with additional code from CF managing the container image and filesystem interactions, but have recently migrated to use containerd as a replacement for the extra code they had written around runC.
|
||||
|
||||
**_Alibaba's PouchContainer_** - The Alibaba [PouchContainer](https://github.com/alibaba/pouch) project uses containerd as its runtime for a cloud native offering that has unique isolation and image distribution capabilities.
|
||||
|
||||
**_Rancher's k3s project_** - Rancher Labs [k3s](https://github.com/rancher/k3s) is a lightweight Kubernetes distribution; in their words: "Easy to install, half the memory, all in a binary less than 40mb." k8s uses containerd as the embedded runtime for this popular lightweight Kubernetes variant.
|
||||
|
||||
**_Rancher's Rio project_** - Rancher Labs [Rio](https://github.com/rancher/rio) project uses containerd as the runtime for a combined Kubernetes, Istio, and container "Cloud Native Container Distribution" platform.
|
||||
|
||||
**_Eliot_** - The [Eliot](https://github.com/ernoaapa/eliot) container project for IoT device container management uses containerd as the runtime.
|
||||
|
||||
**_Balena_** - Resin's [Balena](https://github.com/resin-os/balena) container engine, based on moby/moby but for edge, embedded, and IoT use cases, uses the containerd and runc stack in the same way that the Docker engine uses containerd.
|
||||
|
||||
**_LinuxKit_** - the Moby project's [LinuxKit](https://github.com/linuxkit/linuxkit) for building secure, minimal Linux OS images in a container-native model uses containerd as the core runtime for system and service containers.
|
||||
|
||||
**_BuildKit_** - The Moby project's [BuildKit](https://github.com/moby/buildkit) can use either runC or containerd as build execution backends for building container images. BuildKit support has also been built into the Docker engine in recent releases, making BuildKit provide the backend to the `docker build` command.
|
||||
|
||||
**_Azure acs-engine_** - Microsoft Azure's [acs-engine](https://github.com/Azure/acs-engine) open source project has customizable deployment of Kubernetes clusters, where containerd is a selectable container runtime. At some point in the future Azure's AKS service will default to use containerd as the CRI runtime for deployed Kubernetes clusters.
|
||||
|
||||
**_Amazon Firecracker_** - The AWS [Firecracker VMM project](http://firecracker-microvm.io/) has extended containerd with a new snapshotter and v2 shim to allow containerd to drive virtualized container processes via their VMM implementation. More details on their containerd integration are available in [their GitHub project](https://github.com/firecracker-microvm/firecracker-containerd).
|
||||
|
||||
**_Kata Containers_** - The [Kata containers](https://katacontainers.io/) lightweight-virtualized container runtime project integrates with containerd via a custom v2 shim implementation that drives the Kata container runtime.
|
||||
|
||||
**_D2iQ Konvoy_** - D2iQ Inc [Konvoy](https://d2iq.com/products/konvoy) product uses containerd as the container runtime for its Kubernetes distribution.
|
||||
|
||||
**_Inclavare Containers_** - [Inclavare Containers](https://github.com/alibaba/inclavare-containers) is an innovation of container runtime with the novel approach for launching protected containers in hardware-assisted Trusted Execution Environment (TEE) technology, aka Enclave, which can prevent the untrusted entity, such as Cloud Service Provider (CSP), from accessing the sensitive and confidential assets in use.
|
||||
|
||||
**_Other Projects_** - While the above list provides a cross-section of well known uses of containerd, the simplicity and clear API layer for containerd has inspired many smaller projects around providing simple container management platforms. Several examples of building higher layer functionality on top of the containerd base have come from various containerd community participants:
|
||||
- Michael Crosby's [boss](https://github.com/crosbymichael/boss) project,
|
||||
- Evan Hazlett's [stellar](https://github.com/ehazlett/stellar) project,
|
||||
- Paul Knopf's immutable Linux image builder project: [darch](https://github.com/godarch/darch).
|
279  src/runtime/vendor/github.com/containerd/containerd/BUILDING.md  (generated, vendored)
@@ -1,279 +0,0 @@
# Build containerd from source
|
||||
|
||||
This guide is useful if you intend to contribute on containerd. Thanks for your
|
||||
effort. Every contribution is very appreciated.
|
||||
|
||||
This doc includes:
|
||||
* [Build requirements](#build-requirements)
|
||||
* [Build the development environment](#build-the-development-environment)
|
||||
* [Build containerd](#build-containerd)
|
||||
* [Via docker container](#via-docker-container)
|
||||
* [Testing](#testing-containerd)
|
||||
|
||||
## Build requirements
|
||||
|
||||
To build the `containerd` daemon, and the `ctr` simple test client, the following build system dependencies are required:
|
||||
|
||||
* Go 1.13.x or above except 1.14.x
|
||||
* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
|
||||
* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via the build tag `no_btrfs`, removing this dependency.
|
||||
|
||||
## Build the development environment
|
||||
|
||||
First you need to setup your Go development environment. You can follow this
|
||||
guideline [How to write go code](https://golang.org/doc/code.html) and at the
|
||||
end you have `go` command in your `PATH`.
|
||||
|
||||
You need `git` to checkout the source code:
|
||||
|
||||
```sh
|
||||
git clone https://github.com/containerd/containerd
|
||||
```
|
||||
|
||||
For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.11.4 release for a 64-bit Linux host:
|
||||
|
||||
```
|
||||
$ wget -c https://github.com/google/protobuf/releases/download/v3.11.4/protoc-3.11.4-linux-x86_64.zip
|
||||
$ sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local
|
||||
```
|
||||
|
||||
`containerd` uses [Btrfs](https://en.wikipedia.org/wiki/Btrfs) it means that you
|
||||
need to satisfy these dependencies in your system:
|
||||
|
||||
* CentOS/Fedora: `yum install btrfs-progs-devel`
|
||||
* Debian/Ubuntu: `apt-get install btrfs-progs libbtrfs-dev`
|
||||
* Debian(before Buster)/Ubuntu(before 19.10): `apt-get install btrfs-tools`
|
||||
|
||||
At this point you are ready to build `containerd` yourself!
|
||||
|
||||
## Build runc
|
||||
|
||||
`runc` is the default container runtime used by `containerd` and is required to
|
||||
run containerd. While it is okay to download a runc binary and install that on
|
||||
the system, sometimes it is necessary to build runc directly when working with
|
||||
container runtime development. You can skip this step if you already have the
|
||||
correct version of `runc` installed.
|
||||
|
||||
`runc` requires `libseccomp`. You may need to install the missing dependencies:
|
||||
|
||||
* CentOS/Fedora: `yum install libseccomp libseccomp-devel`
|
||||
* Debian/Ubuntu: `apt-get install libseccomp libseccomp-dev`
|
||||
|
||||
|
||||
For the quick and dirty installation, you can use the following:
|
||||
|
||||
```
|
||||
git clone https://github.com/opencontainers/runc
|
||||
cd runc
|
||||
make
|
||||
sudo make install
|
||||
```
|
||||
|
||||
Make sure to follow the guidelines for versioning in [RUNC.md](/docs/RUNC.md) for the
|
||||
best results.
|
||||
|
||||
## Build containerd
|
||||
|
||||
`containerd` uses `make` to create a repeatable build flow. It means that you
|
||||
can run:
|
||||
|
||||
```
|
||||
cd containerd
|
||||
make
|
||||
```
|
||||
|
||||
This is going to build all the project binaries in the `./bin/` directory.
|
||||
|
||||
You can move them in your global path, `/usr/local/bin` with:
|
||||
|
||||
```sudo
|
||||
sudo make install
|
||||
```
|
||||
|
||||
When making any changes to the gRPC API, you can use the installed `protoc`
|
||||
compiler to regenerate the API generated code packages with:
|
||||
|
||||
```sudo
|
||||
make generate
|
||||
```
|
||||
|
||||
> *Note*: Several build tags are currently available:
|
||||
> * `no_btrfs`: A build tag disables building the btrfs snapshot driver.
|
||||
> * `no_cri`: A build tag disables building Kubernetes [CRI](http://blog.kubernetes.io/2016/12/container-runtime-interface-cri-in-kubernetes.html) support into containerd.
|
||||
> See [here](https://github.com/containerd/cri-containerd#build-tags) for build tags of CRI plugin.
|
||||
> * `no_devmapper`: A build tag disables building the device mapper snapshot driver.
|
||||
>
|
||||
> For example, adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
|
||||
> Makefile target will disable the btrfs driver within the containerd Go build.
|
||||
|
||||
Vendoring of external imports uses the [Go Modules](https://golang.org/ref/mod#vendoring). You need
|
||||
to use `go mod` command to modify the dependencies. After modifition, you should run `go mod tidy`
|
||||
and `go mod vendor` to ensure the `go.mod`, `go.sum` files and `vendor` directory are up to date.
|
||||
Changes to these files should become a single commit for a PR which relies on vendored updates.
|
||||
|
||||
Please refer to [RUNC.md](/docs/RUNC.md) for the currently supported version of `runc` that is used by containerd.
|
||||
|
||||
### Static binaries
|
||||
|
||||
You can build static binaries by providing a few variables to `make`:
|
||||
|
||||
```sudo
|
||||
make EXTRA_FLAGS="-buildmode pie" \
|
||||
EXTRA_LDFLAGS='-linkmode external -extldflags "-fno-PIC -static"' \
|
||||
BUILDTAGS="netgo osusergo static_build"
|
||||
```
|
||||
|
||||
> *Note*:
|
||||
> - static build is discouraged
|
||||
> - static containerd binary does not support loading shared object plugins (`*.so`)
|
||||
|
||||
# Via Docker container
|
||||
|
||||
The following instructions assume you are at the parent directory of containerd source directory.
|
||||
|
||||
## Build containerd
|
||||
|
||||
You can build `containerd` via a Linux-based Docker container.
|
||||
You can build an image from this `Dockerfile`:
|
||||
|
||||
```
|
||||
FROM golang
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y libbtrfs-dev
|
||||
```
|
||||
|
||||
Let's suppose that you built an image called `containerd/build`. From the
|
||||
containerd source root directory you can run the following command:
|
||||
|
||||
```sh
|
||||
docker run -it \
|
||||
-v ${PWD}/containerd:/go/src/github.com/containerd/containerd \
|
||||
-e GOPATH=/go \
|
||||
-w /go/src/github.com/containerd/containerd containerd/build sh
|
||||
```
|
||||
|
||||
This mounts `containerd` repository
|
||||
|
||||
You are now ready to [build](#build-containerd):
|
||||
|
||||
```sh
|
||||
make && make install
|
||||
```
|
||||
|
||||
## Build containerd and runc
|
||||
To have complete core container runtime, you will need both `containerd` and `runc`. It is possible to build both of these via Docker container.
|
||||
|
||||
You can use `git` to checkout `runc`:
|
||||
|
||||
```sh
|
||||
git clone https://github.com/opencontainers/runc
|
||||
```
|
||||
|
||||
We can build an image from this `Dockerfile`:
|
||||
|
||||
```sh
|
||||
FROM golang
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y libbtrfs-dev libseccomp-dev
|
||||
|
||||
```
|
||||
|
||||
In our Docker container we will build `runc` build, which includes
|
||||
[seccomp](https://en.wikipedia.org/wiki/seccomp), [SELinux](https://en.wikipedia.org/wiki/Security-Enhanced_Linux),
|
||||
and [AppArmor](https://en.wikipedia.org/wiki/AppArmor) support. Seccomp support
|
||||
in runc requires `libseccomp-dev` as a dependency (AppArmor and SELinux support
|
||||
do not require external libraries at build time). Refer to [RUNC.md](docs/RUNC.md)
|
||||
in the docs directory for details about building runc, and to learn about
|
||||
supported versions of `runc` as used by containerd.
|
||||
|
||||
Let's suppose you built an image called `containerd/build` from the above Dockerfile. You can run the following command:
|
||||
|
||||
```sh
|
||||
docker run -it --privileged \
|
||||
-v /var/lib/containerd \
|
||||
-v ${PWD}/runc:/go/src/github.com/opencontainers/runc \
|
||||
-v ${PWD}/containerd:/go/src/github.com/containerd/containerd \
|
||||
-e GOPATH=/go \
|
||||
-w /go/src/github.com/containerd/containerd containerd/build sh
|
||||
```
|
||||
|
||||
This mounts both `runc` and `containerd` repositories in our Docker container.
|
||||
|
||||
From within our Docker container let's build `containerd`:
|
||||
|
||||
```sh
|
||||
cd /go/src/github.com/containerd/containerd
|
||||
make && make install
|
||||
```
|
||||
|
||||
These binaries can be found in the `./bin` directory on your host.
`make install` will install the binaries into a directory on your `$PATH` (`/usr/local/bin` by default).
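A quick sanity check after installation (assuming the default `/usr/local/bin` prefix is on your `PATH`):

```sh
# Confirm the freshly installed binaries are the ones found on PATH.
which containerd ctr
containerd --version
```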
|
||||
|
||||
Next, let's build `runc`:
|
||||
|
||||
```sh
|
||||
cd /go/src/github.com/opencontainers/runc
|
||||
make && make install
|
||||
```
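If you want to be explicit about the feature set, runc's Makefile accepts build tags. This is only an illustrative sketch: the exact tag names (and whether they are still needed) vary between runc versions, so check the Makefile of your runc checkout.

```sh
# Build runc with an explicit set of feature build tags.
make BUILDTAGS='seccomp selinux apparmor' && make install
```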
|
||||
|
||||
For further details about building runc, refer to [RUNC.md](docs/RUNC.md) in the
|
||||
docs directory.
|
||||
|
||||
When working with `ctr`, the simple test client we just built, don't forget to start the daemon!
|
||||
|
||||
```sh
|
||||
containerd --config config.toml
|
||||
```
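If you do not already have a `config.toml`, containerd can emit its default configuration for you to start from:

```sh
# Write the default configuration, then start the daemon with it.
containerd config default > config.toml
containerd --config config.toml
```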
|
||||
|
||||
# Testing containerd
|
||||
|
||||
During the automated CI the unit tests and integration tests are run as part of the PR validation. As a developer you can run these tests locally by using any of the following `Makefile` targets:
|
||||
- `make test`: run all non-integration tests that do not require `root` privileges
|
||||
- `make root-test`: run all non-integration tests which require `root`
|
||||
- `make integration`: run all tests, including integration tests and those which require `root`. `TESTFLAGS_PARALLEL` can be used to control parallelism. For example, `TESTFLAGS_PARALLEL=1 make integration` will lead to a non-parallel execution. The default value of `TESTFLAGS_PARALLEL` is **8**. Example invocations of these targets are shown below.
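Illustrative invocations of the targets above; how you obtain `root` (here via `sudo`) depends on your environment:

```sh
# Unit tests that do not require root:
make test

# Unit tests that require root:
sudo make root-test

# Full integration run with parallelism disabled:
sudo TESTFLAGS_PARALLEL=1 make integration
```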
|
||||
|
||||
To execute a specific test or set of tests you can use the `go test` capabilities
|
||||
without using the `Makefile` targets. The following examples show how to specify a test
|
||||
name and also how to use the flag directly against `go test` to run root-requiring tests.
|
||||
|
||||
```sh
|
||||
# run the test <TEST_NAME>:
|
||||
go test -v -run "<TEST_NAME>" .
|
||||
# enable the root-requiring tests:
|
||||
go test -v -run . -test.root
|
||||
```
|
||||
|
||||
Example output from directly running `go test` to execute the `TestContainerList` test:
|
||||
```sh
|
||||
sudo go test -v -run "TestContainerList" . -test.root
|
||||
INFO[0000] running tests against containerd revision=f2ae8a020a985a8d9862c9eb5ab66902c2888361 version=v1.0.0-beta.2-49-gf2ae8a0
|
||||
=== RUN TestContainerList
|
||||
--- PASS: TestContainerList (0.00s)
|
||||
PASS
|
||||
ok github.com/containerd/containerd 4.778s
|
||||
```
|
||||
|
||||
## Additional tools
|
||||
|
||||
### containerd-stress
|
||||
In addition to `go test`-based testing executed via the `Makefile` targets, the `containerd-stress` tool is available and built with the `all` or `binaries` targets and installed during `make install`.
|
||||
|
||||
With this tool you can stress a running containerd daemon for a specified period of time, selecting a concurrency level to generate stress against the daemon. The following command is an example of having five workers running for two hours against a default containerd gRPC socket address:
|
||||
|
||||
```sh
|
||||
containerd-stress -c 5 -t 120
|
||||
```
|
||||
|
||||
For more information on this tool's options please run `containerd-stress --help`.
|
||||
|
||||
### bucketbench
|
||||
[Bucketbench](https://github.com/estesp/bucketbench) is an external tool which can be used to drive load against a container runtime, specifying a particular set of lifecycle operations to run with a specified amount of concurrency. Bucketbench is more focused on generating performance details than simply inducing load against containerd.
|
||||
|
||||
Bucketbench differs from the `containerd-stress` tool in a few ways:
|
||||
- Bucketbench has support for testing the Docker engine, the `runc` binary, and containerd 0.2.x (via `ctr`) and 1.0 (via the client library) branches.
|
||||
- Bucketbench is driven via a configuration file that allows specifying a list of lifecycle operations to execute. This can be used to generate detailed statistics per command (e.g. start, stop, pause, delete).
|
||||
- Bucketbench generates detailed reports and timing data at the end of the configured test run.
|
||||
|
||||
More details on how to install and run `bucketbench` are available at the [GitHub project page](https://github.com/estesp/bucketbench).
|
403
src/runtime/vendor/github.com/containerd/containerd/Makefile
generated
vendored
@ -1,403 +0,0 @@
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# Go command to use for build
|
||||
GO ?= go
|
||||
|
||||
# Root directory of the project (absolute path).
|
||||
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
|
||||
|
||||
# Base path used to install.
|
||||
DESTDIR ?= /usr/local
|
||||
TEST_IMAGE_LIST ?=
|
||||
|
||||
# Used to populate variables in version package.
|
||||
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
|
||||
REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)
|
||||
PACKAGE=github.com/containerd/containerd
|
||||
SHIM_CGO_ENABLED ?= 0
|
||||
|
||||
ifneq "$(strip $(shell command -v $(GO) 2>/dev/null))" ""
|
||||
GOOS ?= $(shell $(GO) env GOOS)
|
||||
GOARCH ?= $(shell $(GO) env GOARCH)
|
||||
else
|
||||
ifeq ($(GOOS),)
|
||||
# approximate GOOS for the platform if we don't have Go and GOOS isn't
|
||||
# set. We leave GOARCH unset, so that may need to be fixed.
|
||||
ifeq ($(OS),Windows_NT)
|
||||
GOOS = windows
|
||||
else
|
||||
UNAME_S := $(shell uname -s)
|
||||
ifeq ($(UNAME_S),Linux)
|
||||
GOOS = linux
|
||||
endif
|
||||
ifeq ($(UNAME_S),Darwin)
|
||||
GOOS = darwin
|
||||
endif
|
||||
ifeq ($(UNAME_S),FreeBSD)
|
||||
GOOS = freebsd
|
||||
endif
|
||||
endif
|
||||
else
|
||||
GOOS ?= $$GOOS
|
||||
GOARCH ?= $$GOARCH
|
||||
endif
|
||||
endif
|
||||
|
||||
ifndef GODEBUG
|
||||
EXTRA_LDFLAGS += -s -w
|
||||
DEBUG_GO_GCFLAGS :=
|
||||
DEBUG_TAGS :=
|
||||
else
|
||||
DEBUG_GO_GCFLAGS := -gcflags=all="-N -l"
|
||||
DEBUG_TAGS := static_build
|
||||
endif
|
||||
|
||||
WHALE = "🇩"
|
||||
ONI = "👹"
|
||||
|
||||
RELEASE=containerd-$(VERSION:v%=%).${GOOS}-${GOARCH}
|
||||
CRIRELEASE=cri-containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH}
|
||||
CRICNIRELEASE=cri-containerd-cni-$(VERSION:v%=%)-${GOOS}-${GOARCH}
|
||||
|
||||
PKG=github.com/containerd/containerd
|
||||
|
||||
# Project binaries.
|
||||
COMMANDS=ctr containerd containerd-stress
|
||||
MANPAGES=ctr.8 containerd.8 containerd-config.8 containerd-config.toml.5
|
||||
|
||||
ifdef BUILDTAGS
|
||||
GO_BUILDTAGS = ${BUILDTAGS}
|
||||
endif
|
||||
GO_BUILDTAGS ?=
|
||||
GO_BUILDTAGS += ${DEBUG_TAGS}
|
||||
GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(GO_BUILDTAGS)",)
|
||||
GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) $(EXTRA_LDFLAGS)'
|
||||
SHIM_GO_LDFLAGS=-ldflags '-X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PACKAGE) -extldflags "-static" $(EXTRA_LDFLAGS)'
|
||||
|
||||
# Project packages.
|
||||
PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /vendor/ | grep -v /integration)
|
||||
TEST_REQUIRES_ROOT_PACKAGES=$(filter \
|
||||
${PACKAGES}, \
|
||||
$(shell \
|
||||
for f in $$(git grep -l testutil.RequiresRoot | grep -v Makefile); do \
|
||||
d="$$(dirname $$f)"; \
|
||||
[ "$$d" = "." ] && echo "${PKG}" && continue; \
|
||||
echo "${PKG}/$$d"; \
|
||||
done | sort -u) \
|
||||
)
|
||||
|
||||
ifdef SKIPTESTS
|
||||
PACKAGES:=$(filter-out ${SKIPTESTS},${PACKAGES})
|
||||
TEST_REQUIRES_ROOT_PACKAGES:=$(filter-out ${SKIPTESTS},${TEST_REQUIRES_ROOT_PACKAGES})
|
||||
endif
|
||||
|
||||
#Replaces ":" (*nix), ";" (windows) with newline for easy parsing
|
||||
GOPATHS=$(shell echo ${GOPATH} | tr ":" "\n" | tr ";" "\n")
|
||||
|
||||
TESTFLAGS_RACE=
|
||||
GO_BUILD_FLAGS=
|
||||
# See Golang issue re: '-trimpath': https://github.com/golang/go/issues/13809
|
||||
GO_GCFLAGS=$(shell \
|
||||
set -- ${GOPATHS}; \
|
||||
echo "-gcflags=-trimpath=$${1}/src"; \
|
||||
)
|
||||
|
||||
BINARIES=$(addprefix bin/,$(COMMANDS))
|
||||
|
||||
#include platform specific makefile
|
||||
-include Makefile.$(GOOS)
|
||||
|
||||
# Flags passed to `go test`
|
||||
TESTFLAGS ?= $(TESTFLAGS_RACE) $(EXTRA_TESTFLAGS)
|
||||
TESTFLAGS_PARALLEL ?= 8
|
||||
|
||||
# Use this to replace `go test` with, for instance, `gotestsum`
|
||||
GOTEST ?= $(GO) test
|
||||
|
||||
OUTPUTDIR = $(join $(ROOTDIR), _output)
|
||||
CRIDIR=$(OUTPUTDIR)/cri
|
||||
|
||||
.PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release mandir install-man genman install-cri-deps cri-release cri-cni-release cri-integration install-deps bin/cri-integration.test
|
||||
.DEFAULT: default
|
||||
|
||||
all: binaries
|
||||
|
||||
check: proto-fmt ## run all linters
|
||||
@echo "$(WHALE) $@"
|
||||
GOGC=75 golangci-lint run
|
||||
|
||||
ci: check binaries checkprotos coverage coverage-integration ## to be used by the CI
|
||||
|
||||
AUTHORS: .mailmap .git/HEAD
|
||||
git log --format='%aN <%aE>' | sort -fu > $@
|
||||
|
||||
generate: protos
|
||||
@echo "$(WHALE) $@"
|
||||
@PATH="${ROOTDIR}/bin:${PATH}" $(GO) generate -x ${PACKAGES}
|
||||
|
||||
protos: bin/protoc-gen-gogoctrd ## generate protobuf
|
||||
@echo "$(WHALE) $@"
|
||||
@PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${PACKAGES}
|
||||
|
||||
check-protos: protos ## check if protobufs needs to be generated again
|
||||
@echo "$(WHALE) $@"
|
||||
@test -z "$$(git status --short | grep ".pb.go" | tee /dev/stderr)" || \
|
||||
((git diff | cat) && \
|
||||
(echo "$(ONI) please run 'make protos' when making changes to proto files" && false))
|
||||
|
||||
check-api-descriptors: protos ## check that protobuf changes aren't present.
|
||||
@echo "$(WHALE) $@"
|
||||
@test -z "$$(git status --short | grep ".pb.txt" | tee /dev/stderr)" || \
|
||||
((git diff $$(find . -name '*.pb.txt') | cat) && \
|
||||
(echo "$(ONI) please run 'make protos' when making changes to proto files and check-in the generated descriptor file changes" && false))
|
||||
|
||||
proto-fmt: ## check format of proto files
|
||||
@echo "$(WHALE) $@"
|
||||
@test -z "$$(find . -path ./vendor -prune -o -path ./protobuf/google/rpc -prune -o -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
|
||||
(echo "$(ONI) please indent proto files with tabs only" && false)
|
||||
@test -z "$$(find . -path ./vendor -prune -o -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \
|
||||
(echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false)
|
||||
|
||||
build: ## build the go packages
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${EXTRA_FLAGS} ${GO_LDFLAGS} ${PACKAGES}
|
||||
|
||||
test: ## run tests, except integration tests and tests that require root
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GOTEST) ${TESTFLAGS} ${PACKAGES}
|
||||
|
||||
root-test: ## run tests, except integration tests
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GOTEST) ${TESTFLAGS} ${TEST_REQUIRES_ROOT_PACKAGES} -test.root
|
||||
|
||||
integration: ## run integration tests
|
||||
@echo "$(WHALE) $@"
|
||||
@cd "${ROOTDIR}/integration/client" && $(GO) mod download && $(GOTEST) -v ${TESTFLAGS} -test.root -parallel ${TESTFLAGS_PARALLEL} .
|
||||
|
||||
# TODO integrate cri integration bucket with coverage
|
||||
bin/cri-integration.test:
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GO) test -c ./integration -o bin/cri-integration.test
|
||||
|
||||
cri-integration: binaries bin/cri-integration.test ## run cri integration tests
|
||||
@echo "$(WHALE) $@"
|
||||
@./script/test/cri-integration.sh
|
||||
@rm -rf bin/cri-integration.test
|
||||
|
||||
benchmark: ## run benchmarks tests
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GO) test ${TESTFLAGS} -bench . -run Benchmark -test.root
|
||||
|
||||
FORCE:
|
||||
|
||||
define BUILD_BINARY
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_LDFLAGS} ${GO_TAGS} ./$<
|
||||
endef
|
||||
|
||||
# Build a binary from a cmd.
|
||||
bin/%: cmd/% FORCE
|
||||
$(call BUILD_BINARY)
|
||||
|
||||
bin/containerd-shim: cmd/containerd-shim FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
|
||||
@echo "$(WHALE) bin/containerd-shim"
|
||||
@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim
|
||||
|
||||
bin/containerd-shim-runc-v1: cmd/containerd-shim-runc-v1 FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
|
||||
@echo "$(WHALE) bin/containerd-shim-runc-v1"
|
||||
@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runc-v1 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v1
|
||||
|
||||
bin/containerd-shim-runc-v2: cmd/containerd-shim-runc-v2 FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
|
||||
@echo "$(WHALE) bin/containerd-shim-runc-v2"
|
||||
@CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o bin/containerd-shim-runc-v2 ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim-runc-v2
|
||||
|
||||
binaries: $(BINARIES) ## build binaries
|
||||
@echo "$(WHALE) $@"
|
||||
|
||||
man: mandir $(addprefix man/,$(MANPAGES))
|
||||
@echo "$(WHALE) $@"
|
||||
|
||||
mandir:
|
||||
@mkdir -p man
|
||||
|
||||
# Kept for backwards compatibility
|
||||
genman: man/containerd.8 man/ctr.8
|
||||
|
||||
man/containerd.8: FORCE
|
||||
@echo "$(WHALE) $@"
|
||||
$(GO) run cmd/gen-manpages/main.go $(@F) $(@D)
|
||||
|
||||
man/ctr.8: FORCE
|
||||
@echo "$(WHALE) $@"
|
||||
$(GO) run cmd/gen-manpages/main.go $(@F) $(@D)
|
||||
|
||||
man/%: docs/man/%.md FORCE
|
||||
@echo "$(WHALE) $@"
|
||||
go-md2man -in "$<" -out "$@"
|
||||
|
||||
define installmanpage
|
||||
mkdir -p $(DESTDIR)/man/man$(2);
|
||||
gzip -c $(1) >$(DESTDIR)/man/man$(2)/$(3).gz;
|
||||
endef
|
||||
|
||||
install-man:
|
||||
@echo "$(WHALE) $@"
|
||||
$(foreach manpage,$(addprefix man/,$(MANPAGES)), $(call installmanpage,$(manpage),$(subst .,,$(suffix $(manpage))),$(notdir $(manpage))))
|
||||
|
||||
releases/$(RELEASE).tar.gz: $(BINARIES)
|
||||
@echo "$(WHALE) $@"
|
||||
@rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz
|
||||
@install -d releases/$(RELEASE)/bin
|
||||
@install $(BINARIES) releases/$(RELEASE)/bin
|
||||
@tar -czf releases/$(RELEASE).tar.gz -C releases/$(RELEASE) bin
|
||||
@rm -rf releases/$(RELEASE)
|
||||
|
||||
release: releases/$(RELEASE).tar.gz
|
||||
@echo "$(WHALE) $@"
|
||||
@cd releases && sha256sum $(RELEASE).tar.gz >$(RELEASE).tar.gz.sha256sum
|
||||
|
||||
# install of cri deps into release output directory
|
||||
ifeq ($(GOOS),windows)
|
||||
install-cri-deps: $(BINARIES)
|
||||
mkdir -p $(CRIDIR)
|
||||
DESTDIR=$(CRIDIR) script/setup/install-cni-windows
|
||||
cp bin/* $(CRIDIR)
|
||||
else
|
||||
install-cri-deps: $(BINARIES)
|
||||
@rm -rf ${CRIDIR}
|
||||
@install -d ${CRIDIR}/usr/local/bin
|
||||
@install -D -m 755 bin/* ${CRIDIR}/usr/local/bin
|
||||
@install -d ${CRIDIR}/opt/containerd/cluster
|
||||
@cp -r contrib/gce ${CRIDIR}/opt/containerd/cluster/
|
||||
@install -d ${CRIDIR}/etc/systemd/system
|
||||
@install -m 644 containerd.service ${CRIDIR}/etc/systemd/system
|
||||
echo "CONTAINERD_VERSION: '$(VERSION:v%=%)'" | tee ${CRIDIR}/opt/containerd/cluster/version
|
||||
|
||||
DESTDIR=$(CRIDIR) script/setup/install-runc
|
||||
DESTDIR=$(CRIDIR) script/setup/install-cni
|
||||
DESTDIR=$(CRIDIR) script/setup/install-critools
|
||||
DESTDIR=$(CRIDIR) script/setup/install-imgcrypt
|
||||
|
||||
@install -d $(CRIDIR)/bin
|
||||
@install $(BINARIES) $(CRIDIR)/bin
|
||||
endif
|
||||
|
||||
ifeq ($(GOOS),windows)
|
||||
releases/$(CRIRELEASE).tar.gz: install-cri-deps
|
||||
@echo "$(WHALE) $@"
|
||||
@cd $(CRIDIR) && tar -czf ../../releases/$(CRIRELEASE).tar.gz *
|
||||
|
||||
releases/$(CRICNIRELEASE).tar.gz: install-cri-deps
|
||||
@echo "$(WHALE) $@"
|
||||
@cd $(CRIDIR) && tar -czf ../../releases/$(CRICNIRELEASE).tar.gz *
|
||||
else
|
||||
releases/$(CRIRELEASE).tar.gz: install-cri-deps
|
||||
@echo "$(WHALE) $@"
|
||||
@tar -czf releases/$(CRIRELEASE).tar.gz -C $(CRIDIR) etc/crictl.yaml etc/systemd usr opt/containerd
|
||||
|
||||
releases/$(CRICNIRELEASE).tar.gz: install-cri-deps
|
||||
@echo "$(WHALE) $@"
|
||||
@tar -czf releases/$(CRICNIRELEASE).tar.gz -C $(CRIDIR) etc usr opt
|
||||
endif
|
||||
|
||||
cri-release: releases/$(CRIRELEASE).tar.gz
|
||||
@echo "$(WHALE) $@"
|
||||
@cd releases && sha256sum $(CRIRELEASE).tar.gz >$(CRIRELEASE).tar.gz.sha256sum && ln -sf $(CRIRELEASE).tar.gz cri-containerd.tar.gz
|
||||
|
||||
cri-cni-release: releases/$(CRICNIRELEASE).tar.gz
|
||||
@echo "$(WHALE) $@"
|
||||
@cd releases && sha256sum $(CRICNIRELEASE).tar.gz >$(CRICNIRELEASE).tar.gz.sha256sum && ln -sf $(CRICNIRELEASE).tar.gz cri-cni-containerd.tar.gz
|
||||
|
||||
clean: ## clean up binaries
|
||||
@echo "$(WHALE) $@"
|
||||
@rm -f $(BINARIES)
|
||||
@rm -f releases/*.tar.gz*
|
||||
@rm -rf $(OUTPUTDIR)
|
||||
@rm -rf bin/cri-integration.test
|
||||
|
||||
clean-test: ## clean up debris from previously failed tests
|
||||
@echo "$(WHALE) $@"
|
||||
$(eval containers=$(shell find /run/containerd/runc -mindepth 2 -maxdepth 3 -type d -exec basename {} \;))
|
||||
$(shell pidof containerd containerd-shim runc | xargs -r -n 1 kill -9)
|
||||
@( for container in $(containers); do \
|
||||
grep $$container /proc/self/mountinfo | while read -r mountpoint; do \
|
||||
umount $$(echo $$mountpoint | awk '{print $$5}'); \
|
||||
done; \
|
||||
find /sys/fs/cgroup -name $$container -print0 | xargs -r -0 rmdir; \
|
||||
done )
|
||||
@rm -rf /run/containerd/runc/*
|
||||
@rm -rf /run/containerd/fifo/*
|
||||
@rm -rf /run/containerd-test/*
|
||||
@rm -rf bin/cri-integration.test
|
||||
|
||||
install: ## install binaries
|
||||
@echo "$(WHALE) $@ $(BINARIES)"
|
||||
@mkdir -p $(DESTDIR)/bin
|
||||
@install $(BINARIES) $(DESTDIR)/bin
|
||||
|
||||
uninstall:
|
||||
@echo "$(WHALE) $@"
|
||||
@rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES)))
|
||||
|
||||
ifeq ($(GOOS),windows)
|
||||
install-deps:
|
||||
# TODO: need a script for hcshim something like containerd/cri/hack/install/windows/install-hcsshim.sh
|
||||
script/setup/install-critools
|
||||
script/setup/install-cni-windows
|
||||
else
|
||||
install-deps: ## install cri dependencies
|
||||
script/setup/install-seccomp
|
||||
script/setup/install-runc
|
||||
script/setup/install-critools
|
||||
script/setup/install-cni
|
||||
endif
|
||||
|
||||
coverage: ## generate coverprofiles from the unit tests, except tests that require root
|
||||
@echo "$(WHALE) $@"
|
||||
@rm -f coverage.txt
|
||||
@$(GO) test -i ${TESTFLAGS} ${PACKAGES} 2> /dev/null
|
||||
@( for pkg in ${PACKAGES}; do \
|
||||
$(GO) test ${TESTFLAGS} \
|
||||
-cover \
|
||||
-coverprofile=profile.out \
|
||||
-covermode=atomic $$pkg || exit; \
|
||||
if [ -f profile.out ]; then \
|
||||
cat profile.out >> coverage.txt; \
|
||||
rm profile.out; \
|
||||
fi; \
|
||||
done )
|
||||
|
||||
root-coverage: ## generate coverage profiles for unit tests that require root
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GO) test -i ${TESTFLAGS} ${TEST_REQUIRES_ROOT_PACKAGES} 2> /dev/null
|
||||
@( for pkg in ${TEST_REQUIRES_ROOT_PACKAGES}; do \
|
||||
$(GO) test ${TESTFLAGS} \
|
||||
-cover \
|
||||
-coverprofile=profile.out \
|
||||
-covermode=atomic $$pkg -test.root || exit; \
|
||||
if [ -f profile.out ]; then \
|
||||
cat profile.out >> coverage.txt; \
|
||||
rm profile.out; \
|
||||
fi; \
|
||||
done )
|
||||
|
||||
vendor: ## vendor
|
||||
@echo "$(WHALE) $@"
|
||||
@$(GO) mod tidy
|
||||
@$(GO) mod vendor
|
||||
|
||||
help: ## this help
|
||||
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort
|
22
src/runtime/vendor/github.com/containerd/containerd/Makefile.darwin
generated
vendored
@ -1,22 +0,0 @@
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
#darwin specific settings
|
||||
COMMANDS += containerd-shim
|
||||
|
||||
# amd64 supports go test -race
|
||||
ifeq ($(GOARCH),amd64)
|
||||
TESTFLAGS_RACE= -race
|
||||
endif
|
22
src/runtime/vendor/github.com/containerd/containerd/Makefile.freebsd
generated
vendored
@ -1,22 +0,0 @@
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
#freebsd specific settings
|
||||
COMMANDS += containerd-shim
|
||||
|
||||
# amd64 supports go test -race
|
||||
ifeq ($(GOARCH),amd64)
|
||||
TESTFLAGS_RACE= -race
|
||||
endif
|
31
src/runtime/vendor/github.com/containerd/containerd/Makefile.linux
generated
vendored
@ -1,31 +0,0 @@
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
#linux specific settings
|
||||
WHALE="+"
|
||||
ONI="-"
|
||||
COMMANDS += containerd-shim containerd-shim-runc-v1 containerd-shim-runc-v2
|
||||
|
||||
# check GOOS for cross compile builds
|
||||
ifeq ($(GOOS),linux)
|
||||
ifneq ($(GOARCH),$(filter $(GOARCH),mips mipsle mips64 mips64le ppc64 riscv64))
|
||||
GO_GCFLAGS += -buildmode=pie
|
||||
endif
|
||||
endif
|
||||
|
||||
# amd64 supports go test -race
|
||||
ifeq ($(GOARCH),amd64)
|
||||
TESTFLAGS_RACE= -race
|
||||
endif
|
28
src/runtime/vendor/github.com/containerd/containerd/Makefile.windows
generated
vendored
@ -1,28 +0,0 @@
|
||||
# Copyright The containerd Authors.
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
#Windows specific settings.
|
||||
WHALE = "+"
|
||||
ONI = "-"
|
||||
|
||||
# amd64 supports go test -race
|
||||
ifeq ($(GOARCH),amd64)
|
||||
TESTFLAGS_RACE= -race
|
||||
endif
|
||||
|
||||
BINARIES:=$(addsuffix .exe,$(BINARIES))
|
||||
|
||||
bin/%.exe: cmd/% FORCE
|
||||
$(BUILD_BINARY)
|
71
src/runtime/vendor/github.com/containerd/containerd/Protobuild.toml
generated
vendored
@ -1,71 +0,0 @@
|
||||
version = "unstable"
|
||||
generator = "gogoctrd"
|
||||
plugins = ["grpc", "fieldpath"]
|
||||
|
||||
# Control protoc include paths. Below are usually some good defaults, but feel
|
||||
# free to try it without them if it works for your project.
|
||||
[includes]
|
||||
# Include paths that will be added before all others. Typically, you want to
|
||||
# treat the root of the project as an include, but this may not be necessary.
|
||||
before = ["./protobuf"]
|
||||
|
||||
# Paths that should be treated as include roots in relation to the vendor
|
||||
# directory. These will be calculated with the vendor directory nearest the
|
||||
# target package.
|
||||
packages = ["github.com/gogo/protobuf", "github.com/gogo/googleapis"]
|
||||
|
||||
# Paths that will be added untouched to the end of the includes. We use
|
||||
# `/usr/local/include` to pickup the common install location of protobuf.
|
||||
# This is the default.
|
||||
after = ["/usr/local/include", "/usr/include"]
|
||||
|
||||
# This section maps protobuf imports to Go packages. These will become
|
||||
# `-M` directives in the call to the go protobuf generator.
|
||||
[packages]
|
||||
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
|
||||
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
|
||||
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/rpc/status.proto" = "github.com/gogo/googleapis/google/rpc"
|
||||
|
||||
[[overrides]]
|
||||
prefixes = ["github.com/containerd/containerd/api/events"]
|
||||
plugins = ["fieldpath"] # disable grpc for this package
|
||||
|
||||
[[overrides]]
|
||||
prefixes = ["github.com/containerd/containerd/api/services/ttrpc/events/v1"]
|
||||
plugins = ["ttrpc", "fieldpath"]
|
||||
|
||||
[[overrides]]
|
||||
# enable ttrpc and disable fieldpath and grpc for the shim
|
||||
prefixes = ["github.com/containerd/containerd/runtime/v1/shim/v1", "github.com/containerd/containerd/runtime/v2/task"]
|
||||
plugins = ["ttrpc"]
|
||||
|
||||
# Aggregate the API descriptors to lock down API changes.
|
||||
[[descriptors]]
|
||||
prefix = "github.com/containerd/containerd/api"
|
||||
target = "api/next.pb.txt"
|
||||
ignore_files = [
|
||||
"google/protobuf/descriptor.proto",
|
||||
"gogoproto/gogo.proto"
|
||||
]
|
||||
|
||||
# Lock down runc config
|
||||
[[descriptors]]
|
||||
prefix = "github.com/containerd/containerd/runtime/linux/runctypes"
|
||||
target = "runtime/linux/runctypes/next.pb.txt"
|
||||
ignore_files = [
|
||||
"google/protobuf/descriptor.proto",
|
||||
"gogoproto/gogo.proto"
|
||||
]
|
||||
|
||||
[[descriptors]]
|
||||
prefix = "github.com/containerd/containerd/runtime/v2/runc/options"
|
||||
target = "runtime/v2/runc/options/next.pb.txt"
|
||||
ignore_files = [
|
||||
"google/protobuf/descriptor.proto",
|
||||
"gogoproto/gogo.proto"
|
||||
]
|
332
src/runtime/vendor/github.com/containerd/containerd/README.md
generated
vendored
@ -1,332 +0,0 @@
|
||||

|
||||
|
||||
[](https://pkg.go.dev/github.com/containerd/containerd)
|
||||
[](https://github.com/containerd/containerd/actions?query=workflow%3ACI)
|
||||
[](https://github.com/containerd/containerd/actions?query=workflow%3ANightly)
|
||||
[](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
|
||||
[](https://goreportcard.com/report/github.com/containerd/containerd)
|
||||
[](https://bestpractices.coreinfrastructure.org/projects/1271)
|
||||
|
||||
containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc.
|
||||
|
||||
containerd is a member of CNCF with ['graduated'](https://landscape.cncf.io/selected=containerd) status.
|
||||
|
||||
containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users.
|
||||
|
||||

|
||||
|
||||
## Now Recruiting
|
||||
|
||||
We are a large inclusive OSS project that is welcoming help of any kind, shape or form:
|
||||
* Documentation help is needed to make the product easier to consume and extend.
|
||||
* We need OSS community outreach / organizing help to get the word out; manage
|
||||
and create messaging and educational content; and to help with social media, community forums/groups, and google groups.
|
||||
* We are actively inviting new [security advisors](https://github.com/containerd/project/blob/master/GOVERNANCE.md#security-advisors) to join the team.
|
||||
* New sub-projects are being created, core and non-core that could use additional development help.
|
||||
* Each of the [containerd projects](https://github.com/containerd) has a list of issues currently being worked on or that need help resolving.
|
||||
- If the issue has not already been assigned to someone, or has not made recent progress and you are interested, please inquire.
|
||||
- If you are interested in starting with a smaller / beginner level issue, look for issues with an `exp/beginner` tag, for example [containerd/containerd beginner issues.](https://github.com/containerd/containerd/issues?q=is%3Aissue+is%3Aopen+label%3Aexp%2Fbeginner)
|
||||
|
||||
## Getting Started
|
||||
|
||||
See our documentation on [containerd.io](https://containerd.io):
|
||||
* [for ops and admins](docs/ops.md)
|
||||
* [namespaces](docs/namespaces.md)
|
||||
* [client options](docs/client-opts.md)
|
||||
|
||||
See how to build containerd from source at [BUILDING](BUILDING.md).
|
||||
|
||||
If you are interested in trying out containerd see our example at [Getting Started](docs/getting-started.md).
|
||||
|
||||
## Nightly builds
|
||||
|
||||
There are nightly builds available for download [here](https://github.com/containerd/containerd/actions?query=workflow%3ANightly).
|
||||
Binaries are generated from `master` branch every night for `Linux` and `Windows`.
|
||||
|
||||
Please be aware: nightly builds might have critical bugs; they are not recommended for use in production and no support is provided.
|
||||
|
||||
## Runtime Requirements
|
||||
|
||||
Runtime requirements for containerd are very minimal. Most interactions with
|
||||
the Linux and Windows container feature sets are handled via [runc](https://github.com/opencontainers/runc) and/or
|
||||
OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft).
|
||||
The current required version of `runc` is described in [RUNC.md](docs/RUNC.md).
|
||||
|
||||
There are specific features
|
||||
used by containerd core code and snapshotters that will require a minimum kernel
|
||||
version on Linux. With the understood caveat of distro kernel versioning, a
|
||||
reasonable starting point for Linux is a minimum 4.x kernel version.
|
||||
|
||||
The overlay filesystem snapshotter, used by default, uses features that were
|
||||
finalized in the 4.x kernel series. If you choose to use btrfs, there may
|
||||
be more flexibility in kernel version (minimum recommended is 3.18), but it will
|
||||
require the btrfs kernel module and btrfs tools to be installed on your Linux
|
||||
distribution.
|
||||
|
||||
To use Linux checkpoint and restore features, you will need `criu` installed on
|
||||
your system. See more details in [Checkpoint and Restore](#checkpoint-and-restore).
|
||||
|
||||
Build requirements for developers are listed in [BUILDING](BUILDING.md).
|
||||
|
||||
## Features
|
||||
|
||||
### Client
|
||||
|
||||
containerd offers a full client package to help you integrate containerd into your platform.
|
||||
|
||||
```go
|
||||
|
||||
import (
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/cio"
|
||||
)
|
||||
|
||||
|
||||
func main() {
|
||||
client, err := containerd.New("/run/containerd/containerd.sock")
|
||||
defer client.Close()
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
### Namespaces
|
||||
|
||||
Namespaces allow multiple consumers to use the same containerd without conflicting with each other. It has the benefit of sharing content but still having separation with containers and images.
|
||||
|
||||
To set a namespace for requests to the API:
|
||||
|
||||
```go
|
||||
context = context.Background()
|
||||
// create a context for docker
|
||||
docker = namespaces.WithNamespace(context, "docker")
|
||||
|
||||
containerd, err := client.NewContainer(docker, "id")
|
||||
```
|
||||
|
||||
To set a default namespace on the client:
|
||||
|
||||
```go
|
||||
client, err := containerd.New(address, containerd.WithDefaultNamespace("docker"))
|
||||
```
|
||||
|
||||
### Distribution
|
||||
|
||||
```go
|
||||
// pull an image
|
||||
image, err := client.Pull(context, "docker.io/library/redis:latest")
|
||||
|
||||
// push an image
|
||||
err := client.Push(context, "docker.io/library/redis:latest", image.Target())
|
||||
```
|
||||
|
||||
### Containers
|
||||
|
||||
In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container.
|
||||
|
||||
```go
|
||||
redis, err := client.NewContainer(context, "redis-master")
|
||||
defer redis.Delete(context)
|
||||
```
|
||||
|
||||
### OCI Runtime Specification
|
||||
|
||||
containerd fully supports the OCI runtime specification for running containers. We have built in functions to help you generate runtime specifications based on images as well as custom parameters.
|
||||
|
||||
You can specify options when creating a container about how to modify the specification.
|
||||
|
||||
```go
|
||||
redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpec(oci.WithImageConfig(image)))
|
||||
```
|
||||
|
||||
### Root Filesystems
|
||||
|
||||
containerd allows you to use overlay or snapshot filesystems with your containers. It comes with built in support for overlayfs and btrfs.
|
||||
|
||||
```go
|
||||
// pull an image and unpack it into the configured snapshotter
|
||||
image, err := client.Pull(context, "docker.io/library/redis:latest", containerd.WithPullUnpack)
|
||||
|
||||
// allocate a new RW root filesystem for a container based on the image
|
||||
redis, err := client.NewContainer(context, "redis-master",
|
||||
containerd.WithNewSnapshot("redis-rootfs", image),
|
||||
containerd.WithNewSpec(oci.WithImageConfig(image)),
|
||||
)
|
||||
|
||||
// use a readonly filesystem with multiple containers
|
||||
for i := 0; i < 10; i++ {
|
||||
id := fmt.Sprintf("id-%d", i)
|
||||
container, err := client.NewContainer(ctx, id,
|
||||
containerd.WithNewSnapshotView(id, image),
|
||||
containerd.WithNewSpec(oci.WithImageConfig(image)),
|
||||
)
|
||||
}
|
||||
```
|
||||
|
||||
### Tasks
|
||||
|
||||
Taking a container object and turning it into a runnable process on a system is done by creating a new `Task` from the container. A task represents the runnable object within containerd.
|
||||
|
||||
```go
|
||||
// create a new task
|
||||
task, err := redis.NewTask(context, cio.NewCreator(cio.WithStdio))
|
||||
defer task.Delete(context)
|
||||
|
||||
// the task is now running and has a pid that can be used to setup networking
|
||||
// or other runtime settings outside of containerd
|
||||
pid := task.Pid()
|
||||
|
||||
// start the redis-server process inside the container
|
||||
err := task.Start(context)
|
||||
|
||||
// wait for the task to exit and get the exit status
|
||||
status, err := task.Wait(context)
|
||||
```
|
||||
|
||||
### Checkpoint and Restore
|
||||
|
||||
If you have [criu](https://criu.org/Main_Page) installed on your machine you can checkpoint and restore containers and their tasks. This allows you to clone and/or live migrate containers to other machines.
|
||||
|
||||
```go
|
||||
// checkpoint the task then push it to a registry
|
||||
checkpoint, err := task.Checkpoint(context)
|
||||
|
||||
err := client.Push(context, "myregistry/checkpoints/redis:master", checkpoint)
|
||||
|
||||
// on a new machine pull the checkpoint and restore the redis container
|
||||
checkpoint, err := client.Pull(context, "myregistry/checkpoints/redis:master")
|
||||
|
||||
redis, err = client.NewContainer(context, "redis-master", containerd.WithNewSnapshot("redis-rootfs", checkpoint))
|
||||
defer redis.Delete(context)
|
||||
|
||||
task, err = redis.NewTask(context, cio.NewCreator(cio.WithStdio), containerd.WithTaskCheckpoint(checkpoint))
|
||||
defer task.Delete(context)
|
||||
|
||||
err := task.Start(context)
|
||||
```
|
||||
|
||||
### Snapshot Plugins
|
||||
|
||||
In addition to the built-in Snapshot plugins in containerd, additional external
|
||||
plugins can be configured using GRPC. An external plugin is made available using
|
||||
the configured name and appears as a plugin alongside the built-in ones.
|
||||
|
||||
To add an external snapshot plugin, add the plugin to containerd's config file
|
||||
(by default at `/etc/containerd/config.toml`). The string following
|
||||
`proxy_plugin.` will be used as the name of the snapshotter and the address
|
||||
should refer to a socket with a GRPC listener serving containerd's Snapshot
|
||||
GRPC API. Remember to restart containerd for any configuration changes to take
|
||||
effect.
|
||||
|
||||
```
|
||||
[proxy_plugins]
|
||||
[proxy_plugins.customsnapshot]
|
||||
type = "snapshot"
|
||||
address = "/var/run/mysnapshotter.sock"
|
||||
```
|
||||
|
||||
See [PLUGINS.md](/docs/PLUGINS.md) for how to create plugins
|
||||
|
||||
### Releases and API Stability
|
||||
|
||||
Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
|
||||
of containerd components.
|
||||
|
||||
Downloadable 64-bit Intel/AMD binaries of all official releases are available on
|
||||
our [releases page](https://github.com/containerd/containerd/releases).
|
||||
|
||||
For other architectures and distribution support, you will find that many
|
||||
Linux distributions package their own containerd and provide it across several
|
||||
architectures, such as [Canonical's Ubuntu packaging](https://launchpad.net/ubuntu/bionic/+package/containerd).
|
||||
|
||||
#### Enabling command auto-completion
|
||||
|
||||
Starting with containerd 1.4, the urfave client feature for auto-creation of bash and zsh
|
||||
autocompletion data is enabled. To use the autocomplete feature in a bash shell for example, source
|
||||
the autocomplete/ctr file in your `.bashrc`, or manually like:
|
||||
|
||||
```
|
||||
$ source ./contrib/autocomplete/ctr
|
||||
```
|
||||
|
||||
#### Distribution of `ctr` autocomplete for bash and zsh
|
||||
|
||||
For bash, copy the `contrib/autocomplete/ctr` script into
|
||||
`/etc/bash_completion.d/` and rename it to `ctr`. The `zsh_autocomplete`
|
||||
file is also available and can be used similarly for zsh users.
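For example (the destination path is the conventional bash-completion location and may differ across distributions):

```sh
# Install the ctr bash completion script system-wide.
sudo cp contrib/autocomplete/ctr /etc/bash_completion.d/ctr
```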
|
||||
|
||||
Provide documentation to users to `source` this file into their shell if
|
||||
you don't place the autocomplete file in a location where it is automatically
|
||||
loaded for the user's shell environment.
|
||||
|
||||
### CRI
|
||||
|
||||
`cri` is a [containerd](https://containerd.io/) plugin implementation of the Kubernetes [container runtime interface (CRI)](https://github.com/kubernetes/cri-api/blob/master/pkg/apis/runtime/v1alpha2/api.proto). With it, you are able to use containerd as the container runtime for a Kubernetes cluster.
|
||||
|
||||

|
||||
|
||||
#### CRI Status
|
||||
|
||||
`cri` is a native plugin of containerd. Since containerd 1.1, the cri plugin is built into the release binaries and enabled by default.
|
||||
|
||||
> **Note:** As of containerd 1.5, the `cri` plugin is merged into the containerd/containerd repo. For example, the source code previously stored under [`containerd/cri/pkg`](https://github.com/containerd/cri/tree/release/1.4/pkg)
|
||||
was moved to [`containerd/containerd/pkg/cri` package](https://github.com/containerd/containerd/tree/master/pkg/cri).
|
||||
|
||||
The `cri` plugin has reached GA status, representing that it is:
|
||||
* Feature complete
|
||||
* Works with Kubernetes 1.10 and above
|
||||
* Passes all [CRI validation tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/cri-validation.md).
|
||||
* Passes all [node e2e tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/e2e-node-tests.md).
|
||||
* Passes all [e2e tests](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-testing/e2e-tests.md).
|
||||
|
||||
See results on the containerd k8s [test dashboard](https://k8s-testgrid.appspot.com/sig-node-containerd)
|
||||
|
||||
#### Validating Your `cri` Setup
|
||||
A Kubernetes incubator project, [cri-tools](https://github.com/kubernetes-sigs/cri-tools), includes programs for exercising CRI implementations. More importantly, cri-tools includes the program `critest` which is used for running [CRI Validation Testing](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-node/cri-validation.md).
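A hedged example of pointing `critest` at containerd's CRI endpoint; the socket path below is the common default and may differ on your system:

```sh
# Run the CRI validation suite against containerd.
sudo critest --runtime-endpoint unix:///run/containerd/containerd.sock
```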
|
||||
|
||||
#### CRI Guides
|
||||
* [Installing with Ansible and Kubeadm](contrib/ansible/README.md)
|
||||
* [For Non-Ansible Users, Performing a Custom Installation Using the Release Tarball and Kubeadm](docs/cri/installation.md)
|
||||
* [CRI Plugin Testing Guide](./docs/cri/testing.md)
|
||||
* [Debugging Pods, Containers, and Images with `crictl`](./docs/cri/crictl.md)
|
||||
* [Configuring `cri` Plugins](./docs/cri/config.md)
|
||||
* [Configuring containerd](https://github.com/containerd/containerd/blob/master/docs/man/containerd-config.8.md)
|
||||
|
||||
### Communication
|
||||
|
||||
For async communication and long running discussions please use issues and pull requests on the github repo.
|
||||
This will be the best place to discuss design and implementation.
|
||||
|
||||
For sync communication catch us in the `#containerd` and `#containerd-dev` slack channels on Cloud Native Computing Foundation's (CNCF) slack - `cloud-native.slack.com`. Everyone is welcome to join and chat. [Get Invite to CNCF slack.](https://slack.cncf.io)
|
||||
|
||||
### Security audit
|
||||
|
||||
A third party security audit was performed by Cure53 in 4Q2018; the [full report](docs/SECURITY_AUDIT.pdf) is available in our docs/ directory.
|
||||
|
||||
### Reporting security issues
|
||||
|
||||
__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
|
||||
|
||||
## Licenses
|
||||
|
||||
The containerd codebase is released under the [Apache 2.0 license](LICENSE).
|
||||
The README.md file, and files in the "docs" folder are licensed under the
|
||||
Creative Commons Attribution 4.0 International License. You may obtain a
|
||||
copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
|
||||
|
||||
## Project details
|
||||
|
||||
**containerd** is the primary open source project within the broader containerd GitHub repository.
|
||||
However, all projects within the repo have common maintainership, governance, and contributing
|
||||
guidelines which are stored in a `project` repository commonly for all containerd projects.
|
||||
|
||||
Please find all these core project documents, including the:
|
||||
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
|
||||
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
|
||||
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)
|
||||
|
||||
information in our [`containerd/project`](https://github.com/containerd/project) repository.
|
||||
|
||||
## Adoption
|
||||
|
||||
Interested to see who is using containerd? Are you using containerd in a project?
|
||||
Please add yourself via pull request to our [ADOPTERS.md](./ADOPTERS.md) file.
|
329
src/runtime/vendor/github.com/containerd/containerd/RELEASES.md
generated
vendored
@ -1,329 +0,0 @@
|
||||
# Versioning and Release
|
||||
|
||||
This document details the versioning and release plan for containerd. Stability
|
||||
is a top goal for this project and we hope that this document and the processes
|
||||
it entails will help to achieve that. It covers the release process, version
|
||||
numbering, backporting, API stability and support horizons.
|
||||
|
||||
If you rely on containerd, it would be good to spend time understanding the
|
||||
areas of the API that are and are not supported and how they impact your
|
||||
project in the future.
|
||||
|
||||
This document will be considered a living document. Supported timelines,
|
||||
backport targets and API stability guarantees will be updated here as they
|
||||
change.
|
||||
|
||||
If there is something that you require or this document leaves out, please
|
||||
reach out by [filing an issue](https://github.com/containerd/containerd/issues).
|
||||
|
||||
## Releases
|
||||
|
||||
Releases of containerd will be versioned using dotted triples, similar to
|
||||
[Semantic Version](http://semver.org/). For the purposes of this document, we
|
||||
will refer to the respective components of this triple as
|
||||
`<major>.<minor>.<patch>`. The version number may have additional information,
|
||||
such as alpha, beta and release candidate qualifications. Such releases will be
|
||||
considered "pre-releases".
|
||||
|
||||
### Major and Minor Releases
|
||||
|
||||
Major and minor releases of containerd will be made from master. Releases of
|
||||
containerd will be marked with GPG signed tags and announced at
|
||||
https://github.com/containerd/containerd/releases. The tag will be of the
|
||||
format `v<major>.<minor>.<patch>` and should be made with the command `git tag
|
||||
-s v<major>.<minor>.<patch>`.
|
||||
|
||||
After a minor release, a branch will be created, with the format
|
||||
`release/<major>.<minor>` from the minor tag. All further patch releases will
|
||||
be done from that branch. For example, once we release `v1.0.0`, a branch
|
||||
`release/1.0` will be created from that tag. All future patch releases will be
|
||||
done against that branch.
|
||||
|
||||
### Pre-releases
|
||||
|
||||
Pre-releases, such as alphas, betas and release candidates will be conducted
|
||||
from their source branch. For major and minor releases, these releases will be
|
||||
done from master. For patch releases, these pre-releases should be done within
|
||||
the corresponding release branch.
|
||||
|
||||
While pre-releases are done to assist in the stabilization process, no
|
||||
guarantees are provided.
|
||||
|
||||
### Upgrade Path
|
||||
|
||||
The upgrade path for containerd is such that the 0.0.x patch releases are
|
||||
always backward compatible with its major and minor version. Minor (0.x.0)
|
||||
version will always be compatible with the previous minor release. i.e. 1.2.0
|
||||
is backwards compatible with 1.1.0 and 1.1.0 is compatible with 1.0.0. There is
|
||||
no compatibility guarantees for upgrades that span multiple, _minor_ releases.
|
||||
For example, 1.0.0 to 1.2.0 is not supported. One should first upgrade to 1.1,
|
||||
then 1.2.
|
||||
|
||||
There are no compatibility guarantees with upgrades to _major_ versions. For
|
||||
example, upgrading from 1.0.0 to 2.0.0 may require resources to be migrated or
|
||||
integrations to change. Each major version will be supported for at least 1
|
||||
year with bug fixes and security patches.
|
||||
|
||||
### Next Release
|
||||
|
||||
The activity for the next release will be tracked in the
|
||||
[milestones](https://github.com/containerd/containerd/milestones). If your
|
||||
issue or PR is not present in a milestone, please reach out to the maintainers
|
||||
to create the milestone or add an issue or PR to an existing milestone.
|
||||
|
||||
### Support Horizon
|
||||
|
||||
Support horizons will be defined corresponding to a release branch, identified
|
||||
by `<major>.<minor>`. Release branches will be in one of several states:
|
||||
|
||||
- __*Next*__: The next planned release branch.
|
||||
- __*Active*__: The release branch is currently supported and accepting patches.
|
||||
- __*Extended*__: The release branch is only accepting security patches.
|
||||
- __*End of Life*__: The release branch is no longer supported and no new patches will be accepted.
|
||||
|
||||
Releases will be supported up to one year after a _minor_ release. This means that
|
||||
we will accept bug reports and backports to release branches until the end of
|
||||
life date. If no new _minor_ release has been made, that release will be
|
||||
considered supported until 6 months after the next _minor_ is released or one year,
|
||||
whichever is longer. Additionally, releases may have an extended security support
|
||||
period after the end of the active period to accept security backports. This
|
||||
timeframe will be decided by maintainers before the end of the active status.
|
||||
|
||||
The current state is available in the following table:
|
||||
|
||||
| Release | Status | Start | End of Life |
|
||||
|---------|-------------|------------------|-------------------|
|
||||
| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - |
|
||||
| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - |
|
||||
| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 |
|
||||
| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.3) | End of Life | December 5, 2017 | December 5, 2018 |
|
||||
| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 |
|
||||
| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 |
|
||||
| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 |
|
||||
| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.4) | Active | August 17, 2020 | max(August 17, 2021, release of 1.5.0 + 6 months) |
|
||||
| [1.5](https://github.com/containerd/containerd/milestone/30) | Next | TBD | max(TBD+1 year, release of 1.6.0 + 6 months) |
|
||||
|
||||
Note that branches and releases from before 1.0 may not follow these rules.
|
||||
|
||||
This table should be updated as part of the release preparation process.
|
||||
|
||||
### Backporting
|
||||
|
||||
Backports in containerd are community driven. As maintainers, we'll try to
|
||||
ensure that sensible bugfixes make it into _active_ release, but our main focus
|
||||
will be features for the next _minor_ or _major_ release. For the most part,
|
||||
this process is straightforward and we are here to help make it as smooth as
|
||||
possible.
|
||||
|
||||
If there are important fixes that need to be backported, please let us know in
|
||||
one of three ways:
|
||||
|
||||
1. Open an issue.
|
||||
2. Open a PR with cherry-picked change from master.
|
||||
3. Open a PR with a ported fix.
|
||||
|
||||
__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
|
||||
Remember that backported PRs must follow the versioning guidelines from this document.
|
||||
|
||||
Any release that is "active" can accept backports. Opening a backport PR is
|
||||
fairly straightforward. The steps differ depending on whether you are pulling
|
||||
a fix from master or need to draft a new commit specific to a particular
|
||||
branch.
|
||||
|
||||
To cherry pick a straightforward commit from master, simply use the cherry pick
|
||||
process:
|
||||
|
||||
1. Pick the branch to which you want the fix backported, usually in the format
|
||||
`release/<major>.<minor>`. The following will create a branch you can
|
||||
use to open a PR:
|
||||
|
||||
```console
|
||||
$ git checkout -b my-backport-branch release/<major>.<minor>
|
||||
```
|
||||
|
||||
2. Find the commit you want backported.
|
||||
3. Apply it to the release branch:
|
||||
|
||||
```console
|
||||
$ git cherry-pick -xsS <commit>
|
||||
```
|
||||
4. Push the branch and open up a PR against the _release branch_:
|
||||
|
||||
```
|
||||
$ git push -u stevvooe my-backport-branch
|
||||
```
|
||||
|
||||
Make sure to replace `stevvooe` with whatever fork you are using to open
|
||||
the PR. When you open the PR, make sure to switch `master` with whatever
|
||||
release branch you are targeting with the fix. Make sure the PR title has
|
||||
`[<release branch>]` prefixed. e.g.:
|
||||
|
||||
```
|
||||
[release/1.4] Fix foo in bar
|
||||
```
|
||||
|
||||
If there is no existing fix in master, you should first fix the bug in master,
|
||||
or ask a maintainer or contributor to do it via an issue. Once that PR is
|
||||
completed, open a PR using the process above.
|
||||
|
||||
Only when the bug is not seen in master and the fix must be made for the specific
|
||||
release branch should you open a PR with new code.

## Public API Stability

The following table provides an overview of the components covered by
containerd versions:

| Component        | Status   | Stabilized Version | Links         |
|------------------|----------|--------------------|---------------|
| GRPC API         | Stable   | 1.0                | [api/](api)   |
| Metrics API      | Stable   | 1.0                | -             |
| Runtime Shim API | Stable   | 1.2                | -             |
| Daemon Config    | Stable   | 1.0                | -             |
| Go client API    | Unstable | _future_           | [godoc](https://godoc.org/github.com/containerd/containerd) |
| CRI GRPC API     | Unstable | v1alpha2 _current_ | [cri-api](https://github.com/kubernetes/cri-api/tree/master/pkg/apis/runtime/v1alpha2) |
| `ctr` tool       | Unstable | Out of scope       | -             |

From the version stated in the above table, that component must adhere to the
stability constraints expected in release versions.

Unless explicitly stated here, components that are called out as unstable or
not covered may change in a future minor version. Breaking changes to
"unstable" components will be avoided in patch versions.

### GRPC API

The primary product of containerd is the GRPC API. As of the 1.0.0 release, the
GRPC API will not have any backwards incompatible changes without a _major_
version jump.

To ensure compatibility, we have collected the entire GRPC API symbol set into
a single file. At each _minor_ release of containerd, we will move the current
`next.pb.txt` file to a file named for the minor version, such as `1.0.pb.txt`,
enumerating the supported services and messages. See [api/](api) for details.

Note that new services may be added in _minor_ releases. New service methods
and new fields on messages may be added if they are optional.

`*.pb.txt` files are generated at each API release. They prevent unintentional changes
to the API by having a diff that the CI can run. These files are not intended to be
consumed or used by clients.
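
As a rough sketch of how such a check can work: regenerate the API and fail the
build if the committed symbol file changed. The `protos` Make target name is an
assumption here; the actual target may differ between releases.

```console
$ make protos                                  # regenerate API stubs and next.pb.txt (target name assumed)
$ git diff --exit-code -- api/next.pb.txt      # a non-zero exit means the GRPC surface changed
```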

### Metrics API

The metrics API that outputs Prometheus-style metrics will be versioned independently,
prefixed with the API version, e.g. `/v1/metrics`, `/v2/metrics`.

The metrics API version will be incremented when breaking changes are made to the Prometheus
output. New metrics can be added to the output in a backwards compatible manner without
bumping the API version.
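
For illustration, a minimal sketch of exposing and scraping the versioned
endpoint is shown below. The listen address `127.0.0.1:1338` is only an example,
and the sketch assumes a systemd-managed daemon; check your release's
configuration reference for the exact `[metrics]` options.

```console
$ cat <<'EOF' | sudo tee -a /etc/containerd/config.toml
[metrics]
  address = "127.0.0.1:1338"
EOF
$ sudo systemctl restart containerd
$ curl -s http://127.0.0.1:1338/v1/metrics | head   # Prometheus text format
```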

### Plugins API

containerd is based on a modular design where plugins are implemented to provide the core functionality.
Plugins implemented in-tree are supported by the containerd community unless explicitly specified as non-stable.
Out-of-tree plugins are not supported by the containerd maintainers.

Currently, the Windows runtime and snapshot plugins are not stable and not supported.
Please refer to the GitHub milestones for Windows support in a future release.

#### Error Codes

Error codes will not change in a patch release, unless a missing error code
causes a blocking bug. Error codes of type "unknown" may change to more
specific types in the future. Any error code that is not "unknown" that is
currently returned by a service will not change without a _major_ release or a
new version of the service.

If you find that an error code that is required by your application is not
well-documented in the protobuf service description or tested explicitly,
please file an issue and we will clarify.

#### Opaque Fields

Unless explicitly stated, the formats of certain fields may not be covered by
this guarantee and should be treated opaquely. For example, don't rely on the
format details of a URL field unless we explicitly say that the field will
follow that format.

### Go client API

The Go client API, documented in
[godoc](https://godoc.org/github.com/containerd/containerd), is currently
considered unstable. It is recommended to vendor the necessary components to
stabilize your project build. Note that because the Go API interfaces with the
GRPC API, clients written against a 1.0 Go API should remain compatible with
future 1.x series releases.

We intend to stabilize the API in a future release when more integrations have
been carried out.

Any changes to the API should be detectable at compile time, so upgrading will
be a matter of fixing compilation errors and moving from there.
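
In practice, "vendor the necessary components" usually means pinning a release
of the Go module and vendoring it into the consuming project, much as this
repository does. The version below is only a placeholder.

```console
$ go get github.com/containerd/containerd@v1.5.2   # pin a specific release (version is illustrative)
$ go mod tidy
$ go mod vendor                                    # copy the pinned client into vendor/
```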

### CRI GRPC API

The CRI (Container Runtime Interface) GRPC API is used by a Kubernetes kubelet
to communicate with a container runtime. This interface is used to manage
container lifecycles and container images. Currently this API is under
development and unstable across Kubernetes releases. Each Kubernetes release
only supports a single version of CRI and the CRI plugin only implements a
single version of CRI.

Each _minor_ release will support one version of CRI and at least one version
of Kubernetes. Once this API is stable, a _minor_ release will be compatible with
any version of Kubernetes which supports that version of CRI.
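
A quick way to exercise the CRI endpoint directly is `crictl`. The socket path
below is containerd's usual default and may differ on a given host.

```console
$ crictl --runtime-endpoint unix:///run/containerd/containerd.sock version
$ crictl --runtime-endpoint unix:///run/containerd/containerd.sock pods   # list CRI pod sandboxes
```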

### `ctr` tool

The `ctr` tool provides the ability to introspect and understand the containerd
API. It is not considered a primary offering of the project and is unsupported in
that sense. While we understand its value as a debug tool, it may be completely
refactored or have breaking changes in _minor_ releases.

Targeting `ctr` for feature additions reflects a misunderstanding of the containerd
architecture. Feature additions should focus on the client Go API, and additions to
`ctr` may or may not be accepted at the discretion of the maintainers.

We will do our best to not break compatibility in the tool in _patch_ releases.
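
As a debugging aid, typical invocations look like the sketch below; the `k8s.io`
namespace is simply the one the CRI plugin conventionally uses and is shown only
as an illustration.

```console
$ ctr version                                  # client and daemon versions
$ ctr --namespace k8s.io containers list       # inspect containers created through CRI
```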

### Daemon Configuration

The daemon's configuration file, commonly located in `/etc/containerd/config.toml`,
is versioned and backwards compatible. The `version` field in the config
file specifies the config's version. If no version number is specified inside
the config file then it is assumed to be a version 1 config and parsed as such.
Please use `version = 2` to enable the version 2 config, as version 1 has been
deprecated.
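
One way to see the version field in practice, assuming the `containerd config`
subcommands shipped with recent releases and a systemd-managed daemon:

```console
$ containerd config default | head -n 1          # the generated default config declares its version
$ containerd config default | sudo tee /etc/containerd/config.toml >/dev/null
$ sudo systemctl restart containerd
$ containerd config dump | grep '^version'       # configuration the running daemon resolves
```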

### Not Covered

As a general rule, anything not mentioned in this document is not covered by
the stability guidelines and may change in any release. Explicitly, this
pertains to this non-exhaustive list of components:

- File system layout
- Storage formats
- Snapshot formats

Between upgrades of subsequent _minor_ versions, we may migrate these formats.
Any outside processes relying on details of these file system layouts may break
in that process. Container root file systems will be maintained on upgrade.

### Exceptions

We may make exceptions in the interest of __security patches__. If a break is
required, it will be communicated clearly and the solution will be considered
against total impact.

## Deprecated features

The deprecated features are shown in the following table:

| Component                                                             | Deprecation release | Target release for removal | Recommendation                |
|-----------------------------------------------------------------------|---------------------|----------------------------|-------------------------------|
| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`)  | containerd v1.4     | containerd v2.0            | Use `io.containerd.runc.v2`   |
| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`)        | containerd v1.4     | containerd v2.0            | Use `io.containerd.runc.v2`   |
| config.toml `version = 1`                                             | containerd v1.5     | containerd v2.0            | Use config.toml `version = 2` |
| Built-in `aufs` snapshotter                                           | containerd v1.5     | containerd v2.0            | Use `overlayfs` snapshotter   |
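
To show what the recommended replacements look like together, here is a minimal
version 2 config sketch that selects the `io.containerd.runc.v2` shim for the
CRI-managed `runc` runtime. The plugin and runtime key names follow the
containerd 1.4/1.5 v2-config conventions and should be verified against the
release in use; overwriting the whole file is only for illustration.

```console
$ cat <<'EOF' | sudo tee /etc/containerd/config.toml
version = 2

[plugins."io.containerd.grpc.v1.cri".containerd]
  default_runtime_name = "runc"

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  runtime_type = "io.containerd.runc.v2"
EOF
$ sudo systemctl restart containerd
```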

src/runtime/vendor/github.com/containerd/containerd/ROADMAP.md (generated, vendored, 28 lines removed)
@@ -1,28 +0,0 @@

# containerd roadmap

containerd uses the issues and milestones to define its roadmap.
`ROADMAP.md` files are common in open source projects, but we find they quickly become out of date.
We opt for an issues and milestone approach that our maintainers and community can keep up-to-date as work is added and completed.

## Issues

Issues tagged with the `roadmap` label are high level roadmap items.
They are tasks and/or features that the containerd community wants completed.

Smaller issues and pull requests can reference back to the main roadmap issue that is tagged to help detail progress towards the overall goal.

## Milestones

Milestones define when an issue, pull request, and/or roadmap item is to be completed.
Issues are the what, milestones are the when.
Development is complex, therefore roadmap items can move between milestones depending on the remaining development and testing required to release a change.

## Searching

To find the roadmap items currently planned for containerd, you can filter on the `roadmap` label.

[Search Roadmap Items](https://github.com/containerd/containerd/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap)

After searching for roadmap items you can view what milestone they are scheduled to be completed in along with the progress.

[View Milestones](https://github.com/containerd/containerd/milestones)
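
The same query can also be run from the command line with the GitHub CLI, if
`gh` is available; this is only a convenience sketch, not part of the containerd
documentation.

```console
$ gh issue list --repo containerd/containerd --label roadmap --state open
$ gh api repos/containerd/containerd/milestones --jq '.[].title'
```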

src/runtime/vendor/github.com/containerd/containerd/SCOPE.md (generated, vendored, 57 lines removed)
@@ -1,57 +0,0 @@

# Scope and Principles

Having a clearly defined scope of a project is important for ensuring consistency and focus.
The following criteria will be used when reviewing pull requests, features, and changes for the project before being accepted.

### Components

Components should not have tight dependencies on each other so that they are able to be used independently.
The APIs for images and containers should be designed in a way that when used together the components have a natural flow but still remain useful independently.

An example of this design can be seen with the overlay filesystems and the container execution layer.
The execution layer and overlay filesystems can be used independently but if you were to use both, they share a common `Mount` struct that the filesystems produce and the execution layer consumes.

### Primitives

containerd should expose primitives to solve problems instead of building high level abstractions in the API.
A common example of this is how build would be implemented.
Instead of having a build API in containerd we should expose the lower level primitives that allow things required in build to work.
Breaking up the filesystem APIs to allow snapshots, copy functionality, and mounts allows people to implement build at higher levels with more flexibility.

### Extensibility and Defaults

For the various components in containerd there should be defined extension points where implementations can be swapped for alternatives.
The best example of this is that containerd will use `runc` from OCI as the default runtime in the execution layer but other runtimes conforming to the OCI Runtime specification can be easily added to containerd.

containerd will come with a default implementation for the various components.
These defaults will be chosen by the maintainers of the project and should not change unless better tech for that component comes out.
Additional implementations will not be accepted into the core repository and should be developed in a separate repository not maintained by the containerd maintainers.

## Scope

The following table specifies the various components of containerd and general features of container runtimes.
The table specifies whether or not the feature/component is in or out of scope.

| Name | Description | In/Out | Reason |
|------|-------------|--------|--------|
| execution | Provide an extensible execution layer for executing a container | in | Create, start, stop, pause, resume, exec, signal, delete |
| cow filesystem | Built-in functionality for overlay, aufs, and other copy-on-write filesystems for containers | in | |
| distribution | Having the ability to push and pull images as well as operations on images as a first class API object | in | containerd will fully support the management and retrieval of images |
| metrics | container-level metrics, cgroup stats, and OOM events | in | |
| networking | creation and management of network interfaces | out | Networking will be handled and provided to containerd via higher level systems. |
| build | Building images as a first class API | out | Build is a higher level tooling feature and can be implemented in many different ways on top of containerd |
| volumes | Volume management for external data | out | The API supports mounts, binds, etc. where all volume type systems can be built on top of containerd. |
| logging | Persisting container logs | out | Logging can be built on top of containerd because the container's STDIO will be provided to the clients and they can persist it any way they see fit. There is no io copying of container STDIO in containerd. |

containerd is scoped to a single host and makes assumptions based on that fact.
It can be used to build things like a node agent that launches containers but does not have any concepts of a distributed system.

containerd is designed to be embedded into a larger system, hence it only includes a barebone CLI (`ctr`) specifically for development and debugging purposes, with no mandate to be human-friendly, and no guarantee of interface stability over time.

### How is the scope changed?

The scope of this project is an allowed list.
If it's not mentioned as being in scope, it is out of scope.
For the scope of this project to change it requires a 100% vote from all maintainers of the project.

src/runtime/vendor/github.com/containerd/containerd/Vagrantfile (generated, vendored, 260 lines removed)
@@ -1,260 +0,0 @@
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
# Copyright The containerd Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Vagrantfile for cgroup2 and SELinux
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "fedora/34-cloud-base"
|
||||
memory = 4096
|
||||
cpus = 2
|
||||
config.vm.provider :virtualbox do |v|
|
||||
v.memory = memory
|
||||
v.cpus = cpus
|
||||
end
|
||||
config.vm.provider :libvirt do |v|
|
||||
v.memory = memory
|
||||
v.cpus = cpus
|
||||
end
|
||||
|
||||
# Disabled by default. To run:
|
||||
# vagrant up --provision-with=upgrade-packages
|
||||
# To upgrade only specific packages:
|
||||
# UPGRADE_PACKAGES=selinux vagrant up --provision-with=upgrade-packages
|
||||
#
|
||||
config.vm.provision "upgrade-packages", type: "shell", run: "never" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-upgrade-packages"
|
||||
sh.env = {
|
||||
'UPGRADE_PACKAGES': ENV['UPGRADE_PACKAGES'],
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
set -eux -o pipefail
|
||||
dnf -y upgrade ${UPGRADE_PACKAGES}
|
||||
SHELL
|
||||
end
|
||||
|
||||
# To re-run, installing CNI from RPM:
|
||||
# INSTALL_PACKAGES="containernetworking-plugins" vagrant up --provision-with=install-packages
|
||||
#
|
||||
config.vm.provision "install-packages", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-packages"
|
||||
sh.env = {
|
||||
'INSTALL_PACKAGES': ENV['INSTALL_PACKAGES'],
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
set -eux -o pipefail
|
||||
dnf -y install \
|
||||
container-selinux \
|
||||
curl \
|
||||
gcc \
|
||||
git \
|
||||
iptables \
|
||||
libseccomp-devel \
|
||||
libselinux-devel \
|
||||
lsof \
|
||||
make \
|
||||
${INSTALL_PACKAGES}
|
||||
SHELL
|
||||
end
|
||||
|
||||
# To re-run this provisioner, installing a different version of go:
|
||||
# GO_VERSION="1.14.6" vagrant up --provision-with=install-golang
|
||||
#
|
||||
config.vm.provision "install-golang", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-golang"
|
||||
sh.env = {
|
||||
'GO_VERSION': ENV['GO_VERSION'] || "1.16.6",
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
set -eux -o pipefail
|
||||
curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local
|
||||
cat >> /etc/environment <<EOF
|
||||
PATH=/usr/local/go/bin:$PATH
|
||||
EOF
|
||||
source /etc/environment
|
||||
cat >> /etc/profile.d/sh.local <<EOF
|
||||
GOPATH=\\$HOME/go
|
||||
PATH=\\$GOPATH/bin:\\$PATH
|
||||
export GOPATH PATH
|
||||
EOF
|
||||
source /etc/profile.d/sh.local
|
||||
SHELL
|
||||
end
|
||||
|
||||
config.vm.provision "setup-gopath", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-setup-gopath"
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
source /etc/environment
|
||||
source /etc/profile.d/sh.local
|
||||
set -eux -o pipefail
|
||||
mkdir -p ${GOPATH}/src/github.com/containerd
|
||||
ln -fnsv /vagrant ${GOPATH}/src/github.com/containerd/containerd
|
||||
SHELL
|
||||
end
|
||||
|
||||
config.vm.provision "install-runc", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-runc"
|
||||
sh.env = {
|
||||
'RUNC_FLAVOR': ENV['RUNC_FLAVOR'] || "runc",
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
source /etc/environment
|
||||
source /etc/profile.d/sh.local
|
||||
set -eux -o pipefail
|
||||
${GOPATH}/src/github.com/containerd/containerd/script/setup/install-runc
|
||||
type runc
|
||||
runc --version
|
||||
chcon -v -t container_runtime_exec_t $(type -ap runc)
|
||||
SHELL
|
||||
end
|
||||
|
||||
config.vm.provision "install-cni", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-cni"
|
||||
sh.env = {
|
||||
'CNI_BINARIES': 'bridge dhcp flannel host-device host-local ipvlan loopback macvlan portmap ptp tuning vlan',
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
source /etc/environment
|
||||
source /etc/profile.d/sh.local
|
||||
set -eux -o pipefail
|
||||
${GOPATH}/src/github.com/containerd/containerd/script/setup/install-cni
|
||||
PATH=/opt/cni/bin:$PATH type ${CNI_BINARIES} || true
|
||||
SHELL
|
||||
end
|
||||
|
||||
config.vm.provision "install-cri-tools", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-cri-tools"
|
||||
sh.env = {
|
||||
'CRI_TOOLS_VERSION': ENV['CRI_TOOLS_VERSION'] || '16911795a3c33833fa0ec83dac1ade3172f6989e',
|
||||
'GOBIN': '/usr/local/bin',
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
source /etc/environment
|
||||
source /etc/profile.d/sh.local
|
||||
set -eux -o pipefail
|
||||
${GOPATH}/src/github.com/containerd/containerd/script/setup/install-critools
|
||||
type crictl critest
|
||||
critest --version
|
||||
SHELL
|
||||
end
|
||||
|
||||
config.vm.provision "install-containerd", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-containerd"
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
source /etc/environment
|
||||
source /etc/profile.d/sh.local
|
||||
set -eux -o pipefail
|
||||
cd ${GOPATH}/src/github.com/containerd/containerd
|
||||
make BUILDTAGS="seccomp selinux no_aufs no_btrfs no_devmapper no_zfs" binaries install
|
||||
type containerd
|
||||
containerd --version
|
||||
chcon -v -t container_runtime_exec_t /usr/local/bin/{containerd,containerd-shim*}
|
||||
./script/setup/config-containerd
|
||||
SHELL
|
||||
end
|
||||
|
||||
config.vm.provision "install-gotestsum", type: "shell", run: "once" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-install-gotestsum"
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
source /etc/environment
|
||||
source /etc/profile.d/sh.local
|
||||
set -eux -o pipefail
|
||||
${GOPATH}/src/github.com/containerd/containerd/script/setup/install-gotestsum
|
||||
sudo cp ${GOPATH}/bin/gotestsum /usr/local/bin/
|
||||
SHELL
|
||||
end
|
||||
|
||||
# SELinux is Enforcing by default.
|
||||
# To set SELinux as Disabled on a VM that has already been provisioned:
|
||||
# SELINUX=Disabled vagrant up --provision-with=selinux
|
||||
# To set SELinux as Permissive on a VM that has already been provsioned
|
||||
# SELINUX=Permissive vagrant up --provision-with=selinux
|
||||
config.vm.provision "selinux", type: "shell", run: "never" do |sh|
|
||||
sh.upload_path = "/tmp/vagrant-selinux"
|
||||
sh.env = {
|
||||
'SELINUX': ENV['SELINUX'] || "Enforcing"
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
/vagrant/script/setup/config-selinux
|
||||
/vagrant/script/setup/config-containerd
|
||||
SHELL
|
||||
end
|
||||
|
||||
# SELinux is permissive by default (via provisioning) in this VM. To re-run with SELinux enforcing:
|
||||
# vagrant up --provision-with=selinux-enforcing,test-integration
|
||||
#
|
||||
config.vm.provision "test-integration", type: "shell", run: "never" do |sh|
|
||||
sh.upload_path = "/tmp/test-integration"
|
||||
sh.env = {
|
||||
'RUNC_FLAVOR': ENV['RUNC_FLAVOR'] || "runc",
|
||||
'GOTEST': ENV['GOTEST'] || "go test",
|
||||
'GOTESTSUM_JUNITFILE': ENV['GOTESTSUM_JUNITFILE'],
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
source /etc/environment
|
||||
source /etc/profile.d/sh.local
|
||||
set -eux -o pipefail
|
||||
rm -rf /var/lib/containerd-test /run/containerd-test
|
||||
cd ${GOPATH}/src/github.com/containerd/containerd
|
||||
make integration EXTRA_TESTFLAGS="-timeout 15m -no-criu -test.v" TEST_RUNTIME=io.containerd.runc.v2 RUNC_FLAVOR=$RUNC_FLAVOR
|
||||
SHELL
|
||||
end
|
||||
|
||||
# SELinux is permissive by default (via provisioning) in this VM. To re-run with SELinux enforcing:
|
||||
# vagrant up --provision-with=selinux-enforcing,test-cri
|
||||
#
|
||||
config.vm.provision "test-cri", type: "shell", run: "never" do |sh|
|
||||
sh.upload_path = "/tmp/test-cri"
|
||||
sh.env = {
|
||||
'GOTEST': ENV['GOTEST'] || "go test",
|
||||
'REPORT_DIR': ENV['REPORT_DIR'],
|
||||
}
|
||||
sh.inline = <<~SHELL
|
||||
#!/usr/bin/env bash
|
||||
source /etc/environment
|
||||
source /etc/profile.d/sh.local
|
||||
set -eux -o pipefail
|
||||
systemctl disable --now containerd || true
|
||||
rm -rf /var/lib/containerd /run/containerd
|
||||
function cleanup()
|
||||
{
|
||||
journalctl -u containerd > /tmp/containerd.log
|
||||
systemctl stop containerd
|
||||
}
|
||||
selinux=$(getenforce)
|
||||
if [[ $selinux == Enforcing ]]; then
|
||||
setenforce 0
|
||||
fi
|
||||
systemctl enable --now ${GOPATH}/src/github.com/containerd/containerd/containerd.service
|
||||
if [[ $selinux == Enforcing ]]; then
|
||||
setenforce 1
|
||||
fi
|
||||
trap cleanup EXIT
|
||||
ctr version
|
||||
critest --parallel=$(nproc) --report-dir="${REPORT_DIR}" --ginkgo.skip='HostIpc is true'
|
||||
SHELL
|
||||
end
|
||||
|
||||
end
|

src/runtime/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go (generated, vendored, 3584 lines removed)
File diff suppressed because it is too large.

src/runtime/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto (generated, vendored, 179 lines removed)
@@ -1,179 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package containerd.services.containers.v1;
|
||||
|
||||
import weak "gogoproto/gogo.proto";
|
||||
import "google/protobuf/any.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/protobuf/field_mask.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers";
|
||||
|
||||
// Containers provides metadata storage for containers used in the execution
|
||||
// service.
|
||||
//
|
||||
// The objects here provide an state-independent view of containers for use in
|
||||
// management and resource pinning. From that perspective, containers do not
|
||||
// have a "state" but rather this is the set of resources that will be
|
||||
// considered in use by the container.
|
||||
//
|
||||
// From the perspective of the execution service, these objects represent the
|
||||
// base parameters for creating a container process.
|
||||
//
|
||||
// In general, when looking to add fields for this type, first ask yourself
|
||||
// whether or not the function of the field has to do with runtime execution or
|
||||
// is invariant of the runtime state of the container. If it has to do with
|
||||
// runtime, or changes as the "container" is started and stops, it probably
|
||||
// doesn't belong on this object.
|
||||
service Containers {
|
||||
rpc Get(GetContainerRequest) returns (GetContainerResponse);
|
||||
rpc List(ListContainersRequest) returns (ListContainersResponse);
|
||||
rpc ListStream(ListContainersRequest) returns (stream ListContainerMessage);
|
||||
rpc Create(CreateContainerRequest) returns (CreateContainerResponse);
|
||||
rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse);
|
||||
rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty);
|
||||
}
|
||||
|
||||
message Container {
|
||||
// ID is the user-specified identifier.
|
||||
//
|
||||
// This field may not be updated.
|
||||
string id = 1;
|
||||
|
||||
// Labels provides an area to include arbitrary data on containers.
|
||||
//
|
||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
||||
//
|
||||
// Note that to add a new value to this field, read the existing set and
|
||||
// include the entire result in the update call.
|
||||
map<string, string> labels = 2;
|
||||
|
||||
// Image contains the reference of the image used to build the
|
||||
// specification and snapshots for running this container.
|
||||
//
|
||||
// If this field is updated, the spec and rootfs needed to updated, as well.
|
||||
string image = 3;
|
||||
|
||||
message Runtime {
|
||||
// Name is the name of the runtime.
|
||||
string name = 1;
|
||||
// Options specify additional runtime initialization options.
|
||||
google.protobuf.Any options = 2;
|
||||
}
|
||||
// Runtime specifies which runtime to use for executing this container.
|
||||
Runtime runtime = 4;
|
||||
|
||||
// Spec to be used when creating the container. This is runtime specific.
|
||||
google.protobuf.Any spec = 5;
|
||||
|
||||
// Snapshotter specifies the snapshotter name used for rootfs
|
||||
string snapshotter = 6;
|
||||
|
||||
// SnapshotKey specifies the snapshot key to use for the container's root
|
||||
// filesystem. When starting a task from this container, a caller should
|
||||
// look up the mounts from the snapshot service and include those on the
|
||||
// task create request.
|
||||
//
|
||||
// Snapshots referenced in this field will not be garbage collected.
|
||||
//
|
||||
// This field is set to empty when the rootfs is not a snapshot.
|
||||
//
|
||||
// This field may be updated.
|
||||
string snapshot_key = 7;
|
||||
|
||||
// CreatedAt is the time the container was first created.
|
||||
google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
|
||||
// UpdatedAt is the last time the container was mutated.
|
||||
google.protobuf.Timestamp updated_at = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
|
||||
// Extensions allow clients to provide zero or more blobs that are directly
|
||||
// associated with the container. One may provide protobuf, json, or other
|
||||
// encoding formats. The primary use of this is to further decorate the
|
||||
// container object with fields that may be specific to a client integration.
|
||||
//
|
||||
// The key portion of this map should identify a "name" for the extension
|
||||
// that should be unique against other extensions. When updating extension
|
||||
// data, one should only update the specified extension using field paths
|
||||
// to select a specific map key.
|
||||
map<string, google.protobuf.Any> extensions = 10 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message GetContainerRequest {
|
||||
string id = 1;
|
||||
}
|
||||
|
||||
message GetContainerResponse {
|
||||
Container container = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message ListContainersRequest {
|
||||
// Filters contains one or more filters using the syntax defined in the
|
||||
// containerd filter package.
|
||||
//
|
||||
// The returned result will be those that match any of the provided
|
||||
// filters. Expanded, containers that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
repeated string filters = 1;
|
||||
}
|
||||
|
||||
message ListContainersResponse {
|
||||
repeated Container containers = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message CreateContainerRequest {
|
||||
Container container = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message CreateContainerResponse {
|
||||
Container container = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
// UpdateContainerRequest updates the metadata on one or more container.
|
||||
//
|
||||
// The operation should follow semantics described in
|
||||
// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
|
||||
// unless otherwise qualified.
|
||||
message UpdateContainerRequest {
|
||||
// Container provides the target values, as declared by the mask, for the update.
|
||||
//
|
||||
// The ID field must be set.
|
||||
Container container = 1 [(gogoproto.nullable) = false];
|
||||
|
||||
// UpdateMask specifies which fields to perform the update on. If empty,
|
||||
// the operation applies to all fields.
|
||||
google.protobuf.FieldMask update_mask = 2;
|
||||
}
|
||||
|
||||
message UpdateContainerResponse {
|
||||
Container container = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message DeleteContainerRequest {
|
||||
string id = 1;
|
||||
}
|
||||
|
||||
message ListContainerMessage {
|
||||
Container container = 1;
|
||||
}
|

src/runtime/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go (generated, vendored, 5425 lines removed)
File diff suppressed because it is too large.

src/runtime/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto (generated, vendored, 334 lines removed)
@@ -1,334 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package containerd.services.content.v1;
|
||||
|
||||
import weak "gogoproto/gogo.proto";
|
||||
import "google/protobuf/field_mask.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
|
||||
option go_package = "github.com/containerd/containerd/api/services/content/v1;content";
|
||||
|
||||
// Content provides access to a content addressable storage system.
|
||||
service Content {
|
||||
// Info returns information about a committed object.
|
||||
//
|
||||
// This call can be used for getting the size of content and checking for
|
||||
// existence.
|
||||
rpc Info(InfoRequest) returns (InfoResponse);
|
||||
|
||||
// Update updates content metadata.
|
||||
//
|
||||
// This call can be used to manage the mutable content labels. The
|
||||
// immutable metadata such as digest, size, and committed at cannot
|
||||
// be updated.
|
||||
rpc Update(UpdateRequest) returns (UpdateResponse);
|
||||
|
||||
// List streams the entire set of content as Info objects and closes the
|
||||
// stream.
|
||||
//
|
||||
// Typically, this will yield a large response, chunked into messages.
|
||||
// Clients should make provisions to ensure they can handle the entire data
|
||||
// set.
|
||||
rpc List(ListContentRequest) returns (stream ListContentResponse);
|
||||
|
||||
// Delete will delete the referenced object.
|
||||
rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);
|
||||
|
||||
// Read allows one to read an object based on the offset into the content.
|
||||
//
|
||||
// The requested data may be returned in one or more messages.
|
||||
rpc Read(ReadContentRequest) returns (stream ReadContentResponse);
|
||||
|
||||
// Status returns the status for a single reference.
|
||||
rpc Status(StatusRequest) returns (StatusResponse);
|
||||
|
||||
// ListStatuses returns the status of ongoing object ingestions, started via
|
||||
// Write.
|
||||
//
|
||||
// Only those matching the regular expression will be provided in the
|
||||
// response. If the provided regular expression is empty, all ingestions
|
||||
// will be provided.
|
||||
rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);
|
||||
|
||||
// Write begins or resumes writes to a resource identified by a unique ref.
|
||||
// Only one active stream may exist at a time for each ref.
|
||||
//
|
||||
// Once a write stream has started, it may only write to a single ref, thus
|
||||
// once a stream is started, the ref may be omitted on subsequent writes.
|
||||
//
|
||||
// For any write transaction represented by a ref, only a single write may
|
||||
// be made to a given offset. If overlapping writes occur, it is an error.
|
||||
// Writes should be sequential and implementations may throw an error if
|
||||
// this is required.
|
||||
//
|
||||
// If expected_digest is set and already part of the content store, the
|
||||
// write will fail.
|
||||
//
|
||||
// When completed, the commit flag should be set to true. If expected size
|
||||
// or digest is set, the content will be validated against those values.
|
||||
rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);
|
||||
|
||||
// Abort cancels the ongoing write named in the request. Any resources
|
||||
// associated with the write will be collected.
|
||||
rpc Abort(AbortRequest) returns (google.protobuf.Empty);
|
||||
}
|
||||
|
||||
message Info {
|
||||
// Digest is the hash identity of the blob.
|
||||
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||
|
||||
// Size is the total number of bytes in the blob.
|
||||
int64 size = 2;
|
||||
|
||||
// CreatedAt provides the time at which the blob was committed.
|
||||
google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
|
||||
// UpdatedAt provides the time the info was last updated.
|
||||
google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
|
||||
// Labels are arbitrary data on snapshots.
|
||||
//
|
||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
||||
map<string, string> labels = 5;
|
||||
}
|
||||
|
||||
message InfoRequest {
|
||||
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message InfoResponse {
|
||||
Info info = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message UpdateRequest {
|
||||
Info info = 1 [(gogoproto.nullable) = false];
|
||||
|
||||
// UpdateMask specifies which fields to perform the update on. If empty,
|
||||
// the operation applies to all fields.
|
||||
//
|
||||
// In info, Digest, Size, and CreatedAt are immutable,
|
||||
// other field may be updated using this mask.
|
||||
// If no mask is provided, all mutable field are updated.
|
||||
google.protobuf.FieldMask update_mask = 2;
|
||||
}
|
||||
|
||||
message UpdateResponse {
|
||||
Info info = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message ListContentRequest {
|
||||
// Filters contains one or more filters using the syntax defined in the
|
||||
// containerd filter package.
|
||||
//
|
||||
// The returned result will be those that match any of the provided
|
||||
// filters. Expanded, containers that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
repeated string filters = 1;
|
||||
}
|
||||
|
||||
message ListContentResponse {
|
||||
repeated Info info = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message DeleteContentRequest {
|
||||
// Digest specifies which content to delete.
|
||||
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
// ReadContentRequest defines the fields that make up a request to read a portion of
|
||||
// data from a stored object.
|
||||
message ReadContentRequest {
|
||||
// Digest is the hash identity to read.
|
||||
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||
|
||||
// Offset specifies the number of bytes from the start at which to begin
|
||||
// the read. If zero or less, the read will be from the start. This uses
|
||||
// standard zero-indexed semantics.
|
||||
int64 offset = 2;
|
||||
|
||||
// size is the total size of the read. If zero, the entire blob will be
|
||||
// returned by the service.
|
||||
int64 size = 3;
|
||||
}
|
||||
|
||||
// ReadContentResponse carries byte data for a read request.
|
||||
message ReadContentResponse {
|
||||
int64 offset = 1; // offset of the returned data
|
||||
bytes data = 2; // actual data
|
||||
}
|
||||
|
||||
message Status {
|
||||
google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
string ref = 3;
|
||||
int64 offset = 4;
|
||||
int64 total = 5;
|
||||
string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
|
||||
message StatusRequest {
|
||||
string ref = 1;
|
||||
}
|
||||
|
||||
message StatusResponse {
|
||||
Status status = 1;
|
||||
}
|
||||
|
||||
message ListStatusesRequest {
|
||||
repeated string filters = 1;
|
||||
}
|
||||
|
||||
message ListStatusesResponse {
|
||||
repeated Status statuses = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
// WriteAction defines the behavior of a WriteRequest.
|
||||
enum WriteAction {
|
||||
option (gogoproto.goproto_enum_prefix) = false;
|
||||
option (gogoproto.enum_customname) = "WriteAction";
|
||||
|
||||
// WriteActionStat instructs the writer to return the current status while
|
||||
// holding the lock on the write.
|
||||
STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"];
|
||||
|
||||
// WriteActionWrite sets the action for the write request to write data.
|
||||
//
|
||||
// Any data included will be written at the provided offset. The
|
||||
// transaction will be left open for further writes.
|
||||
//
|
||||
// This is the default.
|
||||
WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"];
|
||||
|
||||
// WriteActionCommit will write any outstanding data in the message and
|
||||
// commit the write, storing it under the digest.
|
||||
//
|
||||
// This can be used in a single message to send the data, verify it and
|
||||
// commit it.
|
||||
//
|
||||
// This action will always terminate the write.
|
||||
COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
|
||||
}
|
||||
|
||||
// WriteContentRequest writes data to the request ref at offset.
|
||||
message WriteContentRequest {
|
||||
// Action sets the behavior of the write.
|
||||
//
|
||||
// When this is a write and the ref is not yet allocated, the ref will be
|
||||
// allocated and the data will be written at offset.
|
||||
//
|
||||
// If the action is write and the ref is allocated, it will accept data to
|
||||
// an offset that has not yet been written.
|
||||
//
|
||||
// If the action is write and there is no data, the current write status
|
||||
// will be returned. This works differently from status because the stream
|
||||
// holds a lock.
|
||||
WriteAction action = 1;
|
||||
|
||||
// Ref identifies the pre-commit object to write to.
|
||||
string ref = 2;
|
||||
|
||||
// Total can be set to have the service validate the total size of the
|
||||
// committed content.
|
||||
//
|
||||
// The latest value before or with the commit action message will be use to
|
||||
// validate the content. If the offset overflows total, the service may
|
||||
// report an error. It is only required on one message for the write.
|
||||
//
|
||||
// If the value is zero or less, no validation of the final content will be
|
||||
// performed.
|
||||
int64 total = 3;
|
||||
|
||||
// Expected can be set to have the service validate the final content against
|
||||
// the provided digest.
|
||||
//
|
||||
// If the digest is already present in the object store, an AlreadyExists
|
||||
// error will be returned.
|
||||
//
|
||||
// Only the latest version will be used to check the content against the
|
||||
// digest. It is only required to include it on a single message, before or
|
||||
// with the commit action message.
|
||||
string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||
|
||||
// Offset specifies the number of bytes from the start at which to begin
|
||||
// the write. For most implementations, this means from the start of the
|
||||
// file. This uses standard, zero-indexed semantics.
|
||||
//
|
||||
// If the action is write, the remote may remove all previously written
|
||||
// data after the offset. Implementations may support arbitrary offsets but
|
||||
// MUST support reseting this value to zero with a write. If an
|
||||
// implementation does not support a write at a particular offset, an
|
||||
// OutOfRange error must be returned.
|
||||
int64 offset = 5;
|
||||
|
||||
// Data is the actual bytes to be written.
|
||||
//
|
||||
// If this is empty and the message is not a commit, a response will be
|
||||
// returned with the current write state.
|
||||
bytes data = 6;
|
||||
|
||||
// Labels are arbitrary data on snapshots.
|
||||
//
|
||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
||||
map<string, string> labels = 7;
|
||||
}
|
||||
|
||||
// WriteContentResponse is returned on the culmination of a write call.
|
||||
message WriteContentResponse {
|
||||
// Action contains the action for the final message of the stream. A writer
|
||||
// should confirm that they match the intended result.
|
||||
WriteAction action = 1;
|
||||
|
||||
// StartedAt provides the time at which the write began.
|
||||
//
|
||||
// This must be set for stat and commit write actions. All other write
|
||||
// actions may omit this.
|
||||
google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
|
||||
// UpdatedAt provides the last time of a successful write.
|
||||
//
|
||||
// This must be set for stat and commit write actions. All other write
|
||||
// actions may omit this.
|
||||
google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
|
||||
// Offset is the current committed size for the write.
|
||||
int64 offset = 4;
|
||||
|
||||
// Total provides the current, expected total size of the write.
|
||||
//
|
||||
// We include this to provide consistency with the Status structure on the
|
||||
// client writer.
|
||||
//
|
||||
// This is only valid on the Stat and Commit response.
|
||||
int64 total = 5;
|
||||
|
||||
// Digest, if present, includes the digest up to the currently committed
|
||||
// bytes. If action is commit, this field will be set. It is implementation
|
||||
// defined if this is set for other actions.
|
||||
string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message AbortRequest {
|
||||
string ref = 1;
|
||||
}
|

src/runtime/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go (generated, vendored, 1666 lines removed)
File diff suppressed because it is too large.

src/runtime/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto (generated, vendored, 81 lines removed)
@@ -1,81 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package containerd.services.diff.v1;
|
||||
|
||||
import weak "gogoproto/gogo.proto";
|
||||
import "google/protobuf/any.proto";
|
||||
import "github.com/containerd/containerd/api/types/mount.proto";
|
||||
import "github.com/containerd/containerd/api/types/descriptor.proto";
|
||||
|
||||
option go_package = "github.com/containerd/containerd/api/services/diff/v1;diff";
|
||||
|
||||
// Diff service creates and applies diffs
|
||||
service Diff {
|
||||
// Apply applies the content associated with the provided digests onto
|
||||
// the provided mounts. Archive content will be extracted and
|
||||
// decompressed if necessary.
|
||||
rpc Apply(ApplyRequest) returns (ApplyResponse);
|
||||
|
||||
// Diff creates a diff between the given mounts and uploads the result
|
||||
// to the content store.
|
||||
rpc Diff(DiffRequest) returns (DiffResponse);
|
||||
}
|
||||
|
||||
message ApplyRequest {
|
||||
// Diff is the descriptor of the diff to be extracted
|
||||
containerd.types.Descriptor diff = 1;
|
||||
|
||||
repeated containerd.types.Mount mounts = 2;
|
||||
|
||||
map<string, google.protobuf.Any> payloads = 3;
|
||||
}
|
||||
|
||||
message ApplyResponse {
|
||||
// Applied is the descriptor for the object which was applied.
|
||||
// If the input was a compressed blob then the result will be
|
||||
// the descriptor for the uncompressed blob.
|
||||
containerd.types.Descriptor applied = 1;
|
||||
}
|
||||
|
||||
message DiffRequest {
|
||||
// Left are the mounts which represent the older copy
|
||||
// in which is the base of the computed changes.
|
||||
repeated containerd.types.Mount left = 1;
|
||||
|
||||
// Right are the mounts which represents the newer copy
|
||||
// in which changes from the left were made into.
|
||||
repeated containerd.types.Mount right = 2;
|
||||
|
||||
// MediaType is the media type descriptor for the created diff
|
||||
// object
|
||||
string media_type = 3;
|
||||
|
||||
// Ref identifies the pre-commit content store object. This
|
||||
// reference can be used to get the status from the content store.
|
||||
string ref = 4;
|
||||
|
||||
// Labels are the labels to apply to the generated content
|
||||
// on content store commit.
|
||||
map<string, string> labels = 5;
|
||||
}
|
||||
|
||||
message DiffResponse {
|
||||
// Diff is the descriptor of the diff which can be applied
|
||||
containerd.types.Descriptor diff = 3;
|
||||
}
|

src/runtime/vendor/github.com/containerd/containerd/api/services/events/v1/doc.go (generated, vendored, 18 lines removed)
@@ -1,18 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package events defines the event pushing and subscription service.
package events

src/runtime/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go (generated, vendored, 1372 lines removed)
File diff suppressed because it is too large.

src/runtime/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto (generated, vendored, 72 lines removed)
@@ -1,72 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package containerd.services.events.v1;
|
||||
|
||||
import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
|
||||
import weak "gogoproto/gogo.proto";
|
||||
import "google/protobuf/any.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
|
||||
|
||||
service Events {
|
||||
// Publish an event to a topic.
|
||||
//
|
||||
// The event will be packed into a timestamp envelope with the namespace
|
||||
// introspected from the context. The envelope will then be dispatched.
|
||||
rpc Publish(PublishRequest) returns (google.protobuf.Empty);
|
||||
|
||||
// Forward sends an event that has already been packaged into an envelope
|
||||
// with a timestamp and namespace.
|
||||
//
|
||||
// This is useful if earlier timestamping is required or when forwarding on
|
||||
// behalf of another component, namespace or publisher.
|
||||
rpc Forward(ForwardRequest) returns (google.protobuf.Empty);
|
||||
|
||||
// Subscribe to a stream of events, possibly returning only that match any
|
||||
// of the provided filters.
|
||||
//
|
||||
// Unlike many other methods in containerd, subscribers will get messages
|
||||
// from all namespaces unless otherwise specified. If this is not desired,
|
||||
// a filter can be provided in the format 'namespace==<namespace>' to
|
||||
// restrict the received events.
|
||||
rpc Subscribe(SubscribeRequest) returns (stream Envelope);
|
||||
}
|
||||
|
||||
message PublishRequest {
|
||||
string topic = 1;
|
||||
google.protobuf.Any event = 2;
|
||||
}
|
||||
|
||||
message ForwardRequest {
|
||||
Envelope envelope = 1;
|
||||
}
|
||||
|
||||
message SubscribeRequest {
|
||||
repeated string filters = 1;
|
||||
}
|
||||
|
||||
message Envelope {
|
||||
option (containerd.plugin.fieldpath) = true;
|
||||
google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
string namespace = 2;
|
||||
string topic = 3;
|
||||
google.protobuf.Any event = 4;
|
||||
}
|

src/runtime/vendor/github.com/containerd/containerd/api/services/images/v1/docs.go (generated, vendored, 17 lines removed)
@@ -1,17 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package images

src/runtime/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go (generated, vendored, 2738 lines removed)
File diff suppressed because it is too large.

src/runtime/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto (generated, vendored, 140 lines removed)
@@ -1,140 +0,0 @@
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package containerd.services.images.v1;
|
||||
|
||||
import weak "gogoproto/gogo.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/protobuf/field_mask.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "github.com/containerd/containerd/api/types/descriptor.proto";
|
||||
|
||||
option go_package = "github.com/containerd/containerd/api/services/images/v1;images";
|
||||
|
||||
// Images is a service that allows one to register images with containerd.
|
||||
//
|
||||
// In containerd, an image is merely the mapping of a name to a content root,
|
||||
// described by a descriptor. The behavior and state of image is purely
|
||||
// dictated by the type of the descriptor.
|
||||
//
|
||||
// From the perspective of this service, these references are mostly shallow,
|
||||
// in that the existence of the required content won't be validated until
|
||||
// required by consuming services.
|
||||
//
|
||||
// As such, this can really be considered a "metadata service".
|
||||
service Images {
|
||||
// Get returns an image by name.
|
||||
rpc Get(GetImageRequest) returns (GetImageResponse);
|
||||
|
||||
// List returns a list of all images known to containerd.
|
||||
rpc List(ListImagesRequest) returns (ListImagesResponse);
|
||||
|
||||
// Create an image record in the metadata store.
|
||||
//
|
||||
// The name of the image must be unique.
|
||||
rpc Create(CreateImageRequest) returns (CreateImageResponse);
|
||||
|
||||
// Update assigns the name to a given target image based on the provided
|
||||
// image.
|
||||
rpc Update(UpdateImageRequest) returns (UpdateImageResponse);
|
||||
|
||||
// Delete deletes the image by name.
|
||||
rpc Delete(DeleteImageRequest) returns (google.protobuf.Empty);
|
||||
}
|
||||
|
||||
message Image {
|
||||
// Name provides a unique name for the image.
|
||||
//
|
||||
// Containerd treats this as the primary identifier.
|
||||
string name = 1;
|
||||
|
||||
// Labels provides free form labels for the image. These are runtime only
|
||||
// and do not get inherited into the package image in any way.
|
||||
//
|
||||
// Labels may be updated using the field mask.
|
||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
||||
map<string, string> labels = 2;
|
||||
|
||||
// Target describes the content entry point of the image.
|
||||
containerd.types.Descriptor target = 3 [(gogoproto.nullable) = false];
|
||||
|
||||
// CreatedAt is the time the image was first created.
|
||||
google.protobuf.Timestamp created_at = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
|
||||
// UpdatedAt is the last time the image was mutated.
|
||||
google.protobuf.Timestamp updated_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message GetImageRequest {
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
message GetImageResponse {
|
||||
Image image = 1;
|
||||
}
|
||||
|
||||
message CreateImageRequest {
|
||||
Image image = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message CreateImageResponse {
|
||||
Image image = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message UpdateImageRequest {
|
||||
// Image provides a full or partial image for update.
|
||||
//
|
||||
// The name field must be set or an error will be returned.
|
||||
Image image = 1 [(gogoproto.nullable) = false];
|
||||
|
||||
// UpdateMask specifies which fields to perform the update on. If empty,
|
||||
// the operation applies to all fields.
|
||||
google.protobuf.FieldMask update_mask = 2;
|
||||
}
|
||||
|
||||
message UpdateImageResponse {
|
||||
Image image = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message ListImagesRequest {
|
||||
// Filters contains one or more filters using the syntax defined in the
|
||||
// containerd filter package.
|
||||
//
|
||||
// The returned result will be those that match any of the provided
|
||||
// filters. Expanded, images that match the following will be
|
||||
// returned:
|
||||
//
|
||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
||||
//
|
||||
// If filters is zero-length or nil, all items will be returned.
|
||||
repeated string filters = 1;
|
||||
}
|
||||
|
||||
message ListImagesResponse {
|
||||
repeated Image images = 1 [(gogoproto.nullable) = false];
|
||||
}
|
||||
|
||||
message DeleteImageRequest {
|
||||
string name = 1;
|
||||
|
||||
// Sync indicates that the delete and cleanup should be done
|
||||
// synchronously before returning to the caller
|
||||
//
|
||||
// Default is false
|
||||
bool sync = 2;
|
||||
}
|
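As with the events sketch earlier, the following is an illustration only (not part of the diff) of calling the Images service through its generated client. The import alias is an assumption, and ctx/conn are prepared exactly as in the previous sketch.

import (
    "context"

    imagesapi "github.com/containerd/containerd/api/services/images/v1"
    "google.golang.org/grpc"
)

// listImageNames returns the names of all images known to containerd in the
// namespace carried by ctx; conn is the *grpc.ClientConn from the events sketch.
func listImageNames(ctx context.Context, conn *grpc.ClientConn) ([]string, error) {
    resp, err := imagesapi.NewImagesClient(conn).List(ctx, &imagesapi.ListImagesRequest{})
    if err != nil {
        return nil, err
    }
    names := make([]string, 0, len(resp.Images))
    for _, img := range resp.Images {
        // Image.Target carries the content descriptor; only the name is used here.
        names = append(names, img.Name)
    }
    return names, nil
}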
src/runtime/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go (generated, vendored): 17 lines removed
@@ -1,17 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package introspection
File diff suppressed because it is too large
@@ -1,104 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

syntax = "proto3";

package containerd.services.introspection.v1;

import "github.com/containerd/containerd/api/types/platform.proto";
import "google/rpc/status.proto";
import "google/protobuf/empty.proto";
import weak "gogoproto/gogo.proto";

option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection";

service Introspection {
    // Plugins returns a list of plugins in containerd.
    //
    // Clients can use this to detect features and capabilities when using
    // containerd.
    rpc Plugins(PluginsRequest) returns (PluginsResponse);
    // Server returns information about the containerd server
    rpc Server(google.protobuf.Empty) returns (ServerResponse);
}

message Plugin {
    // Type defines the type of plugin.
    //
    // See package plugin for a list of possible values. Non core plugins may
    // define their own values during registration.
    string type = 1;

    // ID identifies the plugin uniquely in the system.
    string id = 2;

    // Requires lists the plugin types required by this plugin.
    repeated string requires = 3;

    // Platforms enumerates the platforms this plugin will support.
    //
    // If values are provided here, the plugin will only be operable under the
    // provided platforms.
    //
    // If this is empty, the plugin will work across all platforms.
    //
    // If the plugin prefers certain platforms over others, they should be
    // listed from most to least preferred.
    repeated types.Platform platforms = 4 [(gogoproto.nullable) = false];

    // Exports allows plugins to provide values about state or configuration to
    // interested parties.
    //
    // One example is exposing the configured path of a snapshotter plugin.
    map<string, string> exports = 5;

    // Capabilities allows plugins to communicate feature switches to allow
    // clients to detect features that may not be on be default or may be
    // different from version to version.
    //
    // Use this sparingly.
    repeated string capabilities = 6;

    // InitErr will be set if the plugin fails initialization.
    //
    // This means the plugin may have been registered but a non-terminal error
    // was encountered during initialization.
    //
    // Plugins that have this value set cannot be used.
    google.rpc.Status init_err = 7;
}

message PluginsRequest {
    // Filters contains one or more filters using the syntax defined in the
    // containerd filter package.
    //
    // The returned result will be those that match any of the provided
    // filters. Expanded, plugins that match the following will be
    // returned:
    //
    //   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
    //
    // If filters is zero-length or nil, all items will be returned.
    repeated string filters = 1;
}

message PluginsResponse {
    repeated Plugin plugins = 1 [(gogoproto.nullable) = false];
}

message ServerResponse {
    string uuid = 1 [(gogoproto.customname) = "UUID"];
}
src/runtime/vendor/github.com/containerd/containerd/api/services/leases/v1/doc.go (generated, vendored): 17 lines removed
@@ -1,17 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package leases
src/runtime/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go (generated, vendored): 3108 lines changed (diff suppressed because it is too large)
src/runtime/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto (generated, vendored): 117 lines removed
@@ -1,117 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/
syntax = "proto3";

package containerd.services.leases.v1;

import weak "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/services/leases/v1;leases";

// Leases service manages resources leases within the metadata store.
service Leases {
    // Create creates a new lease for managing changes to metadata. A lease
    // can be used to protect objects from being removed.
    rpc Create(CreateRequest) returns (CreateResponse);

    // Delete deletes the lease and makes any unreferenced objects created
    // during the lease eligible for garbage collection if not referenced
    // or retained by other resources during the lease.
    rpc Delete(DeleteRequest) returns (google.protobuf.Empty);

    // List lists all active leases, returning the full list of
    // leases and optionally including the referenced resources.
    rpc List(ListRequest) returns (ListResponse);

    // AddResource references the resource by the provided lease.
    rpc AddResource(AddResourceRequest) returns (google.protobuf.Empty);

    // DeleteResource dereferences the resource by the provided lease.
    rpc DeleteResource(DeleteResourceRequest) returns (google.protobuf.Empty);

    // ListResources lists all the resources referenced by the lease.
    rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse);
}

// Lease is an object which retains resources while it exists.
message Lease {
    string id = 1;

    google.protobuf.Timestamp created_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];

    map<string, string> labels = 3;
}

message CreateRequest {
    // ID is used to identity the lease, when the id is not set the service
    // generates a random identifier for the lease.
    string id = 1;

    map<string, string> labels = 3;
}

message CreateResponse {
    Lease lease = 1;
}

message DeleteRequest {
    string id = 1;

    // Sync indicates that the delete and cleanup should be done
    // synchronously before returning to the caller
    //
    // Default is false
    bool sync = 2;
}

message ListRequest {
    repeated string filters = 1;
}

message ListResponse {
    repeated Lease leases = 1;
}

message Resource {
    string id = 1;

    // For snapshotter resource, there are many snapshotter types here, like
    // overlayfs, devmapper etc. The type will be formatted with type,
    // like "snapshotter/overlayfs".
    string type = 2;
}

message AddResourceRequest {
    string id = 1;

    Resource resource = 2 [(gogoproto.nullable) = false];
}

message DeleteResourceRequest {
    string id = 1;

    Resource resource = 2 [(gogoproto.nullable) = false];
}

message ListResourcesRequest {
    string id = 1;
}

message ListResourcesResponse {
    repeated Resource resources = 1 [(gogoproto.nullable) = false];
}
src/runtime/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go (generated, vendored): 2518 lines changed (diff suppressed because it is too large)
@@ -1,108 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

syntax = "proto3";

package containerd.services.namespaces.v1;

import weak "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";

option go_package = "github.com/containerd/containerd/api/services/namespaces/v1;namespaces";

// Namespaces provides the ability to manipulate containerd namespaces.
//
// All objects in the system are required to be a member of a namespace. If a
// namespace is deleted, all objects, including containers, images and
// snapshots, will be deleted, as well.
//
// Unless otherwise noted, operations in containerd apply only to the namespace
// supplied per request.
//
// I hope this goes without saying, but namespaces are themselves NOT
// namespaced.
service Namespaces {
    rpc Get(GetNamespaceRequest) returns (GetNamespaceResponse);
    rpc List(ListNamespacesRequest) returns (ListNamespacesResponse);
    rpc Create(CreateNamespaceRequest) returns (CreateNamespaceResponse);
    rpc Update(UpdateNamespaceRequest) returns (UpdateNamespaceResponse);
    rpc Delete(DeleteNamespaceRequest) returns (google.protobuf.Empty);
}

message Namespace {
    string name = 1;

    // Labels provides an area to include arbitrary data on namespaces.
    //
    // The combined size of a key/value pair cannot exceed 4096 bytes.
    //
    // Note that to add a new value to this field, read the existing set and
    // include the entire result in the update call.
    map<string, string> labels = 2;
}

message GetNamespaceRequest {
    string name = 1;
}

message GetNamespaceResponse {
    Namespace namespace = 1 [(gogoproto.nullable) = false];
}

message ListNamespacesRequest {
    string filter = 1;
}

message ListNamespacesResponse {
    repeated Namespace namespaces = 1 [(gogoproto.nullable) = false];
}

message CreateNamespaceRequest {
    Namespace namespace = 1 [(gogoproto.nullable) = false];
}

message CreateNamespaceResponse {
    Namespace namespace = 1 [(gogoproto.nullable) = false];
}

// UpdateNamespaceRequest updates the metadata for a namespace.
//
// The operation should follow semantics described in
// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
// unless otherwise qualified.
message UpdateNamespaceRequest {
    // Namespace provides the target value, as declared by the mask, for the update.
    //
    // The namespace field must be set.
    Namespace namespace = 1 [(gogoproto.nullable) = false];

    // UpdateMask specifies which fields to perform the update on. If empty,
    // the operation applies to all fields.
    //
    // For the most part, this applies only to selectively updating labels on
    // the namespace. While field masks are typically limited to ascii alphas
    // and digits, we just take everything after the "labels." as the map key.
    google.protobuf.FieldMask update_mask = 2;
}

message UpdateNamespaceResponse {
    Namespace namespace = 1 [(gogoproto.nullable) = false];
}

message DeleteNamespaceRequest {
    string name = 1;
}
src/runtime/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go (generated, vendored): 5541 lines changed (diff suppressed because it is too large)
@@ -1,183 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

syntax = "proto3";

package containerd.services.snapshots.v1;

import weak "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount.proto";

option go_package = "github.com/containerd/containerd/api/services/snapshots/v1;snapshots";

// Snapshot service manages snapshots
service Snapshots {
    rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);
    rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);
    rpc Mounts(MountsRequest) returns (MountsResponse);
    rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);
    rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);
    rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);
    rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse);
    rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);
    rpc Usage(UsageRequest) returns (UsageResponse);
    rpc Cleanup(CleanupRequest) returns (google.protobuf.Empty);
}

message PrepareSnapshotRequest {
    string snapshotter = 1;
    string key = 2;
    string parent = 3;

    // Labels are arbitrary data on snapshots.
    //
    // The combined size of a key/value pair cannot exceed 4096 bytes.
    map<string, string> labels = 4;
}

message PrepareSnapshotResponse {
    repeated containerd.types.Mount mounts = 1;
}

message ViewSnapshotRequest {
    string snapshotter = 1;
    string key = 2;
    string parent = 3;

    // Labels are arbitrary data on snapshots.
    //
    // The combined size of a key/value pair cannot exceed 4096 bytes.
    map<string, string> labels = 4;
}

message ViewSnapshotResponse {
    repeated containerd.types.Mount mounts = 1;
}

message MountsRequest {
    string snapshotter = 1;
    string key = 2;
}

message MountsResponse {
    repeated containerd.types.Mount mounts = 1;
}

message RemoveSnapshotRequest {
    string snapshotter = 1;
    string key = 2;
}

message CommitSnapshotRequest {
    string snapshotter = 1;
    string name = 2;
    string key = 3;

    // Labels are arbitrary data on snapshots.
    //
    // The combined size of a key/value pair cannot exceed 4096 bytes.
    map<string, string> labels = 4;
}

message StatSnapshotRequest {
    string snapshotter = 1;
    string key = 2;
}

enum Kind {
    option (gogoproto.goproto_enum_prefix) = false;
    option (gogoproto.enum_customname) = "Kind";

    UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "KindUnknown"];
    VIEW = 1 [(gogoproto.enumvalue_customname) = "KindView"];
    ACTIVE = 2 [(gogoproto.enumvalue_customname) = "KindActive"];
    COMMITTED = 3 [(gogoproto.enumvalue_customname) = "KindCommitted"];
}

message Info {
    string name = 1;
    string parent = 2;
    Kind kind = 3;

    // CreatedAt provides the time at which the snapshot was created.
    google.protobuf.Timestamp created_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];

    // UpdatedAt provides the time the info was last updated.
    google.protobuf.Timestamp updated_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];

    // Labels are arbitrary data on snapshots.
    //
    // The combined size of a key/value pair cannot exceed 4096 bytes.
    map<string, string> labels = 6;
}

message StatSnapshotResponse {
    Info info = 1 [(gogoproto.nullable) = false];
}

message UpdateSnapshotRequest {
    string snapshotter = 1;
    Info info = 2 [(gogoproto.nullable) = false];

    // UpdateMask specifies which fields to perform the update on. If empty,
    // the operation applies to all fields.
    //
    // In info, Name, Parent, Kind, Created are immutable,
    // other field may be updated using this mask.
    // If no mask is provided, all mutable field are updated.
    google.protobuf.FieldMask update_mask = 3;
}

message UpdateSnapshotResponse {
    Info info = 1 [(gogoproto.nullable) = false];
}

message ListSnapshotsRequest{
    string snapshotter = 1;

    // Filters contains one or more filters using the syntax defined in the
    // containerd filter package.
    //
    // The returned result will be those that match any of the provided
    // filters. Expanded, images that match the following will be
    // returned:
    //
    //   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
    //
    // If filters is zero-length or nil, all items will be returned.
    repeated string filters = 2;
}

message ListSnapshotsResponse {
    repeated Info info = 1 [(gogoproto.nullable) = false];
}

message UsageRequest {
    string snapshotter = 1;
    string key = 2;
}

message UsageResponse {
    int64 size = 1;
    int64 inodes = 2;
}

message CleanupRequest {
    string snapshotter = 1;
}
src/runtime/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go (generated, vendored): 7464 lines changed (diff suppressed because it is too large)
src/runtime/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto (generated, vendored): 226 lines removed
@@ -1,226 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

syntax = "proto3";

package containerd.services.tasks.v1;

import "google/protobuf/empty.proto";
import "google/protobuf/any.proto";
import weak "gogoproto/gogo.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
import "github.com/containerd/containerd/api/types/metrics.proto";
import "github.com/containerd/containerd/api/types/descriptor.proto";
import "github.com/containerd/containerd/api/types/task/task.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/services/tasks/v1;tasks";

service Tasks {
    // Create a task.
    rpc Create(CreateTaskRequest) returns (CreateTaskResponse);

    // Start a process.
    rpc Start(StartRequest) returns (StartResponse);

    // Delete a task and on disk state.
    rpc Delete(DeleteTaskRequest) returns (DeleteResponse);

    rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);

    rpc Get(GetRequest) returns (GetResponse);

    rpc List(ListTasksRequest) returns (ListTasksResponse);

    // Kill a task or process.
    rpc Kill(KillRequest) returns (google.protobuf.Empty);

    rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);

    rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);

    rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);

    rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty);

    rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty);

    rpc ListPids(ListPidsRequest) returns (ListPidsResponse);

    rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse);

    rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);

    rpc Metrics(MetricsRequest) returns (MetricsResponse);

    rpc Wait(WaitRequest) returns (WaitResponse);
}

message CreateTaskRequest {
    string container_id = 1;

    // RootFS provides the pre-chroot mounts to perform in the shim before
    // executing the container task.
    //
    // These are for mounts that cannot be performed in the user namespace.
    // Typically, these mounts should be resolved from snapshots specified on
    // the container object.
    repeated containerd.types.Mount rootfs = 3;

    string stdin = 4;
    string stdout = 5;
    string stderr = 6;
    bool terminal = 7;

    containerd.types.Descriptor checkpoint = 8;

    google.protobuf.Any options = 9;
}

message CreateTaskResponse {
    string container_id = 1;
    uint32 pid = 2;
}

message StartRequest {
    string container_id = 1;
    string exec_id = 2;
}

message StartResponse {
    uint32 pid = 1;
}

message DeleteTaskRequest {
    string container_id = 1;
}

message DeleteResponse {
    string id = 1;
    uint32 pid = 2;
    uint32 exit_status = 3;
    google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

message DeleteProcessRequest {
    string container_id = 1;
    string exec_id = 2;
}

message GetRequest {
    string container_id = 1;
    string exec_id = 2;
}

message GetResponse {
    containerd.v1.types.Process process = 1;
}

message ListTasksRequest {
    string filter = 1;
}

message ListTasksResponse {
    repeated containerd.v1.types.Process tasks = 1;
}

message KillRequest {
    string container_id = 1;
    string exec_id = 2;
    uint32 signal = 3;
    bool all = 4;
}

message ExecProcessRequest {
    string container_id = 1;
    string stdin = 2;
    string stdout = 3;
    string stderr = 4;
    bool terminal = 5;
    // Spec for starting a process in the target container.
    //
    // For runc, this is a process spec, for example.
    google.protobuf.Any spec = 6;
    // id of the exec process
    string exec_id = 7;
}

message ExecProcessResponse {
}

message ResizePtyRequest {
    string container_id = 1;
    string exec_id = 2;
    uint32 width = 3;
    uint32 height = 4;
}

message CloseIORequest {
    string container_id = 1;
    string exec_id = 2;
    bool stdin = 3;
}

message PauseTaskRequest {
    string container_id = 1;
}

message ResumeTaskRequest {
    string container_id = 1;
}

message ListPidsRequest {
    string container_id = 1;
}

message ListPidsResponse {
    // Processes includes the process ID and additional process information
    repeated containerd.v1.types.ProcessInfo processes = 1;
}

message CheckpointTaskRequest {
    string container_id = 1;
    string parent_checkpoint = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
    google.protobuf.Any options = 3;
}

message CheckpointTaskResponse {
    repeated containerd.types.Descriptor descriptors = 1;
}

message UpdateTaskRequest {
    string container_id = 1;
    google.protobuf.Any resources = 2;
    map<string, string> annotations = 3;
}

message MetricsRequest {
    repeated string filters = 1;
}

message MetricsResponse {
    repeated types.Metric metrics = 1;
}

message WaitRequest {
    string container_id = 1;
    string exec_id = 2;
}

message WaitResponse {
    uint32 exit_status = 1;
    google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
src/runtime/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go (generated, vendored): 476 lines removed
@@ -1,476 +0,0 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: github.com/containerd/containerd/api/services/version/v1/version.proto
|
||||
|
||||
package version
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
types "github.com/gogo/protobuf/types"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type VersionResponse struct {
|
||||
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
|
||||
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *VersionResponse) Reset() { *m = VersionResponse{} }
|
||||
func (*VersionResponse) ProtoMessage() {}
|
||||
func (*VersionResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_128109001e578ffe, []int{0}
|
||||
}
|
||||
func (m *VersionResponse) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *VersionResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_VersionResponse.Merge(m, src)
|
||||
}
|
||||
func (m *VersionResponse) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *VersionResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_VersionResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_VersionResponse proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptor_128109001e578ffe)
|
||||
}
|
||||
|
||||
var fileDescriptor_128109001e578ffe = []byte{
|
||||
// 243 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
|
||||
0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9,
|
||||
0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2,
|
||||
0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05,
|
||||
0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11,
|
||||
0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a,
|
||||
0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92,
|
||||
0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e,
|
||||
0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9,
|
||||
0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50,
|
||||
0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
|
||||
0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x46, 0x30,
|
||||
0x26, 0xb1, 0x81, 0x9d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x0d, 0x52, 0x23, 0xa9,
|
||||
0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// VersionClient is the client API for Version service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type VersionClient interface {
|
||||
Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error)
|
||||
}
|
||||
|
||||
type versionClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewVersionClient(cc *grpc.ClientConn) VersionClient {
|
||||
return &versionClient{cc}
|
||||
}
|
||||
|
||||
func (c *versionClient) Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error) {
|
||||
out := new(VersionResponse)
|
||||
err := c.cc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// VersionServer is the server API for Version service.
|
||||
type VersionServer interface {
|
||||
Version(context.Context, *types.Empty) (*VersionResponse, error)
|
||||
}
|
||||
|
||||
// UnimplementedVersionServer can be embedded to have forward compatible implementations.
|
||||
type UnimplementedVersionServer struct {
|
||||
}
|
||||
|
||||
func (*UnimplementedVersionServer) Version(ctx context.Context, req *types.Empty) (*VersionResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Version not implemented")
|
||||
}
|
||||
|
||||
func RegisterVersionServer(s *grpc.Server, srv VersionServer) {
|
||||
s.RegisterService(&_Version_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(types.Empty)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(VersionServer).Version(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/containerd.services.version.v1.Version/Version",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(VersionServer).Version(ctx, req.(*types.Empty))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _Version_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "containerd.services.version.v1.Version",
|
||||
HandlerType: (*VersionServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "Version",
|
||||
Handler: _Version_Version_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto",
|
||||
}
|
||||
|
||||
func (m *VersionResponse) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *VersionResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *VersionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if len(m.Revision) > 0 {
|
||||
i -= len(m.Revision)
|
||||
copy(dAtA[i:], m.Revision)
|
||||
i = encodeVarintVersion(dAtA, i, uint64(len(m.Revision)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if len(m.Version) > 0 {
|
||||
i -= len(m.Version)
|
||||
copy(dAtA[i:], m.Version)
|
||||
i = encodeVarintVersion(dAtA, i, uint64(len(m.Version)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintVersion(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovVersion(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *VersionResponse) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Version)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovVersion(uint64(l))
|
||||
}
|
||||
l = len(m.Revision)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovVersion(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovVersion(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozVersion(x uint64) (n int) {
|
||||
return sovVersion(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *VersionResponse) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&VersionResponse{`,
|
||||
`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
|
||||
`Revision:` + fmt.Sprintf("%v", this.Revision) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringVersion(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *VersionResponse) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowVersion
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: VersionResponse: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: VersionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowVersion
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthVersion
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthVersion
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Version = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowVersion
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthVersion
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthVersion
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Revision = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipVersion(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthVersion
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipVersion(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowVersion
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowVersion
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowVersion
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthVersion
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupVersion
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthVersion
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthVersion = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowVersion = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupVersion = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
@@ -1,34 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

syntax = "proto3";

package containerd.services.version.v1;

import "google/protobuf/empty.proto";
import weak "gogoproto/gogo.proto";

// TODO(stevvooe): Should version service actually be versioned?
option go_package = "github.com/containerd/containerd/api/services/version/v1;version";

service Version {
    rpc Version(google.protobuf.Empty) returns (VersionResponse);
}

message VersionResponse {
    string version = 1;
    string revision = 2;
}
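Again for illustration only: the Version service above is the simplest of the removed APIs, and the generated client shown in version.pb.go earlier can be driven as sketched below (import aliases assumed, conn dialed as in the earlier sketches).

import (
    "context"
    "fmt"

    versionapi "github.com/containerd/containerd/api/services/version/v1"
    gogotypes "github.com/gogo/protobuf/types"
    "google.golang.org/grpc"
)

// printDaemonVersion queries the Version RPC defined above and prints the
// daemon version and git revision reported by containerd.
func printDaemonVersion(ctx context.Context, conn *grpc.ClientConn) error {
    resp, err := versionapi.NewVersionClient(conn).Version(ctx, &gogotypes.Empty{})
    if err != nil {
        return err
    }
    fmt.Printf("containerd %s (revision %s)\n", resp.Version, resp.Revision)
    return nil
}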
src/runtime/vendor/github.com/containerd/containerd/archive/compression/compression.go (generated, vendored): 287 lines removed
@@ -1,287 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package compression
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
)
|
||||
|
||||
type (
|
||||
// Compression is the state represents if compressed or not.
|
||||
Compression int
|
||||
)
|
||||
|
||||
const (
|
||||
// Uncompressed represents the uncompressed.
|
||||
Uncompressed Compression = iota
|
||||
// Gzip is gzip compression algorithm.
|
||||
Gzip
|
||||
// Zstd is zstd compression algorithm.
|
||||
Zstd
|
||||
)
|
||||
|
||||
const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ"
|
||||
|
||||
var (
|
||||
initPigz sync.Once
|
||||
unpigzPath string
|
||||
)
|
||||
|
||||
var (
|
||||
bufioReader32KPool = &sync.Pool{
|
||||
New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
|
||||
}
|
||||
)
|
||||
|
||||
// DecompressReadCloser include the stream after decompress and the compress method detected.
|
||||
type DecompressReadCloser interface {
|
||||
io.ReadCloser
|
||||
// GetCompression returns the compress method which is used before decompressing
|
||||
GetCompression() Compression
|
||||
}
|
||||
|
||||
type readCloserWrapper struct {
|
||||
io.Reader
|
||||
compression Compression
|
||||
closer func() error
|
||||
}
|
||||
|
||||
func (r *readCloserWrapper) Close() error {
|
||||
if r.closer != nil {
|
||||
return r.closer()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *readCloserWrapper) GetCompression() Compression {
|
||||
return r.compression
|
||||
}
|
||||
|
||||
type writeCloserWrapper struct {
|
||||
io.Writer
|
||||
closer func() error
|
||||
}
|
||||
|
||||
func (w *writeCloserWrapper) Close() error {
|
||||
if w.closer != nil {
|
||||
w.closer()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type bufferedReader struct {
|
||||
buf *bufio.Reader
|
||||
}
|
||||
|
||||
func newBufferedReader(r io.Reader) *bufferedReader {
|
||||
buf := bufioReader32KPool.Get().(*bufio.Reader)
|
||||
buf.Reset(r)
|
||||
return &bufferedReader{buf}
|
||||
}
|
||||
|
||||
func (r *bufferedReader) Read(p []byte) (n int, err error) {
|
||||
if r.buf == nil {
|
||||
return 0, io.EOF
|
||||
}
|
||||
n, err = r.buf.Read(p)
|
||||
if err == io.EOF {
|
||||
r.buf.Reset(nil)
|
||||
bufioReader32KPool.Put(r.buf)
|
||||
r.buf = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *bufferedReader) Peek(n int) ([]byte, error) {
|
||||
if r.buf == nil {
|
||||
return nil, io.EOF
|
||||
}
|
||||
return r.buf.Peek(n)
|
||||
}
|
||||
|
||||
// DetectCompression detects the compression algorithm of the source.
|
||||
func DetectCompression(source []byte) Compression {
|
||||
for compression, m := range map[Compression][]byte{
|
||||
Gzip: {0x1F, 0x8B, 0x08},
|
||||
Zstd: {0x28, 0xb5, 0x2f, 0xfd},
|
||||
} {
|
||||
if len(source) < len(m) {
|
||||
// Len too short
|
||||
continue
|
||||
}
|
||||
if bytes.Equal(m, source[:len(m)]) {
|
||||
return compression
|
||||
}
|
||||
}
|
||||
return Uncompressed
|
||||
}
|
||||
|
||||
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
|
||||
func DecompressStream(archive io.Reader) (DecompressReadCloser, error) {
|
||||
buf := newBufferedReader(archive)
|
||||
bs, err := buf.Peek(10)
|
||||
if err != nil && err != io.EOF {
|
||||
// Note: we'll ignore any io.EOF error because there are some odd
|
||||
// cases where the layer.tar file will be empty (zero bytes) and
|
||||
// that results in an io.EOF from the Peek() call. So, in those
|
||||
// cases we'll just treat it as a non-compressed stream and
|
||||
// that means just create an empty layer.
|
||||
// See Issue docker/docker#18170
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch compression := DetectCompression(bs); compression {
|
||||
case Uncompressed:
|
||||
return &readCloserWrapper{
|
||||
Reader: buf,
|
||||
compression: compression,
|
||||
}, nil
|
||||
case Gzip:
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
gzReader, err := gzipDecompress(ctx, buf)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &readCloserWrapper{
|
||||
Reader: gzReader,
|
||||
compression: compression,
|
||||
closer: func() error {
|
||||
cancel()
|
||||
return gzReader.Close()
|
||||
},
|
||||
}, nil
|
||||
case Zstd:
|
||||
zstdReader, err := zstd.NewReader(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &readCloserWrapper{
|
||||
Reader: zstdReader,
|
||||
compression: compression,
|
||||
closer: func() error {
|
||||
zstdReader.Close()
|
||||
return nil
|
||||
},
|
||||
}, nil
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
|
||||
}
|
||||
}
|
||||
|
||||
// CompressStream compresses the dest with specified compression algorithm.
|
||||
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
|
||||
switch compression {
|
||||
case Uncompressed:
|
||||
return &writeCloserWrapper{dest, nil}, nil
|
||||
case Gzip:
|
||||
return gzip.NewWriter(dest), nil
|
||||
case Zstd:
|
||||
return zstd.NewWriter(dest)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
|
||||
}
|
||||
}
|
||||
|
||||
// Extension returns the extension of a file that uses the specified compression algorithm.
|
||||
func (compression *Compression) Extension() string {
|
||||
switch *compression {
|
||||
case Gzip:
|
||||
return "gz"
|
||||
case Zstd:
|
||||
return "zst"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
|
||||
initPigz.Do(func() {
|
||||
if unpigzPath = detectPigz(); unpigzPath != "" {
|
||||
log.L.Debug("using pigz for decompression")
|
||||
}
|
||||
})
|
||||
|
||||
if unpigzPath == "" {
|
||||
return gzip.NewReader(buf)
|
||||
}
|
||||
|
||||
return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
|
||||
}
|
||||
|
||||
func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) {
|
||||
reader, writer := io.Pipe()
|
||||
|
||||
cmd.Stdin = in
|
||||
cmd.Stdout = writer
|
||||
|
||||
var errBuf bytes.Buffer
|
||||
cmd.Stderr = &errBuf
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := cmd.Wait(); err != nil {
|
||||
writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
|
||||
} else {
|
||||
writer.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
func detectPigz() string {
|
||||
path, err := exec.LookPath("unpigz")
|
||||
if err != nil {
|
||||
log.L.WithError(err).Debug("unpigz not found, falling back to go gzip")
|
||||
return ""
|
||||
}
|
||||
|
||||
// Check if pigz is disabled via the CONTAINERD_DISABLE_PIGZ env variable
|
||||
value := os.Getenv(disablePigzEnv)
|
||||
if value == "" {
|
||||
return path
|
||||
}
|
||||
|
||||
disable, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value)
|
||||
return path
|
||||
}
|
||||
|
||||
if disable {
|
||||
return ""
|
||||
}
|
||||
|
||||
return path
|
||||
}
|
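For orientation, a minimal usage sketch of the compression helpers defined above: DecompressStream peeks at the magic bytes (via DetectCompression) and transparently handles gzip, zstd, or uncompressed input. The input path below and the bare-bones error handling are illustrative assumptions, not part of the vendored code.

package main

import (
	"io"
	"os"

	"github.com/containerd/containerd/archive/compression"
)

// decompressLayer copies a possibly gzip- or zstd-compressed layer tarball to
// dst; DecompressStream sniffs the format and falls back to a pass-through
// reader when no known compression is detected.
func decompressLayer(dst io.Writer, src io.Reader) error {
	dc, err := compression.DecompressStream(src)
	if err != nil {
		return err
	}
	defer dc.Close()

	_, err = io.Copy(dst, dc)
	return err
}

func main() {
	f, err := os.Open("layer.tar.gz") // illustrative input path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if err := decompressLayer(os.Stdout, f); err != nil {
		panic(err)
	}
}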
src/runtime/vendor/github.com/containerd/containerd/archive/tar.go (751 lines, generated, vendored)
@ -1,751 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var bufPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
buffer := make([]byte, 32*1024)
|
||||
return &buffer
|
||||
},
|
||||
}
|
||||
|
||||
var errInvalidArchive = errors.New("invalid archive")
|
||||
|
||||
// Diff returns a tar stream of the computed filesystem
|
||||
// difference between the provided directories.
|
||||
//
|
||||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
func Diff(ctx context.Context, a, b string) io.ReadCloser {
|
||||
r, w := io.Pipe()
|
||||
|
||||
go func() {
|
||||
err := WriteDiff(ctx, w, a, b)
|
||||
if err = w.CloseWithError(err); err != nil {
|
||||
log.G(ctx).WithError(err).Debugf("closing tar pipe failed")
|
||||
}
|
||||
}()
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// WriteDiff writes a tar stream of the computed difference between the
|
||||
// provided paths.
|
||||
//
|
||||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffOpt) error {
|
||||
var options WriteDiffOptions
|
||||
for _, opt := range opts {
|
||||
if err := opt(&options); err != nil {
|
||||
return errors.Wrap(err, "failed to apply option")
|
||||
}
|
||||
}
|
||||
if options.writeDiffFunc == nil {
|
||||
options.writeDiffFunc = writeDiffNaive
|
||||
}
|
||||
|
||||
return options.writeDiffFunc(ctx, w, a, b, options)
|
||||
}
|
||||
|
||||
// writeDiffNaive writes a tar stream of the computed difference between the
|
||||
// provided directories on disk.
|
||||
//
|
||||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOptions) error {
|
||||
cw := newChangeWriter(w, b)
|
||||
err := fs.Changes(ctx, a, b, cw.HandleChange)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create diff tar stream")
|
||||
}
|
||||
return cw.Close()
|
||||
}
|
||||
|
||||
const (
|
||||
// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
|
||||
// filename this means that file has been removed from the base layer.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
|
||||
whiteoutPrefix = ".wh."
|
||||
|
||||
// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
|
||||
// for removing an actual file. Normally these files are excluded from exported
|
||||
// archives.
|
||||
whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
|
||||
|
||||
// whiteoutOpaqueDir file means directory has been made opaque - meaning
|
||||
// readdir calls to this directory do not follow to lower layers.
|
||||
whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
|
||||
|
||||
paxSchilyXattr = "SCHILY.xattr."
|
||||
)
|
||||
|
||||
// Apply applies a tar stream of an OCI style diff tar.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
|
||||
func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) {
|
||||
root = filepath.Clean(root)
|
||||
|
||||
var options ApplyOptions
|
||||
for _, opt := range opts {
|
||||
if err := opt(&options); err != nil {
|
||||
return 0, errors.Wrap(err, "failed to apply option")
|
||||
}
|
||||
}
|
||||
if options.Filter == nil {
|
||||
options.Filter = all
|
||||
}
|
||||
if options.applyFunc == nil {
|
||||
options.applyFunc = applyNaive
|
||||
}
|
||||
|
||||
return options.applyFunc(ctx, root, r, options)
|
||||
}
|
||||
|
||||
// applyNaive applies a tar stream of an OCI style diff tar to a directory
|
||||
// applying each file as either a whole file or whiteout.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
|
||||
func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) {
|
||||
var (
|
||||
dirs []*tar.Header
|
||||
|
||||
tr = tar.NewReader(r)
|
||||
|
||||
// Used for handling opaque directory markers which
|
||||
// may occur out of order
|
||||
unpackedPaths = make(map[string]struct{})
|
||||
|
||||
convertWhiteout = options.ConvertWhiteout
|
||||
)
|
||||
|
||||
if convertWhiteout == nil {
|
||||
// handle whiteouts by removing the target files
|
||||
convertWhiteout = func(hdr *tar.Header, path string) (bool, error) {
|
||||
base := filepath.Base(path)
|
||||
dir := filepath.Dir(path)
|
||||
if base == whiteoutOpaqueDir {
|
||||
_, err := os.Lstat(dir)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
err = nil // parent was deleted
|
||||
}
|
||||
return err
|
||||
}
|
||||
if path == dir {
|
||||
return nil
|
||||
}
|
||||
if _, exists := unpackedPaths[path]; !exists {
|
||||
err := os.RemoveAll(path)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return false, err
|
||||
}
|
||||
|
||||
if strings.HasPrefix(base, whiteoutPrefix) {
|
||||
originalBase := base[len(whiteoutPrefix):]
|
||||
originalPath := filepath.Join(dir, originalBase)
|
||||
|
||||
return false, os.RemoveAll(originalPath)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
hdr, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
// end of tar archive
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
size += hdr.Size
|
||||
|
||||
// Normalize name, for safety and for a simple is-root check
|
||||
hdr.Name = filepath.Clean(hdr.Name)
|
||||
|
||||
accept, err := options.Filter(hdr)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !accept {
|
||||
continue
|
||||
}
|
||||
|
||||
if skipFile(hdr) {
|
||||
log.G(ctx).Warnf("file %q ignored: archive may not be supported on system", hdr.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
// Split name and resolve symlinks for root directory.
|
||||
ppath, base := filepath.Split(hdr.Name)
|
||||
ppath, err = fs.RootPath(root, ppath)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "failed to get root path")
|
||||
}
|
||||
|
||||
// Join to root before joining to parent path to ensure relative links are
|
||||
// already resolved based on the root before adding to parent.
|
||||
path := filepath.Join(ppath, filepath.Join("/", base))
|
||||
if path == root {
|
||||
log.G(ctx).Debugf("file %q ignored: resolved to root", hdr.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
// If file is not directly under root, ensure parent directory
|
||||
// exists or is created.
|
||||
if ppath != root {
|
||||
parentPath := ppath
|
||||
if base == "" {
|
||||
parentPath = filepath.Dir(path)
|
||||
}
|
||||
if err := mkparent(ctx, parentPath, root, options.Parents); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
// Naive whiteout convert function which handles whiteout files by
|
||||
// removing the target files.
|
||||
if err := validateWhiteout(path); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
writeFile, err := convertWhiteout(hdr, path)
|
||||
if err != nil {
|
||||
return 0, errors.Wrapf(err, "failed to convert whiteout file %q", hdr.Name)
|
||||
}
|
||||
if !writeFile {
|
||||
continue
|
||||
}
|
||||
// If path exists we almost always just want to remove and replace it.
|
||||
// The only exception is when it is a directory *and* the file from
|
||||
// the layer is also a directory. Then we want to merge them (i.e.
|
||||
// just apply the metadata from the layer).
|
||||
if fi, err := os.Lstat(path); err == nil {
|
||||
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
|
||||
if err := os.RemoveAll(path); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
srcData := io.Reader(tr)
|
||||
srcHdr := hdr
|
||||
|
||||
if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Directory mtimes must be handled at the end to avoid further
|
||||
// file creation in them to modify the directory mtime
|
||||
if hdr.Typeflag == tar.TypeDir {
|
||||
dirs = append(dirs, hdr)
|
||||
}
|
||||
unpackedPaths[path] = struct{}{}
|
||||
}
|
||||
|
||||
for _, hdr := range dirs {
|
||||
path, err := fs.RootPath(root, hdr.Name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
return size, nil
|
||||
}
|
||||
|
||||
func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader) error {
|
||||
// hdr.Mode is in linux format, which we can use for syscalls,
|
||||
// but for os.Foo() calls we need the mode converted to os.FileMode,
|
||||
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
|
||||
hdrInfo := hdr.FileInfo()
|
||||
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeDir:
|
||||
// Create directory unless it exists as a directory already.
|
||||
// In that case we just want to merge the two
|
||||
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
|
||||
if err := mkdir(path, hdrInfo.Mode()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
case tar.TypeReg, tar.TypeRegA:
|
||||
file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = copyBuffered(ctx, file, reader)
|
||||
if err1 := file.Close(); err == nil {
|
||||
err = err1
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case tar.TypeBlock, tar.TypeChar:
|
||||
// Handle this in an OS-specific way
|
||||
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case tar.TypeFifo:
|
||||
// Handle this in an OS-specific way
|
||||
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case tar.TypeLink:
|
||||
targetPath, err := hardlinkRootPath(extractDir, hdr.Linkname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Link(targetPath, path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case tar.TypeSymlink:
|
||||
if err := os.Symlink(hdr.Linkname, path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case tar.TypeXGlobalHeader:
|
||||
log.G(ctx).Debug("PAX Global Extended Headers found and ignored")
|
||||
return nil
|
||||
|
||||
default:
|
||||
return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag)
|
||||
}
|
||||
|
||||
// Lchown is not supported on Windows.
|
||||
if runtime.GOOS != "windows" {
|
||||
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for key, value := range hdr.PAXRecords {
|
||||
if strings.HasPrefix(key, paxSchilyXattr) {
|
||||
key = key[len(paxSchilyXattr):]
|
||||
if err := setxattr(path, key, value); err != nil {
|
||||
if errors.Is(err, syscall.ENOTSUP) {
|
||||
log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// call lchmod after lchown since lchown can modify the file mode
|
||||
if err := lchmod(path, hdrInfo.Mode()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime))
|
||||
}
|
||||
|
||||
func mkparent(ctx context.Context, path, root string, parents []string) error {
|
||||
if dir, err := os.Lstat(path); err == nil {
|
||||
if dir.IsDir() {
|
||||
return nil
|
||||
}
|
||||
return &os.PathError{
|
||||
Op: "mkparent",
|
||||
Path: path,
|
||||
Err: syscall.ENOTDIR,
|
||||
}
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
i := len(path)
|
||||
for i > len(root) && !os.IsPathSeparator(path[i-1]) {
|
||||
i--
|
||||
}
|
||||
|
||||
if i > len(root)+1 {
|
||||
if err := mkparent(ctx, path[:i-1], root, parents); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := mkdir(path, 0755); err != nil {
|
||||
// Check that still doesn't exist
|
||||
dir, err1 := os.Lstat(path)
|
||||
if err1 == nil && dir.IsDir() {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
for _, p := range parents {
|
||||
ppath, err := fs.RootPath(p, path[len(root):])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dir, err := os.Lstat(ppath)
|
||||
if err == nil {
|
||||
if !dir.IsDir() {
|
||||
// Replaced, do not copy attributes
|
||||
break
|
||||
}
|
||||
if err := copyDirInfo(dir, path); err != nil {
|
||||
return err
|
||||
}
|
||||
return copyUpXAttrs(path, ppath)
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
log.G(ctx).Debugf("parent directory %q not found: default permissions(0755) used", path)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type changeWriter struct {
|
||||
tw *tar.Writer
|
||||
source string
|
||||
whiteoutT time.Time
|
||||
inodeSrc map[uint64]string
|
||||
inodeRefs map[uint64][]string
|
||||
addedDirs map[string]struct{}
|
||||
}
|
||||
|
||||
func newChangeWriter(w io.Writer, source string) *changeWriter {
|
||||
return &changeWriter{
|
||||
tw: tar.NewWriter(w),
|
||||
source: source,
|
||||
whiteoutT: time.Now(),
|
||||
inodeSrc: map[uint64]string{},
|
||||
inodeRefs: map[uint64][]string{},
|
||||
addedDirs: map[string]struct{}{},
|
||||
}
|
||||
}
|
||||
|
||||
func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if k == fs.ChangeKindDelete {
|
||||
whiteOutDir := filepath.Dir(p)
|
||||
whiteOutBase := filepath.Base(p)
|
||||
whiteOut := filepath.Join(whiteOutDir, whiteoutPrefix+whiteOutBase)
|
||||
hdr := &tar.Header{
|
||||
Typeflag: tar.TypeReg,
|
||||
Name: whiteOut[1:],
|
||||
Size: 0,
|
||||
ModTime: cw.whiteoutT,
|
||||
AccessTime: cw.whiteoutT,
|
||||
ChangeTime: cw.whiteoutT,
|
||||
}
|
||||
if err := cw.includeParents(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cw.tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write whiteout header")
|
||||
}
|
||||
} else {
|
||||
var (
|
||||
link string
|
||||
err error
|
||||
source = filepath.Join(cw.source, p)
|
||||
)
|
||||
|
||||
switch {
|
||||
case f.Mode()&os.ModeSocket != 0:
|
||||
return nil // ignore sockets
|
||||
case f.Mode()&os.ModeSymlink != 0:
|
||||
if link, err = os.Readlink(source); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
hdr, err := tar.FileInfoHeader(f, link)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
|
||||
|
||||
// Truncate timestamps for compatibility; without PAX the stdlib rounds timestamps instead
|
||||
hdr.Format = tar.FormatPAX
|
||||
hdr.ModTime = hdr.ModTime.Truncate(time.Second)
|
||||
hdr.AccessTime = time.Time{}
|
||||
hdr.ChangeTime = time.Time{}
|
||||
|
||||
name := p
|
||||
if strings.HasPrefix(name, string(filepath.Separator)) {
|
||||
name, err = filepath.Rel(string(filepath.Separator), name)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to make path relative")
|
||||
}
|
||||
}
|
||||
name, err = tarName(name)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "cannot canonicalize path")
|
||||
}
|
||||
// suffix with '/' for directories
|
||||
if f.IsDir() && !strings.HasSuffix(name, "/") {
|
||||
name += "/"
|
||||
}
|
||||
hdr.Name = name
|
||||
|
||||
if err := setHeaderForSpecialDevice(hdr, name, f); err != nil {
|
||||
return errors.Wrap(err, "failed to set device headers")
|
||||
}
|
||||
|
||||
// additionalLinks stores file names which must be linked to
|
||||
// this file when this file is added
|
||||
var additionalLinks []string
|
||||
inode, isHardlink := fs.GetLinkInfo(f)
|
||||
if isHardlink {
|
||||
// If the inode has a source, always link to it
|
||||
if source, ok := cw.inodeSrc[inode]; ok {
|
||||
hdr.Typeflag = tar.TypeLink
|
||||
hdr.Linkname = source
|
||||
hdr.Size = 0
|
||||
} else {
|
||||
if k == fs.ChangeKindUnmodified {
|
||||
cw.inodeRefs[inode] = append(cw.inodeRefs[inode], name)
|
||||
return nil
|
||||
}
|
||||
cw.inodeSrc[inode] = name
|
||||
additionalLinks = cw.inodeRefs[inode]
|
||||
delete(cw.inodeRefs, inode)
|
||||
}
|
||||
} else if k == fs.ChangeKindUnmodified {
|
||||
// Nothing to write to diff
|
||||
return nil
|
||||
}
|
||||
|
||||
if capability, err := getxattr(source, "security.capability"); err != nil {
|
||||
return errors.Wrap(err, "failed to get capabilities xattr")
|
||||
} else if capability != nil {
|
||||
if hdr.PAXRecords == nil {
|
||||
hdr.PAXRecords = map[string]string{}
|
||||
}
|
||||
hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability)
|
||||
}
|
||||
|
||||
if err := cw.includeParents(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cw.tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write file header")
|
||||
}
|
||||
|
||||
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
|
||||
file, err := open(source)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open path: %v", source)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
n, err := copyBuffered(context.TODO(), cw.tw, file)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to copy")
|
||||
}
|
||||
if n != hdr.Size {
|
||||
return errors.New("short write copying file")
|
||||
}
|
||||
}
|
||||
|
||||
if additionalLinks != nil {
|
||||
source = hdr.Name
|
||||
for _, extra := range additionalLinks {
|
||||
hdr.Name = extra
|
||||
hdr.Typeflag = tar.TypeLink
|
||||
hdr.Linkname = source
|
||||
hdr.Size = 0
|
||||
|
||||
if err := cw.includeParents(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cw.tw.WriteHeader(hdr); err != nil {
|
||||
return errors.Wrap(err, "failed to write file header")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cw *changeWriter) Close() error {
|
||||
if err := cw.tw.Close(); err != nil {
|
||||
return errors.Wrap(err, "failed to close tar writer")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cw *changeWriter) includeParents(hdr *tar.Header) error {
|
||||
if cw.addedDirs == nil {
|
||||
return nil
|
||||
}
|
||||
name := strings.TrimRight(hdr.Name, "/")
|
||||
fname := filepath.Join(cw.source, name)
|
||||
parent := filepath.Dir(name)
|
||||
pname := filepath.Join(cw.source, parent)
|
||||
|
||||
// Do not include root directory as parent
|
||||
if fname != cw.source && pname != cw.source {
|
||||
_, ok := cw.addedDirs[parent]
|
||||
if !ok {
|
||||
cw.addedDirs[parent] = struct{}{}
|
||||
fi, err := os.Stat(pname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cw.HandleChange(fs.ChangeKindModify, parent, fi, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if hdr.Typeflag == tar.TypeDir {
|
||||
cw.addedDirs[name] = struct{}{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) {
|
||||
buf := bufPool.Get().(*[]byte)
|
||||
defer bufPool.Put(buf)
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = ctx.Err()
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
nr, er := src.Read(*buf)
|
||||
if nr > 0 {
|
||||
nw, ew := dst.Write((*buf)[0:nr])
|
||||
if nw > 0 {
|
||||
written += int64(nw)
|
||||
}
|
||||
if ew != nil {
|
||||
err = ew
|
||||
break
|
||||
}
|
||||
if nr != nw {
|
||||
err = io.ErrShortWrite
|
||||
break
|
||||
}
|
||||
}
|
||||
if er != nil {
|
||||
if er != io.EOF {
|
||||
err = er
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return written, err
|
||||
|
||||
}
|
||||
|
||||
// hardlinkRootPath returns target linkname, evaluating and bounding any
|
||||
// symlink to the parent directory.
|
||||
//
|
||||
// NOTE: Allow hardlink to the softlink, not the real one. For example,
|
||||
//
|
||||
// touch /tmp/zzz
|
||||
// ln -s /tmp/zzz /tmp/xxx
|
||||
// ln /tmp/xxx /tmp/yyy
|
||||
//
|
||||
// /tmp/yyy should be a symlink identical to /tmp/xxx, not a link to /tmp/zzz.
|
||||
func hardlinkRootPath(root, linkname string) (string, error) {
|
||||
ppath, base := filepath.Split(linkname)
|
||||
ppath, err := fs.RootPath(root, ppath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
targetPath := filepath.Join(ppath, base)
|
||||
if !strings.HasPrefix(targetPath, root) {
|
||||
targetPath = root
|
||||
}
|
||||
return targetPath, nil
|
||||
}
|
||||
|
||||
func validateWhiteout(path string) error {
|
||||
base := filepath.Base(path)
|
||||
dir := filepath.Dir(path)
|
||||
|
||||
if base == whiteoutOpaqueDir {
|
||||
return nil
|
||||
}
|
||||
|
||||
if strings.HasPrefix(base, whiteoutPrefix) {
|
||||
originalBase := base[len(whiteoutPrefix):]
|
||||
originalPath := filepath.Join(dir, originalBase)
|
||||
|
||||
// Ensure originalPath is under dir
|
||||
if dir[len(dir)-1] != filepath.Separator {
|
||||
dir += string(filepath.Separator)
|
||||
}
|
||||
if !strings.HasPrefix(originalPath, dir) {
|
||||
return errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
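For context, a rough sketch of how Diff and Apply from the file above are commonly paired to replay a filesystem delta. The directory paths are illustrative assumptions; deletions travel through the tar stream as ".wh." whiteout entries, as documented in the code.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd/archive"
)

// replayDiff streams the changes between lowerDir and upperDir as an OCI-style
// layer tar and applies that stream onto rootDir, returning the number of
// bytes of tar content that were applied.
func replayDiff(ctx context.Context, lowerDir, upperDir, rootDir string) (int64, error) {
	rc := archive.Diff(ctx, lowerDir, upperDir)
	defer rc.Close()

	return archive.Apply(ctx, rootDir, rc)
}

func main() {
	// Directory names below are purely illustrative.
	n, err := replayDiff(context.Background(), "/tmp/lower", "/tmp/upper", "/tmp/root")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes of diff content", n)
}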
src/runtime/vendor/github.com/containerd/containerd/archive/tar_freebsd.go (48 lines, generated, vendored)
@ -1,48 +0,0 @@
|
||||
// +build freebsd
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// mknod wraps unix.Mknod. FreeBSD's unix.Mknod signature is different from
|
||||
// other Unix and Unix-like operating systems.
|
||||
func mknod(path string, mode uint32, dev uint64) error {
|
||||
return unix.Mknod(path, mode, dev)
|
||||
}
|
||||
|
||||
// lsetxattrCreate wraps unix.Lsetxattr with FreeBSD-specific flags and errors
|
||||
func lsetxattrCreate(link string, attr string, data []byte) error {
|
||||
err := unix.Lsetxattr(link, attr, data, 0)
|
||||
if err == unix.ENOTSUP || err == unix.EEXIST {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func lchmod(path string, mode os.FileMode) error {
|
||||
err := unix.Fchmodat(unix.AT_FDCWD, path, uint32(mode), unix.AT_SYMLINK_NOFOLLOW)
|
||||
if err != nil {
|
||||
err = &os.PathError{Op: "lchmod", Path: path, Err: err}
|
||||
}
|
||||
return err
|
||||
}
|
src/runtime/vendor/github.com/containerd/containerd/archive/tar_mostunix.go (55 lines, generated, vendored)
@ -1,55 +0,0 @@
|
||||
// +build !windows,!freebsd
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// mknod wraps unix.Mknod and casts dev to int
|
||||
func mknod(path string, mode uint32, dev uint64) error {
|
||||
return unix.Mknod(path, mode, int(dev))
|
||||
}
|
||||
|
||||
// lsetxattrCreate wraps unix.Lsetxattr, passes the unix.XATTR_CREATE flag on
|
||||
// supported operating systems, and ignores appropriate errors
|
||||
func lsetxattrCreate(link string, attr string, data []byte) error {
|
||||
err := unix.Lsetxattr(link, attr, data, unix.XATTR_CREATE)
|
||||
if err == unix.ENOTSUP || err == unix.ENODATA || err == unix.EEXIST {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// lchmod checks for symlink and changes the mode if not a symlink
|
||||
func lchmod(path string, mode os.FileMode) error {
|
||||
fi, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if fi.Mode()&os.ModeSymlink == 0 {
|
||||
if err := os.Chmod(path, mode); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
src/runtime/vendor/github.com/containerd/containerd/archive/tar_opts.go (85 lines, generated, vendored)
@ -1,85 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"io"
|
||||
)
|
||||
|
||||
// ApplyOptions provides additional options for an Apply operation
|
||||
type ApplyOptions struct {
|
||||
Filter Filter // Filter tar headers
|
||||
ConvertWhiteout ConvertWhiteout // Convert whiteout files
|
||||
Parents []string // Parent directories to handle inherited attributes without CoW
|
||||
|
||||
applyFunc func(context.Context, string, io.Reader, ApplyOptions) (int64, error)
|
||||
}
|
||||
|
||||
// ApplyOpt allows setting mutable archive apply properties on creation
|
||||
type ApplyOpt func(options *ApplyOptions) error
|
||||
|
||||
// Filter specific files from the archive
|
||||
type Filter func(*tar.Header) (bool, error)
|
||||
|
||||
// ConvertWhiteout converts whiteout files from the archive
|
||||
type ConvertWhiteout func(*tar.Header, string) (bool, error)
|
||||
|
||||
// all allows all files
|
||||
func all(_ *tar.Header) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// WithFilter uses the filter to select which files are to be extracted.
|
||||
func WithFilter(f Filter) ApplyOpt {
|
||||
return func(options *ApplyOptions) error {
|
||||
options.Filter = f
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithConvertWhiteout uses the convert function to convert the whiteout files.
|
||||
func WithConvertWhiteout(c ConvertWhiteout) ApplyOpt {
|
||||
return func(options *ApplyOptions) error {
|
||||
options.ConvertWhiteout = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithParents provides parent directories for resolving inherited attributes
|
||||
// directory from the filesystem.
|
||||
// Inherited attributes are searched from first to last, making the first
|
||||
// element in the list the most immediate parent directory.
|
||||
// NOTE: When applying to a filesystem which supports CoW, file attributes
|
||||
// should be inherited by the filesystem.
|
||||
func WithParents(p []string) ApplyOpt {
|
||||
return func(options *ApplyOptions) error {
|
||||
options.Parents = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WriteDiffOptions provides additional options for a WriteDiff operation
|
||||
type WriteDiffOptions struct {
|
||||
ParentLayers []string // Windows needs the full list of parent layers
|
||||
|
||||
writeDiffFunc func(context.Context, io.Writer, string, string, WriteDiffOptions) error
|
||||
}
|
||||
|
||||
// WriteDiffOpt allows setting mutable archive write properties on creation
|
||||
type WriteDiffOpt func(options *WriteDiffOptions) error
|
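A small sketch of how the ApplyOpt helpers above compose. The filter policy here (dropping ".wh." whiteout entries so recorded deletions are ignored) and the package name are illustrative assumptions, not behaviour prescribed by the package.

package layerutil

import (
	"archive/tar"
	"context"
	"io"
	"path/filepath"
	"strings"

	"github.com/containerd/containerd/archive"
)

// applyWithoutWhiteouts unpacks a layer tar into root but skips ".wh." entries,
// so deletions recorded in the layer are not applied to the target tree.
func applyWithoutWhiteouts(ctx context.Context, root string, layer io.Reader) (int64, error) {
	return archive.Apply(ctx, root, layer,
		archive.WithFilter(func(hdr *tar.Header) (bool, error) {
			return !strings.HasPrefix(filepath.Base(hdr.Name), ".wh."), nil
		}))
}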
src/runtime/vendor/github.com/containerd/containerd/archive/tar_opts_linux.go (59 lines, generated, vendored)
@ -1,59 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// AufsConvertWhiteout converts whiteout files for aufs.
|
||||
func AufsConvertWhiteout(_ *tar.Header, _ string) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// OverlayConvertWhiteout converts whiteout files for overlay.
|
||||
func OverlayConvertWhiteout(hdr *tar.Header, path string) (bool, error) {
|
||||
base := filepath.Base(path)
|
||||
dir := filepath.Dir(path)
|
||||
|
||||
// if a directory is marked as opaque, we need to translate that to overlay
|
||||
if base == whiteoutOpaqueDir {
|
||||
// don't write the file itself
|
||||
return false, unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
|
||||
}
|
||||
|
||||
// if a file was deleted and we are using overlay, we need to create a character device
|
||||
if strings.HasPrefix(base, whiteoutPrefix) {
|
||||
originalBase := base[len(whiteoutPrefix):]
|
||||
originalPath := filepath.Join(dir, originalBase)
|
||||
|
||||
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
|
||||
return false, err
|
||||
}
|
||||
// don't write the file itself
|
||||
return false, os.Chown(originalPath, hdr.Uid, hdr.Gid)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
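And a companion sketch wiring the Linux overlay converter above into Apply, so whiteouts become overlayfs character devices and opaque-directory xattrs instead of literal deletions. Linux-only, since OverlayConvertWhiteout sits behind the linux build tag; the package name is an illustrative assumption.

// +build linux

package overlayutil

import (
	"context"
	"io"

	"github.com/containerd/containerd/archive"
)

// applyToOverlayUpper unpacks a layer tar into an overlayfs upper directory,
// converting ".wh." entries into 0:0 character devices and ".wh..wh..opq"
// markers into the trusted.overlay.opaque xattr via OverlayConvertWhiteout.
func applyToOverlayUpper(ctx context.Context, upperDir string, layer io.Reader) (int64, error) {
	return archive.Apply(ctx, upperDir, layer,
		archive.WithConvertWhiteout(archive.OverlayConvertWhiteout))
}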
src/runtime/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go (72 lines, generated, vendored)
@ -1,72 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/Microsoft/hcsshim/pkg/ociwclayer"
|
||||
)
|
||||
|
||||
// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
|
||||
func applyWindowsLayer(ctx context.Context, root string, r io.Reader, options ApplyOptions) (size int64, err error) {
|
||||
return ociwclayer.ImportLayerFromTar(ctx, r, root, options.Parents)
|
||||
}
|
||||
|
||||
// AsWindowsContainerLayer indicates that the tar stream to apply is that of
|
||||
// a Windows Container Layer. The caller must be holding SeBackupPrivilege and
|
||||
// SeRestorePrivilege.
|
||||
func AsWindowsContainerLayer() ApplyOpt {
|
||||
return func(options *ApplyOptions) error {
|
||||
options.applyFunc = applyWindowsLayer
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// writeDiffWindowsLayers writes a tar stream of the computed difference between the
|
||||
// provided Windows layers
|
||||
//
|
||||
// Produces a tar using OCI style file markers for deletions. Deleted
|
||||
// files will be prepended with the prefix ".wh.". This style is
|
||||
// based off AUFS whiteouts.
|
||||
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
|
||||
func writeDiffWindowsLayers(ctx context.Context, w io.Writer, _, layer string, options WriteDiffOptions) error {
|
||||
return ociwclayer.ExportLayerToTar(ctx, w, layer, options.ParentLayers)
|
||||
}
|
||||
|
||||
// AsWindowsContainerLayerPair indicates that the paths to diff are a pair of
|
||||
// Windows Container Layers. The caller must be holding SeBackupPrivilege.
|
||||
func AsWindowsContainerLayerPair() WriteDiffOpt {
|
||||
return func(options *WriteDiffOptions) error {
|
||||
options.writeDiffFunc = writeDiffWindowsLayers
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithParentLayers provides the Windows Container Layers that are the parents
|
||||
// of the target (right-hand, "upper") layer, if any. The source (left-hand, "lower")
|
||||
// layer passed to WriteDiff should be "" in this case.
|
||||
func WithParentLayers(p []string) WriteDiffOpt {
|
||||
return func(options *WriteDiffOptions) error {
|
||||
options.ParentLayers = p
|
||||
return nil
|
||||
}
|
||||
}
|
src/runtime/vendor/github.com/containerd/containerd/archive/tar_unix.go (190 lines, generated, vendored)
@ -1,190 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"os"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd/pkg/userns"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/containerd/continuity/sysx"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func tarName(p string) (string, error) {
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func chmodTarEntry(perm os.FileMode) os.FileMode {
|
||||
return perm
|
||||
}
|
||||
|
||||
func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) error {
|
||||
s, ok := fi.Sys().(*syscall.Stat_t)
|
||||
if !ok {
|
||||
return errors.New("unsupported stat type")
|
||||
}
|
||||
|
||||
// Rdev is int32 on darwin/bsd, int64 on linux/solaris
|
||||
rdev := uint64(s.Rdev) // nolint: unconvert
|
||||
|
||||
// Currently go does not fill in the major/minors
|
||||
if s.Mode&syscall.S_IFBLK != 0 ||
|
||||
s.Mode&syscall.S_IFCHR != 0 {
|
||||
hdr.Devmajor = int64(unix.Major(rdev))
|
||||
hdr.Devminor = int64(unix.Minor(rdev))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func open(p string) (*os.File, error) {
|
||||
return os.Open(p)
|
||||
}
|
||||
|
||||
func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
|
||||
f, err := os.OpenFile(name, flag, perm)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Call chmod to avoid permission mask
|
||||
if err := os.Chmod(name, perm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, err
|
||||
}
|
||||
|
||||
func mkdir(path string, perm os.FileMode) error {
|
||||
if err := os.Mkdir(path, perm); err != nil {
|
||||
return err
|
||||
}
|
||||
// Only final created directory gets explicit permission
|
||||
// call to avoid permission mask
|
||||
return os.Chmod(path, perm)
|
||||
}
|
||||
|
||||
func skipFile(hdr *tar.Header) bool {
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeBlock, tar.TypeChar:
|
||||
// cannot create a device if running in user namespace
|
||||
return userns.RunningInUserNS()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
||||
// createTarFile to handle the following types of header: Block; Char; Fifo.
|
||||
// This function must not be called for Block and Char when running in userns.
|
||||
// (skipFile() should return true for them.)
|
||||
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
||||
mode := uint32(hdr.Mode & 07777)
|
||||
switch hdr.Typeflag {
|
||||
case tar.TypeBlock:
|
||||
mode |= unix.S_IFBLK
|
||||
case tar.TypeChar:
|
||||
mode |= unix.S_IFCHR
|
||||
case tar.TypeFifo:
|
||||
mode |= unix.S_IFIFO
|
||||
}
|
||||
|
||||
return mknod(path, mode, unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor)))
|
||||
}
|
||||
|
||||
func getxattr(path, attr string) ([]byte, error) {
|
||||
b, err := sysx.LGetxattr(path, attr)
|
||||
if err == unix.ENOTSUP || err == sysx.ENODATA {
|
||||
return nil, nil
|
||||
}
|
||||
return b, err
|
||||
}
|
||||
|
||||
func setxattr(path, key, value string) error {
|
||||
// Do not set trusted attributes
|
||||
if strings.HasPrefix(key, "trusted.") {
|
||||
return errors.Wrap(unix.ENOTSUP, "admin attributes from archive not supported")
|
||||
}
|
||||
return unix.Lsetxattr(path, key, []byte(value), 0)
|
||||
}
|
||||
|
||||
func copyDirInfo(fi os.FileInfo, path string) error {
|
||||
st := fi.Sys().(*syscall.Stat_t)
|
||||
if err := os.Lchown(path, int(st.Uid), int(st.Gid)); err != nil {
|
||||
if os.IsPermission(err) {
|
||||
// Normally if uid/gid are the same this would be a no-op, but some
|
||||
// filesystems may still return EPERM... for instance NFS does this.
|
||||
// In such a case, this is not an error.
|
||||
if dstStat, err2 := os.Lstat(path); err2 == nil {
|
||||
st2 := dstStat.Sys().(*syscall.Stat_t)
|
||||
if st.Uid == st2.Uid && st.Gid == st2.Gid {
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to chown %s", path)
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.Chmod(path, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", path)
|
||||
}
|
||||
|
||||
timespec := []unix.Timespec{
|
||||
unix.NsecToTimespec(syscall.TimespecToNsec(fs.StatAtime(st))),
|
||||
unix.NsecToTimespec(syscall.TimespecToNsec(fs.StatMtime(st))),
|
||||
}
|
||||
if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
|
||||
return errors.Wrapf(err, "failed to utime %s", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyUpXAttrs(dst, src string) error {
|
||||
xattrKeys, err := sysx.LListxattr(src)
|
||||
if err != nil {
|
||||
if err == unix.ENOTSUP || err == sysx.ENODATA {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(err, "failed to list xattrs on %s", src)
|
||||
}
|
||||
for _, xattr := range xattrKeys {
|
||||
// Do not copy up trusted attributes
|
||||
if strings.HasPrefix(xattr, "trusted.") {
|
||||
continue
|
||||
}
|
||||
data, err := sysx.LGetxattr(src, xattr)
|
||||
if err != nil {
|
||||
if err == unix.ENOTSUP || err == sysx.ENODATA {
|
||||
continue
|
||||
}
|
||||
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
|
||||
}
|
||||
if err := lsetxattrCreate(dst, xattr, data); err != nil {
|
||||
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
src/runtime/vendor/github.com/containerd/containerd/archive/tar_windows.go (124 lines, generated, vendored)
@ -1,124 +0,0 @@
|
||||
// +build windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/sys"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// tarName returns platform-specific filepath
|
||||
// to canonical posix-style path for tar archival. p is relative
|
||||
// path.
|
||||
func tarName(p string) (string, error) {
|
||||
// windows: convert windows style relative path with backslashes
|
||||
// into forward slashes. Since windows does not allow '/' or '\'
|
||||
// in file names, it is mostly safe to replace; however, we must
|
||||
// check just in case
|
||||
if strings.Contains(p, "/") {
|
||||
return "", fmt.Errorf("windows path contains forward slash: %s", p)
|
||||
}
|
||||
|
||||
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
|
||||
}
|
||||
|
||||
// chmodTarEntry is used to adjust the file permissions used in tar header based
|
||||
// on the platform the archival is done.
|
||||
func chmodTarEntry(perm os.FileMode) os.FileMode {
|
||||
perm &= 0755
|
||||
// Add the x bit: make everything +x from windows
|
||||
perm |= 0111
|
||||
|
||||
return perm
|
||||
}
|
||||
|
||||
func setHeaderForSpecialDevice(*tar.Header, string, os.FileInfo) error {
|
||||
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
|
||||
return nil
|
||||
}
|
||||
|
||||
func open(p string) (*os.File, error) {
|
||||
// We use sys.OpenSequential to ensure we use sequential file
|
||||
// access on Windows to avoid depleting the standby list.
|
||||
return sys.OpenSequential(p)
|
||||
}
|
||||
|
||||
func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
|
||||
// Source is regular file. We use sys.OpenFileSequential to use sequential
|
||||
// file access to avoid depleting the standby list on Windows.
|
||||
return sys.OpenFileSequential(name, flag, perm)
|
||||
}
|
||||
|
||||
func mkdir(path string, perm os.FileMode) error {
|
||||
return os.Mkdir(path, perm)
|
||||
}
|
||||
|
||||
func skipFile(hdr *tar.Header) bool {
|
||||
// Windows does not support filenames with colons in them. Ignore
|
||||
// these files. This is not a problem though (although it might
|
||||
// appear that it is). Let's suppose a client is running docker pull.
|
||||
// The daemon it points to is Windows. Would it make sense for the
|
||||
// client to be doing a docker pull Ubuntu for example (which has files
|
||||
// with colons in the name under /usr/share/man/man3)? No, absolutely
|
||||
// not as it would really only make sense that they were pulling a
|
||||
// Windows image. However, for development, it is necessary to be able
|
||||
// to pull Linux images which are in the repository.
|
||||
//
|
||||
// TODO Windows. Once the registry is aware of what images are Windows-
|
||||
// specific or Linux-specific, this warning should be changed to an error
|
||||
// to cater for the situation where someone does manage to upload a Linux
|
||||
// image but have it tagged as Windows inadvertently.
|
||||
return strings.Contains(hdr.Name, ":")
|
||||
}
|
||||
|
||||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
||||
// createTarFile to handle the following types of header: Block; Char; Fifo
|
||||
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func lchmod(path string, mode os.FileMode) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getxattr(path, attr string) ([]byte, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func setxattr(path, key, value string) error {
|
||||
// Return not support error, do not wrap underlying not supported
|
||||
// since xattrs should not exist in windows diff archives
|
||||
return errors.New("xattrs not supported on Windows")
|
||||
}
|
||||
|
||||
func copyDirInfo(fi os.FileInfo, path string) error {
|
||||
if err := os.Chmod(path, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", path)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyUpXAttrs(dst, src string) error {
|
||||
return nil
|
||||
}
|
src/runtime/vendor/github.com/containerd/containerd/archive/time.go (54 lines, generated, vendored)
@ -1,54 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
minTime = time.Unix(0, 0)
|
||||
maxTime time.Time
|
||||
)
|
||||
|
||||
func init() {
|
||||
if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
|
||||
// This is a 64 bit timespec
|
||||
// os.Chtimes limits time to the following
|
||||
maxTime = time.Unix(0, 1<<63-1)
|
||||
} else {
|
||||
// This is a 32 bit timespec
|
||||
maxTime = time.Unix(1<<31-1, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func boundTime(t time.Time) time.Time {
|
||||
if t.Before(minTime) || t.After(maxTime) {
|
||||
return minTime
|
||||
}
|
||||
|
||||
return t
|
||||
}
|
||||
|
||||
func latestTime(t1, t2 time.Time) time.Time {
|
||||
if t1.Before(t2) {
|
||||
return t2
|
||||
}
|
||||
return t1
|
||||
}
|
src/runtime/vendor/github.com/containerd/containerd/archive/time_unix.go (39 lines, generated, vendored)
@ -1,39 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func chtimes(path string, atime, mtime time.Time) error {
|
||||
var utimes [2]unix.Timespec
|
||||
utimes[0] = unix.NsecToTimespec(atime.UnixNano())
|
||||
utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
|
||||
|
||||
if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
|
||||
return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
src/runtime/vendor/github.com/containerd/containerd/archive/time_windows.go (42 lines, generated, vendored)
@ -1,42 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package archive
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
)
|
||||
|
||||
// chtimes will set the create time on a file using the given modtime.
|
||||
// This requires calling SetFileTime and explicitly including the create time.
|
||||
func chtimes(path string, atime, mtime time.Time) error {
|
||||
ctimespec := windows.NsecToTimespec(mtime.UnixNano())
|
||||
pathp, e := windows.UTF16PtrFromString(path)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
h, e := windows.CreateFile(pathp,
|
||||
windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
|
||||
windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
defer windows.Close(h)
|
||||
c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
|
||||
return windows.SetFileTime(h, &c, nil, nil)
|
||||
}
|
src/runtime/vendor/github.com/containerd/containerd/cio/io.go (359 lines, generated, vendored)
@ -1,359 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/defaults"
|
||||
)
|
||||
|
||||
var bufPool = sync.Pool{
|
||||
New: func() interface{} {
|
||||
buffer := make([]byte, 32<<10)
|
||||
return &buffer
|
||||
},
|
||||
}
|
||||
|
||||
// Config holds the IO configurations.
|
||||
type Config struct {
|
||||
// Terminal is true if one has been allocated
|
||||
Terminal bool
|
||||
// Stdin path
|
||||
Stdin string
|
||||
// Stdout path
|
||||
Stdout string
|
||||
// Stderr path
|
||||
Stderr string
|
||||
}
|
||||
|
||||
// IO holds the io information for a task or process
|
||||
type IO interface {
|
||||
// Config returns the IO configuration.
|
||||
Config() Config
|
||||
// Cancel aborts all current io operations.
|
||||
Cancel()
|
||||
// Wait blocks until all io copy operations have completed.
|
||||
Wait()
|
||||
// Close cleans up all open io resources. Cancel() is always called before
|
||||
// Close()
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Creator creates new IO sets for a task
|
||||
type Creator func(id string) (IO, error)
|
||||
|
||||
// Attach allows callers to reattach to running tasks
|
||||
//
|
||||
// There should only be one reader for a task's IO set
|
||||
// because fifo's can only be read from one reader or the output
|
||||
// will be sent only to the first reads
|
||||
type Attach func(*FIFOSet) (IO, error)
|
||||
|
||||
// FIFOSet is a set of file paths to FIFOs for a task's standard IO streams
|
||||
type FIFOSet struct {
|
||||
Config
|
||||
close func() error
|
||||
}
|
||||
|
||||
// Close the FIFOSet
|
||||
func (f *FIFOSet) Close() error {
|
||||
if f != nil && f.close != nil {
|
||||
return f.close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFIFOSet returns a new FIFOSet from a Config and a close function
|
||||
func NewFIFOSet(config Config, close func() error) *FIFOSet {
|
||||
return &FIFOSet{Config: config, close: close}
|
||||
}
|
||||
|
||||
// Streams used to configure a Creator or Attach
|
||||
type Streams struct {
|
||||
Stdin io.Reader
|
||||
Stdout io.Writer
|
||||
Stderr io.Writer
|
||||
Terminal bool
|
||||
FIFODir string
|
||||
}
|
||||
|
||||
// Opt customize options for creating a Creator or Attach
|
||||
type Opt func(*Streams)
|
||||
|
||||
// WithStdio sets stream options to the standard input/output streams
|
||||
func WithStdio(opt *Streams) {
|
||||
WithStreams(os.Stdin, os.Stdout, os.Stderr)(opt)
|
||||
}
|
||||
|
||||
// WithTerminal sets the terminal option
|
||||
func WithTerminal(opt *Streams) {
|
||||
opt.Terminal = true
|
||||
}
|
||||
|
||||
// WithStreams sets the stream options to the specified Reader and Writers
|
||||
func WithStreams(stdin io.Reader, stdout, stderr io.Writer) Opt {
|
||||
return func(opt *Streams) {
|
||||
opt.Stdin = stdin
|
||||
opt.Stdout = stdout
|
||||
opt.Stderr = stderr
|
||||
}
|
||||
}
|
||||
|
||||
// WithFIFODir sets the fifo directory.
|
||||
// e.g. "/run/containerd/fifo", "/run/users/1001/containerd/fifo"
|
||||
func WithFIFODir(dir string) Opt {
|
||||
return func(opt *Streams) {
|
||||
opt.FIFODir = dir
|
||||
}
|
||||
}
|
||||
|
||||
// NewCreator returns an IO creator from the options
|
||||
func NewCreator(opts ...Opt) Creator {
|
||||
streams := &Streams{}
|
||||
for _, opt := range opts {
|
||||
opt(streams)
|
||||
}
|
||||
if streams.FIFODir == "" {
|
||||
streams.FIFODir = defaults.DefaultFIFODir
|
||||
}
|
||||
return func(id string) (IO, error) {
|
||||
fifos, err := NewFIFOSetInDir(streams.FIFODir, id, streams.Terminal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if streams.Stdin == nil {
|
||||
fifos.Stdin = ""
|
||||
}
|
||||
if streams.Stdout == nil {
|
||||
fifos.Stdout = ""
|
||||
}
|
||||
if streams.Stderr == nil {
|
||||
fifos.Stderr = ""
|
||||
}
|
||||
return copyIO(fifos, streams)
|
||||
}
|
||||
}
|
||||
|
||||
// NewAttach attaches the existing io for a task to the provided io.Reader/Writers
|
||||
func NewAttach(opts ...Opt) Attach {
|
||||
streams := &Streams{}
|
||||
for _, opt := range opts {
|
||||
opt(streams)
|
||||
}
|
||||
return func(fifos *FIFOSet) (IO, error) {
|
||||
if fifos == nil {
|
||||
return nil, fmt.Errorf("cannot attach, missing fifos")
|
||||
}
|
||||
return copyIO(fifos, streams)
|
||||
}
|
||||
}
|
||||
|
||||
// NullIO redirects the container's IO into /dev/null
|
||||
func NullIO(_ string) (IO, error) {
|
||||
return &cio{}, nil
|
||||
}
|
||||
|
||||
// cio is a basic container IO implementation.
|
||||
type cio struct {
|
||||
config Config
|
||||
wg *sync.WaitGroup
|
||||
closers []io.Closer
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
func (c *cio) Config() Config {
|
||||
return c.config
|
||||
}
|
||||
|
||||
func (c *cio) Wait() {
|
||||
if c.wg != nil {
|
||||
c.wg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cio) Close() error {
|
||||
var lastErr error
|
||||
for _, closer := range c.closers {
|
||||
if closer == nil {
|
||||
continue
|
||||
}
|
||||
if err := closer.Close(); err != nil {
|
||||
lastErr = err
|
||||
}
|
||||
}
|
||||
return lastErr
|
||||
}
|
||||
|
||||
func (c *cio) Cancel() {
|
||||
if c.cancel != nil {
|
||||
c.cancel()
|
||||
}
|
||||
}
|
||||
|
||||
type pipes struct {
|
||||
Stdin io.WriteCloser
|
||||
Stdout io.ReadCloser
|
||||
Stderr io.ReadCloser
|
||||
}
|
||||
|
||||
// DirectIO allows task IO to be handled externally by the caller
|
||||
type DirectIO struct {
|
||||
pipes
|
||||
cio
|
||||
}
|
||||
|
||||
var (
|
||||
_ IO = &DirectIO{}
|
||||
_ IO = &logURI{}
|
||||
)
|
||||
|
||||
// LogURI provides the raw logging URI
|
||||
func LogURI(uri *url.URL) Creator {
|
||||
return func(_ string) (IO, error) {
|
||||
return &logURI{
|
||||
config: Config{
|
||||
Stdout: uri.String(),
|
||||
Stderr: uri.String(),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// BinaryIO forwards container STDOUT|STDERR directly to a logging binary
|
||||
func BinaryIO(binary string, args map[string]string) Creator {
|
||||
return func(_ string) (IO, error) {
|
||||
uri, err := LogURIGenerator("binary", binary, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := uri.String()
|
||||
return &logURI{
|
||||
config: Config{
|
||||
Stdout: res,
|
||||
Stderr: res,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// TerminalBinaryIO forwards container STDOUT|STDERR directly to a logging binary
|
||||
// It also sets the terminal option to true
|
||||
func TerminalBinaryIO(binary string, args map[string]string) Creator {
|
||||
return func(_ string) (IO, error) {
|
||||
uri, err := LogURIGenerator("binary", binary, args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := uri.String()
|
||||
return &logURI{
|
||||
config: Config{
|
||||
Stdout: res,
|
||||
Stderr: res,
|
||||
Terminal: true,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// LogFile creates a file on disk that logs the task's STDOUT,STDERR.
|
||||
// If the log file already exists, the logs will be appended to the file.
|
||||
func LogFile(path string) Creator {
|
||||
return func(_ string) (IO, error) {
|
||||
uri, err := LogURIGenerator("file", path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := uri.String()
|
||||
return &logURI{
|
||||
config: Config{
|
||||
Stdout: res,
|
||||
Stderr: res,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// LogURIGenerator is the helper to generate log uri with specific scheme.
|
||||
func LogURIGenerator(scheme string, path string, args map[string]string) (*url.URL, error) {
|
||||
path = filepath.Clean(path)
|
||||
if !strings.HasPrefix(path, "/") {
|
||||
return nil, errors.New("absolute path needed")
|
||||
}
|
||||
|
||||
uri := &url.URL{
|
||||
Scheme: scheme,
|
||||
Path: path,
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
return uri, nil
|
||||
}
|
||||
|
||||
q := uri.Query()
|
||||
for k, v := range args {
|
||||
q.Set(k, v)
|
||||
}
|
||||
uri.RawQuery = q.Encode()
|
||||
return uri, nil
|
||||
}
|
||||
|
||||
type logURI struct {
|
||||
config Config
|
||||
}
|
||||
|
||||
func (l *logURI) Config() Config {
|
||||
return l.config
|
||||
}
|
||||
|
||||
func (l *logURI) Cancel() {
|
||||
|
||||
}
|
||||
|
||||
func (l *logURI) Wait() {
|
||||
|
||||
}
|
||||
|
||||
func (l *logURI) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Load the io for a container but do not attach
|
||||
//
|
||||
// Allows io to be loaded on the task for deletion without
|
||||
// starting copy routines
|
||||
func Load(set *FIFOSet) (IO, error) {
|
||||
return &cio{
|
||||
config: set.Config,
|
||||
closers: []io.Closer{set},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *pipes) closers() []io.Closer {
|
||||
return []io.Closer{p.Stdin, p.Stdout, p.Stderr}
|
||||
}
|
154
src/runtime/vendor/github.com/containerd/containerd/cio/io_unix.go
generated
vendored
154
src/runtime/vendor/github.com/containerd/containerd/cio/io_unix.go
generated
vendored
@ -1,154 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/fifo"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root
|
||||
func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) {
|
||||
if root != "" {
|
||||
if err := os.MkdirAll(root, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
dir, err := ioutil.TempDir(root, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
closer := func() error {
|
||||
return os.RemoveAll(dir)
|
||||
}
|
||||
return NewFIFOSet(Config{
|
||||
Stdin: filepath.Join(dir, id+"-stdin"),
|
||||
Stdout: filepath.Join(dir, id+"-stdout"),
|
||||
Stderr: filepath.Join(dir, id+"-stderr"),
|
||||
Terminal: terminal,
|
||||
}, closer), nil
|
||||
}
|
||||
|
||||
func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) {
|
||||
var ctx, cancel = context.WithCancel(context.Background())
|
||||
pipes, err := openFifos(ctx, fifos)
|
||||
if err != nil {
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if fifos.Stdin != "" {
|
||||
go func() {
|
||||
p := bufPool.Get().(*[]byte)
|
||||
defer bufPool.Put(p)
|
||||
|
||||
io.CopyBuffer(pipes.Stdin, ioset.Stdin, *p)
|
||||
pipes.Stdin.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
var wg = &sync.WaitGroup{}
|
||||
if fifos.Stdout != "" {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
p := bufPool.Get().(*[]byte)
|
||||
defer bufPool.Put(p)
|
||||
|
||||
io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p)
|
||||
pipes.Stdout.Close()
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
|
||||
if !fifos.Terminal && fifos.Stderr != "" {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
p := bufPool.Get().(*[]byte)
|
||||
defer bufPool.Put(p)
|
||||
|
||||
io.CopyBuffer(ioset.Stderr, pipes.Stderr, *p)
|
||||
pipes.Stderr.Close()
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
return &cio{
|
||||
config: fifos.Config,
|
||||
wg: wg,
|
||||
closers: append(pipes.closers(), fifos),
|
||||
cancel: cancel,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func openFifos(ctx context.Context, fifos *FIFOSet) (f pipes, retErr error) {
|
||||
defer func() {
|
||||
if retErr != nil {
|
||||
fifos.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if fifos.Stdin != "" {
|
||||
if f.Stdin, retErr = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
|
||||
return f, errors.Wrapf(retErr, "failed to open stdin fifo")
|
||||
}
|
||||
defer func() {
|
||||
if retErr != nil && f.Stdin != nil {
|
||||
f.Stdin.Close()
|
||||
}
|
||||
}()
|
||||
}
|
||||
if fifos.Stdout != "" {
|
||||
if f.Stdout, retErr = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
|
||||
return f, errors.Wrapf(retErr, "failed to open stdout fifo")
|
||||
}
|
||||
defer func() {
|
||||
if retErr != nil && f.Stdout != nil {
|
||||
f.Stdout.Close()
|
||||
}
|
||||
}()
|
||||
}
|
||||
if !fifos.Terminal && fifos.Stderr != "" {
|
||||
if f.Stderr, retErr = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); retErr != nil {
|
||||
return f, errors.Wrapf(retErr, "failed to open stderr fifo")
|
||||
}
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// NewDirectIO returns an IO implementation that exposes the IO streams as io.ReadCloser
|
||||
// and io.WriteCloser.
|
||||
func NewDirectIO(ctx context.Context, fifos *FIFOSet) (*DirectIO, error) {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
pipes, err := openFifos(ctx, fifos)
|
||||
return &DirectIO{
|
||||
pipes: pipes,
|
||||
cio: cio{
|
||||
config: fifos.Config,
|
||||
closers: append(pipes.closers(), fifos),
|
||||
cancel: cancel,
|
||||
},
|
||||
}, err
|
||||
}
|
158
src/runtime/vendor/github.com/containerd/containerd/cio/io_windows.go
generated
vendored
158
src/runtime/vendor/github.com/containerd/containerd/cio/io_windows.go
generated
vendored
@ -1,158 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cio
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
winio "github.com/Microsoft/go-winio"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const pipeRoot = `\\.\pipe`
|
||||
|
||||
// NewFIFOSetInDir returns a new set of fifos for the task
|
||||
func NewFIFOSetInDir(_, id string, terminal bool) (*FIFOSet, error) {
|
||||
stderrPipe := ""
|
||||
if !terminal {
|
||||
stderrPipe = fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id)
|
||||
}
|
||||
return NewFIFOSet(Config{
|
||||
Terminal: terminal,
|
||||
Stdin: fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
|
||||
Stdout: fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
|
||||
Stderr: stderrPipe,
|
||||
}, nil), nil
|
||||
}
|
||||
|
||||
func copyIO(fifos *FIFOSet, ioset *Streams) (_ *cio, retErr error) {
|
||||
cios := &cio{config: fifos.Config}
|
||||
|
||||
defer func() {
|
||||
if retErr != nil {
|
||||
_ = cios.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if fifos.Stdin != "" {
|
||||
l, err := winio.ListenPipe(fifos.Stdin, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin)
|
||||
}
|
||||
cios.closers = append(cios.closers, l)
|
||||
|
||||
go func() {
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
log.L.WithError(err).Errorf("failed to accept stdin connection on %s", fifos.Stdin)
|
||||
return
|
||||
}
|
||||
|
||||
p := bufPool.Get().(*[]byte)
|
||||
defer bufPool.Put(p)
|
||||
|
||||
io.CopyBuffer(c, ioset.Stdin, *p)
|
||||
c.Close()
|
||||
l.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
if fifos.Stdout != "" {
|
||||
l, err := winio.ListenPipe(fifos.Stdout, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout)
|
||||
}
|
||||
cios.closers = append(cios.closers, l)
|
||||
|
||||
go func() {
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
log.L.WithError(err).Errorf("failed to accept stdout connection on %s", fifos.Stdout)
|
||||
return
|
||||
}
|
||||
|
||||
p := bufPool.Get().(*[]byte)
|
||||
defer bufPool.Put(p)
|
||||
|
||||
io.CopyBuffer(ioset.Stdout, c, *p)
|
||||
c.Close()
|
||||
l.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
if fifos.Stderr != "" {
|
||||
l, err := winio.ListenPipe(fifos.Stderr, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to create stderr pipe %s", fifos.Stderr)
|
||||
}
|
||||
cios.closers = append(cios.closers, l)
|
||||
|
||||
go func() {
|
||||
c, err := l.Accept()
|
||||
if err != nil {
|
||||
log.L.WithError(err).Errorf("failed to accept stderr connection on %s", fifos.Stderr)
|
||||
return
|
||||
}
|
||||
|
||||
p := bufPool.Get().(*[]byte)
|
||||
defer bufPool.Put(p)
|
||||
|
||||
io.CopyBuffer(ioset.Stderr, c, *p)
|
||||
c.Close()
|
||||
l.Close()
|
||||
}()
|
||||
}
|
||||
|
||||
return cios, nil
|
||||
}
|
||||
|
||||
// NewDirectIO returns an IO implementation that exposes the IO streams as io.ReadCloser
|
||||
// and io.WriteCloser.
|
||||
func NewDirectIO(stdin io.WriteCloser, stdout, stderr io.ReadCloser, terminal bool) *DirectIO {
|
||||
return &DirectIO{
|
||||
pipes: pipes{
|
||||
Stdin: stdin,
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
},
|
||||
cio: cio{
|
||||
config: Config{Terminal: terminal},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewDirectIOFromFIFOSet returns an IO implementation that exposes the IO streams as io.ReadCloser
|
||||
// and io.WriteCloser.
|
||||
func NewDirectIOFromFIFOSet(ctx context.Context, stdin io.WriteCloser, stdout, stderr io.ReadCloser, fifos *FIFOSet) *DirectIO {
|
||||
_, cancel := context.WithCancel(ctx)
|
||||
pipes := pipes{
|
||||
Stdin: stdin,
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
}
|
||||
return &DirectIO{
|
||||
pipes: pipes,
|
||||
cio: cio{
|
||||
config: fifos.Config,
|
||||
closers: append(pipes.closers(), fifos),
|
||||
cancel: cancel,
|
||||
},
|
||||
}
|
||||
}
|
833
src/runtime/vendor/github.com/containerd/containerd/client.go
generated
vendored
833
src/runtime/vendor/github.com/containerd/containerd/client.go
generated
vendored
@ -1,833 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
containersapi "github.com/containerd/containerd/api/services/containers/v1"
|
||||
contentapi "github.com/containerd/containerd/api/services/content/v1"
|
||||
diffapi "github.com/containerd/containerd/api/services/diff/v1"
|
||||
eventsapi "github.com/containerd/containerd/api/services/events/v1"
|
||||
imagesapi "github.com/containerd/containerd/api/services/images/v1"
|
||||
introspectionapi "github.com/containerd/containerd/api/services/introspection/v1"
|
||||
leasesapi "github.com/containerd/containerd/api/services/leases/v1"
|
||||
namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1"
|
||||
snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1"
|
||||
"github.com/containerd/containerd/api/services/tasks/v1"
|
||||
versionservice "github.com/containerd/containerd/api/services/version/v1"
|
||||
apitypes "github.com/containerd/containerd/api/types"
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/content"
|
||||
contentproxy "github.com/containerd/containerd/content/proxy"
|
||||
"github.com/containerd/containerd/defaults"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/events"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/leases"
|
||||
leasesproxy "github.com/containerd/containerd/leases/proxy"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/pkg/dialer"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/plugin"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/containerd/containerd/services/introspection"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
snproxy "github.com/containerd/containerd/snapshots/proxy"
|
||||
"github.com/containerd/typeurl"
|
||||
ptypes "github.com/gogo/protobuf/types"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/semaphore"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/health/grpc_health_v1"
|
||||
)
|
||||
|
||||
func init() {
|
||||
const prefix = "types.containerd.io"
|
||||
// register TypeUrls for commonly marshaled external types
|
||||
major := strconv.Itoa(specs.VersionMajor)
|
||||
typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
|
||||
typeurl.Register(&specs.Process{}, prefix, "opencontainers/runtime-spec", major, "Process")
|
||||
typeurl.Register(&specs.LinuxResources{}, prefix, "opencontainers/runtime-spec", major, "LinuxResources")
|
||||
typeurl.Register(&specs.WindowsResources{}, prefix, "opencontainers/runtime-spec", major, "WindowsResources")
|
||||
}
|
||||
|
||||
// New returns a new containerd client that is connected to the containerd
|
||||
// instance provided by address
|
||||
func New(address string, opts ...ClientOpt) (*Client, error) {
|
||||
var copts clientOpts
|
||||
for _, o := range opts {
|
||||
if err := o(&copts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if copts.timeout == 0 {
|
||||
copts.timeout = 10 * time.Second
|
||||
}
|
||||
|
||||
c := &Client{
|
||||
defaultns: copts.defaultns,
|
||||
}
|
||||
|
||||
if copts.defaultRuntime != "" {
|
||||
c.runtime = copts.defaultRuntime
|
||||
} else {
|
||||
c.runtime = defaults.DefaultRuntime
|
||||
}
|
||||
|
||||
if copts.defaultPlatform != nil {
|
||||
c.platform = copts.defaultPlatform
|
||||
} else {
|
||||
c.platform = platforms.Default()
|
||||
}
|
||||
|
||||
if copts.services != nil {
|
||||
c.services = *copts.services
|
||||
}
|
||||
if address != "" {
|
||||
backoffConfig := backoff.DefaultConfig
|
||||
backoffConfig.MaxDelay = 3 * time.Second
|
||||
connParams := grpc.ConnectParams{
|
||||
Backoff: backoffConfig,
|
||||
}
|
||||
gopts := []grpc.DialOption{
|
||||
grpc.WithBlock(),
|
||||
grpc.WithInsecure(),
|
||||
grpc.FailOnNonTempDialError(true),
|
||||
grpc.WithConnectParams(connParams),
|
||||
grpc.WithContextDialer(dialer.ContextDialer),
|
||||
|
||||
// TODO(stevvooe): We may need to allow configuration of this on the client.
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
|
||||
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
|
||||
}
|
||||
if len(copts.dialOptions) > 0 {
|
||||
gopts = copts.dialOptions
|
||||
}
|
||||
if copts.defaultns != "" {
|
||||
unary, stream := newNSInterceptors(copts.defaultns)
|
||||
gopts = append(gopts,
|
||||
grpc.WithUnaryInterceptor(unary),
|
||||
grpc.WithStreamInterceptor(stream),
|
||||
)
|
||||
}
|
||||
connector := func() (*grpc.ClientConn, error) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), copts.timeout)
|
||||
defer cancel()
|
||||
conn, err := grpc.DialContext(ctx, dialer.DialAddress(address), gopts...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to dial %q", address)
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
conn, err := connector()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.conn, c.connector = conn, connector
|
||||
}
|
||||
if copts.services == nil && c.conn == nil {
|
||||
return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available")
|
||||
}
|
||||
|
||||
// check namespace labels for default runtime
|
||||
if copts.defaultRuntime == "" && c.defaultns != "" {
|
||||
if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil {
|
||||
return nil, err
|
||||
} else if label != "" {
|
||||
c.runtime = label
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// NewWithConn returns a new containerd client that is connected to the containerd
|
||||
// instance provided by the connection
|
||||
func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
|
||||
var copts clientOpts
|
||||
for _, o := range opts {
|
||||
if err := o(&copts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
c := &Client{
|
||||
defaultns: copts.defaultns,
|
||||
conn: conn,
|
||||
runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
|
||||
}
|
||||
|
||||
// check namespace labels for default runtime
|
||||
if copts.defaultRuntime == "" && c.defaultns != "" {
|
||||
if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil {
|
||||
return nil, err
|
||||
} else if label != "" {
|
||||
c.runtime = label
|
||||
}
|
||||
}
|
||||
|
||||
if copts.services != nil {
|
||||
c.services = *copts.services
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Client is the client to interact with containerd and its various services
|
||||
// using a uniform interface
|
||||
type Client struct {
|
||||
services
|
||||
connMu sync.Mutex
|
||||
conn *grpc.ClientConn
|
||||
runtime string
|
||||
defaultns string
|
||||
platform platforms.MatchComparer
|
||||
connector func() (*grpc.ClientConn, error)
|
||||
}
|
||||
|
||||
// Reconnect re-establishes the GRPC connection to the containerd daemon
|
||||
func (c *Client) Reconnect() error {
|
||||
if c.connector == nil {
|
||||
return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available")
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
c.conn.Close()
|
||||
conn, err := c.connector()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.conn = conn
|
||||
return nil
|
||||
}
|
||||
|
||||
// Runtime returns the name of the runtime being used
|
||||
func (c *Client) Runtime() string {
|
||||
return c.runtime
|
||||
}
|
||||
|
||||
// IsServing returns true if the client can successfully connect to the
|
||||
// containerd daemon and the healthcheck service returns the SERVING
|
||||
// response.
|
||||
// This call will block if a transient error is encountered during
|
||||
// connection. A timeout can be set in the context to ensure it returns
|
||||
// early.
|
||||
func (c *Client) IsServing(ctx context.Context) (bool, error) {
|
||||
c.connMu.Lock()
|
||||
if c.conn == nil {
|
||||
c.connMu.Unlock()
|
||||
return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
|
||||
}
|
||||
c.connMu.Unlock()
|
||||
r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return r.Status == grpc_health_v1.HealthCheckResponse_SERVING, nil
|
||||
}
|
||||
|
||||
// Containers returns all containers created in containerd
|
||||
func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container, error) {
|
||||
r, err := c.ContainerService().List(ctx, filters...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var out []Container
|
||||
for _, container := range r {
|
||||
out = append(out, containerFromRecord(c, container))
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// NewContainer will create a new container in container with the provided id
|
||||
// the id must be unique within the namespace
|
||||
func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
|
||||
ctx, done, err := c.WithLease(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer done(ctx)
|
||||
|
||||
container := containers.Container{
|
||||
ID: id,
|
||||
Runtime: containers.RuntimeInfo{
|
||||
Name: c.runtime,
|
||||
},
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, c, &container); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
r, err := c.ContainerService().Create(ctx, container)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return containerFromRecord(c, r), nil
|
||||
}
|
||||
|
||||
// LoadContainer loads an existing container from metadata
|
||||
func (c *Client) LoadContainer(ctx context.Context, id string) (Container, error) {
|
||||
r, err := c.ContainerService().Get(ctx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return containerFromRecord(c, r), nil
|
||||
}
|
||||
|
||||
// RemoteContext is used to configure object resolutions and transfers with
|
||||
// remote content stores and image providers.
|
||||
type RemoteContext struct {
|
||||
// Resolver is used to resolve names to objects, fetchers, and pushers.
|
||||
// If no resolver is provided, defaults to Docker registry resolver.
|
||||
Resolver remotes.Resolver
|
||||
|
||||
// PlatformMatcher is used to match the platforms for an image
|
||||
// operation and define the preference when a single match is required
|
||||
// from multiple platforms.
|
||||
PlatformMatcher platforms.MatchComparer
|
||||
|
||||
// Unpack is done after an image is pulled to extract into a snapshotter.
|
||||
// It is done simultaneously for schema 2 images when they are pulled.
|
||||
// If an image is not unpacked on pull, it can be unpacked any time
|
||||
// afterwards. Unpacking is required to run an image.
|
||||
Unpack bool
|
||||
|
||||
// UnpackOpts handles options to the unpack call.
|
||||
UnpackOpts []UnpackOpt
|
||||
|
||||
// Snapshotter used for unpacking
|
||||
Snapshotter string
|
||||
|
||||
// SnapshotterOpts are additional options to be passed to a snapshotter during pull
|
||||
SnapshotterOpts []snapshots.Opt
|
||||
|
||||
// Labels to be applied to the created image
|
||||
Labels map[string]string
|
||||
|
||||
// BaseHandlers are a set of handlers which get are called on dispatch.
|
||||
// These handlers always get called before any operation specific
|
||||
// handlers.
|
||||
BaseHandlers []images.Handler
|
||||
|
||||
// HandlerWrapper wraps the handler which gets sent to dispatch.
|
||||
// Unlike BaseHandlers, this can run before and after the built
|
||||
// in handlers, allowing operations to run on the descriptor
|
||||
// after it has completed transferring.
|
||||
HandlerWrapper func(images.Handler) images.Handler
|
||||
|
||||
// ConvertSchema1 is whether to convert Docker registry schema 1
|
||||
// manifests. If this option is false then any image which resolves
|
||||
// to schema 1 will return an error since schema 1 is not supported.
|
||||
ConvertSchema1 bool
|
||||
|
||||
// Platforms defines which platforms to handle when doing the image operation.
|
||||
// Platforms is ignored when a PlatformMatcher is set, otherwise the
|
||||
// platforms will be used to create a PlatformMatcher with no ordering
|
||||
// preference.
|
||||
Platforms []string
|
||||
|
||||
// MaxConcurrentDownloads is the max concurrent content downloads for each pull.
|
||||
MaxConcurrentDownloads int
|
||||
|
||||
// MaxConcurrentUploadedLayers is the max concurrent uploaded layers for each push.
|
||||
MaxConcurrentUploadedLayers int
|
||||
|
||||
// AllMetadata downloads all manifests and known-configuration files
|
||||
AllMetadata bool
|
||||
|
||||
// ChildLabelMap sets the labels used to reference child objects in the content
|
||||
// store. By default, all GC reference labels will be set for all fetched content.
|
||||
ChildLabelMap func(ocispec.Descriptor) []string
|
||||
}
|
||||
|
||||
func defaultRemoteContext() *RemoteContext {
|
||||
return &RemoteContext{
|
||||
Resolver: docker.NewResolver(docker.ResolverOptions{
|
||||
Client: http.DefaultClient,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// Fetch downloads the provided content into containerd's content store
|
||||
// and returns a non-platform specific image reference
|
||||
func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (images.Image, error) {
|
||||
fetchCtx := defaultRemoteContext()
|
||||
for _, o := range opts {
|
||||
if err := o(c, fetchCtx); err != nil {
|
||||
return images.Image{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if fetchCtx.Unpack {
|
||||
return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull")
|
||||
}
|
||||
|
||||
if fetchCtx.PlatformMatcher == nil {
|
||||
if len(fetchCtx.Platforms) == 0 {
|
||||
fetchCtx.PlatformMatcher = platforms.All
|
||||
} else {
|
||||
var ps []ocispec.Platform
|
||||
for _, s := range fetchCtx.Platforms {
|
||||
p, err := platforms.Parse(s)
|
||||
if err != nil {
|
||||
return images.Image{}, errors.Wrapf(err, "invalid platform %s", s)
|
||||
}
|
||||
ps = append(ps, p)
|
||||
}
|
||||
|
||||
fetchCtx.PlatformMatcher = platforms.Any(ps...)
|
||||
}
|
||||
}
|
||||
|
||||
ctx, done, err := c.WithLease(ctx)
|
||||
if err != nil {
|
||||
return images.Image{}, err
|
||||
}
|
||||
defer done(ctx)
|
||||
|
||||
img, err := c.fetch(ctx, fetchCtx, ref, 0)
|
||||
if err != nil {
|
||||
return images.Image{}, err
|
||||
}
|
||||
return c.createNewImage(ctx, img)
|
||||
}
|
||||
|
||||
// Push uploads the provided content to a remote resource
|
||||
func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, opts ...RemoteOpt) error {
|
||||
pushCtx := defaultRemoteContext()
|
||||
for _, o := range opts {
|
||||
if err := o(c, pushCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if pushCtx.PlatformMatcher == nil {
|
||||
if len(pushCtx.Platforms) > 0 {
|
||||
var ps []ocispec.Platform
|
||||
for _, platform := range pushCtx.Platforms {
|
||||
p, err := platforms.Parse(platform)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "invalid platform %s", platform)
|
||||
}
|
||||
ps = append(ps, p)
|
||||
}
|
||||
pushCtx.PlatformMatcher = platforms.Any(ps...)
|
||||
} else {
|
||||
pushCtx.PlatformMatcher = platforms.All
|
||||
}
|
||||
}
|
||||
|
||||
// Annotate ref with digest to push only push tag for single digest
|
||||
if !strings.Contains(ref, "@") {
|
||||
ref = ref + "@" + desc.Digest.String()
|
||||
}
|
||||
|
||||
pusher, err := pushCtx.Resolver.Pusher(ctx, ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var wrapper func(images.Handler) images.Handler
|
||||
|
||||
if len(pushCtx.BaseHandlers) > 0 {
|
||||
wrapper = func(h images.Handler) images.Handler {
|
||||
h = images.Handlers(append(pushCtx.BaseHandlers, h)...)
|
||||
if pushCtx.HandlerWrapper != nil {
|
||||
h = pushCtx.HandlerWrapper(h)
|
||||
}
|
||||
return h
|
||||
}
|
||||
} else if pushCtx.HandlerWrapper != nil {
|
||||
wrapper = pushCtx.HandlerWrapper
|
||||
}
|
||||
|
||||
var limiter *semaphore.Weighted
|
||||
if pushCtx.MaxConcurrentUploadedLayers > 0 {
|
||||
limiter = semaphore.NewWeighted(int64(pushCtx.MaxConcurrentUploadedLayers))
|
||||
}
|
||||
|
||||
return remotes.PushContent(ctx, pusher, desc, c.ContentStore(), limiter, pushCtx.PlatformMatcher, wrapper)
|
||||
}
|
||||
|
||||
// GetImage returns an existing image
|
||||
func (c *Client) GetImage(ctx context.Context, ref string) (Image, error) {
|
||||
i, err := c.ImageService().Get(ctx, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewImage(c, i), nil
|
||||
}
|
||||
|
||||
// ListImages returns all existing images
|
||||
func (c *Client) ListImages(ctx context.Context, filters ...string) ([]Image, error) {
|
||||
imgs, err := c.ImageService().List(ctx, filters...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
images := make([]Image, len(imgs))
|
||||
for i, img := range imgs {
|
||||
images[i] = NewImage(c, img)
|
||||
}
|
||||
return images, nil
|
||||
}
|
||||
|
||||
// Restore restores a container from a checkpoint
|
||||
func (c *Client) Restore(ctx context.Context, id string, checkpoint Image, opts ...RestoreOpts) (Container, error) {
|
||||
store := c.ContentStore()
|
||||
index, err := decodeIndex(ctx, store, checkpoint.Target())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx, done, err := c.WithLease(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer done(ctx)
|
||||
|
||||
copts := []NewContainerOpts{}
|
||||
for _, o := range opts {
|
||||
copts = append(copts, o(ctx, id, c, checkpoint, index))
|
||||
}
|
||||
|
||||
ctr, err := c.NewContainer(ctx, id, copts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ctr, nil
|
||||
}
|
||||
|
||||
func writeIndex(ctx context.Context, index *ocispec.Index, client *Client, ref string) (d ocispec.Descriptor, err error) {
|
||||
labels := map[string]string{}
|
||||
for i, m := range index.Manifests {
|
||||
labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = m.Digest.String()
|
||||
}
|
||||
data, err := json.Marshal(index)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
return writeContent(ctx, client.ContentStore(), ocispec.MediaTypeImageIndex, ref, bytes.NewReader(data), content.WithLabels(labels))
|
||||
}
|
||||
|
||||
// GetLabel gets a label value from namespace store
|
||||
// If there is no default label, an empty string returned with nil error
|
||||
func (c *Client) GetLabel(ctx context.Context, label string) (string, error) {
|
||||
ns, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
if c.defaultns == "" {
|
||||
return "", err
|
||||
}
|
||||
ns = c.defaultns
|
||||
}
|
||||
|
||||
srv := c.NamespaceService()
|
||||
labels, err := srv.Labels(ctx, ns)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
value := labels[label]
|
||||
return value, nil
|
||||
}
|
||||
|
||||
// Subscribe to events that match one or more of the provided filters.
|
||||
//
|
||||
// Callers should listen on both the envelope and errs channels. If the errs
|
||||
// channel returns nil or an error, the subscriber should terminate.
|
||||
//
|
||||
// The subscriber can stop receiving events by canceling the provided context.
|
||||
// The errs channel will be closed and return a nil error.
|
||||
func (c *Client) Subscribe(ctx context.Context, filters ...string) (ch <-chan *events.Envelope, errs <-chan error) {
|
||||
return c.EventService().Subscribe(ctx, filters...)
|
||||
}
|
||||
|
||||
// Close closes the clients connection to containerd
|
||||
func (c *Client) Close() error {
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
if c.conn != nil {
|
||||
return c.conn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NamespaceService returns the underlying Namespaces Store
|
||||
func (c *Client) NamespaceService() namespaces.Store {
|
||||
if c.namespaceStore != nil {
|
||||
return c.namespaceStore
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return NewNamespaceStoreFromClient(namespacesapi.NewNamespacesClient(c.conn))
|
||||
}
|
||||
|
||||
// ContainerService returns the underlying container Store
|
||||
func (c *Client) ContainerService() containers.Store {
|
||||
if c.containerStore != nil {
|
||||
return c.containerStore
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return NewRemoteContainerStore(containersapi.NewContainersClient(c.conn))
|
||||
}
|
||||
|
||||
// ContentStore returns the underlying content Store
|
||||
func (c *Client) ContentStore() content.Store {
|
||||
if c.contentStore != nil {
|
||||
return c.contentStore
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return contentproxy.NewContentStore(contentapi.NewContentClient(c.conn))
|
||||
}
|
||||
|
||||
// SnapshotService returns the underlying snapshotter for the provided snapshotter name
|
||||
func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter {
|
||||
snapshotterName, err := c.resolveSnapshotterName(context.Background(), snapshotterName)
|
||||
if err != nil {
|
||||
snapshotterName = DefaultSnapshotter
|
||||
}
|
||||
if c.snapshotters != nil {
|
||||
return c.snapshotters[snapshotterName]
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return snproxy.NewSnapshotter(snapshotsapi.NewSnapshotsClient(c.conn), snapshotterName)
|
||||
}
|
||||
|
||||
// TaskService returns the underlying TasksClient
|
||||
func (c *Client) TaskService() tasks.TasksClient {
|
||||
if c.taskService != nil {
|
||||
return c.taskService
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return tasks.NewTasksClient(c.conn)
|
||||
}
|
||||
|
||||
// ImageService returns the underlying image Store
|
||||
func (c *Client) ImageService() images.Store {
|
||||
if c.imageStore != nil {
|
||||
return c.imageStore
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return NewImageStoreFromClient(imagesapi.NewImagesClient(c.conn))
|
||||
}
|
||||
|
||||
// DiffService returns the underlying Differ
|
||||
func (c *Client) DiffService() DiffService {
|
||||
if c.diffService != nil {
|
||||
return c.diffService
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return NewDiffServiceFromClient(diffapi.NewDiffClient(c.conn))
|
||||
}
|
||||
|
||||
// IntrospectionService returns the underlying Introspection Client
|
||||
func (c *Client) IntrospectionService() introspection.Service {
|
||||
if c.introspectionService != nil {
|
||||
return c.introspectionService
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return introspection.NewIntrospectionServiceFromClient(introspectionapi.NewIntrospectionClient(c.conn))
|
||||
}
|
||||
|
||||
// LeasesService returns the underlying Leases Client
|
||||
func (c *Client) LeasesService() leases.Manager {
|
||||
if c.leasesService != nil {
|
||||
return c.leasesService
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return leasesproxy.NewLeaseManager(leasesapi.NewLeasesClient(c.conn))
|
||||
}
|
||||
|
||||
// HealthService returns the underlying GRPC HealthClient
|
||||
func (c *Client) HealthService() grpc_health_v1.HealthClient {
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return grpc_health_v1.NewHealthClient(c.conn)
|
||||
}
|
||||
|
||||
// EventService returns the underlying event service
|
||||
func (c *Client) EventService() EventService {
|
||||
if c.eventService != nil {
|
||||
return c.eventService
|
||||
}
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return NewEventServiceFromClient(eventsapi.NewEventsClient(c.conn))
|
||||
}
|
||||
|
||||
// VersionService returns the underlying VersionClient
|
||||
func (c *Client) VersionService() versionservice.VersionClient {
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return versionservice.NewVersionClient(c.conn)
|
||||
}
|
||||
|
||||
// Conn returns the underlying GRPC connection object
|
||||
func (c *Client) Conn() *grpc.ClientConn {
|
||||
c.connMu.Lock()
|
||||
defer c.connMu.Unlock()
|
||||
return c.conn
|
||||
}
|
||||
|
||||
// Version of containerd
|
||||
type Version struct {
|
||||
// Version number
|
||||
Version string
|
||||
// Revision from git that was built
|
||||
Revision string
|
||||
}
|
||||
|
||||
// Version returns the version of containerd that the client is connected to
|
||||
func (c *Client) Version(ctx context.Context) (Version, error) {
|
||||
c.connMu.Lock()
|
||||
if c.conn == nil {
|
||||
c.connMu.Unlock()
|
||||
return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
|
||||
}
|
||||
c.connMu.Unlock()
|
||||
response, err := c.VersionService().Version(ctx, &ptypes.Empty{})
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
return Version{
|
||||
Version: response.Version,
|
||||
Revision: response.Revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ServerInfo represents the introspected server information
|
||||
type ServerInfo struct {
|
||||
UUID string
|
||||
}
|
||||
|
||||
// Server returns server information from the introspection service
|
||||
func (c *Client) Server(ctx context.Context) (ServerInfo, error) {
|
||||
c.connMu.Lock()
|
||||
if c.conn == nil {
|
||||
c.connMu.Unlock()
|
||||
return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
|
||||
}
|
||||
c.connMu.Unlock()
|
||||
|
||||
response, err := c.IntrospectionService().Server(ctx, &ptypes.Empty{})
|
||||
if err != nil {
|
||||
return ServerInfo{}, err
|
||||
}
|
||||
return ServerInfo{
|
||||
UUID: response.UUID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) resolveSnapshotterName(ctx context.Context, name string) (string, error) {
|
||||
if name == "" {
|
||||
label, err := c.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if label != "" {
|
||||
name = label
|
||||
} else {
|
||||
name = DefaultSnapshotter
|
||||
}
|
||||
}
|
||||
|
||||
return name, nil
|
||||
}
|
||||
|
||||
func (c *Client) getSnapshotter(ctx context.Context, name string) (snapshots.Snapshotter, error) {
|
||||
name, err := c.resolveSnapshotterName(ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := c.SnapshotService(name)
|
||||
if s == nil {
|
||||
return nil, errors.Wrapf(errdefs.ErrNotFound, "snapshotter %s was not found", name)
|
||||
}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// CheckRuntime returns true if the current runtime matches the expected
|
||||
// runtime. Providing various parts of the runtime schema will match those
|
||||
// parts of the expected runtime
|
||||
func CheckRuntime(current, expected string) bool {
|
||||
cp := strings.Split(current, ".")
|
||||
l := len(cp)
|
||||
for i, p := range strings.Split(expected, ".") {
|
||||
if i > l {
|
||||
return false
|
||||
}
|
||||
if p != cp[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// GetSnapshotterSupportedPlatforms returns a platform matchers which represents the
|
||||
// supported platforms for the given snapshotters
|
||||
func (c *Client) GetSnapshotterSupportedPlatforms(ctx context.Context, snapshotterName string) (platforms.MatchComparer, error) {
|
||||
filters := []string{fmt.Sprintf("type==%s, id==%s", plugin.SnapshotPlugin, snapshotterName)}
|
||||
in := c.IntrospectionService()
|
||||
|
||||
resp, err := in.Plugins(ctx, filters)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(resp.Plugins) <= 0 {
|
||||
return nil, fmt.Errorf("inspection service could not find snapshotter %s plugin", snapshotterName)
|
||||
}
|
||||
|
||||
sn := resp.Plugins[0]
|
||||
snPlatforms := toPlatforms(sn.Platforms)
|
||||
return platforms.Any(snPlatforms...), nil
|
||||
}
|
||||
|
||||
func toPlatforms(pt []apitypes.Platform) []ocispec.Platform {
|
||||
platforms := make([]ocispec.Platform, len(pt))
|
||||
for i, p := range pt {
|
||||
platforms[i] = ocispec.Platform{
|
||||
Architecture: p.Architecture,
|
||||
OS: p.OS,
|
||||
Variant: p.Variant,
|
||||
}
|
||||
}
|
||||
return platforms
|
||||
}
|
245
src/runtime/vendor/github.com/containerd/containerd/client_opts.go
generated
vendored
245
src/runtime/vendor/github.com/containerd/containerd/client_opts.go
generated
vendored
@ -1,245 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type clientOpts struct {
|
||||
defaultns string
|
||||
defaultRuntime string
|
||||
defaultPlatform platforms.MatchComparer
|
||||
services *services
|
||||
dialOptions []grpc.DialOption
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// ClientOpt allows callers to set options on the containerd client
|
||||
type ClientOpt func(c *clientOpts) error
|
||||
|
||||
// WithDefaultNamespace sets the default namespace on the client
|
||||
//
|
||||
// Any operation that does not have a namespace set on the context will
|
||||
// be provided the default namespace
|
||||
func WithDefaultNamespace(ns string) ClientOpt {
|
||||
return func(c *clientOpts) error {
|
||||
c.defaultns = ns
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDefaultRuntime sets the default runtime on the client
|
||||
func WithDefaultRuntime(rt string) ClientOpt {
|
||||
return func(c *clientOpts) error {
|
||||
c.defaultRuntime = rt
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDefaultPlatform sets the default platform matcher on the client
|
||||
func WithDefaultPlatform(platform platforms.MatchComparer) ClientOpt {
|
||||
return func(c *clientOpts) error {
|
||||
c.defaultPlatform = platform
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithDialOpts allows grpc.DialOptions to be set on the connection
|
||||
func WithDialOpts(opts []grpc.DialOption) ClientOpt {
|
||||
return func(c *clientOpts) error {
|
||||
c.dialOptions = opts
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithServices sets services used by the client.
|
||||
func WithServices(opts ...ServicesOpt) ClientOpt {
|
||||
return func(c *clientOpts) error {
|
||||
c.services = &services{}
|
||||
for _, o := range opts {
|
||||
o(c.services)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithTimeout sets the connection timeout for the client
|
||||
func WithTimeout(d time.Duration) ClientOpt {
|
||||
return func(c *clientOpts) error {
|
||||
c.timeout = d
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// RemoteOpt allows the caller to set distribution options for a remote
|
||||
type RemoteOpt func(*Client, *RemoteContext) error
|
||||
|
||||
// WithPlatform allows the caller to specify a platform to retrieve
|
||||
// content for
|
||||
func WithPlatform(platform string) RemoteOpt {
|
||||
if platform == "" {
|
||||
platform = platforms.DefaultString()
|
||||
}
|
||||
return func(_ *Client, c *RemoteContext) error {
|
||||
for _, p := range c.Platforms {
|
||||
if p == platform {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
c.Platforms = append(c.Platforms, platform)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPlatformMatcher specifies the matcher to use for
|
||||
// determining which platforms to pull content for.
|
||||
// This value supersedes anything set with `WithPlatform`.
|
||||
func WithPlatformMatcher(m platforms.MatchComparer) RemoteOpt {
|
||||
return func(_ *Client, c *RemoteContext) error {
|
||||
c.PlatformMatcher = m
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPullUnpack is used to unpack an image after pull. This
|
||||
// uses the snapshotter, content store, and diff service
|
||||
// configured for the client.
|
||||
func WithPullUnpack(_ *Client, c *RemoteContext) error {
|
||||
c.Unpack = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithUnpackOpts is used to add unpack options to the unpacker.
|
||||
func WithUnpackOpts(opts []UnpackOpt) RemoteOpt {
|
||||
return func(_ *Client, c *RemoteContext) error {
|
||||
c.UnpackOpts = append(c.UnpackOpts, opts...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPullSnapshotter specifies snapshotter name used for unpacking.
|
||||
func WithPullSnapshotter(snapshotterName string, opts ...snapshots.Opt) RemoteOpt {
|
||||
return func(_ *Client, c *RemoteContext) error {
|
||||
c.Snapshotter = snapshotterName
|
||||
c.SnapshotterOpts = opts
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPullLabel sets a label to be associated with a pulled reference
|
||||
func WithPullLabel(key, value string) RemoteOpt {
|
||||
return func(_ *Client, rc *RemoteContext) error {
|
||||
if rc.Labels == nil {
|
||||
rc.Labels = make(map[string]string)
|
||||
}
|
||||
|
||||
rc.Labels[key] = value
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPullLabels associates a set of labels to a pulled reference
|
||||
func WithPullLabels(labels map[string]string) RemoteOpt {
|
||||
return func(_ *Client, rc *RemoteContext) error {
|
||||
if rc.Labels == nil {
|
||||
rc.Labels = make(map[string]string)
|
||||
}
|
||||
|
||||
for k, v := range labels {
|
||||
rc.Labels[k] = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithChildLabelMap sets the map function used to define the labels set
|
||||
// on referenced child content in the content store. This can be used
|
||||
// to overwrite the default GC labels or filter which labels get set
|
||||
// for content.
|
||||
// The default is `images.ChildGCLabels`.
|
||||
func WithChildLabelMap(fn func(ocispec.Descriptor) []string) RemoteOpt {
|
||||
return func(_ *Client, c *RemoteContext) error {
|
||||
c.ChildLabelMap = fn
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSchema1Conversion is used to convert Docker registry schema 1
|
||||
// manifests to oci manifests on pull. Without this option schema 1
|
||||
// manifests will return a not supported error.
|
||||
func WithSchema1Conversion(client *Client, c *RemoteContext) error {
|
||||
c.ConvertSchema1 = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithResolver specifies the resolver to use.
|
||||
func WithResolver(resolver remotes.Resolver) RemoteOpt {
|
||||
return func(client *Client, c *RemoteContext) error {
|
||||
c.Resolver = resolver
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithImageHandler adds a base handler to be called on dispatch.
|
||||
func WithImageHandler(h images.Handler) RemoteOpt {
|
||||
return func(client *Client, c *RemoteContext) error {
|
||||
c.BaseHandlers = append(c.BaseHandlers, h)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithImageHandlerWrapper wraps the handlers to be called on dispatch.
|
||||
func WithImageHandlerWrapper(w func(images.Handler) images.Handler) RemoteOpt {
|
||||
return func(client *Client, c *RemoteContext) error {
|
||||
c.HandlerWrapper = w
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxConcurrentDownloads sets max concurrent download limit.
|
||||
func WithMaxConcurrentDownloads(max int) RemoteOpt {
|
||||
return func(client *Client, c *RemoteContext) error {
|
||||
c.MaxConcurrentDownloads = max
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithMaxConcurrentUploadedLayers sets max concurrent uploaded layer limit.
|
||||
func WithMaxConcurrentUploadedLayers(max int) RemoteOpt {
|
||||
return func(client *Client, c *RemoteContext) error {
|
||||
c.MaxConcurrentUploadedLayers = max
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAllMetadata downloads all manifests and known-configuration files
|
||||
func WithAllMetadata() RemoteOpt {
|
||||
return func(_ *Client, c *RemoteContext) error {
|
||||
c.AllMetadata = true
|
||||
return nil
|
||||
}
|
||||
}
|
3
src/runtime/vendor/github.com/containerd/containerd/code-of-conduct.md
generated
vendored
3
src/runtime/vendor/github.com/containerd/containerd/code-of-conduct.md
generated
vendored
@ -1,3 +0,0 @@
|
||||
## containerd Community Code of Conduct
|
||||
|
||||
containerd follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
|
1
src/runtime/vendor/github.com/containerd/containerd/codecov.yml
generated
vendored
1
src/runtime/vendor/github.com/containerd/containerd/codecov.yml
generated
vendored
@ -1 +0,0 @@
|
||||
comment: false
|
461
src/runtime/vendor/github.com/containerd/containerd/container.go
generated
vendored
461
src/runtime/vendor/github.com/containerd/containerd/container.go
generated
vendored
@ -1,461 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/api/services/tasks/v1"
|
||||
"github.com/containerd/containerd/api/types"
|
||||
tasktypes "github.com/containerd/containerd/api/types/task"
|
||||
"github.com/containerd/containerd/cio"
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/oci"
|
||||
"github.com/containerd/containerd/runtime/v2/runc/options"
|
||||
"github.com/containerd/fifo"
|
||||
"github.com/containerd/typeurl"
|
||||
prototypes "github.com/gogo/protobuf/types"
|
||||
ver "github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
checkpointImageNameLabel = "org.opencontainers.image.ref.name"
|
||||
checkpointRuntimeNameLabel = "io.containerd.checkpoint.runtime"
|
||||
checkpointSnapshotterNameLabel = "io.containerd.checkpoint.snapshotter"
|
||||
)
|
||||
|
||||
// Container is a metadata object for container resources and task creation
|
||||
type Container interface {
|
||||
// ID identifies the container
|
||||
ID() string
|
||||
// Info returns the underlying container record type
|
||||
Info(context.Context, ...InfoOpts) (containers.Container, error)
|
||||
// Delete removes the container
|
||||
Delete(context.Context, ...DeleteOpts) error
|
||||
// NewTask creates a new task based on the container metadata
|
||||
NewTask(context.Context, cio.Creator, ...NewTaskOpts) (Task, error)
|
||||
// Spec returns the OCI runtime specification
|
||||
Spec(context.Context) (*oci.Spec, error)
|
||||
// Task returns the current task for the container
|
||||
//
|
||||
// If cio.Attach options are passed the client will reattach to the IO for the running
|
||||
// task. If no task exists for the container a NotFound error is returned
|
||||
//
|
||||
// Clients must make sure that only one reader is attached to the task and consuming
|
||||
// the output from the task's fifos
|
||||
Task(context.Context, cio.Attach) (Task, error)
|
||||
// Image returns the image that the container is based on
|
||||
Image(context.Context) (Image, error)
|
||||
// Labels returns the labels set on the container
|
||||
Labels(context.Context) (map[string]string, error)
|
||||
// SetLabels sets the provided labels for the container and returns the final label set
|
||||
SetLabels(context.Context, map[string]string) (map[string]string, error)
|
||||
// Extensions returns the extensions set on the container
|
||||
Extensions(context.Context) (map[string]prototypes.Any, error)
|
||||
// Update a container
|
||||
Update(context.Context, ...UpdateContainerOpts) error
|
||||
// Checkpoint creates a checkpoint image of the current container
|
||||
Checkpoint(context.Context, string, ...CheckpointOpts) (Image, error)
|
||||
}
|
||||
|
||||
func containerFromRecord(client *Client, c containers.Container) *container {
|
||||
return &container{
|
||||
client: client,
|
||||
id: c.ID,
|
||||
metadata: c,
|
||||
}
|
||||
}
|
||||
|
||||
var _ = (Container)(&container{})
|
||||
|
||||
type container struct {
|
||||
client *Client
|
||||
id string
|
||||
metadata containers.Container
|
||||
}
|
||||
|
||||
// ID returns the container's unique id
|
||||
func (c *container) ID() string {
|
||||
return c.id
|
||||
}
|
||||
|
||||
func (c *container) Info(ctx context.Context, opts ...InfoOpts) (containers.Container, error) {
|
||||
i := &InfoConfig{
|
||||
// default to refreshing the container's local metadata
|
||||
Refresh: true,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(i)
|
||||
}
|
||||
if i.Refresh {
|
||||
metadata, err := c.get(ctx)
|
||||
if err != nil {
|
||||
return c.metadata, err
|
||||
}
|
||||
c.metadata = metadata
|
||||
}
|
||||
return c.metadata, nil
|
||||
}
|
||||
|
||||
func (c *container) Extensions(ctx context.Context) (map[string]prototypes.Any, error) {
|
||||
r, err := c.get(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.Extensions, nil
|
||||
}
|
||||
|
||||
func (c *container) Labels(ctx context.Context) (map[string]string, error) {
|
||||
r, err := c.get(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.Labels, nil
|
||||
}
|
||||
|
||||
func (c *container) SetLabels(ctx context.Context, labels map[string]string) (map[string]string, error) {
|
||||
container := containers.Container{
|
||||
ID: c.id,
|
||||
Labels: labels,
|
||||
}
|
||||
|
||||
var paths []string
|
||||
// mask off paths so we only muck with the labels encountered in labels.
|
||||
// Labels not in the passed in argument will be left alone.
|
||||
for k := range labels {
|
||||
paths = append(paths, strings.Join([]string{"labels", k}, "."))
|
||||
}
|
||||
|
||||
r, err := c.client.ContainerService().Update(ctx, container, paths...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r.Labels, nil
|
||||
}
|
||||
|
||||
// Spec returns the current OCI specification for the container
|
||||
func (c *container) Spec(ctx context.Context) (*oci.Spec, error) {
|
||||
r, err := c.get(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var s oci.Spec
|
||||
if err := json.Unmarshal(r.Spec.Value, &s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &s, nil
|
||||
}
|
||||
|
||||
// Delete deletes an existing container
|
||||
// an error is returned if the container has running tasks
|
||||
func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) error {
|
||||
if _, err := c.loadTask(ctx, nil); err == nil {
|
||||
return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot delete running task %v", c.id)
|
||||
}
|
||||
r, err := c.get(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, c.client, r); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return c.client.ContainerService().Delete(ctx, c.id)
|
||||
}
|
||||
|
||||
func (c *container) Task(ctx context.Context, attach cio.Attach) (Task, error) {
|
||||
return c.loadTask(ctx, attach)
|
||||
}
|
||||
|
||||
// Image returns the image that the container is based on
|
||||
func (c *container) Image(ctx context.Context) (Image, error) {
|
||||
r, err := c.get(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if r.Image == "" {
|
||||
return nil, errors.Wrap(errdefs.ErrNotFound, "container not created from an image")
|
||||
}
|
||||
i, err := c.client.ImageService().Get(ctx, r.Image)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get image %s for container", r.Image)
|
||||
}
|
||||
return NewImage(c.client, i), nil
|
||||
}
|
||||
|
||||
func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...NewTaskOpts) (_ Task, err error) {
|
||||
i, err := ioCreate(c.id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil && i != nil {
|
||||
i.Cancel()
|
||||
i.Close()
|
||||
}
|
||||
}()
|
||||
cfg := i.Config()
|
||||
request := &tasks.CreateTaskRequest{
|
||||
ContainerID: c.id,
|
||||
Terminal: cfg.Terminal,
|
||||
Stdin: cfg.Stdin,
|
||||
Stdout: cfg.Stdout,
|
||||
Stderr: cfg.Stderr,
|
||||
}
|
||||
r, err := c.get(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if r.SnapshotKey != "" {
|
||||
if r.Snapshotter == "" {
|
||||
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "unable to resolve rootfs mounts without snapshotter on container")
|
||||
}
|
||||
|
||||
// get the rootfs from the snapshotter and add it to the request
|
||||
s, err := c.client.getSnapshotter(ctx, r.Snapshotter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mounts, err := s.Mounts(ctx, r.SnapshotKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
spec, err := c.Spec(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, m := range mounts {
|
||||
if spec.Linux != nil && spec.Linux.MountLabel != "" {
|
||||
context := label.FormatMountLabel("", spec.Linux.MountLabel)
|
||||
if context != "" {
|
||||
m.Options = append(m.Options, context)
|
||||
}
|
||||
}
|
||||
request.Rootfs = append(request.Rootfs, &types.Mount{
|
||||
Type: m.Type,
|
||||
Source: m.Source,
|
||||
Options: m.Options,
|
||||
})
|
||||
}
|
||||
}
|
||||
info := TaskInfo{
|
||||
runtime: r.Runtime.Name,
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, c.client, &info); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if info.RootFS != nil {
|
||||
for _, m := range info.RootFS {
|
||||
request.Rootfs = append(request.Rootfs, &types.Mount{
|
||||
Type: m.Type,
|
||||
Source: m.Source,
|
||||
Options: m.Options,
|
||||
})
|
||||
}
|
||||
}
|
||||
if info.Options != nil {
|
||||
any, err := typeurl.MarshalAny(info.Options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
request.Options = any
|
||||
}
|
||||
t := &task{
|
||||
client: c.client,
|
||||
io: i,
|
||||
id: c.id,
|
||||
c: c,
|
||||
}
|
||||
if info.Checkpoint != nil {
|
||||
request.Checkpoint = info.Checkpoint
|
||||
}
|
||||
response, err := c.client.TaskService().Create(ctx, request)
|
||||
if err != nil {
|
||||
return nil, errdefs.FromGRPC(err)
|
||||
}
|
||||
t.pid = response.Pid
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (c *container) Update(ctx context.Context, opts ...UpdateContainerOpts) error {
|
||||
// fetch the current container config before updating it
|
||||
r, err := c.get(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, c.client, &r); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if _, err := c.client.ContainerService().Update(ctx, r); err != nil {
|
||||
return errdefs.FromGRPC(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *container) Checkpoint(ctx context.Context, ref string, opts ...CheckpointOpts) (Image, error) {
|
||||
index := &ocispec.Index{
|
||||
Versioned: ver.Versioned{
|
||||
SchemaVersion: 2,
|
||||
},
|
||||
Annotations: make(map[string]string),
|
||||
}
|
||||
copts := &options.CheckpointOptions{
|
||||
Exit: false,
|
||||
OpenTcp: false,
|
||||
ExternalUnixSockets: false,
|
||||
Terminal: false,
|
||||
FileLocks: true,
|
||||
EmptyNamespaces: nil,
|
||||
}
|
||||
info, err := c.Info(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
img, err := c.Image(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx, done, err := c.client.WithLease(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer done(ctx)
|
||||
|
||||
// add image name to manifest
|
||||
index.Annotations[checkpointImageNameLabel] = img.Name()
|
||||
// add runtime info to index
|
||||
index.Annotations[checkpointRuntimeNameLabel] = info.Runtime.Name
|
||||
// add snapshotter info to index
|
||||
index.Annotations[checkpointSnapshotterNameLabel] = info.Snapshotter
|
||||
|
||||
// process remaining opts
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, c.client, &info, index, copts); err != nil {
|
||||
err = errdefs.FromGRPC(err)
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
desc, err := writeIndex(ctx, index, c.client, c.ID()+"index")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
i := images.Image{
|
||||
Name: ref,
|
||||
Target: desc,
|
||||
}
|
||||
checkpoint, err := c.client.ImageService().Create(ctx, i)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewImage(c.client, checkpoint), nil
|
||||
}
|
||||
|
||||
func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, error) {
|
||||
response, err := c.client.TaskService().Get(ctx, &tasks.GetRequest{
|
||||
ContainerID: c.id,
|
||||
})
|
||||
if err != nil {
|
||||
err = errdefs.FromGRPC(err)
|
||||
if errdefs.IsNotFound(err) {
|
||||
return nil, errors.Wrapf(err, "no running task found")
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
var i cio.IO
|
||||
if ioAttach != nil && response.Process.Status != tasktypes.StatusUnknown {
|
||||
// Do not attach IO for task in unknown state, because there
|
||||
// are no fifo paths anyway.
|
||||
if i, err = attachExistingIO(response, ioAttach); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
t := &task{
|
||||
client: c.client,
|
||||
io: i,
|
||||
id: response.Process.ID,
|
||||
pid: response.Process.Pid,
|
||||
c: c,
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (c *container) get(ctx context.Context) (containers.Container, error) {
|
||||
return c.client.ContainerService().Get(ctx, c.id)
|
||||
}
|
||||
|
||||
// get the existing fifo paths from the task information stored by the daemon
|
||||
func attachExistingIO(response *tasks.GetResponse, ioAttach cio.Attach) (cio.IO, error) {
|
||||
fifoSet := loadFifos(response)
|
||||
return ioAttach(fifoSet)
|
||||
}
|
||||
|
||||
// loadFifos loads the containers fifos
|
||||
func loadFifos(response *tasks.GetResponse) *cio.FIFOSet {
|
||||
fifos := []string{
|
||||
response.Process.Stdin,
|
||||
response.Process.Stdout,
|
||||
response.Process.Stderr,
|
||||
}
|
||||
closer := func() error {
|
||||
var (
|
||||
err error
|
||||
dirs = map[string]struct{}{}
|
||||
)
|
||||
for _, f := range fifos {
|
||||
if isFifo, _ := fifo.IsFifo(f); isFifo {
|
||||
if rerr := os.Remove(f); err == nil {
|
||||
err = rerr
|
||||
}
|
||||
dirs[filepath.Dir(f)] = struct{}{}
|
||||
}
|
||||
}
|
||||
for dir := range dirs {
|
||||
// we ignore errors here because we don't
|
||||
// want to remove the directory if it isn't
|
||||
// empty
|
||||
os.Remove(dir)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return cio.NewFIFOSet(cio.Config{
|
||||
Stdin: response.Process.Stdin,
|
||||
Stdout: response.Process.Stdout,
|
||||
Stderr: response.Process.Stderr,
|
||||
Terminal: response.Process.Terminal,
|
||||
}, closer)
|
||||
}
|
156 src/runtime/vendor/github.com/containerd/containerd/container_checkpoint_opts.go generated vendored
@ -1,156 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
|
||||
tasks "github.com/containerd/containerd/api/services/tasks/v1"
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/diff"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/rootfs"
|
||||
"github.com/containerd/containerd/runtime/v2/runc/options"
|
||||
"github.com/containerd/typeurl"
|
||||
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCheckpointRWUnsupported is returned if the container runtime does not support checkpoint
|
||||
ErrCheckpointRWUnsupported = errors.New("rw checkpoint is only supported on v2 runtimes")
|
||||
// ErrMediaTypeNotFound returns an error when a media type in the manifest is unknown
|
||||
ErrMediaTypeNotFound = errors.New("media type not found")
|
||||
)
|
||||
|
||||
// CheckpointOpts are options to manage the checkpoint operation
|
||||
type CheckpointOpts func(context.Context, *Client, *containers.Container, *imagespec.Index, *options.CheckpointOptions) error
|
||||
|
||||
// WithCheckpointImage includes the container image in the checkpoint
|
||||
func WithCheckpointImage(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error {
|
||||
ir, err := client.ImageService().Get(ctx, c.Image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
index.Manifests = append(index.Manifests, ir.Target)
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithCheckpointTask includes the running task
|
||||
func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error {
|
||||
any, err := typeurl.MarshalAny(copts)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
task, err := client.TaskService().Checkpoint(ctx, &tasks.CheckpointTaskRequest{
|
||||
ContainerID: c.ID,
|
||||
Options: any,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, d := range task.Descriptors {
|
||||
platformSpec := platforms.DefaultSpec()
|
||||
index.Manifests = append(index.Manifests, imagespec.Descriptor{
|
||||
MediaType: d.MediaType,
|
||||
Size: d.Size_,
|
||||
Digest: d.Digest,
|
||||
Platform: &platformSpec,
|
||||
Annotations: d.Annotations,
|
||||
})
|
||||
}
|
||||
// save copts
|
||||
data, err := any.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r := bytes.NewReader(data)
|
||||
desc, err := writeContent(ctx, client.ContentStore(), images.MediaTypeContainerd1CheckpointOptions, c.ID+"-checkpoint-options", r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
desc.Platform = &imagespec.Platform{
|
||||
OS: runtime.GOOS,
|
||||
Architecture: runtime.GOARCH,
|
||||
}
|
||||
index.Manifests = append(index.Manifests, desc)
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithCheckpointRuntime includes the container runtime info
|
||||
func WithCheckpointRuntime(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error {
|
||||
if c.Runtime.Options != nil {
|
||||
data, err := c.Runtime.Options.Marshal()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r := bytes.NewReader(data)
|
||||
desc, err := writeContent(ctx, client.ContentStore(), images.MediaTypeContainerd1CheckpointRuntimeOptions, c.ID+"-runtime-options", r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
desc.Platform = &imagespec.Platform{
|
||||
OS: runtime.GOOS,
|
||||
Architecture: runtime.GOARCH,
|
||||
}
|
||||
index.Manifests = append(index.Manifests, desc)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithCheckpointRW includes the rw in the checkpoint
|
||||
func WithCheckpointRW(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error {
|
||||
diffOpts := []diff.Opt{
|
||||
diff.WithReference(fmt.Sprintf("checkpoint-rw-%s", c.SnapshotKey)),
|
||||
}
|
||||
rw, err := rootfs.CreateDiff(ctx,
|
||||
c.SnapshotKey,
|
||||
client.SnapshotService(c.Snapshotter),
|
||||
client.DiffService(),
|
||||
diffOpts...,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
}
|
||||
rw.Platform = &imagespec.Platform{
|
||||
OS: runtime.GOOS,
|
||||
Architecture: runtime.GOARCH,
|
||||
}
|
||||
index.Manifests = append(index.Manifests, rw)
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithCheckpointTaskExit causes the task to exit after checkpoint
|
||||
func WithCheckpointTaskExit(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error {
|
||||
copts.Exit = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetIndexByMediaType returns the index in a manifest for the specified media type
|
||||
func GetIndexByMediaType(index *imagespec.Index, mt string) (*imagespec.Descriptor, error) {
|
||||
for _, d := range index.Manifests {
|
||||
if d.MediaType == mt {
|
||||
return &d, nil
|
||||
}
|
||||
}
|
||||
return nil, ErrMediaTypeNotFound
|
||||
}
|
288 src/runtime/vendor/github.com/containerd/containerd/container_opts.go generated vendored
@ -1,288 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/oci"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
"github.com/containerd/typeurl"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// DeleteOpts allows the caller to set options for the deletion of a container
|
||||
type DeleteOpts func(ctx context.Context, client *Client, c containers.Container) error
|
||||
|
||||
// NewContainerOpts allows the caller to set additional options when creating a container
|
||||
type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
|
||||
|
||||
// UpdateContainerOpts allows the caller to set additional options when updating a container
|
||||
type UpdateContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
|
||||
|
||||
// InfoOpts controls how container metadata is fetched and returned
|
||||
type InfoOpts func(*InfoConfig)
|
||||
|
||||
// InfoConfig specifies how container metadata is fetched
|
||||
type InfoConfig struct {
|
||||
// Refresh will to a fetch of the latest container metadata
|
||||
Refresh bool
|
||||
}
|
||||
|
||||
// WithRuntime allows a user to specify the runtime name and additional options that should
|
||||
// be used to create tasks for the container
|
||||
func WithRuntime(name string, options interface{}) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
var (
|
||||
any *types.Any
|
||||
err error
|
||||
)
|
||||
if options != nil {
|
||||
any, err = typeurl.MarshalAny(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.Runtime = containers.RuntimeInfo{
|
||||
Name: name,
|
||||
Options: any,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithImage sets the provided image as the base for the container
|
||||
func WithImage(i Image) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
c.Image = i.Name()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithImageName allows setting the image name as the base for the container
|
||||
func WithImageName(n string) NewContainerOpts {
|
||||
return func(ctx context.Context, _ *Client, c *containers.Container) error {
|
||||
c.Image = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithContainerLabels sets the provided labels to the container.
|
||||
// The existing labels are cleared.
|
||||
// Use WithAdditionalContainerLabels to preserve the existing labels.
|
||||
func WithContainerLabels(labels map[string]string) NewContainerOpts {
|
||||
return func(_ context.Context, _ *Client, c *containers.Container) error {
|
||||
c.Labels = labels
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAdditionalContainerLabels adds the provided labels to the container
|
||||
// The existing labels are preserved as long as they do not conflict with the added labels.
|
||||
func WithAdditionalContainerLabels(labels map[string]string) NewContainerOpts {
|
||||
return func(_ context.Context, _ *Client, c *containers.Container) error {
|
||||
if c.Labels == nil {
|
||||
c.Labels = labels
|
||||
return nil
|
||||
}
|
||||
for k, v := range labels {
|
||||
c.Labels[k] = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithImageStopSignal sets a well-known containerd label (StopSignalLabel)
|
||||
// on the container for storing the stop signal specified in the OCI image
|
||||
// config
|
||||
func WithImageStopSignal(image Image, defaultSignal string) NewContainerOpts {
|
||||
return func(ctx context.Context, _ *Client, c *containers.Container) error {
|
||||
if c.Labels == nil {
|
||||
c.Labels = make(map[string]string)
|
||||
}
|
||||
stopSignal, err := GetOCIStopSignal(ctx, image, defaultSignal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Labels[StopSignalLabel] = stopSignal
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSnapshotter sets the provided snapshotter for use by the container
|
||||
//
|
||||
// This option must appear before other snapshotter options to have an effect.
|
||||
func WithSnapshotter(name string) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
c.Snapshotter = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSnapshot uses an existing root filesystem for the container
|
||||
func WithSnapshot(id string) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
// check that the snapshot exists, if not, fail on creation
|
||||
var err error
|
||||
c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s, err := client.getSnapshotter(ctx, c.Snapshotter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := s.Mounts(ctx, id); err != nil {
|
||||
return err
|
||||
}
|
||||
c.SnapshotKey = id
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithNewSnapshot allocates a new snapshot to be used by the container as the
|
||||
// root filesystem in read-write mode
|
||||
func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
diffIDs, err := i.RootFS(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parent := identity.ChainID(diffIDs).String()
|
||||
c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s, err := client.getSnapshotter(ctx, c.Snapshotter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := s.Prepare(ctx, id, parent, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
c.SnapshotKey = id
|
||||
c.Image = i.Name()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSnapshotCleanup deletes the rootfs snapshot allocated for the container
|
||||
func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
|
||||
if c.SnapshotKey != "" {
|
||||
if c.Snapshotter == "" {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot")
|
||||
}
|
||||
s, err := client.getSnapshotter(ctx, c.Snapshotter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.Remove(ctx, c.SnapshotKey); err != nil && !errdefs.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithNewSnapshotView allocates a new snapshot to be used by the container as the
|
||||
// root filesystem in read-only mode
|
||||
func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parent := identity.ChainID(diffIDs).String()
|
||||
c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s, err := client.getSnapshotter(ctx, c.Snapshotter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := s.View(ctx, id, parent, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
c.SnapshotKey = id
|
||||
c.Image = i.Name()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithContainerExtension appends extension data to the container object.
|
||||
// Use this to decorate the container object with additional data for the client
|
||||
// integration.
|
||||
//
|
||||
// Make sure to register the type of `extension` in the typeurl package via
|
||||
// `typeurl.Register` or container creation may fail.
|
||||
func WithContainerExtension(name string, extension interface{}) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
if name == "" {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "extension key must not be zero-length")
|
||||
}
|
||||
|
||||
any, err := typeurl.MarshalAny(extension)
|
||||
if err != nil {
|
||||
if errors.Is(err, typeurl.ErrNotFound) {
|
||||
return errors.Wrapf(err, "extension %q is not registered with the typeurl package, see `typeurl.Register`", name)
|
||||
}
|
||||
return errors.Wrap(err, "error marshalling extension")
|
||||
}
|
||||
|
||||
if c.Extensions == nil {
|
||||
c.Extensions = make(map[string]types.Any)
|
||||
}
|
||||
c.Extensions[name] = *any
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithNewSpec generates a new spec for a new container
|
||||
func WithNewSpec(opts ...oci.SpecOpts) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
s, err := oci.GenerateSpec(ctx, client, c, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Spec, err = typeurl.MarshalAny(s)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// WithSpec sets the provided spec on the container
|
||||
func WithSpec(s *oci.Spec, opts ...oci.SpecOpts) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
if err := oci.ApplyOpts(ctx, client, c, s, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var err error
|
||||
c.Spec, err = typeurl.MarshalAny(s)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// WithoutRefreshedMetadata will use the current metadata attached to the container object
|
||||
func WithoutRefreshedMetadata(i *InfoConfig) {
|
||||
i.Refresh = false
|
||||
}
|
116 src/runtime/vendor/github.com/containerd/containerd/container_opts_unix.go generated vendored
@ -1,116 +0,0 @@
|
||||
// +build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
)
|
||||
|
||||
// WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the
|
||||
// filesystem to be used by a container with user namespaces
|
||||
func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts {
|
||||
return withRemappedSnapshotBase(id, i, uid, gid, false)
|
||||
}
|
||||
|
||||
// WithRemappedSnapshotView is similar to WithRemappedSnapshot but rootfs is mounted as read-only.
|
||||
func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerOpts {
|
||||
return withRemappedSnapshotBase(id, i, uid, gid, true)
|
||||
}
|
||||
|
||||
func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
parent = identity.ChainID(diffIDs).String()
|
||||
usernsID = fmt.Sprintf("%s-%d-%d", parent, uid, gid)
|
||||
)
|
||||
c.Snapshotter, err = client.resolveSnapshotterName(ctx, c.Snapshotter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
snapshotter, err := client.getSnapshotter(ctx, c.Snapshotter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := snapshotter.Stat(ctx, usernsID); err == nil {
|
||||
if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil {
|
||||
c.SnapshotKey = id
|
||||
c.Image = i.Name()
|
||||
return nil
|
||||
} else if !errdefs.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := remapRootFS(ctx, mounts, uid, gid); err != nil {
|
||||
snapshotter.Remove(ctx, usernsID)
|
||||
return err
|
||||
}
|
||||
if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil {
|
||||
return err
|
||||
}
|
||||
if readonly {
|
||||
_, err = snapshotter.View(ctx, id, usernsID)
|
||||
} else {
|
||||
_, err = snapshotter.Prepare(ctx, id, usernsID)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.SnapshotKey = id
|
||||
c.Image = i.Name()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func remapRootFS(ctx context.Context, mounts []mount.Mount, uid, gid uint32) error {
|
||||
return mount.WithTempMount(ctx, mounts, func(root string) error {
|
||||
return filepath.Walk(root, incrementFS(root, uid, gid))
|
||||
})
|
||||
}
|
||||
|
||||
func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc {
|
||||
return func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var (
|
||||
stat = info.Sys().(*syscall.Stat_t)
|
||||
u, g = int(stat.Uid + uidInc), int(stat.Gid + gidInc)
|
||||
)
|
||||
// be sure the lchown the path as to not de-reference the symlink to a host file
|
||||
return os.Lchown(path, u, g)
|
||||
}
|
||||
}
|
149 src/runtime/vendor/github.com/containerd/containerd/container_restore_opts.go generated vendored
@ -1,149 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
ptypes "github.com/gogo/protobuf/types"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
imagespec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrImageNameNotFoundInIndex is returned when the image name is not found in the index
|
||||
ErrImageNameNotFoundInIndex = errors.New("image name not found in index")
|
||||
// ErrRuntimeNameNotFoundInIndex is returned when the runtime is not found in the index
|
||||
ErrRuntimeNameNotFoundInIndex = errors.New("runtime not found in index")
|
||||
// ErrSnapshotterNameNotFoundInIndex is returned when the snapshotter is not found in the index
|
||||
ErrSnapshotterNameNotFoundInIndex = errors.New("snapshotter not found in index")
|
||||
)
|
||||
|
||||
// RestoreOpts are options to manage the restore operation
|
||||
type RestoreOpts func(context.Context, string, *Client, Image, *imagespec.Index) NewContainerOpts
|
||||
|
||||
// WithRestoreImage restores the image for the container
|
||||
func WithRestoreImage(ctx context.Context, id string, client *Client, checkpoint Image, index *imagespec.Index) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
name, ok := index.Annotations[checkpointImageNameLabel]
|
||||
if !ok || name == "" {
|
||||
return ErrRuntimeNameNotFoundInIndex
|
||||
}
|
||||
snapshotter, ok := index.Annotations[checkpointSnapshotterNameLabel]
|
||||
if !ok || name == "" {
|
||||
return ErrSnapshotterNameNotFoundInIndex
|
||||
}
|
||||
i, err := client.GetImage(ctx, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
parent := identity.ChainID(diffIDs).String()
|
||||
if _, err := client.SnapshotService(snapshotter).Prepare(ctx, id, parent); err != nil {
|
||||
return err
|
||||
}
|
||||
c.Image = i.Name()
|
||||
c.SnapshotKey = id
|
||||
c.Snapshotter = snapshotter
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRestoreRuntime restores the runtime for the container
|
||||
func WithRestoreRuntime(ctx context.Context, id string, client *Client, checkpoint Image, index *imagespec.Index) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
name, ok := index.Annotations[checkpointRuntimeNameLabel]
|
||||
if !ok {
|
||||
return ErrRuntimeNameNotFoundInIndex
|
||||
}
|
||||
|
||||
// restore options if present
|
||||
m, err := GetIndexByMediaType(index, images.MediaTypeContainerd1CheckpointRuntimeOptions)
|
||||
if err != nil {
|
||||
if err != ErrMediaTypeNotFound {
|
||||
return err
|
||||
}
|
||||
}
|
||||
var options ptypes.Any
|
||||
if m != nil {
|
||||
store := client.ContentStore()
|
||||
data, err := content.ReadBlob(ctx, store, *m)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to read checkpoint runtime")
|
||||
}
|
||||
if err := proto.Unmarshal(data, &options); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
c.Runtime = containers.RuntimeInfo{
|
||||
Name: name,
|
||||
Options: &options,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRestoreSpec restores the spec from the checkpoint for the container
|
||||
func WithRestoreSpec(ctx context.Context, id string, client *Client, checkpoint Image, index *imagespec.Index) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
m, err := GetIndexByMediaType(index, images.MediaTypeContainerd1CheckpointConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
store := client.ContentStore()
|
||||
data, err := content.ReadBlob(ctx, store, *m)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to read checkpoint config")
|
||||
}
|
||||
var any ptypes.Any
|
||||
if err := proto.Unmarshal(data, &any); err != nil {
|
||||
return err
|
||||
}
|
||||
c.Spec = &any
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithRestoreRW restores the rw layer from the checkpoint for the container
|
||||
func WithRestoreRW(ctx context.Context, id string, client *Client, checkpoint Image, index *imagespec.Index) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
// apply rw layer
|
||||
rw, err := GetIndexByMediaType(index, imagespec.MediaTypeImageLayerGzip)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, c.SnapshotKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := client.DiffService().Apply(ctx, *rw, mounts); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
40 src/runtime/vendor/github.com/containerd/containerd/containerd.service generated vendored
@ -1,40 +0,0 @@
# Copyright The containerd Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target

[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd

Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
# Comment TasksMax if your systemd version does not supports it.
# Only systemd 226 and above support this version.
TasksMax=infinity
OOMScoreAdjust=-999

[Install]
WantedBy=multi-user.target
112 src/runtime/vendor/github.com/containerd/containerd/containers/containers.go generated vendored
@ -1,112 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package containers

import (
	"context"
	"time"

	"github.com/gogo/protobuf/types"
)

// Container represents the set of data pinned by a container. Unless otherwise
// noted, the resources here are considered in use by the container.
//
// The resources specified in this object are used to create tasks from the container.
type Container struct {
	// ID uniquely identifies the container in a namespace.
	//
	// This property is required and cannot be changed after creation.
	ID string

	// Labels provide metadata extension for a container.
	//
	// These are optional and fully mutable.
	Labels map[string]string

	// Image specifies the image reference used for a container.
	//
	// This property is optional and mutable.
	Image string

	// Runtime specifies which runtime should be used when launching container
	// tasks.
	//
	// This property is required and immutable.
	Runtime RuntimeInfo

	// Spec should carry the runtime specification used to implement the
	// container.
	//
	// This field is required but mutable.
	Spec *types.Any

	// SnapshotKey specifies the snapshot key to use for the container's root
	// filesystem. When starting a task from this container, a caller should
	// look up the mounts from the snapshot service and include those on the
	// task create request.
	//
	// This field is not required but mutable.
	SnapshotKey string

	// Snapshotter specifies the snapshotter name used for rootfs
	//
	// This field is not required but immutable.
	Snapshotter string

	// CreatedAt is the time at which the container was created.
	CreatedAt time.Time

	// UpdatedAt is the time at which the container was updated.
	UpdatedAt time.Time

	// Extensions stores client-specified metadata
	Extensions map[string]types.Any
}

// RuntimeInfo holds runtime specific information
type RuntimeInfo struct {
	Name    string
	Options *types.Any
}

// Store interacts with the underlying container storage
type Store interface {
	// Get a container using the id.
	//
	// Container object is returned on success. If the id is not known to the
	// store, an error will be returned.
	Get(ctx context.Context, id string) (Container, error)

	// List returns containers that match one or more of the provided filters.
	List(ctx context.Context, filters ...string) ([]Container, error)

	// Create a container in the store from the provided container.
	Create(ctx context.Context, container Container) (Container, error)

	// Update the container with the provided container object. ID must be set.
	//
	// If one or more fieldpaths are provided, only the field corresponding to
	// the fieldpaths will be mutated.
	Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error)

	// Delete a container using the id.
	//
	// nil will be returned on success. If the container is not known to the
	// store, ErrNotFound will be returned.
	Delete(ctx context.Context, id string) error
}
196 src/runtime/vendor/github.com/containerd/containerd/containerstore.go generated vendored
@ -1,196 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
|
||||
containersapi "github.com/containerd/containerd/api/services/containers/v1"
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
ptypes "github.com/gogo/protobuf/types"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type remoteContainers struct {
|
||||
client containersapi.ContainersClient
|
||||
}
|
||||
|
||||
var _ containers.Store = &remoteContainers{}
|
||||
|
||||
// NewRemoteContainerStore returns the container Store connected with the provided client
|
||||
func NewRemoteContainerStore(client containersapi.ContainersClient) containers.Store {
|
||||
return &remoteContainers{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *remoteContainers) Get(ctx context.Context, id string) (containers.Container, error) {
|
||||
resp, err := r.client.Get(ctx, &containersapi.GetContainerRequest{
|
||||
ID: id,
|
||||
})
|
||||
if err != nil {
|
||||
return containers.Container{}, errdefs.FromGRPC(err)
|
||||
}
|
||||
|
||||
return containerFromProto(&resp.Container), nil
|
||||
}
|
||||
|
||||
func (r *remoteContainers) List(ctx context.Context, filters ...string) ([]containers.Container, error) {
|
||||
containers, err := r.stream(ctx, filters...)
|
||||
if err != nil {
|
||||
if err == errStreamNotAvailable {
|
||||
return r.list(ctx, filters...)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return containers, nil
|
||||
}
|
||||
|
||||
func (r *remoteContainers) list(ctx context.Context, filters ...string) ([]containers.Container, error) {
|
||||
resp, err := r.client.List(ctx, &containersapi.ListContainersRequest{
|
||||
Filters: filters,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errdefs.FromGRPC(err)
|
||||
}
|
||||
return containersFromProto(resp.Containers), nil
|
||||
}
|
||||
|
||||
var errStreamNotAvailable = errors.New("streaming api not available")
|
||||
|
||||
func (r *remoteContainers) stream(ctx context.Context, filters ...string) ([]containers.Container, error) {
|
||||
session, err := r.client.ListStream(ctx, &containersapi.ListContainersRequest{
|
||||
Filters: filters,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errdefs.FromGRPC(err)
|
||||
}
|
||||
var containers []containers.Container
|
||||
for {
|
||||
c, err := session.Recv()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return containers, nil
|
||||
}
|
||||
if s, ok := status.FromError(err); ok {
|
||||
if s.Code() == codes.Unimplemented {
|
||||
return nil, errStreamNotAvailable
|
||||
}
|
||||
}
|
||||
return nil, errdefs.FromGRPC(err)
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return containers, ctx.Err()
|
||||
default:
|
||||
containers = append(containers, containerFromProto(c.Container))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *remoteContainers) Create(ctx context.Context, container containers.Container) (containers.Container, error) {
|
||||
created, err := r.client.Create(ctx, &containersapi.CreateContainerRequest{
|
||||
Container: containerToProto(&container),
|
||||
})
|
||||
if err != nil {
|
||||
return containers.Container{}, errdefs.FromGRPC(err)
|
||||
}
|
||||
|
||||
return containerFromProto(&created.Container), nil
|
||||
|
||||
}
|
||||
|
||||
func (r *remoteContainers) Update(ctx context.Context, container containers.Container, fieldpaths ...string) (containers.Container, error) {
|
||||
var updateMask *ptypes.FieldMask
|
||||
if len(fieldpaths) > 0 {
|
||||
updateMask = &ptypes.FieldMask{
|
||||
Paths: fieldpaths,
|
||||
}
|
||||
}
|
||||
|
||||
updated, err := r.client.Update(ctx, &containersapi.UpdateContainerRequest{
|
||||
Container: containerToProto(&container),
|
||||
UpdateMask: updateMask,
|
||||
})
|
||||
if err != nil {
|
||||
return containers.Container{}, errdefs.FromGRPC(err)
|
||||
}
|
||||
|
||||
return containerFromProto(&updated.Container), nil
|
||||
|
||||
}
|
||||
|
||||
func (r *remoteContainers) Delete(ctx context.Context, id string) error {
|
||||
_, err := r.client.Delete(ctx, &containersapi.DeleteContainerRequest{
|
||||
ID: id,
|
||||
})
|
||||
|
||||
return errdefs.FromGRPC(err)
|
||||
|
||||
}
|
||||
|
||||
func containerToProto(container *containers.Container) containersapi.Container {
|
||||
return containersapi.Container{
|
||||
ID: container.ID,
|
||||
Labels: container.Labels,
|
||||
Image: container.Image,
|
||||
Runtime: &containersapi.Container_Runtime{
|
||||
Name: container.Runtime.Name,
|
||||
Options: container.Runtime.Options,
|
||||
},
|
||||
Spec: container.Spec,
|
||||
Snapshotter: container.Snapshotter,
|
||||
SnapshotKey: container.SnapshotKey,
|
||||
Extensions: container.Extensions,
|
||||
}
|
||||
}
|
||||
|
||||
func containerFromProto(containerpb *containersapi.Container) containers.Container {
|
||||
var runtime containers.RuntimeInfo
|
||||
if containerpb.Runtime != nil {
|
||||
runtime = containers.RuntimeInfo{
|
||||
Name: containerpb.Runtime.Name,
|
||||
Options: containerpb.Runtime.Options,
|
||||
}
|
||||
}
|
||||
return containers.Container{
|
||||
ID: containerpb.ID,
|
||||
Labels: containerpb.Labels,
|
||||
Image: containerpb.Image,
|
||||
Runtime: runtime,
|
||||
Spec: containerpb.Spec,
|
||||
Snapshotter: containerpb.Snapshotter,
|
||||
SnapshotKey: containerpb.SnapshotKey,
|
||||
CreatedAt: containerpb.CreatedAt,
|
||||
UpdatedAt: containerpb.UpdatedAt,
|
||||
Extensions: containerpb.Extensions,
|
||||
}
|
||||
}
|
||||
|
||||
func containersFromProto(containerspb []containersapi.Container) []containers.Container {
|
||||
var containers []containers.Container
|
||||
|
||||
for _, container := range containerspb {
|
||||
containers = append(containers, containerFromProto(&container))
|
||||
}
|
||||
|
||||
return containers
|
||||
}
|
52 src/runtime/vendor/github.com/containerd/containerd/content/adaptor.go generated vendored
@ -1,52 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package content

import (
	"strings"

	"github.com/containerd/containerd/filters"
)

// AdaptInfo returns `filters.Adaptor` that handles `content.Info`.
func AdaptInfo(info Info) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}

		switch fieldpath[0] {
		case "digest":
			return info.Digest.String(), true
		case "size":
			// TODO: support size based filtering
		case "labels":
			return checkMap(fieldpath[1:], info.Labels)
		}

		return "", false
	})
}

func checkMap(fieldpath []string, m map[string]string) (string, bool) {
	if len(m) == 0 {
		return "", false
	}

	value, ok := m[strings.Join(fieldpath, ".")]
	return value, ok
}
182 src/runtime/vendor/github.com/containerd/containerd/content/content.go generated vendored
@ -1,182 +0,0 @@
275  src/runtime/vendor/github.com/containerd/containerd/content/helpers.go  generated vendored
@ -1,275 +0,0 @@
71  src/runtime/vendor/github.com/containerd/containerd/content/proxy/content_reader.go  generated vendored
@ -1,71 +0,0 @@
234  src/runtime/vendor/github.com/containerd/containerd/content/proxy/content_store.go  generated vendored
@ -1,234 +0,0 @@
146  src/runtime/vendor/github.com/containerd/containerd/content/proxy/content_writer.go  generated vendored
@ -1,146 +0,0 @@
32  src/runtime/vendor/github.com/containerd/containerd/defaults/defaults.go  generated vendored
@ -1,32 +0,0 @@
39  src/runtime/vendor/github.com/containerd/containerd/defaults/defaults_unix.go  generated vendored
@ -1,39 +0,0 @@
48  src/runtime/vendor/github.com/containerd/containerd/defaults/defaults_windows.go  generated vendored
@ -1,48 +0,0 @@
19  src/runtime/vendor/github.com/containerd/containerd/defaults/doc.go  generated vendored
@ -1,19 +0,0 @@
116  src/runtime/vendor/github.com/containerd/containerd/diff.go  generated vendored
@ -1,116 +0,0 @@
107  src/runtime/vendor/github.com/containerd/containerd/diff/diff.go  generated vendored
@ -1,107 +0,0 @@
191  src/runtime/vendor/github.com/containerd/containerd/diff/stream.go  generated vendored
@ -1,191 +0,0 @@
147  src/runtime/vendor/github.com/containerd/containerd/diff/stream_unix.go  generated vendored
@ -1,147 +0,0 @@
166  src/runtime/vendor/github.com/containerd/containerd/diff/stream_windows.go  generated vendored
@ -1,166 +0,0 @@
|
||||
}
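
On Windows the payload travels over a named pipe instead of an inherited file descriptor: the parent exports the pipe path in STREAM_PROCESSOR_PIPE (the processorPipe constant above) and serves the serialized payload on it. Below is a minimal sketch of the processor side, assuming the go-winio DialPipe API and an arbitrary timeout; stdin/stdout still carry the content stream exactly as in the Unix variant.

// +build windows

package main

import (
	"io/ioutil"
	"log"
	"os"
	"time"

	winio "github.com/Microsoft/go-winio"
)

func main() {
	// The parent sets processorPipe (STREAM_PROCESSOR_PIPE) to the pipe path it listens on.
	path := os.Getenv("STREAM_PROCESSOR_PIPE")
	if path == "" {
		log.Fatal("no payload pipe configured")
	}

	timeout := 5 * time.Second // arbitrary; not mandated by the parent
	conn, err := winio.DialPipe(path, &timeout)
	if err != nil {
		log.Fatalf("dial payload pipe: %v", err)
	}
	defer conn.Close()

	payload, err := ioutil.ReadAll(conn) // serialized types.Any written by the accept goroutine above
	if err != nil {
		log.Fatalf("read payload: %v", err)
	}
	log.Printf("received %d payload bytes", len(payload))
}
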
|
122 src/runtime/vendor/github.com/containerd/containerd/events.go generated vendored
@ -1,122 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
eventsapi "github.com/containerd/containerd/api/services/events/v1"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/events"
|
||||
"github.com/containerd/typeurl"
|
||||
)
|
||||
|
||||
// EventService handles the publish, forward and subscribe of events.
|
||||
type EventService interface {
|
||||
events.Publisher
|
||||
events.Forwarder
|
||||
events.Subscriber
|
||||
}
|
||||
|
||||
// NewEventServiceFromClient returns a new event service which communicates
|
||||
// over a GRPC connection.
|
||||
func NewEventServiceFromClient(client eventsapi.EventsClient) EventService {
|
||||
return &eventRemote{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
type eventRemote struct {
|
||||
client eventsapi.EventsClient
|
||||
}
|
||||
|
||||
func (e *eventRemote) Publish(ctx context.Context, topic string, event events.Event) error {
|
||||
any, err := typeurl.MarshalAny(event)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req := &eventsapi.PublishRequest{
|
||||
Topic: topic,
|
||||
Event: any,
|
||||
}
|
||||
if _, err := e.client.Publish(ctx, req); err != nil {
|
||||
return errdefs.FromGRPC(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *eventRemote) Forward(ctx context.Context, envelope *events.Envelope) error {
|
||||
req := &eventsapi.ForwardRequest{
|
||||
Envelope: &eventsapi.Envelope{
|
||||
Timestamp: envelope.Timestamp,
|
||||
Namespace: envelope.Namespace,
|
||||
Topic: envelope.Topic,
|
||||
Event: envelope.Event,
|
||||
},
|
||||
}
|
||||
if _, err := e.client.Forward(ctx, req); err != nil {
|
||||
return errdefs.FromGRPC(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *eventRemote) Subscribe(ctx context.Context, filters ...string) (ch <-chan *events.Envelope, errs <-chan error) {
|
||||
var (
|
||||
evq = make(chan *events.Envelope)
|
||||
errq = make(chan error, 1)
|
||||
)
|
||||
|
||||
errs = errq
|
||||
ch = evq
|
||||
|
||||
session, err := e.client.Subscribe(ctx, &eventsapi.SubscribeRequest{
|
||||
Filters: filters,
|
||||
})
|
||||
if err != nil {
|
||||
errq <- err
|
||||
close(errq)
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(errq)
|
||||
|
||||
for {
|
||||
ev, err := session.Recv()
|
||||
if err != nil {
|
||||
errq <- err
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case evq <- &events.Envelope{
|
||||
Timestamp: ev.Timestamp,
|
||||
Namespace: ev.Namespace,
|
||||
Topic: ev.Topic,
|
||||
Event: ev.Event,
|
||||
}:
|
||||
case <-ctx.Done():
|
||||
if cerr := ctx.Err(); cerr != context.Canceled {
|
||||
errq <- cerr
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return ch, errs
|
||||
}
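
The eventRemote wrapper above is what backs the containerd client's event API. A hedged usage sketch follows, assuming the client's EventService accessor from client.go, the default containerd socket path, and the "default" namespace; adjust all three for a real deployment.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// Assumed socket path; adjust for the local installation.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx, cancel := context.WithCancel(namespaces.WithNamespace(context.Background(), "default"))
	defer cancel()

	// EventService() wraps the raw gRPC client via NewEventServiceFromClient above.
	ch, errs := client.EventService().Subscribe(ctx, `topic~="/tasks/"`)
	for {
		select {
		case env := <-ch:
			log.Printf("%s %s %s", env.Timestamp, env.Namespace, env.Topic)
		case err := <-errs:
			if err != nil {
				log.Fatal(err)
			}
			return
		}
	}
}
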
|
251 src/runtime/vendor/github.com/containerd/containerd/events/exchange/exchange.go generated vendored
@ -1,251 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package exchange
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/events"
|
||||
"github.com/containerd/containerd/filters"
|
||||
"github.com/containerd/containerd/identifiers"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/typeurl"
|
||||
goevents "github.com/docker/go-events"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Exchange broadcasts events
|
||||
type Exchange struct {
|
||||
broadcaster *goevents.Broadcaster
|
||||
}
|
||||
|
||||
// NewExchange returns a new event Exchange
|
||||
func NewExchange() *Exchange {
|
||||
return &Exchange{
|
||||
broadcaster: goevents.NewBroadcaster(),
|
||||
}
|
||||
}
|
||||
|
||||
var _ events.Publisher = &Exchange{}
|
||||
var _ events.Forwarder = &Exchange{}
|
||||
var _ events.Subscriber = &Exchange{}
|
||||
|
||||
// Forward accepts an envelope to be directly distributed on the exchange.
|
||||
//
|
||||
// This is useful when an event is forwarded on behalf of another namespace or
|
||||
// when the event is propagated on behalf of another publisher.
|
||||
func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) {
|
||||
if err := validateEnvelope(envelope); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
logger := log.G(ctx).WithFields(logrus.Fields{
|
||||
"topic": envelope.Topic,
|
||||
"ns": envelope.Namespace,
|
||||
"type": envelope.Event.TypeUrl,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("error forwarding event")
|
||||
} else {
|
||||
logger.Debug("event forwarded")
|
||||
}
|
||||
}()
|
||||
|
||||
return e.broadcaster.Write(envelope)
|
||||
}
|
||||
|
||||
// Publish packages and sends an event. The caller will be considered the
|
||||
// initial publisher of the event. This means the timestamp will be calculated
|
||||
// at this point and this method may read from the calling context.
|
||||
func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event) (err error) {
|
||||
var (
|
||||
namespace string
|
||||
encoded *types.Any
|
||||
envelope events.Envelope
|
||||
)
|
||||
|
||||
namespace, err = namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed publishing event")
|
||||
}
|
||||
if err := validateTopic(topic); err != nil {
|
||||
return errors.Wrapf(err, "envelope topic %q", topic)
|
||||
}
|
||||
|
||||
encoded, err = typeurl.MarshalAny(event)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
envelope.Timestamp = time.Now().UTC()
|
||||
envelope.Namespace = namespace
|
||||
envelope.Topic = topic
|
||||
envelope.Event = encoded
|
||||
|
||||
defer func() {
|
||||
logger := log.G(ctx).WithFields(logrus.Fields{
|
||||
"topic": envelope.Topic,
|
||||
"ns": envelope.Namespace,
|
||||
"type": envelope.Event.TypeUrl,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
logger.WithError(err).Error("error publishing event")
|
||||
} else {
|
||||
logger.Debug("event published")
|
||||
}
|
||||
}()
|
||||
|
||||
return e.broadcaster.Write(&envelope)
|
||||
}
|
||||
|
||||
// Subscribe to events on the exchange. Events are sent through the returned
|
||||
// channel ch. If an error is encountered, it will be sent on channel errs and
|
||||
// errs will be closed. To end the subscription, cancel the provided context.
|
||||
//
|
||||
// Zero or more filters may be provided as strings. Only events that match
|
||||
// *any* of the provided filters will be sent on the channel. The filters use
|
||||
// the standard containerd filters package syntax.
|
||||
func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) {
|
||||
var (
|
||||
evch = make(chan *events.Envelope)
|
||||
errq = make(chan error, 1)
|
||||
channel = goevents.NewChannel(0)
|
||||
queue = goevents.NewQueue(channel)
|
||||
dst goevents.Sink = queue
|
||||
)
|
||||
|
||||
closeAll := func() {
|
||||
channel.Close()
|
||||
queue.Close()
|
||||
e.broadcaster.Remove(dst)
|
||||
close(errq)
|
||||
}
|
||||
|
||||
ch = evch
|
||||
errs = errq
|
||||
|
||||
if len(fs) > 0 {
|
||||
filter, err := filters.ParseAll(fs...)
|
||||
if err != nil {
|
||||
errq <- errors.Wrapf(err, "failed parsing subscription filters")
|
||||
closeAll()
|
||||
return
|
||||
}
|
||||
|
||||
dst = goevents.NewFilter(queue, goevents.MatcherFunc(func(gev goevents.Event) bool {
|
||||
return filter.Match(adapt(gev))
|
||||
}))
|
||||
}
|
||||
|
||||
e.broadcaster.Add(dst)
|
||||
|
||||
go func() {
|
||||
defer closeAll()
|
||||
|
||||
var err error
|
||||
loop:
|
||||
for {
|
||||
select {
|
||||
case ev := <-channel.C:
|
||||
env, ok := ev.(*events.Envelope)
|
||||
if !ok {
|
||||
// TODO(stevvooe): For the most part, we are well protected
|
||||
// from this condition. Both Forward and Publish protect
|
||||
// from this.
|
||||
err = errors.Errorf("invalid envelope encountered %#v; please file a bug", ev)
|
||||
break
|
||||
}
|
||||
|
||||
select {
|
||||
case evch <- env:
|
||||
case <-ctx.Done():
|
||||
break loop
|
||||
}
|
||||
case <-ctx.Done():
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if cerr := ctx.Err(); cerr != context.Canceled {
|
||||
err = cerr
|
||||
}
|
||||
}
|
||||
|
||||
errq <- err
|
||||
}()
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func validateTopic(topic string) error {
|
||||
if topic == "" {
|
||||
return errors.Wrap(errdefs.ErrInvalidArgument, "must not be empty")
|
||||
}
|
||||
|
||||
if topic[0] != '/' {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'")
|
||||
}
|
||||
|
||||
if len(topic) == 1 {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component")
|
||||
}
|
||||
|
||||
components := strings.Split(topic[1:], "/")
|
||||
for _, component := range components {
|
||||
if err := identifiers.Validate(component); err != nil {
|
||||
return errors.Wrapf(err, "failed validation on component %q", component)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateEnvelope(envelope *events.Envelope) error {
|
||||
if err := identifiers.Validate(envelope.Namespace); err != nil {
|
||||
return errors.Wrapf(err, "event envelope has invalid namespace")
|
||||
}
|
||||
|
||||
if err := validateTopic(envelope.Topic); err != nil {
|
||||
return errors.Wrapf(err, "envelope topic %q", envelope.Topic)
|
||||
}
|
||||
|
||||
if envelope.Timestamp.IsZero() {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "timestamp must be set on forwarded event")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func adapt(ev interface{}) filters.Adaptor {
|
||||
if adaptor, ok := ev.(filters.Adaptor); ok {
|
||||
return adaptor
|
||||
}
|
||||
|
||||
return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
|
||||
return "", false
|
||||
})
|
||||
}
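
The exchange can be exercised without a daemon. The sketch below subscribes with a topic filter, forwards a hand-built envelope, and reads it back on the channel; the namespace, topic, and Any payload are illustrative values only, chosen to satisfy validateEnvelope above.

package main

import (
	"context"
	"log"
	"time"

	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/events/exchange"
	"github.com/gogo/protobuf/types"
)

func main() {
	ex := exchange.NewExchange()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Only envelopes whose topic matches the selector reach the channel.
	ch, errs := ex.Subscribe(ctx, `topic=="/demo/created"`)

	env := &events.Envelope{
		Timestamp: time.Now().UTC(), // Forward requires a timestamp to be set
		Namespace: "default",
		Topic:     "/demo/created",
		Event:     &types.Any{TypeUrl: "demo.Created", Value: []byte("{}")}, // illustrative payload
	}
	if err := ex.Forward(ctx, env); err != nil {
		log.Fatal(err)
	}

	select {
	case got := <-ch:
		log.Printf("received %s in namespace %s", got.Topic, got.Namespace)
	case err := <-errs:
		log.Fatal(err)
	}
}
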
|
31 src/runtime/vendor/github.com/containerd/containerd/export.go generated vendored
@ -1,31 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/containerd/containerd/images/archive"
|
||||
)
|
||||
|
||||
// Export exports images to a Tar stream.
|
||||
// The tar archive is in OCI format with a Docker compatible manifest
|
||||
// when a single target platform is given.
|
||||
func (c *Client) Export(ctx context.Context, w io.Writer, opts ...archive.ExportOpt) error {
|
||||
return archive.Export(ctx, c.ContentStore(), w, opts...)
|
||||
}
|
33 src/runtime/vendor/github.com/containerd/containerd/filters/adaptor.go generated vendored
@ -1,33 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
// Adaptor specifies the mapping of fieldpaths to a type. For the given field
|
||||
// path, the value and whether it is present should be returned. The mapping of
|
||||
// the fieldpath to a field is deferred to the adaptor implementation, but
|
||||
// should generally follow protobuf field path/mask semantics.
|
||||
type Adaptor interface {
|
||||
Field(fieldpath []string) (value string, present bool)
|
||||
}
|
||||
|
||||
// AdapterFunc allows implementation specific matching of fieldpaths
|
||||
type AdapterFunc func(fieldpath []string) (string, bool)
|
||||
|
||||
// Field returns the field name and true if it exists
|
||||
func (fn AdapterFunc) Field(fieldpath []string) (string, bool) {
|
||||
return fn(fieldpath)
|
||||
}
|
179 src/runtime/vendor/github.com/containerd/containerd/filters/filter.go generated vendored
@ -1,179 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package filters defines a syntax and parser that can be used for the
|
||||
// filtration of items across the containerd API. The core is built on the
|
||||
// concept of protobuf field paths, with quoting. Several operators allow the
|
||||
// user to flexibly select items based on field presence, equality, inequality
|
||||
// and regular expressions. Flexible adaptors support working with any type.
|
||||
//
|
||||
// The syntax is fairly familiar, if you've used container ecosystem
|
||||
// projects. At the core, we base it on the concept of protobuf field
|
||||
// paths, augmenting with the ability to quote portions of the field path
|
||||
// to match arbitrary labels. These "selectors" come in the following
|
||||
// syntax:
|
||||
//
|
||||
// ```
|
||||
// <fieldpath>[<operator><value>]
|
||||
// ```
|
||||
//
|
||||
// A basic example is as follows:
|
||||
//
|
||||
// ```
|
||||
// name==foo
|
||||
// ```
|
||||
//
|
||||
// This would match all objects that have a field `name` with the value
|
||||
// `foo`. If we only want to test if the field is present, we can omit the
|
||||
// operator. This is most useful for matching labels in containerd. The
|
||||
// following will match objects that have the field "labels" and have the
|
||||
// label "foo" defined:
|
||||
//
|
||||
// ```
|
||||
// labels.foo
|
||||
// ```
|
||||
//
|
||||
// We also allow for quoting of parts of the field path to allow matching
|
||||
// of arbitrary items:
|
||||
//
|
||||
// ```
|
||||
// labels."very complex label"==something
|
||||
// ```
|
||||
//
|
||||
// We also define `!=` and `~=` as operators. The `!=` will match all
|
||||
// objects that don't match the value for a field and `~=` will compile the
|
||||
// target value as a regular expression and match the field value against that.
|
||||
//
|
||||
// Selectors can be combined using a comma, such that the resulting
|
||||
// selector will require all selectors are matched for the object to match.
|
||||
// The following example will match objects that are named `foo` and have
|
||||
// the label `bar`:
|
||||
//
|
||||
// ```
|
||||
// name==foo,labels.bar
|
||||
// ```
|
||||
//
|
||||
package filters
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
|
||||
"github.com/containerd/containerd/log"
|
||||
)
|
||||
|
||||
// Filter matches specific resources based on the provided filter
|
||||
type Filter interface {
|
||||
Match(adaptor Adaptor) bool
|
||||
}
|
||||
|
||||
// FilterFunc is a function that handles matching with an adaptor
|
||||
type FilterFunc func(Adaptor) bool
|
||||
|
||||
// Match matches the FilterFunc returning true if the object matches the filter
|
||||
func (fn FilterFunc) Match(adaptor Adaptor) bool {
|
||||
return fn(adaptor)
|
||||
}
|
||||
|
||||
// Always is a filter that always returns true for any type of object
|
||||
var Always FilterFunc = func(adaptor Adaptor) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Any allows multiple filters to be matched against the object
|
||||
type Any []Filter
|
||||
|
||||
// Match returns true if any of the provided filters are true
|
||||
func (m Any) Match(adaptor Adaptor) bool {
|
||||
for _, m := range m {
|
||||
if m.Match(adaptor) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// All allows multiple filters to be matched against the object
|
||||
type All []Filter
|
||||
|
||||
// Match only returns true if all filters match the object
|
||||
func (m All) Match(adaptor Adaptor) bool {
|
||||
for _, m := range m {
|
||||
if !m.Match(adaptor) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
type operator int
|
||||
|
||||
const (
|
||||
operatorPresent = iota
|
||||
operatorEqual
|
||||
operatorNotEqual
|
||||
operatorMatches
|
||||
)
|
||||
|
||||
func (op operator) String() string {
|
||||
switch op {
|
||||
case operatorPresent:
|
||||
return "?"
|
||||
case operatorEqual:
|
||||
return "=="
|
||||
case operatorNotEqual:
|
||||
return "!="
|
||||
case operatorMatches:
|
||||
return "~="
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
type selector struct {
|
||||
fieldpath []string
|
||||
operator operator
|
||||
value string
|
||||
re *regexp.Regexp
|
||||
}
|
||||
|
||||
func (m selector) Match(adaptor Adaptor) bool {
|
||||
value, present := adaptor.Field(m.fieldpath)
|
||||
|
||||
switch m.operator {
|
||||
case operatorPresent:
|
||||
return present
|
||||
case operatorEqual:
|
||||
return present && value == m.value
|
||||
case operatorNotEqual:
|
||||
return value != m.value
|
||||
case operatorMatches:
|
||||
if m.re == nil {
|
||||
r, err := regexp.Compile(m.value)
|
||||
if err != nil {
|
||||
log.L.Errorf("error compiling regexp %q", m.value)
|
||||
return false
|
||||
}
|
||||
|
||||
m.re = r
|
||||
}
|
||||
|
||||
return m.re.MatchString(value)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
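
The Adaptor/Filter contract above is easiest to see with a concrete type. The sketch below exposes two fields of an invented image struct through Field and evaluates a parsed filter against it; the field names and the filter string are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/filters"
)

// image is an invented type; its Field method makes it a filters.Adaptor.
type image struct {
	name   string
	labels map[string]string
}

func (i image) Field(fieldpath []string) (string, bool) {
	if len(fieldpath) == 0 {
		return "", false
	}
	switch fieldpath[0] {
	case "name":
		return i.name, i.name != ""
	case "labels":
		if len(fieldpath) < 2 {
			return "", false
		}
		v, ok := i.labels[fieldpath[1]]
		return v, ok
	}
	return "", false
}

func main() {
	f, err := filters.Parse(`name~="^docker[.]io/",labels.os==linux`)
	if err != nil {
		log.Fatal(err)
	}
	img := image{
		name:   "docker.io/library/busybox:latest",
		labels: map[string]string{"os": "linux"},
	}
	fmt.Println(f.Match(img)) // true: both selectors match
}
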
|
292 src/runtime/vendor/github.com/containerd/containerd/filters/parser.go generated vendored
@ -1,292 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
/*
|
||||
Parse the strings into a filter that may be used with an adaptor.
|
||||
|
||||
The filter is made up of zero or more selectors.
|
||||
|
||||
The format is a comma separated list of expressions, in the form of
|
||||
`<fieldpath><op><value>`, known as selectors. All selectors must match the
|
||||
target object for the filter to be true.
|
||||
|
||||
We define the operators "==" for equality, "!=" for not equal and "~=" for a
|
||||
regular expression. If the operator and value are not present, the matcher will
|
||||
test for the presence of a value, as defined by the target object.
|
||||
|
||||
The formal grammar is as follows:
|
||||
|
||||
selectors := selector ("," selector)*
|
||||
selector := fieldpath (operator value)
|
||||
fieldpath := field ('.' field)*
|
||||
field := quoted | [A-Za-z] [A-Za-z0-9_]+
|
||||
operator := "==" | "!=" | "~="
|
||||
value := quoted | [^\s,]+
|
||||
quoted := <go string syntax>
|
||||
|
||||
*/
|
||||
func Parse(s string) (Filter, error) {
|
||||
// special case empty to match all
|
||||
if s == "" {
|
||||
return Always, nil
|
||||
}
|
||||
|
||||
p := parser{input: s}
|
||||
return p.parse()
|
||||
}
|
||||
|
||||
// ParseAll parses each filter in ss and returns a filter that will return true
|
||||
// if any filter matches the expression.
|
||||
//
|
||||
// If no filters are provided, the filter will match anything.
|
||||
func ParseAll(ss ...string) (Filter, error) {
|
||||
if len(ss) == 0 {
|
||||
return Always, nil
|
||||
}
|
||||
|
||||
var fs []Filter
|
||||
for _, s := range ss {
|
||||
f, err := Parse(s)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
|
||||
}
|
||||
|
||||
fs = append(fs, f)
|
||||
}
|
||||
|
||||
return Any(fs), nil
|
||||
}
|
||||
|
||||
type parser struct {
|
||||
input string
|
||||
scanner scanner
|
||||
}
|
||||
|
||||
func (p *parser) parse() (Filter, error) {
|
||||
p.scanner.init(p.input)
|
||||
|
||||
ss, err := p.selectors()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "filters")
|
||||
}
|
||||
|
||||
return ss, nil
|
||||
}
|
||||
|
||||
func (p *parser) selectors() (Filter, error) {
|
||||
s, err := p.selector()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ss := All{s}
|
||||
|
||||
loop:
|
||||
for {
|
||||
tok := p.scanner.peek()
|
||||
switch tok {
|
||||
case ',':
|
||||
pos, tok, _ := p.scanner.scan()
|
||||
if tok != tokenSeparator {
|
||||
return nil, p.mkerr(pos, "expected a separator")
|
||||
}
|
||||
|
||||
s, err := p.selector()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ss = append(ss, s)
|
||||
case tokenEOF:
|
||||
break loop
|
||||
default:
|
||||
return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
|
||||
}
|
||||
}
|
||||
|
||||
return ss, nil
|
||||
}
|
||||
|
||||
func (p *parser) selector() (selector, error) {
|
||||
fieldpath, err := p.fieldpath()
|
||||
if err != nil {
|
||||
return selector{}, err
|
||||
}
|
||||
|
||||
switch p.scanner.peek() {
|
||||
case ',', tokenSeparator, tokenEOF:
|
||||
return selector{
|
||||
fieldpath: fieldpath,
|
||||
operator: operatorPresent,
|
||||
}, nil
|
||||
}
|
||||
|
||||
op, err := p.operator()
|
||||
if err != nil {
|
||||
return selector{}, err
|
||||
}
|
||||
|
||||
var allowAltQuotes bool
|
||||
if op == operatorMatches {
|
||||
allowAltQuotes = true
|
||||
}
|
||||
|
||||
value, err := p.value(allowAltQuotes)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
return selector{}, io.ErrUnexpectedEOF
|
||||
}
|
||||
return selector{}, err
|
||||
}
|
||||
|
||||
return selector{
|
||||
fieldpath: fieldpath,
|
||||
value: value,
|
||||
operator: op,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *parser) fieldpath() ([]string, error) {
|
||||
f, err := p.field()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs := []string{f}
|
||||
loop:
|
||||
for {
|
||||
tok := p.scanner.peek() // lookahead to consume field separator
|
||||
|
||||
switch tok {
|
||||
case '.':
|
||||
pos, tok, _ := p.scanner.scan() // consume separator
|
||||
if tok != tokenSeparator {
|
||||
return nil, p.mkerr(pos, "expected a field separator (`.`)")
|
||||
}
|
||||
|
||||
f, err := p.field()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs = append(fs, f)
|
||||
default:
|
||||
// let the layer above handle the other bad cases.
|
||||
break loop
|
||||
}
|
||||
}
|
||||
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
func (p *parser) field() (string, error) {
|
||||
pos, tok, s := p.scanner.scan()
|
||||
switch tok {
|
||||
case tokenField:
|
||||
return s, nil
|
||||
case tokenQuoted:
|
||||
return p.unquote(pos, s, false)
|
||||
case tokenIllegal:
|
||||
return "", p.mkerr(pos, p.scanner.err)
|
||||
}
|
||||
|
||||
return "", p.mkerr(pos, "expected field or quoted")
|
||||
}
|
||||
|
||||
func (p *parser) operator() (operator, error) {
|
||||
pos, tok, s := p.scanner.scan()
|
||||
switch tok {
|
||||
case tokenOperator:
|
||||
switch s {
|
||||
case "==":
|
||||
return operatorEqual, nil
|
||||
case "!=":
|
||||
return operatorNotEqual, nil
|
||||
case "~=":
|
||||
return operatorMatches, nil
|
||||
default:
|
||||
return 0, p.mkerr(pos, "unsupported operator %q", s)
|
||||
}
|
||||
case tokenIllegal:
|
||||
return 0, p.mkerr(pos, p.scanner.err)
|
||||
}
|
||||
|
||||
return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
|
||||
}
|
||||
|
||||
func (p *parser) value(allowAltQuotes bool) (string, error) {
|
||||
pos, tok, s := p.scanner.scan()
|
||||
|
||||
switch tok {
|
||||
case tokenValue, tokenField:
|
||||
return s, nil
|
||||
case tokenQuoted:
|
||||
return p.unquote(pos, s, allowAltQuotes)
|
||||
case tokenIllegal:
|
||||
return "", p.mkerr(pos, p.scanner.err)
|
||||
}
|
||||
|
||||
return "", p.mkerr(pos, "expected value or quoted")
|
||||
}
|
||||
|
||||
func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) {
|
||||
if !allowAlts && s[0] != '\'' && s[0] != '"' {
|
||||
return "", p.mkerr(pos, "invalid quote encountered")
|
||||
}
|
||||
|
||||
uq, err := unquote(s)
|
||||
if err != nil {
|
||||
return "", p.mkerr(pos, "unquoting failed: %v", err)
|
||||
}
|
||||
|
||||
return uq, nil
|
||||
}
|
||||
|
||||
type parseError struct {
|
||||
input string
|
||||
pos int
|
||||
msg string
|
||||
}
|
||||
|
||||
func (pe parseError) Error() string {
|
||||
if pe.pos < len(pe.input) {
|
||||
before := pe.input[:pe.pos]
|
||||
location := pe.input[pe.pos : pe.pos+1] // need to handle end
|
||||
after := pe.input[pe.pos+1:]
|
||||
|
||||
return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("[%s]: %v", pe.input, pe.msg)
|
||||
}
|
||||
|
||||
func (p *parser) mkerr(pos int, format string, args ...interface{}) error {
|
||||
return errors.Wrap(parseError{
|
||||
input: p.input,
|
||||
pos: pos,
|
||||
msg: fmt.Sprintf(format, args...),
|
||||
}, "parse error")
|
||||
}
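
One subtlety worth spelling out: selectors inside a single string are ANDed (All), while the separate strings given to ParseAll are ORed (Any). A small sketch with an invented map-backed adaptor illustrates the difference.

package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/filters"
)

// fields is an invented map-backed adaptor for illustration.
type fields map[string]string

func (f fields) Field(fieldpath []string) (string, bool) {
	v, ok := f[fieldpath[0]]
	return v, ok
}

func main() {
	obj := fields{"name": "foo", "runtime": "kata"}

	and, err := filters.Parse("name==foo,runtime==runc")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(and.Match(obj)) // false: every selector in the string must match

	or, err := filters.ParseAll("name==foo,runtime==runc", "runtime==kata")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(or.Match(obj)) // true: the second filter matches on its own
}
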
|
253 src/runtime/vendor/github.com/containerd/containerd/filters/quote.go generated vendored
@ -1,253 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package filters
|
||||
|
||||
import (
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// NOTE(stevvooe): Most of this code in this file is copied from the stdlib
|
||||
// strconv package and modified to be able to handle quoting with `/` and `|`
|
||||
// as delimiters. The copyright is held by the Go authors.
|
||||
|
||||
var errQuoteSyntax = errors.New("quote syntax error")
|
||||
|
||||
// UnquoteChar decodes the first character or byte in the escaped string
|
||||
// or character literal represented by the string s.
|
||||
// It returns four values:
|
||||
//
|
||||
// 1) value, the decoded Unicode code point or byte value;
|
||||
// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
|
||||
// 3) tail, the remainder of the string after the character; and
|
||||
// 4) an error that will be nil if the character is syntactically valid.
|
||||
//
|
||||
// The second argument, quote, specifies the type of literal being parsed
|
||||
// and therefore which escaped quote character is permitted.
|
||||
// If set to a single quote, it permits the sequence \' and disallows unescaped '.
|
||||
// If set to a double quote, it permits \" and disallows unescaped ".
|
||||
// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
|
||||
//
|
||||
// This is from Go strconv package, modified to support `|` and `/` as double
|
||||
// quotes for use with regular expressions.
|
||||
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
|
||||
// easy cases
|
||||
switch c := s[0]; {
|
||||
case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'):
|
||||
err = errQuoteSyntax
|
||||
return
|
||||
case c >= utf8.RuneSelf:
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
return r, true, s[size:], nil
|
||||
case c != '\\':
|
||||
return rune(s[0]), false, s[1:], nil
|
||||
}
|
||||
|
||||
// hard case: c is backslash
|
||||
if len(s) <= 1 {
|
||||
err = errQuoteSyntax
|
||||
return
|
||||
}
|
||||
c := s[1]
|
||||
s = s[2:]
|
||||
|
||||
switch c {
|
||||
case 'a':
|
||||
value = '\a'
|
||||
case 'b':
|
||||
value = '\b'
|
||||
case 'f':
|
||||
value = '\f'
|
||||
case 'n':
|
||||
value = '\n'
|
||||
case 'r':
|
||||
value = '\r'
|
||||
case 't':
|
||||
value = '\t'
|
||||
case 'v':
|
||||
value = '\v'
|
||||
case 'x', 'u', 'U':
|
||||
n := 0
|
||||
switch c {
|
||||
case 'x':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
var v rune
|
||||
if len(s) < n {
|
||||
err = errQuoteSyntax
|
||||
return
|
||||
}
|
||||
for j := 0; j < n; j++ {
|
||||
x, ok := unhex(s[j])
|
||||
if !ok {
|
||||
err = errQuoteSyntax
|
||||
return
|
||||
}
|
||||
v = v<<4 | x
|
||||
}
|
||||
s = s[n:]
|
||||
if c == 'x' {
|
||||
// single-byte string, possibly not UTF-8
|
||||
value = v
|
||||
break
|
||||
}
|
||||
if v > utf8.MaxRune {
|
||||
err = errQuoteSyntax
|
||||
return
|
||||
}
|
||||
value = v
|
||||
multibyte = true
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
v := rune(c) - '0'
|
||||
if len(s) < 2 {
|
||||
err = errQuoteSyntax
|
||||
return
|
||||
}
|
||||
for j := 0; j < 2; j++ { // one digit already; two more
|
||||
x := rune(s[j]) - '0'
|
||||
if x < 0 || x > 7 {
|
||||
err = errQuoteSyntax
|
||||
return
|
||||
}
|
||||
v = (v << 3) | x
|
||||
}
|
||||
s = s[2:]
|
||||
if v > 255 {
|
||||
err = errQuoteSyntax
|
||||
return
|
||||
}
|
||||
value = v
|
||||
case '\\':
|
||||
value = '\\'
|
||||
case '\'', '"', '|', '/':
|
||||
if c != quote {
|
||||
err = errQuoteSyntax
|
||||
return
|
||||
}
|
||||
value = rune(c)
|
||||
default:
|
||||
err = errQuoteSyntax
|
||||
return
|
||||
}
|
||||
tail = s
|
||||
return
|
||||
}
|
||||
|
||||
// unquote interprets s as a single-quoted, double-quoted,
|
||||
// or backquoted Go string literal, returning the string value
|
||||
// that s quotes. (If s is single-quoted, it would be a Go
|
||||
// character literal; Unquote returns the corresponding
|
||||
// one-character string.)
|
||||
//
|
||||
// This is modified from the standard library to support `|` and `/` as quote
|
||||
// characters for use with regular expressions.
|
||||
func unquote(s string) (string, error) {
|
||||
n := len(s)
|
||||
if n < 2 {
|
||||
return "", errQuoteSyntax
|
||||
}
|
||||
quote := s[0]
|
||||
if quote != s[n-1] {
|
||||
return "", errQuoteSyntax
|
||||
}
|
||||
s = s[1 : n-1]
|
||||
|
||||
if quote == '`' {
|
||||
if contains(s, '`') {
|
||||
return "", errQuoteSyntax
|
||||
}
|
||||
if contains(s, '\r') {
|
||||
// -1 because we know there is at least one \r to remove.
|
||||
buf := make([]byte, 0, len(s)-1)
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] != '\r' {
|
||||
buf = append(buf, s[i])
|
||||
}
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
if quote != '"' && quote != '\'' && quote != '|' && quote != '/' {
|
||||
return "", errQuoteSyntax
|
||||
}
|
||||
if contains(s, '\n') {
|
||||
return "", errQuoteSyntax
|
||||
}
|
||||
|
||||
// Is it trivial? Avoid allocation.
|
||||
if !contains(s, '\\') && !contains(s, quote) {
|
||||
switch quote {
|
||||
case '"', '/', '|': // pipe and slash are treated like double quote
|
||||
return s, nil
|
||||
case '\'':
|
||||
r, size := utf8.DecodeRuneInString(s)
|
||||
if size == len(s) && (r != utf8.RuneError || size != 1) {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var runeTmp [utf8.UTFMax]byte
|
||||
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
|
||||
for len(s) > 0 {
|
||||
c, multibyte, ss, err := unquoteChar(s, quote)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
s = ss
|
||||
if c < utf8.RuneSelf || !multibyte {
|
||||
buf = append(buf, byte(c))
|
||||
} else {
|
||||
n := utf8.EncodeRune(runeTmp[:], c)
|
||||
buf = append(buf, runeTmp[:n]...)
|
||||
}
|
||||
if quote == '\'' && len(s) != 0 {
|
||||
// single-quoted must be single character
|
||||
return "", errQuoteSyntax
|
||||
}
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// contains reports whether the string contains the byte c.
|
||||
func contains(s string, c byte) bool {
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] == c {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func unhex(b byte) (v rune, ok bool) {
|
||||
c := rune(b)
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
return c - '0', true
|
||||
case 'a' <= c && c <= 'f':
|
||||
return c - 'a' + 10, true
|
||||
case 'A' <= c && c <= 'F':
|
||||
return c - 'A' + 10, true
|
||||
}
|
||||
return
|
||||
}
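
The extra '|' and '/' quote characters exist so that regular-expression values after ~= can be written without escaping quote characters inside the pattern. A hedged sketch using a slash-quoted pattern against an invented label adaptor; the label key and value are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/filters"
)

// podLabels is an invented adaptor exposing labels.<key> fieldpaths.
type podLabels map[string]string

func (l podLabels) Field(fieldpath []string) (string, bool) {
	if len(fieldpath) == 2 && fieldpath[0] == "labels" {
		v, ok := l[fieldpath[1]]
		return v, ok
	}
	return "", false
}

func main() {
	// The regexp value is slash-quoted, so no escaping is needed inside it.
	f, err := filters.Parse(`labels."io.kubernetes.pod.name"~=/^nginx-/`)
	if err != nil {
		log.Fatal(err)
	}
	pod := podLabels{"io.kubernetes.pod.name": "nginx-7c5ddbdf54-abcde"}
	fmt.Println(f.Match(pod)) // true
}
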