Compare commits

...

2 Commits

Author               SHA1        Message                                      Date
Ettore Di Giacinto   0f7ba8ce05  Update vendor                                2021-10-29 16:12:26 +02:00
Ettore Di Giacinto   eac621aaf7  Validate container image hashes with mtree   2021-10-29 16:12:26 +02:00
70 changed files with 4807 additions and 3422 deletions
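The mtree validation introduced by the second commit works on manifests rather than on a single digest: an mtree "directory hierarchy" records, for every entry in an archive, a set of keywords (here the entry type and its sha512 digest), and two manifests can then be diffed structurally. A minimal sketch of that flow, assuming the go-mtree v0.5.0 API used in the diffs below (the file name image.tar is illustrative):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	mtree "github.com/vbatts/go-mtree"
)

// Same keyword set as the checksum.go diff below: entry type plus sha512 digest.
var keywords = []mtree.Keyword{"type", "sha512digest"}

// tarManifest drains a tar stream through go-mtree's TarStreamer and
// returns the resulting directory hierarchy (the manifest).
func tarManifest(r io.Reader) (*mtree.DirectoryHierarchy, error) {
	ts := mtree.NewTarStreamer(r, []mtree.ExcludeFunc{}, keywords)
	if _, err := io.Copy(ioutil.Discard, ts); err != nil && err != io.EOF {
		return nil, err
	}
	if err := ts.Close(); err != nil {
		return nil, err
	}
	return ts.Hierarchy()
}

func main() {
	f, err := os.Open("image.tar") // illustrative input archive
	if err != nil {
		panic(err)
	}
	defer f.Close()

	dh, err := tarManifest(f)
	if err != nil {
		panic(err)
	}

	// Serialize the manifest; two serialized specs can later be re-parsed
	// with mtree.ParseSpec and diffed with mtree.Compare.
	var buf bytes.Buffer
	if _, err := dh.WriteTo(&buf); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}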

go.mod

@@ -59,6 +59,7 @@ require (
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.8.1
github.com/theupdateframework/notary v0.7.0
+github.com/vbatts/go-mtree v0.5.0
go.etcd.io/bbolt v1.3.5
go.uber.org/multierr v1.6.0
go.uber.org/zap v1.17.0

go.sum

@@ -390,6 +390,7 @@ github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
@@ -745,8 +746,11 @@ github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0 h1:c1oKPqtIulBHw
github.com/marcsauter/single v0.0.0-20181104081128-f8bf46f26ec0/go.mod h1:uUA07IN7rYmbr5YlZM5nDVLyoxiqqpprFlXBrjqI24A=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-oci8 v0.0.7/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
@@ -1001,6 +1005,7 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@@ -1079,6 +1084,8 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vbatts/go-mtree v0.5.0 h1:dM+5XZdqH0j9CSZeerhoN/tAySdwnmevaZHO1XGW2Vc=
+github.com/vbatts/go-mtree v0.5.0/go.mod h1:7JbaNHyBMng+RP8C3Q4E+4Ca8JnGQA2R/MB+jb4tSOk=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
@@ -1311,6 +1318,7 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

pkg/api/core/types/artifact/artifact.go

@@ -104,13 +104,13 @@ func NewPackageArtifactFromYaml(data []byte) (*PackageArtifact, error) {
	return p, err
}

-func (a *PackageArtifact) Hash() error {
-	return a.Checksums.Generate(a)
+func (a *PackageArtifact) Hash(t ...HashImplementation) error {
+	return a.Checksums.Generate(a, t...)
}

-func (a *PackageArtifact) Verify() error {
+func (a *PackageArtifact) Verify(t ...HashImplementation) error {
	sum := Checksums{}
-	if err := sum.Generate(a); err != nil {
+	if err := sum.Generate(a, t...); err != nil {
		return err
	}
@@ -123,7 +123,7 @@ func (a *PackageArtifact) Verify() error {
func (a *PackageArtifact) WriteYAML(dst string) error {
	// First compute the checksum of the artifact. When we write the YAML we want to write up-to-date information.
-	err := a.Hash()
+	err := a.Hash(TarHashing...)
	if err != nil {
		return errors.Wrap(err, "Failed generating checksums for artifact")
	}

pkg/api/core/types/artifact/checksum.go

@@ -19,23 +19,46 @@ import (
//"strconv"
"bytes"
"crypto/sha256"
"encoding/base64"
"fmt"
"hash"
"io"
"io/ioutil"
"os"
"sort"
mtree "github.com/vbatts/go-mtree"
// . "github.com/mudler/luet/pkg/logger"
containerdCompression "github.com/containerd/containerd/archive/compression"
"github.com/pkg/errors"
)
type HashImplementation string
const (
// SHA256 Implementation
SHA256 HashImplementation = "sha256"
// MTREE Implementation
MTREE HashImplementation = "mtree"
)
// FileHashing is the hashing set reserved to files
var FileHashing = []HashImplementation{SHA256}
// TarHashing is the hashing set reserved to archives
var TarHashing = []HashImplementation{SHA256, MTREE}
// default set
var defaultHashing = []HashImplementation{SHA256, MTREE}
var mtreeKeywords []mtree.Keyword = []mtree.Keyword{
"type",
"sha512digest",
}
type Checksums map[string]string
type HashOptions struct {
@@ -55,37 +78,137 @@ func (c Checksums) List() (res [][]string) {
	return
}

+func (c Checksums) Only(t ...HashImplementation) Checksums {
+	newc := Checksums{}
+	for k, v := range c {
+		if Hashes(t).Exist(HashImplementation(k)) {
+			newc[k] = v
+		}
+	}
+	return newc
+}
+
+type Hashes []HashImplementation
+
+func (h Hashes) Exist(t HashImplementation) bool {
+	for _, tt := range h {
+		if tt == t {
+			return true
+		}
+	}
+	return false
+}

// Generate generates all Checksums supported for the artifact
-func (c *Checksums) Generate(a *PackageArtifact) error {
-	return c.generateSHA256(a)
+func (c *Checksums) Generate(a *PackageArtifact, t ...HashImplementation) (err error) {
+	f, err := os.Open(a.Path)
+	if err != nil {
+		return err
+	}
+	if len(t) == 0 {
+		t = defaultHashing
+	}
+	for _, h := range t {
+		sum, err := h.Sum(f)
+		if err != nil {
+			return err
+		}
+		(*c)[string(h)] = sum
+	}
+	return
}

func (c Checksums) Compare(d Checksums) error {
	for t, sum := range d {
-		if v, ok := c[t]; ok && v != sum {
-			return errors.New("Checksum mismatch")
+		if t == string(MTREE) {
+			sum2, exists := c[t]
+			if !exists {
+				continue
+			}
+			b1, err := base64.RawStdEncoding.DecodeString(sum)
+			if err != nil {
+				return err
+			}
+			b2, err := base64.RawStdEncoding.DecodeString(sum2)
+			if err != nil {
+				return err
+			}
+			spec, err := mtree.ParseSpec(bytes.NewReader(b1))
+			if err != nil {
+				return err
+			}
+			spec2, err := mtree.ParseSpec(bytes.NewReader(b2))
+			if err != nil {
+				return err
+			}
+			res, err := mtree.Compare(spec, spec2, mtreeKeywords)
+			if err != nil {
+				return err
+			}
+			if len(res) != 0 {
+				return errors.New("MTREE mismatch")
+			}
+		} else {
+			if v, ok := c[t]; ok && v != sum {
+				return errors.New("Checksum mismatch")
+			}
		}
	}
	return nil
}

-func (c *Checksums) generateSHA256(a *PackageArtifact) error {
-	return c.generateSum(a, HashOptions{Hasher: sha256.New(), Type: SHA256})
+func (t HashImplementation) Sum(r io.ReadCloser) (sum string, err error) {
+	// defer r.Close()
+	switch t {
+	case SHA256:
+		hasher := sha256.New()
+		_, err = io.Copy(hasher, r)
+		if err != nil {
+			return
+		}
+		sum = fmt.Sprintf("%x", hasher.Sum(nil))
+	case MTREE:
+		sum, err = mtreeSum(r)
+		sum = base64.RawStdEncoding.EncodeToString([]byte(sum))
+		return
+	}
+	return
}

-func (c *Checksums) generateSum(a *PackageArtifact, opts HashOptions) error {
-	f, err := os.Open(a.Path)
+func mtreeSum(r io.ReadCloser) (string, error) {
+	decompressed, err := containerdCompression.DecompressStream(r)
	if err != nil {
-		return err
-	}
-	defer f.Close()
-	if _, err := io.Copy(opts.Hasher, f); err != nil {
-		return err
+		return "", errors.Wrap(err, "Cannot open stream")
	}
-	sum := fmt.Sprintf("%x", opts.Hasher.Sum(nil))
+	ts := mtree.NewTarStreamer(decompressed, []mtree.ExcludeFunc{}, mtreeKeywords)
+	if _, err := io.Copy(ioutil.Discard, ts); err != nil && err != io.EOF {
+		return "", err
+	}
+	if err := ts.Close(); err != nil {
+		return "", err
+	}
-	(*c)[string(opts.Type)] = sum
-	return nil
+	stateDh, err := ts.Hierarchy()
+	if err != nil {
+		return "", err
+	}
+	buf := bytes.NewBufferString("")
+	_, err = stateDh.WriteTo(buf)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
}
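Taken together, Generate, Sum, and Compare let a Checksums map carry both a plain sha256 digest and a base64-encoded mtree manifest, with Compare diffing the manifests entry by entry instead of comparing opaque strings. A hedged usage sketch of the new API (the tar paths are hypothetical; the import path matches the test file below):

package main

import (
	"fmt"

	artifact "github.com/mudler/luet/pkg/api/core/types/artifact"
)

func main() {
	a := artifact.NewPackageArtifact("/tmp/file.tar")  // hypothetical archive
	b := artifact.NewPackageArtifact("/tmp/file2.tar") // hypothetical archive

	sumA, sumB := artifact.Checksums{}, artifact.Checksums{}
	if err := sumA.Generate(a, artifact.MTREE); err != nil {
		panic(err)
	}
	if err := sumB.Generate(b, artifact.MTREE); err != nil {
		panic(err)
	}

	// Compare decodes both base64-encoded mtree specs, re-parses them, and
	// diffs them over the configured keywords; any delta is an error.
	if err := sumA.Compare(sumB); err != nil {
		fmt.Println("trees differ:", err)
	}
}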

pkg/api/core/types/artifact/checksum_test.go

@@ -18,6 +18,7 @@ package artifact_test
import (
	"io/ioutil"
	"os"
+	"path/filepath"

	. "github.com/mudler/luet/pkg/api/core/types/artifact"
@@ -40,22 +41,57 @@ var _ = Describe("Checksum", func() {
		Expect(len(definitionsum)).To(Equal(0))
		Expect(len(definitionsum2)).To(Equal(0))

-		err = buildsum.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/build.yaml"))
+		err = buildsum.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/build.yaml"), SHA256)
		Expect(err).ToNot(HaveOccurred())

-		err = definitionsum.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/definition.yaml"))
+		err = definitionsum.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/definition.yaml"), SHA256)
		Expect(err).ToNot(HaveOccurred())

-		err = definitionsum2.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/definition.yaml"))
+		err = definitionsum2.Generate(NewPackageArtifact("../../../../../tests/fixtures/layers/alpine/definition.yaml"), SHA256)
		Expect(err).ToNot(HaveOccurred())

		Expect(len(buildsum)).To(Equal(1))
		Expect(len(definitionsum)).To(Equal(1))
		Expect(len(definitionsum2)).To(Equal(1))

		// Expect(buildsum.List()).To(Equal(""))
		Expect(definitionsum.Compare(buildsum)).To(HaveOccurred())
		Expect(definitionsum.Compare(definitionsum2)).ToNot(HaveOccurred())
	})

+	It("Compares successfully", func() {
+		tmpdir, err := ioutil.TempDir("", "tree")
+		Expect(err).ToNot(HaveOccurred())
+		defer os.RemoveAll(tmpdir) // clean up
+
+		buildsum := Checksums{}
+		definitionsum := Checksums{}
+		definitionsum2 := Checksums{}
+
+		Expect(len(buildsum)).To(Equal(0))
+		Expect(len(definitionsum)).To(Equal(0))
+		Expect(len(definitionsum2)).To(Equal(0))
+
+		art := NewPackageArtifact(filepath.Join(tmpdir, "file.tar"))
+		art.Compress("../../../../../tests/fixtures/layers/alpine/", 1)
+
+		art2 := NewPackageArtifact(filepath.Join(tmpdir, "file2.tar"))
+		art2.Compress("../../../../../tests/fixtures/layers/", 1)
+
+		err = buildsum.Generate(art, MTREE)
+		Expect(err).ToNot(HaveOccurred())
+
+		err = definitionsum.Generate(art2, MTREE)
+		Expect(err).ToNot(HaveOccurred())
+
+		Expect(len(buildsum)).To(Equal(1))
+		Expect(len(definitionsum)).To(Equal(1))
+
+		// Expect(buildsum.List()).To(Equal(""))
+		Expect(definitionsum.Compare(definitionsum)).ToNot(HaveOccurred())
+		Expect(definitionsum.Compare(buildsum)).To(HaveOccurred())
+	})
	})
})

pkg/installer/client/docker.go

@@ -77,15 +77,15 @@ func (c *DockerClient) DownloadArtifact(a *artifact.PackageArtifact) (*artifact.
	// is done in such cases (see repository.go)
	// We discard checksums that are already checked during pull and unpack by containerd
-	resultingArtifact.Checksums = artifact.Checksums{}
+	resultingArtifact.Checksums = resultingArtifact.Checksums.Only(artifact.MTREE)

	// Check if file is already in cache
	fileName, err := c.Cache.Get(resultingArtifact)
	if err == nil {
-		resultingArtifact = a
+		resultingArtifact = a.ShallowCopy()
		resultingArtifact.Path = fileName
-		resultingArtifact.Checksums = artifact.Checksums{}
+		resultingArtifact.Checksums = resultingArtifact.Checksums.Only(artifact.MTREE)
		c.context.Debug("Use artifact", artifactName, "from cache.")
	} else {
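The hunk above keeps only the mtree entry on the cache path because containerd already verifies layer digests during pull and unpack; Only simply filters a checksum set down to the named implementations. A small illustration (the values are made up):

package main

import (
	"fmt"

	artifact "github.com/mudler/luet/pkg/api/core/types/artifact"
)

func main() {
	// Made-up values, for illustration only.
	sums := artifact.Checksums{
		"sha256": "not-a-real-digest",
		"mtree":  "not-a-real-manifest",
	}

	// Keeps just the "mtree" entry; the sha256 entry is dropped.
	fmt.Println(sums.Only(artifact.MTREE))
}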

pkg/installer/repository.go

@@ -652,7 +652,7 @@ func (r *LuetSystemRepository) AddRepositoryFile(src, fileKey, repositoryRoot st
		return a, errors.Wrap(err, "Error met while creating package archive")
	}

-	err = a.Hash()
+	err = a.Hash(artifact.FileHashing...)
	if err != nil {
		return a, errors.Wrap(err, "Failed generating checksums for tree")
	}
@@ -784,7 +784,7 @@ func (r *LuetSystemRepository) getRepoFile(c Client, key string) (*artifact.Pack
	treeFileArtifact.Checksums = treeFile.GetChecksums()
	treeFileArtifact.CompressionType = treeFile.GetCompressionType()

-	err = treeFileArtifact.Verify()
+	err = treeFileArtifact.Verify(artifact.FileHashing...)
	if err != nil {
		return nil, errors.Wrap(err, "file integrity check failure")
	}
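Repository metadata, by contrast, is hashed and verified with the SHA256-only FileHashing set, since those files are not container archives; archives written through WriteYAML get the full TarHashing set. A sketch of that split, under the same assumptions as above (paths are hypothetical):

package main

import (
	artifact "github.com/mudler/luet/pkg/api/core/types/artifact"
)

func main() {
	// Plain repository file: sha256 only.
	meta := artifact.NewPackageArtifact("/tmp/repo/repository.yaml")
	if err := meta.Hash(artifact.FileHashing...); err != nil {
		panic(err)
	}
	if err := meta.Verify(artifact.FileHashing...); err != nil {
		panic(err)
	}

	// Package archive: sha256 plus the mtree manifest.
	pkg := artifact.NewPackageArtifact("/tmp/repo/package.tar.gz")
	if err := pkg.Hash(artifact.TarHashing...); err != nil {
		panic(err)
	}
}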

vendor/github.com/docker/docker/pkg/archive/README.md

@@ -1 +0,0 @@
This code provides helper functions for dealing with archive files.

vendor/github.com/docker/docker/pkg/archive/archive.go

File diff suppressed because it is too large

vendor/github.com/docker/docker/pkg/archive/archive_linux.go

@@ -1,100 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/system"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) {
if format == OverlayWhiteoutFormat {
if inUserNS {
return nil, errors.New("specifying OverlayWhiteoutFormat is not allowed in userns")
}
return overlayWhiteoutConverter{}, nil
}
return nil, nil
}
type overlayWhiteoutConverter struct {
}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
dir, filename := filepath.Split(hdr.Name)
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
hdr.Mode = 0600
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
if fi.Mode()&os.ModeDir != 0 {
// convert opaque dirs to AUFS format by writing an empty file with the prefix
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
if err != nil {
return nil, err
}
if len(opaque) == 1 && opaque[0] == 'y' {
if hdr.Xattrs != nil {
delete(hdr.Xattrs, "trusted.overlay.opaque")
}
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
wo = &tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
}
}
return
}
func (c overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
if base == WhiteoutOpaqueDir {
err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
if err != nil {
return false, errors.Wrapf(err, "setxattr(%q, trusted.overlay.opaque=y)", dir)
}
// don't write the file itself
return false, err
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
return false, errors.Wrapf(err, "failed to mknod(%q, S_IFCHR, 0)", originalPath)
}
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
return true, nil
}

vendor/github.com/docker/docker/pkg/archive/archive_other.go

@@ -1,7 +0,0 @@
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
func getWhiteoutConverter(format WhiteoutFormat, inUserNS bool) (tarWhiteoutConverter, error) {
return nil, nil
}

vendor/github.com/docker/docker/pkg/archive/archive_unix.go

@@ -1,115 +0,0 @@
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"errors"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/containerd/containerd/sys"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return srcPath
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
return strings.TrimSuffix(srcPath, string(filepath.Separator)) + string(filepath.Separator) + include
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) string {
return p // already unix-style
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
// Currently go does not fill in the major/minors
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
}
}
return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if ok {
inode = s.Ino
}
return
}
func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return idtools.Identity{}, errors.New("cannot convert stat value to syscall.Stat_t")
}
return idtools.Identity{UID: int(s.Uid), GID: int(s.Gid)}, nil
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= unix.S_IFBLK
case tar.TypeChar:
mode |= unix.S_IFCHR
case tar.TypeFifo:
mode |= unix.S_IFIFO
}
err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
if errors.Is(err, syscall.EPERM) && sys.RunningInUserNS() {
// In most cases, cannot create a device if running in user namespace
err = nil
}
return err
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
return nil
}

vendor/github.com/docker/docker/pkg/archive/archive_windows.go

@@ -1,67 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"os"
"path/filepath"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/longpath"
)
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
return longpath.AddPrefix(srcPath)
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) string {
return filepath.ToSlash(p)
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
// perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
permPart := perm & os.ModePerm
noPermPart := perm &^ os.ModePerm
// Add the x bit: make everything +x from windows
permPart |= 0111
permPart &= 0755
return noPermPart | permPart
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
// do nothing. no notion of Rdev, Nlink in stat on Windows
return
}
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
// do nothing. no notion of Inode in stat on Windows
return
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getFileUIDGID(stat interface{}) (idtools.Identity, error) {
// no notion of file ownership mapping yet on Windows
return idtools.Identity{UID: 0, GID: 0}, nil
}

vendor/github.com/docker/docker/pkg/archive/changes.go

@@ -1,445 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// ChangeType represents the change type.
type ChangeType int
const (
// ChangeModify represents the modify operation.
ChangeModify = iota
// ChangeAdd represents the add operation.
ChangeAdd
// ChangeDelete represents the delete operation.
ChangeDelete
)
func (c ChangeType) String() string {
switch c {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
}
return ""
}
// Change represents a change; it wraps the change type and path.
// It describes changes of the files in the path with respect to the
// parent layers. The change could be modify, add, or delete.
// This is used for layer diff.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int { return len(c) }
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// Gnu tar doesn't have sub-second mtime precision. The go tar
// writer (1.10+) does when using PAX format, but we round times to seconds
// to ensure archives have the same hashes for backwards compatibility.
// See https://github.com/moby/moby/pull/35739/commits/fb170206ba12752214630b269a40ac7be6115ed4.
//
// Non-sub-second is problematic when we apply changes via tar
// files. We handle this by comparing for exact times, *or* same
// second count and either a or b having exactly 0 nanoseconds
func sameFsTime(a, b time.Time) bool {
return a.Equal(b) ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
func sameFsTimeSpec(a, b syscall.Timespec) bool {
return a.Sec == b.Sec &&
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
)
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
if sc != nil {
if skip, err := sc(path); skip {
return err
}
}
change := Change{
Path: path,
}
deletedFile, err := dc(rw, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
if deletedFile != "" {
change.Path = deletedFile
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
}
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
parent := filepath.Dir(path)
if _, ok := changedDirs[parent]; !ok && parent != "/" {
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
changedDirs[parent] = struct{}{}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
}
// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
stat *system.StatT
children map[string]*FileInfo
capability []byte
added bool
}
// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
parent := info
if path == string(os.PathSeparator) {
return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
for _, elem := range pathElements {
if elem != "" {
child := parent.children[elem]
if child == nil {
return nil
}
parent = child
}
}
return parent
}
func (info *FileInfo) path() string {
if info.parent == nil {
// As this runs on the daemon side, file paths are OS specific.
return string(os.PathSeparator)
}
return filepath.Join(info.parent.path(), info.name)
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
sizeAtEntry := len(*changes)
if oldInfo == nil {
// add
change := Change{
Path: info.path(),
Kind: ChangeAdd,
}
*changes = append(*changes, change)
info.added = true
}
// We make a copy so we can modify it to detect additions
// also, we only recurse on the old dir if the new info is a directory
// otherwise any previous delete/change is considered recursive
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
}
for name, newChild := range info.children {
oldChild := oldChildren[name]
if oldChild != nil {
// change?
oldStat := oldChild.stat
newStat := newChild.stat
// Note: We can't compare inode or ctime or blocksize here, because these change
// when copying a file into a container. However, that is not generally a problem
// because any content change will change mtime, and any status change should
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
if statDifferent(oldStat, newStat) ||
!bytes.Equal(oldChild.capability, newChild.capability) {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
}
*changes = append(*changes, change)
newChild.added = true
}
// Remove from copy so we can detect deletions
delete(oldChildren, name)
}
newChild.addChanges(oldChild, changes)
}
for _, oldChild := range oldChildren {
// delete
change := Change{
Path: oldChild.path(),
Kind: ChangeDelete,
}
*changes = append(*changes, change)
}
// If there were changes inside this directory, we need to add it, even if the directory
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
// As this runs on the daemon side, file paths are OS specific.
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
change := Change{
Path: info.path(),
Kind: ChangeModify,
}
// Let's insert the directory entry before the recently added entries located inside this dir
*changes = append(*changes, change) // just to resize the slice, will be overwritten
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
}
// Changes adds changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
info.addChanges(oldInfo, &changes)
return changes
}
func newRootFileInfo() *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
root := &FileInfo{
name: string(os.PathSeparator),
children: make(map[string]*FileInfo),
}
return root
}
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
var (
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
emptyDir, err := ioutil.TempDir("", "empty")
if err != nil {
return nil, err
}
defer os.Remove(emptyDir)
oldDir = emptyDir
}
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
if err != nil {
return nil, err
}
return newRoot.Changes(oldRoot), nil
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
var (
size int64
sf = make(map[uint64]struct{})
)
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, err := os.Lstat(file)
if err != nil {
logrus.Errorf("Can not stat %q: %s", file, err)
continue
}
if fileInfo != nil && !fileInfo.IsDir() {
if hasHardlinks(fileInfo) {
inode := getIno(fileInfo)
if _, ok := sf[inode]; !ok {
size += fileInfo.Size()
sf[inode] = struct{}{}
}
} else {
size += fileInfo.Size()
}
}
}
}
return size
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
reader, writer := io.Pipe()
go func() {
ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
sort.Sort(changesByPath(changes))
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: timestamp,
AccessTime: timestamp,
ChangeTime: timestamp,
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
logrus.Debugf("Can't write whiteout header: %s", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", path, err)
}
}
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close layer: %s", err)
}
if err := writer.Close(); err != nil {
logrus.Debugf("failed close Changes writer: %s", err)
}
}()
return reader, nil
}
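For reference, the whiteout handling in this removed file follows the AUFS naming convention: deleting /a/b in a layer is recorded as an empty tar entry named a/.wh.b, which ExportChanges above emits for every ChangeDelete. A minimal sketch of the naming scheme (WhiteoutPrefix mirrors this package's constant):

package main

import (
	"fmt"
	"path/filepath"
)

// WhiteoutPrefix mirrors the constant defined by the archive package.
const WhiteoutPrefix = ".wh."

// whiteoutName returns the tar entry name that marks path as deleted.
func whiteoutName(path string) string {
	dir, base := filepath.Split(path)
	return filepath.Join(dir, WhiteoutPrefix+base)
}

func main() {
	fmt.Println(whiteoutName("/a/b")) // prints /a/.wh.b
}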

vendor/github.com/docker/docker/pkg/archive/changes_linux.go

@@ -1,286 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"bytes"
"fmt"
"os"
"path/filepath"
"sort"
"syscall"
"unsafe"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
dir1 string
dir2 string
root1 *FileInfo
root2 *FileInfo
}
// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
for generating a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
w := &walker{
dir1: dir1,
dir2: dir2,
root1: newRootFileInfo(),
root2: newRootFileInfo(),
}
i1, err := os.Lstat(w.dir1)
if err != nil {
return nil, nil, err
}
i2, err := os.Lstat(w.dir2)
if err != nil {
return nil, nil, err
}
if err := w.walk("/", i1, i2); err != nil {
return nil, nil, err
}
return w.root1, w.root2, nil
}
// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
if fi == nil {
return nil
}
parent := root.LookUp(filepath.Dir(path))
if parent == nil {
return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
}
info := &FileInfo{
name: filepath.Base(path),
children: make(map[string]*FileInfo),
parent: parent,
}
cpath := filepath.Join(dir, path)
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
if err != nil {
return err
}
info.stat = stat
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
parent.children[info.name] = info
return nil
}
// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
// Register these nodes with the return trees, unless we're still at the
// (already-created) roots:
if path != "/" {
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
return err
}
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
return err
}
}
is1Dir := i1 != nil && i1.IsDir()
is2Dir := i2 != nil && i2.IsDir()
sameDevice := false
if i1 != nil && i2 != nil {
si1 := i1.Sys().(*syscall.Stat_t)
si2 := i2.Sys().(*syscall.Stat_t)
if si1.Dev == si2.Dev {
sameDevice = true
}
}
// If these files are both non-existent, or leaves (non-dirs), we are done.
if !is1Dir && !is2Dir {
return nil
}
// Fetch the names of all the files contained in both directories being walked:
var names1, names2 []nameIno
if is1Dir {
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
if err != nil {
return err
}
}
if is2Dir {
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
if err != nil {
return err
}
}
// We have lists of the files contained in both parallel directories, sorted
// in the same order. Walk them in parallel, generating a unique merged list
// of all items present in either or both directories.
var names []string
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
ni1 := names1[ix1]
ni2 := names2[ix2]
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
case -1: // ni1 < ni2 -- advance ni1
// we will not encounter ni1 in names2
names = append(names, ni1.name)
ix1++
case 0: // ni1 == ni2
if ni1.ino != ni2.ino || !sameDevice {
names = append(names, ni1.name)
}
ix1++
ix2++
case 1: // ni1 > ni2 -- advance ni2
// we will not encounter ni2 in names1
names = append(names, ni2.name)
ix2++
}
}
for ix1 < len(names1) {
names = append(names, names1[ix1].name)
ix1++
}
for ix2 < len(names2) {
names = append(names, names2[ix2].name)
ix2++
}
// For each of the names present in either or both of the directories being
// iterated, stat the name under each root, and recurse the pair of them:
for _, name := range names {
fname := filepath.Join(path, name)
var cInfo1, cInfo2 os.FileInfo
if is1Dir {
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if is2Dir {
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
return err
}
}
return nil
}
// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
name string
ino uint64
}
type nameInoSlice []nameIno
func (s nameInoSlice) Len() int { return len(s) }
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
var (
size = 100
buf = make([]byte, 4096)
nbuf int
bufp int
nb int
)
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
defer f.Close()
names = make([]nameIno, 0, size) // Empty with room to grow.
for {
// Refill the buffer if necessary
if bufp >= nbuf {
bufp = 0
nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
if nbuf < 0 {
nbuf = 0
}
if err != nil {
return nil, os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
break // EOF
}
}
// Drain the buffer
nb, names = parseDirent(buf[bufp:nbuf], names)
bufp += nb
}
sl := nameInoSlice(names)
sort.Sort(sl)
return sl, nil
}
// parseDirent is a minor modification of unix.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
origlen := len(buf)
for len(buf) > 0 {
dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
buf = buf[dirent.Reclen:]
if dirent.Ino == 0 { // File absent in directory.
continue
}
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
var name = string(bytes[0:clen(bytes[:])])
if name == "." || name == ".." { // Useless names
continue
}
names = append(names, nameIno{name, dirent.Ino})
}
return origlen - len(buf), names
}
func clen(n []byte) int {
for i := 0; i < len(n); i++ {
if n[i] == 0 {
return i
}
}
return len(n)
}

vendor/github.com/docker/docker/pkg/archive/changes_other.go

@@ -1,97 +0,0 @@
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/system"
)
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
var (
oldRoot, newRoot *FileInfo
err1, err2 error
errs = make(chan error, 2)
)
go func() {
oldRoot, err1 = collectFileInfo(oldDir)
errs <- err1
}()
go func() {
newRoot, err2 = collectFileInfo(newDir)
errs <- err2
}()
// block until both routines have returned
for i := 0; i < 2; i++ {
if err := <-errs; err != nil {
return nil, nil, err
}
}
return oldRoot, newRoot, nil
}
func collectFileInfo(sourceDir string) (*FileInfo, error) {
root := newRootFileInfo()
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
relPath = filepath.Join(string(os.PathSeparator), relPath)
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
// Temporary workaround. If the returned path starts with two backslashes,
// trim it down to a single backslash. Only relevant on Windows.
if runtime.GOOS == "windows" {
if strings.HasPrefix(relPath, `\\`) {
relPath = relPath[1:]
}
}
if relPath == string(os.PathSeparator) {
return nil
}
parent := root.LookUp(filepath.Dir(relPath))
if parent == nil {
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
}
info := &FileInfo{
name: filepath.Base(relPath),
children: make(map[string]*FileInfo),
parent: parent,
}
s, err := system.Lstat(path)
if err != nil {
return err
}
info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
return nil
})
if err != nil {
return nil, err
}
return root, nil
}

vendor/github.com/docker/docker/pkg/archive/changes_unix.go

@@ -1,43 +0,0 @@
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
import (
"os"
"syscall"
"github.com/docker/docker/pkg/system"
"golang.org/x/sys/unix"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, it's not a good measure of change
if oldStat.Mode() != newStat.Mode() ||
oldStat.UID() != newStat.UID() ||
oldStat.GID() != newStat.GID() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size or modification time for dirs, it's not a good
// measure of change. See https://github.com/moby/moby/issues/9874
// for a description of the issue with modification time, and
// https://github.com/moby/moby/pull/11422 for the change.
// (Note that in the Windows implementation of this function,
// modification time IS taken as a change). See
// https://github.com/moby/moby/pull/37982 for more information.
(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
}
func getIno(fi os.FileInfo) uint64 {
return fi.Sys().(*syscall.Stat_t).Ino
}
func hasHardlinks(fi os.FileInfo) bool {
return fi.Sys().(*syscall.Stat_t).Nlink > 1
}

vendor/github.com/docker/docker/pkg/archive/changes_windows.go

@@ -1,34 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"os"
"github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Note there is a slight difference between the Linux and Windows
// implementations here. Due to https://github.com/moby/moby/issues/9874,
// and the fix at https://github.com/moby/moby/pull/11422, Linux does not
// consider a change to the directory time as a change. Windows on NTFS
// does. See https://github.com/moby/moby/pull/37982 for more information.
if !sameFsTime(oldStat.Mtim(), newStat.Mtim()) ||
oldStat.Mode() != newStat.Mode() ||
oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode().IsDir()
}
func getIno(fi os.FileInfo) (inode uint64) {
return
}
func hasHardlinks(fi os.FileInfo) bool {
return false
}

vendor/github.com/docker/docker/pkg/archive/copy.go

@@ -1,480 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// Errors used or returned by this file.
var (
ErrNotDirectory = errors.New("not a directory")
ErrDirNotExists = errors.New("no such directory")
ErrCannotCopyDir = errors.New("cannot copy directory")
ErrInvalidCopySource = errors.New("invalid copy source content")
)
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in the separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
// Ensure paths are in platform semantics
cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
originalPath = strings.Replace(originalPath, "/", string(sep), -1)
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
if !hasTrailingPathSeparator(cleanedPath, sep) {
// Add a separator if it doesn't already end with one (a cleaned
// path would only end in a separator if it is the root).
cleanedPath += string(sep)
}
cleanedPath += "."
}
if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
cleanedPath += string(sep)
}
return cleanedPath
}
// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string, sep byte) bool {
return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
}
// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string, sep byte) bool {
return len(path) > 0 && path[len(path)-1] == sep
}
// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
return filepath.Base(path) == "."
}
// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
cleanedPath := filepath.Clean(filepath.FromSlash(path))
if specifiesCurrentDir(path) {
cleanedPath += string(os.PathSeparator) + "."
}
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
sourcePath = normalizePath(sourcePath)
if _, err = os.Lstat(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
return
}
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
opts := TarResourceRebaseOpts(sourceBase, rebaseName)
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
return TarWithOptions(sourceDir, opts)
}
// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
// parameters to be sent to TarWithOptions (the TarOptions struct)
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
filter := []string{sourceBase}
return &TarOptions{
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
}
}
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
Path string
Exists bool
IsDir bool
RebaseName string
}
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
// normalize the file path and then evaluate the symbol link
// we will use the target file instead of the symbol link if
// followLink is set
path = normalizePath(path)
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
if err != nil {
return CopyInfo{}, err
}
stat, err := os.Lstat(resolvedPath)
if err != nil {
return CopyInfo{}, err
}
return CopyInfo{
Path: resolvedPath,
Exists: true,
IsDir: stat.IsDir(),
RebaseName: rebaseName,
}, nil
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
path = normalizePath(path)
originalPath := path
stat, err := os.Lstat(path)
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
// The path exists and is not a symlink.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// While the path is a symlink.
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
if n > maxSymlinkIter {
// Don't follow symlinks more than this arbitrary number of times.
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
}
// The path is a symbolic link. We need to evaluate it so that the
// destination of the copy operation is the link target and not the
// link itself. This is notably different than CopyInfoSourcePath which
// only evaluates symlinks before the last appearing path separator.
// Also note that it is okay if the last path element is a broken
// symlink as the copy operation should create the target.
var linkTarget string
linkTarget, err = os.Readlink(path)
if err != nil {
return CopyInfo{}, err
}
if !system.IsAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := SplitPathDirEntry(path)
linkTarget = filepath.Join(dstParent, linkTarget)
}
path = linkTarget
stat, err = os.Lstat(path)
}
if err != nil {
// It's okay if the destination path doesn't exist. We can still
// continue the copy operation if the parent directory exists.
if !os.IsNotExist(err) {
return CopyInfo{}, err
}
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(path)
parentDirStat, err := os.Stat(dstParent)
if err != nil {
return CopyInfo{}, err
}
if !parentDirStat.IsDir() {
return CopyInfo{}, ErrNotDirectory
}
return CopyInfo{Path: path}, nil
}
// The path exists after resolving symlinks.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
// Ensure in platform semantics
srcInfo.Path = normalizePath(srcInfo.Path)
dstInfo.Path = normalizePath(dstInfo.Path)
// Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
_, srcBase := SplitPathDirEntry(srcInfo.Path)
switch {
case dstInfo.Exists && dstInfo.IsDir:
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
// you cannot copy a directory to an existing file location.
return "", nil, ErrCannotCopyDir
case dstInfo.Exists:
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
// the destination path instead, and when it is, the directory that is
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case assertsDirectory(dstInfo.Path, os.PathSeparator):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
// error condition since you cannot create a directory from a file
// source.
return "", nil, ErrDirNotExists
default:
// The last remaining case is when the destination does not exist, is
// not asserted to be a directory, and the source content is not an
// archive of a directory. In this case, the destination file will need
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
if oldBase == string(os.PathSeparator) {
// If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator
// that all paths will start with.
oldBase = ""
}
rebased, w := io.Pipe()
go func() {
srcTar := tar.NewReader(srcContent)
rebasedTar := tar.NewWriter(w)
for {
hdr, err := srcTar.Next()
if err == io.EOF {
// Signals end of archive.
rebasedTar.Close()
w.Close()
return
}
if err != nil {
w.CloseWithError(err)
return
}
// srcContent tar stream, as served by TarWithOptions(), is
// definitely in PAX format, but tar.Next() mistakenly guesses it
// as USTAR, which creates a problem: if the newBase is >100
// characters long, WriteHeader() returns an error like
// "archive/tar: cannot encode header: Format specifies USTAR; and USTAR cannot encode Name=...".
//
// To fix, set the format to PAX here. See docker/for-linux issue #484.
hdr.Format = tar.FormatPAX
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
if hdr.Typeflag == tar.TypeLink {
hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
}
if err = rebasedTar.WriteHeader(hdr); err != nil {
w.CloseWithError(err)
return
}
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
w.CloseWithError(err)
return
}
}
}()
return rebased
}
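// Illustrative sketch (not upstream code): rebase a tar stream whose entries
// begin with "app" so they begin with "app-v2" instead; names are hypothetical:
//
//	rebased := RebaseArchiveEntries(content, "app", "app-v2")
//	defer rebased.Close()
//	// an entry named "app/bin/run" now reads back as "app-v2/bin/run"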
// TODO @gupta-ak. These might have to be changed in the future to be
// continuity driver aware as well to support LCOW.
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
var (
srcInfo CopyInfo
err error
)
// Ensure in platform semantics
srcPath = normalizePath(srcPath)
dstPath = normalizePath(dstPath)
// Clean the source and destination paths.
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
return err
}
content, err := TarResource(srcInfo)
if err != nil {
return err
}
defer content.Close()
return CopyTo(content, srcInfo, dstPath)
}
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
// The destination path need not exist, but CopyInfoDestinationPath will
// ensure that at least the parent directory exists.
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
if err != nil {
return err
}
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
if err != nil {
return err
}
defer copyArchive.Close()
options := &TarOptions{
NoLchown: true,
NoOverwriteDirNonDir: true,
}
return Untar(copyArchive, dstDir, options)
}
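// Illustrative sketch (not upstream code): copy a host file into an existing
// directory, following symlinks on the source side; paths are hypothetical:
//
//	if err := CopyResource("/tmp/src.txt", "/tmp/dstdir", true); err != nil {
//		return err
//	}
//	// "/tmp/dstdir/src.txt" now exists, since the destination is a directory.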
// ResolveHostSourcePath decides the real path to be copied, given parameters
// such as whether to follow symlinks. If followLink is true, resolvedPath
// will be the link target of any symlinked file; otherwise only symlinks in
// the parent directory are resolved, and a symlinked file itself is returned
// without being resolved.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
if followLink {
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
} else {
dirPath, basePath := filepath.Split(path)
// if not following the symlink, resolve any symlink in the parent dir
var resolvedDirPath string
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
if err != nil {
return
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
if hasTrailingPathSeparator(path, os.PathSeparator) &&
filepath.Base(path) != filepath.Base(resolvedPath) {
rebaseName = filepath.Base(path)
}
}
return resolvedPath, rebaseName, nil
}
// GetRebaseName normalizes and compares path and resolvedPath, returning the
// completed resolved path and the rebase name for the file.
func GetRebaseName(path, resolvedPath string) (string, string) {
// resolvedPath will have been cleaned (no trailing path separator or dot), so
// we can manually re-append them when the original path carried them
var rebaseName string
if specifiesCurrentDir(path) &&
!specifiesCurrentDir(resolvedPath) {
resolvedPath += string(filepath.Separator) + "."
}
if hasTrailingPathSeparator(path, os.PathSeparator) &&
!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
resolvedPath += string(filepath.Separator)
}
if filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
return resolvedPath, rebaseName
}


@@ -1,11 +0,0 @@
// +build !windows
package archive // import "github.com/docker/docker/pkg/archive"
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.ToSlash(path)
}


@@ -1,9 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.FromSlash(path)
}


@@ -1,260 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
"github.com/sirupsen/logrus"
)
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
unpackedPaths := make(map[string]struct{})
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
idMapping := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
// Windows does not support filenames with colons in them. Ignore
// these files. This is not a problem though (although it might
// appear that it is). Let's suppose a client is running docker pull.
// The daemon it points to is Windows. Would it make sense for the
// client to be doing a docker pull Ubuntu for example (which has files
// with colons in the name under /usr/share/man/man3)? No, absolutely
// not as it would really only make sense that they were pulling a
// Windows image. However, for development, it is necessary to be able
// to pull Linux images which are in the repository.
//
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
// image but have it tagged as Windows inadvertently.
if runtime.GOOS == "windows" {
if strings.Contains(hdr.Name, ":") {
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
continue
}
}
// Note: these operations are platform specific, so the slash must be too.
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
// Not the root directory, ensure that the parent directory exists.
// This happened in some tests where an image had a tarfile without any
// parent directories.
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = system.MkdirAll(parentPath, 0600)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
return 0, err
}
}
if hdr.Name != WhiteoutOpaqueDir {
continue
}
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return 0, err
}
// Note: these operations are platform specific, so the slash must be too.
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
base := filepath.Base(path)
if strings.HasPrefix(base, WhiteoutPrefix) {
dir := filepath.Dir(path)
if base == WhiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
} else {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
}
} else {
// If the path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
trBuf.Reset(tr)
srcData := io.Reader(trBuf)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := remapIDs(idMapping, srcHdr); err != nil {
return 0, err
}
if err := createTarFile(path, dest, srcHdr, srcData, !options.NoLchown, nil, options.InUserNS); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them to modify the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
}
return size, nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
if runtime.GOOS != "windows" {
oldmask, err := system.Umask(0)
if err != nil {
return 0, err
}
defer system.Umask(oldmask)
}
if decompress {
decompLayer, err := DecompressStream(layer)
if err != nil {
return 0, err
}
defer decompLayer.Close()
layer = decompLayer
}
return UnpackLayer(dest, layer, options)
}
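// Illustrative sketch (not upstream code): apply a possibly compressed layer
// tarball to a rootfs directory; paths are hypothetical:
//
//	f, err := os.Open("/tmp/layer.tar.gz")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	n, err := ApplyLayer("/tmp/rootfs", f)
//	// n is the size in bytes of the layer's uncompressed contents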


@@ -1,16 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return UTIME_OMIT special value
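// On Linux, UTIME_OMIT is defined as ((1 << 30) - 2); when passed to
// utimensat(2) it leaves the corresponding timestamp unchanged.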
ts.Sec = 0
ts.Nsec = (1 << 30) - 2
return
}
return syscall.NsecToTimespec(time.UnixNano())
}


@@ -1,16 +0,0 @@
// +build !linux
package archive // import "github.com/docker/docker/pkg/archive"
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
nsec := int64(0)
if !time.IsZero() {
nsec = time.UnixNano()
}
return syscall.NsecToTimespec(nsec)
}


@@ -1,23 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// WhiteoutPrefix means the file is a whiteout. If the prefix is followed by a
// filename, that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
// WhiteoutOpaqueDir means the directory has been made opaque, meaning
// readdir calls on this directory do not fall through to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
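// Illustrative mapping (a sketch mirroring the UnpackLayer logic earlier in
// this diff): a whiteout entry named ".wh.secret.txt" in a layer causes
// "secret.txt" to be removed during extraction:
//
//	base := ".wh.secret.txt"
//	if strings.HasPrefix(base, WhiteoutPrefix) {
//		original := base[len(WhiteoutPrefix):] // "secret.txt"
//		_ = original // extraction removes this path from the unpacked tree
//	}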


@@ -1,59 +0,0 @@
package archive // import "github.com/docker/docker/pkg/archive"
import (
"archive/tar"
"bytes"
"io"
)
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with empty
// content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world"
// * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
files := parseStringPairs(input...)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
func parseStringPairs(input ...string) (output [][2]string) {
output = make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
if i+1 < len(input) {
pair[1] = input[i+1]
}
output = append(output, pair)
}
return
}
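// Illustrative use of Generate (a sketch, not upstream code):
//
//	rdr, err := Generate("foo.txt", "hello world", "emptyfile")
//	if err != nil {
//		return err
//	}
//	// rdr is an in-memory tar stream with "foo.txt" (11 bytes of content)
//	// and an empty "emptyfile" entry.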

vendor/github.com/vbatts/go-mtree/.gitignore generated vendored Normal file

@@ -0,0 +1,6 @@
*~
.cli.test
.lint
.test
.vet
gomtree

vendor/github.com/vbatts/go-mtree/.travis.yml generated vendored Normal file

@@ -0,0 +1,23 @@
language: go
go:
- "1.x"
- "1.14.x"
- "1.13.x"
- "1.12.x"
- "1.11.x"
- "1.10.x"
- "1.9.x"
sudo: false
before_install:
- git config --global url."https://".insteadOf git://
- make install.tools
- mkdir -p $GOPATH/src/github.com/vbatts && ln -sf $(pwd) $GOPATH/src/github.com/vbatts/go-mtree
install: true
script:
- make validation
- make validation.tags
- make build.arches

vendor/github.com/vbatts/go-mtree/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
Copyright (c) 2016 Vincent Batts, Raleigh, NC, USA
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/vbatts/go-mtree/Makefile generated vendored Normal file

@@ -0,0 +1,93 @@
BUILD := gomtree
BUILDPATH := github.com/vbatts/go-mtree/cmd/gomtree
CWD := $(shell pwd)
SOURCE_FILES := $(shell find . -type f -name "*.go")
CLEAN_FILES := *~
TAGS :=
ARCHES := linux,386 linux,amd64 linux,arm linux,arm64 openbsd,amd64 windows,amd64 darwin,amd64
GO_VER := go1.14
default: build validation
.PHONY: validation
validation: .test .lint .vet .cli.test
.PHONY: validation.tags
validation.tags: .test.tags .vet.tags .cli.test
.PHONY: test
test: .test
CLEAN_FILES += .test .test.tags
NO_VENDOR_DIR := $(shell find . -type f -name '*.go' ! -path './vendor*' ! -path './.git*' ! -path './.vscode*' -exec dirname "{}" \; | sort -u)
.test: $(SOURCE_FILES)
go test -v $(NO_VENDOR_DIR) && touch $@
.test.tags: $(SOURCE_FILES)
set -e ; for tag in $(TAGS) ; do go test -tags $$tag -v $(NO_VENDOR_DIR) ; done && touch $@
.PHONY: lint
lint: .lint
CLEAN_FILES += .lint
.lint: $(SOURCE_FILES)
@if [[ "$(findstring $(GO_VER),$(shell go version))" != "" ]] ; then \
set -e ; for dir in $(NO_VENDOR_DIR) ; do golint -set_exit_status $$dir ; done && touch $@ \
else \
touch $@ ; \
fi
.PHONY: vet
vet: .vet .vet.tags
CLEAN_FILES += .vet .vet.tags
.vet: $(SOURCE_FILES)
go vet $(NO_VENDOR_DIR) && touch $@
.vet.tags: $(SOURCE_FILES)
set -e ; for tag in $(TAGS) ; do go vet -tags $$tag -v $(NO_VENDOR_DIR) ; done && touch $@
.PHONY: cli.test
cli.test: .cli.test
CLEAN_FILES += .cli.test .cli.test.tags
.cli.test: $(BUILD) $(wildcard ./test/cli/*.sh)
@go run ./test/cli.go ./test/cli/*.sh && touch $@
.cli.test.tags: $(BUILD) $(wildcard ./test/cli/*.sh)
@set -e ; for tag in $(TAGS) ; do go run -tags $$tag ./test/cli.go ./test/cli/*.sh ; done && touch $@
.PHONY: build
build: $(BUILD)
$(BUILD): $(SOURCE_FILES)
go build -o $(BUILD) $(BUILDPATH)
install.tools:
@go get -u github.com/fatih/color ; \
if [[ "$(findstring $(GO_VER),$(shell go version))" != "" ]] ; then \
go get -u golang.org/x/lint/golint ;\
fi
./bin:
mkdir -p $@
CLEAN_FILES += bin
build.arches: ./bin
@set -e ;\
for pair in $(ARCHES); do \
p=$$(echo $$pair | cut -d , -f 1);\
a=$$(echo $$pair | cut -d , -f 2);\
echo "Building $$p/$$a ...";\
GOOS=$$p GOARCH=$$a go build -o ./bin/gomtree.$$p.$$a $(BUILDPATH) ;\
done
clean:
rm -rf $(BUILD) $(CLEAN_FILES)

vendor/github.com/vbatts/go-mtree/README.md generated vendored Normal file

@@ -0,0 +1,213 @@
# go-mtree
[![Build Status](https://travis-ci.org/vbatts/go-mtree.svg?branch=master)](https://travis-ci.org/vbatts/go-mtree) [![Go Report Card](https://goreportcard.com/badge/github.com/vbatts/go-mtree)](https://goreportcard.com/report/github.com/vbatts/go-mtree)
`mtree` is a filesystem hierarchy validation tool and format.
This is a library and simple cli tool for [mtree(8)][mtree(8)] support.
While the traditional `mtree` cli utility is primarily on BSDs (FreeBSD,
OpenBSD, etc.), even broader support for the `mtree` specification format is
provided with libarchive ([libarchive-formats(5)][libarchive-formats(5)]).
There is also an [mtree port for Linux][archiecobbs/mtree-port] though it is
not widely packaged for Linux distributions.
## Format
The format of hierarchy specification is consistent with the `# mtree v2.0`
format. Both the BSD `mtree` and libarchive ought to be interoperable with it
with only one definite caveat. On Linux, extended attributes (`xattr`) on
files are often a critical aspect of the file, holding ACLs, capabilities, etc.
While FreeBSD filesystems do support `extattr`, this feature has not made its
way into their `mtree`.
This implementation of mtree supports a few non-upstream keywords, such as:
`xattr` and `tar_time`. If you include these keywords, the FreeBSD `mtree`
will fail, as they are unknown keywords to that implementation.
To have `go-mtree` produce specifications that will be
strictly compatible with the BSD `mtree`, use the `-bsd-keywords` flag when
creating a manifest. This ensures that only the keywords supported by
BSD `mtree` are used.
### Typical form
With the standard keywords, plus say `sha256digest`, the hierarchy
specification looks like:
```mtree
# .
/set type=file nlink=1 mode=0664 uid=1000 gid=100
. size=4096 type=dir mode=0755 nlink=6 time=1459370393.273231538
LICENSE size=1502 mode=0644 time=1458851690.0 sha256digest=ef4e53d83096be56dc38dbf9bc8ba9e3068bec1ec37c179033d1e8f99a1c2a95
README.md size=2820 mode=0644 time=1459370256.316148361 sha256digest=d9b955134d99f84b17c0a711ce507515cc93cd7080a9dcd50400e3d993d876ac
[...]
```
This shows the directory we are presently in and the files present. Along with
each path are the keywords and the values unique to that path. Any common
keywords and values are established in the `/set` command.
### Extended attributes form
```mtree
# .
/set type=file nlink=1 mode=0664 uid=1000 gid=1000
. size=4096 type=dir mode=0775 nlink=6 time=1459370191.11179595 xattr.security.selinux=dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA==
LICENSE size=1502 time=1458851690.583562292 xattr.security.selinux=dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA==
README.md size=2366 mode=0644 time=1459369604.0 xattr.security.selinux=dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA==
[...]
```
Note the keyword prefixed with `xattr.` followed by the extended attribute's
namespace and key. This setup works for Linux extended attributes as well as
FreeBSD extended attributes.
Since extended attributes are an unordered hashmap, this approach allows for
checking each `<namespace>.<key>` individually.
The value is the [base64 encoding][base64] of the particular extended
attribute's value. Since the values themselves could be raw bytes, this
approach avoids issues with encoding.
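For instance, a stored value can be recovered with any base64 decoder; a
minimal Go sketch (the value is copied from the example above):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"
)

func main() {
	// xattr.security.selinux value from the manifest excerpt above.
	const v = "dW5jb25maW5lZF91Om9iamVjdF9yOnVzZXJfaG9tZV90OnMwAA=="
	raw, err := base64.StdEncoding.DecodeString(v)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%q\n", raw) // "unconfined_u:object_r:user_home_t:s0\x00"
}
```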
### Tar form
```mtree
# .
/set type=file mode=0664 uid=1000 gid=1000
. type=dir mode=0775 tar_time=1468430408.000000000
# samedir
samedir type=dir mode=0775 tar_time=1468000972.000000000
file2 size=0 tar_time=1467999782.000000000
file1 size=0 tar_time=1467999781.000000000
[...]
```
While `go-mtree` serves mainly as a library for upstream `mtree` support,
`go-mtree` is also compatible with [tar archives][tar] (which is not an upstream feature).
This means that we can now create and validate a manifest by specifying a tar file.
More interestingly, this also means that we can create a manifest from an archive, and then
validate this manifest against a filesystem hierarchy that's on disk, and vice versa.
Notice that when a validation manifest is created from a tar file, the
default keyword for evaluating a notion of time is `tar_time`. In the
"filesystem hierarchy" format of mtree, `time` is evaluated with
nanosecond precision. However, GNU tar truncates a file's modification time
to 1-second precision. That is, if a file's full modification time is
123456789.123456789, the "tar time" equivalent would be 123456789.000000000.
This way, if you validate a manifest created using a tar file against an
actual root directory, there will be no complaints from `go-mtree` so long as the
1-second precision time of a file in the root directory is the same.
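A small illustrative sketch of how a full-precision `time` value maps onto its
`tar_time` equivalent (numbers taken from the example above):

```go
package main

import "fmt"

func main() {
	// A full-precision mtime, split into seconds and nanoseconds.
	sec, nsec := int64(123456789), int64(123456789)
	fmt.Printf("time=%d.%09d\n", sec, nsec)  // time=123456789.123456789
	fmt.Printf("tar_time=%d.%09d\n", sec, 0) // tar_time=123456789.000000000
}
```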
## Usage
To use the Go programming language library, see [the docs][godoc].
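A minimal library sketch (not from the upstream docs; the keyword choice and
the `"."` root are illustrative) that builds a manifest for a tree and
immediately re-validates the tree against it:

```go
package main

import (
	"fmt"
	"log"

	"github.com/vbatts/go-mtree"
)

func main() {
	keywords := []mtree.Keyword{"size", "type", "sha256digest"}

	// Walk the tree and build an in-memory manifest.
	dh, err := mtree.Walk(".", nil, keywords, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Re-check the same tree against the manifest we just built.
	deltas, err := mtree.Check(".", dh, keywords, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range deltas {
		fmt.Println(d)
	}
}
```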
To use the command line tool, first [build it](#Building), then see the following examples.
### Create a manifest
This will also include the sha512 digest of the files.
```bash
gomtree -c -K sha512digest -p . > /tmp/root.mtree
```
With a tar file:
```bash
gomtree -c -K sha512digest -T sometarfile.tar > /tmp/tar.mtree
```
### Validate a manifest
```bash
gomtree -p . -f /tmp/root.mtree
```
With a tar file:
```bash
gomtree -T sometarfile.tar -f /tmp/root.mtree
```
### See the supported keywords
```bash
gomtree -list-keywords
Available keywords:
uname
sha1
sha1digest
sha256digest
xattrs (not upstream)
link (default)
nlink (default)
md5digest
rmd160digest
mode (default)
cksum
md5
rmd160
type (default)
time (default)
uid (default)
gid (default)
sha256
sha384
sha512
xattr (not upstream)
tar_time (not upstream)
size (default)
ripemd160digest
sha384digest
sha512digest
```
## Building
Either:
```bash
go get github.com/vbatts/go-mtree/cmd/gomtree
```
or
```bash
git clone git://github.com/vbatts/go-mtree.git $GOPATH/src/github.com/vbatts/go-mtree
cd $GOPATH/src/github.com/vbatts/go-mtree
go build ./cmd/gomtree
```
## Testing
On Linux:
```bash
cd $GOPATH/src/github.com/vbatts/go-mtree
make
```
On FreeBSD:
```bash
cd $GOPATH/src/github.com/vbatts/go-mtree
gmake
```
[mtree(8)]: https://www.freebsd.org/cgi/man.cgi?mtree(8)
[libarchive-formats(5)]: https://www.freebsd.org/cgi/man.cgi?query=libarchive-formats&sektion=5&n=1
[archiecobbs/mtree-port]: https://github.com/archiecobbs/mtree-port
[godoc]: https://godoc.org/github.com/vbatts/go-mtree
[tar]: http://man7.org/linux/man-pages/man1/tar.1.html
[base64]: https://tools.ietf.org/html/rfc4648

vendor/github.com/vbatts/go-mtree/check.go generated vendored Normal file

@@ -0,0 +1,20 @@
package mtree
// Check a root directory path against the DirectoryHierarchy, regarding only
// the available keywords from the list and each entry in the hierarchy.
// If keywords is nil, all keywords present in the DirectoryHierarchy are checked.
//
// This is equivalent to creating a new DirectoryHierarchy with Walk(root, nil,
// keywords, fs) and then doing a Compare(dh, newDh, keywords).
func Check(root string, dh *DirectoryHierarchy, keywords []Keyword, fs FsEval) ([]InodeDelta, error) {
if keywords == nil {
keywords = dh.UsedKeywords()
}
newDh, err := Walk(root, nil, keywords, fs)
if err != nil {
return nil, err
}
return Compare(dh, newDh, keywords)
}
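// Illustrative use (a sketch, not upstream code): validate "." against a
// previously obtained manifest `dh`, checking only the keywords the manifest
// itself uses:
//
//	deltas, err := Check(".", dh, nil, nil)
//	if err != nil {
//		return err
//	}
//	// an empty deltas slice means the tree matches the manifest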

vendor/github.com/vbatts/go-mtree/cksum.go generated vendored Normal file

@@ -0,0 +1,49 @@
package mtree
import (
"bufio"
"io"
)
const posixPolynomial uint32 = 0x04C11DB7
// cksum is an implementation of the POSIX CRC algorithm
func cksum(r io.Reader) (uint32, int, error) {
in := bufio.NewReader(r)
count := 0
var sum uint32
f := func(b byte) {
for i := 7; i >= 0; i-- {
msb := sum & (1 << 31)
sum = sum << 1
if msb != 0 {
sum = sum ^ posixPolynomial
}
}
sum ^= uint32(b)
}
for done := false; !done; {
switch b, err := in.ReadByte(); err {
case io.EOF:
done = true
case nil:
f(b)
count++
default:
return ^sum, count, err
}
}
for m := count; ; {
f(byte(m) & 0xff)
m = m >> 8
if m == 0 {
break
}
}
f(0)
f(0)
f(0)
f(0)
return ^sum, count, nil
}

vendor/github.com/vbatts/go-mtree/compare.go generated vendored Normal file

@@ -0,0 +1,471 @@
package mtree
import (
"encoding/json"
"fmt"
"strconv"
)
// XXX: Do we need a Difference interface to make it so people can do var x
// Difference = <something>? The main problem is that keys and inodes need to
// have different interfaces, so it's just a pain.
// DifferenceType represents the type of a discrepancy encountered for
// an object. This is also used to represent discrepancies between keys
// for objects.
type DifferenceType string
const (
// Missing represents a discrepancy where the object is present in
// the @old manifest but is not present in the @new manifest.
Missing DifferenceType = "missing"
// Extra represents a discrepancy where the object is not present in
// the @old manifest but is present in the @new manifest.
Extra DifferenceType = "extra"
// Modified represents a discrepancy where the object is present in
// both the @old and @new manifests, but one or more of the keys
// have different values (or have not been set in one of the
// manifests).
Modified DifferenceType = "modified"
// Same represents the case where two files are the same. These are
// only generated from CompareSame().
Same DifferenceType = "same"
// ErrorDifference represents an attempted update to the values of
// a keyword that failed
ErrorDifference DifferenceType = "errored"
)
// These functions return *type from the parameter. It's just shorthand, to
// ensure that we don't accidentally expose pointers to the caller that are
// internal data.
func ePtr(e Entry) *Entry { return &e }
func sPtr(s string) *string { return &s }
// InodeDelta represents a discrepancy in a filesystem object between two
// DirectoryHierarchy manifests. Discrepancies are caused by entries only
// present in one manifest [Missing, Extra], keys only present in one of the
// manifests [Modified] or a difference between the keys of the same object in
// both manifests [Modified].
type InodeDelta struct {
diff DifferenceType
path string
new Entry
old Entry
keys []KeyDelta
}
// Type returns the type of discrepancy encountered when comparing this inode
// between the two DirectoryHierarchy manifests.
func (i InodeDelta) Type() DifferenceType {
return i.diff
}
// Path returns the path to the inode (relative to the root of the
// DirectoryHierarchy manifests).
func (i InodeDelta) Path() string {
return i.path
}
// Diff returns the set of key discrepancies between the two manifests for the
// specific inode. If the DifferenceType of the inode is not Modified, then
// Diff returns nil.
func (i InodeDelta) Diff() []KeyDelta {
return i.keys
}
// Old returns the value of the inode Entry in the "old" DirectoryHierarchy (as
// determined by the ordering of parameters to Compare).
func (i InodeDelta) Old() *Entry {
if i.diff == Modified || i.diff == Missing {
return ePtr(i.old)
}
return nil
}
// New returns the value of the inode Entry in the "new" DirectoryHierarchy (as
// determined by the ordering of parameters to Compare).
func (i InodeDelta) New() *Entry {
if i.diff == Modified || i.diff == Extra {
return ePtr(i.new)
}
return nil
}
// MarshalJSON creates a JSON-encoded version of InodeDelta.
func (i InodeDelta) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type DifferenceType `json:"type"`
Path string `json:"path"`
Keys []KeyDelta `json:"keys"`
}{
Type: i.diff,
Path: i.path,
Keys: i.keys,
})
}
// String returns a "pretty" formatting for InodeDelta.
func (i InodeDelta) String() string {
switch i.diff {
case Modified:
// Output the first failure.
f := i.keys[0]
return fmt.Sprintf("%q: keyword %q: expected %s; got %s", i.path, f.name, f.old, f.new)
case Extra:
return fmt.Sprintf("%q: unexpected path", i.path)
case Missing:
return fmt.Sprintf("%q: missing path", i.path)
default:
panic("programming error")
}
}
// KeyDelta represents a discrepancy in a key for a particular filesystem
// object between two DirectoryHierarchy manifests. Discrepancies are caused by
// keys only present in one manifest [Missing, Extra] or a difference between
// the keys of the same object in both manifests [Modified]. A set of these is
// returned with InodeDelta.Diff().
type KeyDelta struct {
diff DifferenceType
name Keyword
old string
new string
err error // used for update delta results
}
// Type returns the type of discrepancy encountered when comparing this key
// between the two DirectoryHierarchy manifests' relevant inode entry.
func (k KeyDelta) Type() DifferenceType {
return k.diff
}
// Name returns the name (the key) of the KeyDelta entry in the
// DirectoryHierarchy.
func (k KeyDelta) Name() Keyword {
return k.name
}
// Old returns the value of the KeyDelta entry in the "old" DirectoryHierarchy
// (as determined by the ordering of parameters to Compare). Returns nil if
// there was no entry in the "old" DirectoryHierarchy.
func (k KeyDelta) Old() *string {
if k.diff == Modified || k.diff == Missing {
return sPtr(k.old)
}
return nil
}
// New returns the value of the KeyDelta entry in the "new" DirectoryHierarchy
// (as determined by the ordering of parameters to Compare). Returns nil if
// there was no entry in the "new" DirectoryHierarchy.
func (k KeyDelta) New() *string {
if k.diff == Modified || k.diff == Extra {
return sPtr(k.new)
}
return nil
}
// MarshalJSON creates a JSON-encoded version of KeyDelta.
func (k KeyDelta) MarshalJSON() ([]byte, error) {
return json.Marshal(struct {
Type DifferenceType `json:"type"`
Name Keyword `json:"name"`
Old string `json:"old"`
New string `json:"new"`
}{
Type: k.diff,
Name: k.name,
Old: k.old,
New: k.new,
})
}
// Like Compare, but for single inode entries only. Used to compute the
// cached version of inode.keys.
func compareEntry(oldEntry, newEntry Entry) ([]KeyDelta, error) {
// Represents the new and old states for an entry's keys.
type stateT struct {
Old *KeyVal
New *KeyVal
}
diffs := map[Keyword]*stateT{}
oldKeys := oldEntry.AllKeys()
newKeys := newEntry.AllKeys()
// Fill the map with the old keys first.
for _, kv := range oldKeys {
key := kv.Keyword()
// only add this diff if the new keys has this keyword
if key != "tar_time" && key != "time" && key.Prefix() != "xattr" && len(HasKeyword(newKeys, key)) == 0 {
continue
}
// Cannot take &kv because it's the iterator.
copy := new(KeyVal)
*copy = kv
_, ok := diffs[key]
if !ok {
diffs[key] = new(stateT)
}
diffs[key].Old = copy
}
// Then fill the new keys.
for _, kv := range newKeys {
key := kv.Keyword()
// only add this diff if the old keys has this keyword
if key != "tar_time" && key != "time" && key.Prefix() != "xattr" && len(HasKeyword(oldKeys, key)) == 0 {
continue
}
// Cannot take &kv because it's the iterator.
copy := new(KeyVal)
*copy = kv
_, ok := diffs[key]
if !ok {
diffs[key] = new(stateT)
}
diffs[key].New = copy
}
// We need a full list of the keys so we can deal with different keyvalue
// orderings.
var kws []Keyword
for kw := range diffs {
kws = append(kws, kw)
}
// If both tar_time and time were specified in the set of keys, we have to
// mess with the diffs. This is an unfortunate side-effect of tar archives.
// TODO(cyphar): This really should be abstracted inside keywords.go
if InKeywordSlice("tar_time", kws) && InKeywordSlice("time", kws) {
// Delete "time".
timeStateT := diffs["time"]
delete(diffs, "time")
// Make a new tar_time.
if diffs["tar_time"].Old == nil {
time, err := strconv.ParseFloat(timeStateT.Old.Value(), 64)
if err != nil {
return nil, fmt.Errorf("failed to parse old time: %s", err)
}
newTime := new(KeyVal)
*newTime = KeyVal(fmt.Sprintf("tar_time=%d.000000000", int64(time)))
diffs["tar_time"].Old = newTime
} else if diffs["tar_time"].New == nil {
time, err := strconv.ParseFloat(timeStateT.New.Value(), 64)
if err != nil {
return nil, fmt.Errorf("failed to parse new time: %s", err)
}
newTime := new(KeyVal)
*newTime = KeyVal(fmt.Sprintf("tar_time=%d.000000000", int64(time)))
diffs["tar_time"].New = newTime
} else {
return nil, fmt.Errorf("time and tar_time set in the same manifest")
}
}
// Are there any differences?
var results []KeyDelta
for name, diff := range diffs {
// Invalid
if diff.Old == nil && diff.New == nil {
return nil, fmt.Errorf("invalid state: both old and new are nil: key=%s", name)
}
switch {
// Missing
case diff.New == nil:
results = append(results, KeyDelta{
diff: Missing,
name: name,
old: diff.Old.Value(),
})
// Extra
case diff.Old == nil:
results = append(results, KeyDelta{
diff: Extra,
name: name,
new: diff.New.Value(),
})
// Modified
default:
if !diff.Old.Equal(*diff.New) {
results = append(results, KeyDelta{
diff: Modified,
name: name,
old: diff.Old.Value(),
new: diff.New.Value(),
})
}
}
}
return results, nil
}
// compare is the actual workhorse for Compare() and CompareSame()
func compare(oldDh, newDh *DirectoryHierarchy, keys []Keyword, same bool) ([]InodeDelta, error) {
// Represents the new and old states for an entry.
type stateT struct {
Old *Entry
New *Entry
}
// To deal with different orderings of the entries, use a path-keyed
// map to make sure we don't start comparing unrelated entries.
diffs := map[string]*stateT{}
// First, iterate over the old hierarchy. If nil, pretend it's empty.
if oldDh != nil {
for _, e := range oldDh.Entries {
if e.Type == RelativeType || e.Type == FullType {
path, err := e.Path()
if err != nil {
return nil, err
}
// Cannot take &e because it's the loop iterator.
cEntry := new(Entry)
*cEntry = e
_, ok := diffs[path]
if !ok {
diffs[path] = &stateT{}
}
diffs[path].Old = cEntry
}
}
}
// Then, iterate over the new hierarchy. If nil, pretend it's empty.
if newDh != nil {
for _, e := range newDh.Entries {
if e.Type == RelativeType || e.Type == FullType {
path, err := e.Path()
if err != nil {
return nil, err
}
// Cannot take &e because it's the loop iterator.
cEntry := new(Entry)
*cEntry = e
_, ok := diffs[path]
if !ok {
diffs[path] = &stateT{}
}
diffs[path].New = cEntry
}
}
}
// Now we compute the diff.
var results []InodeDelta
for path, diff := range diffs {
// Invalid
if diff.Old == nil && diff.New == nil {
return nil, fmt.Errorf("invalid state: both old and new are nil: path=%s", path)
}
switch {
// Missing
case diff.New == nil:
results = append(results, InodeDelta{
diff: Missing,
path: path,
old: *diff.Old,
})
// Extra
case diff.Old == nil:
results = append(results, InodeDelta{
diff: Extra,
path: path,
new: *diff.New,
})
// Modified
default:
changed, err := compareEntry(*diff.Old, *diff.New)
if err != nil {
return nil, fmt.Errorf("comparison failed %s: %s", path, err)
}
// Now remove "changed" entries that don't match the keys.
if keys != nil {
var filterChanged []KeyDelta
for _, keyDiff := range changed {
if InKeywordSlice(keyDiff.name.Prefix(), keys) {
filterChanged = append(filterChanged, keyDiff)
}
}
changed = filterChanged
}
// Check if there were any actual changes.
if len(changed) > 0 {
results = append(results, InodeDelta{
diff: Modified,
path: path,
old: *diff.Old,
new: *diff.New,
keys: changed,
})
} else if same {
// this means that nothing changed, i.e. that
// the files are the same.
results = append(results, InodeDelta{
diff: Same,
path: path,
old: *diff.Old,
new: *diff.New,
keys: changed,
})
}
}
}
return results, nil
}
// Compare compares two directory hierarchy manifests, and returns the
// list of discrepancies between the two. All of the entries in the
// manifest are considered, with differences being generated for
// RelativeType and FullType entries. Differences in structure (such as
// the way /set and /unset are written) are not considered to be
// discrepancies. The listed differences are all filesystem objects.
//
// keys controls which keys will be compared, but if keys is nil then all
// possible keys will be compared between the two manifests (allowing for
// missing entries and the like). A missing or extra key is treated as a
// Modified type.
//
// If oldDh or newDh are empty, we assume they are a hierarchy that is
// completely empty. This is purely for helping callers create synthetic
// InodeDeltas.
//
// NB: The order of the parameters matters (old, new) because Extra and
// Missing are considered as different discrepancy types.
func Compare(oldDh, newDh *DirectoryHierarchy, keys []Keyword) ([]InodeDelta, error) {
return compare(oldDh, newDh, keys, false)
}
// CompareSame is the same as Compare, except it also includes the entries
// that are the same with a Same DifferenceType.
func CompareSame(oldDh, newDh *DirectoryHierarchy, keys []Keyword) ([]InodeDelta, error) {
return compare(oldDh, newDh, keys, true)
}
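// Illustrative use (a sketch, not upstream code): diff two manifests on a
// restricted keyword set:
//
//	deltas, err := Compare(oldDh, newDh, []Keyword{"size", "sha256digest"})
//	if err != nil {
//		return err
//	}
//	for _, d := range deltas {
//		fmt.Println(d) // e.g. `"etc/passwd": keyword "size": expected 1502; got 1503`
//	}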

vendor/github.com/vbatts/go-mtree/creator.go generated vendored Normal file

@@ -0,0 +1,10 @@
package mtree
// dhCreator is used when building a DirectoryHierarchy
type dhCreator struct {
DH *DirectoryHierarchy
fs FsEval
curSet *Entry
curDir *Entry
curEnt *Entry
}

vendor/github.com/vbatts/go-mtree/entry.go generated vendored Normal file

@@ -0,0 +1,187 @@
package mtree
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/vbatts/go-mtree/pkg/govis"
)
type byPos []Entry
func (bp byPos) Len() int { return len(bp) }
func (bp byPos) Less(i, j int) bool { return bp[i].Pos < bp[j].Pos }
func (bp byPos) Swap(i, j int) { bp[i], bp[j] = bp[j], bp[i] }
// Entry is each component of content in the mtree spec file
type Entry struct {
Parent *Entry // up
Children []*Entry // down
Prev, Next *Entry // left, right
Set *Entry // current `/set` for additional keywords
Pos int // order in the spec
Raw string // raw line as read from the spec file
Name string // file or directory name
Keywords []KeyVal // TODO(vbatts) maybe a keyword typed set of values?
Type EntryType
}
// Descend searches through an Entry's children to find the Entry associated
// with `filename`. Directories are stored at the end of an Entry's children,
// so traverse backwards. Descending to "." (or an empty name) returns the
// Entry itself.
func (e Entry) Descend(filename string) *Entry {
if filename == "." || filename == "" {
return &e
}
numChildren := len(e.Children)
for i := range e.Children {
c := e.Children[numChildren-1-i]
if c.Name == filename {
return c
}
}
return nil
}
// Find is a wrapper around Descend that takes in a whole string path and tries
// to find that Entry
func (e Entry) Find(filepath string) *Entry {
resultnode := &e
for _, path := range strings.Split(filepath, "/") {
encoded, err := govis.Vis(path, DefaultVisFlags)
if err != nil {
return nil
}
resultnode = resultnode.Descend(encoded)
if resultnode == nil {
return nil
}
}
return resultnode
}
// Ascend gets the parent of an Entry. Serves mainly to maintain readability
// when traversing up and down an Entry tree
func (e Entry) Ascend() *Entry {
return e.Parent
}
// CleanPath makes a path safe for use with filepath.Join. This is done by not
// only cleaning the path, but also (if the path is relative) adding a leading
// '/' and cleaning it (then removing the leading '/'). This ensures that a
// path resulting from prepending another path will always resolve to lexically
// be a subdirectory of the prefixed path. This is all done lexically, so paths
// that include symlinks won't be safe as a result of using CleanPath.
//
// This code was copied from runc/libcontainer/utils/utils.go. It was
// originally written by myself, so I am dual-licensing it for the purpose of
// this project.
func CleanPath(path string) string {
// Deal with empty strings nicely.
if path == "" {
return ""
}
// Ensure that all paths are cleaned (especially problematic ones like
// "/../../../../../" which can cause lots of issues).
path = filepath.Clean(path)
// If the path isn't absolute, we need to do more processing to fix paths
// such as "../../../../<etc>/some/path". We also shouldn't convert absolute
// paths to relative ones.
if !filepath.IsAbs(path) {
path = filepath.Clean(string(os.PathSeparator) + path)
// This can't fail, as (by definition) all paths are relative to root.
path, _ = filepath.Rel(string(os.PathSeparator), path)
}
// Clean the path again for good measure.
return filepath.Clean(path)
}
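// Worked example (illustrative): CleanPath("../../etc/passwd") is first
// cleaned as "/../../etc/passwd" -> "/etc/passwd", then made relative to "/",
// yielding "etc/passwd", so the ".." components can no longer escape a
// prefix lexically.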
// Path provides the full path of the file, whether the entry is RelativeType
// or FullType. The result will be in Unvis'd form.
func (e Entry) Path() (string, error) {
decodedName, err := govis.Unvis(e.Name, DefaultVisFlags)
if err != nil {
return "", err
}
decodedName = CleanPath(decodedName)
if e.Parent == nil || e.Type == FullType {
return decodedName, nil
}
parentName, err := e.Parent.Path()
if err != nil {
return "", err
}
return CleanPath(filepath.Join(parentName, decodedName)), nil
}
// String joins a file with its associated keywords. The file name will be the
// Vis'd encoded version so that it can be parsed appropriately when Check'd.
func (e Entry) String() string {
if e.Raw != "" {
return e.Raw
}
if e.Type == BlankType {
return ""
}
if e.Type == DotDotType {
return e.Name
}
if e.Type == SpecialType || e.Type == FullType || inKeyValSlice("type=dir", e.Keywords) {
return fmt.Sprintf("%s %s", e.Name, strings.Join(KeyValToString(e.Keywords), " "))
}
return fmt.Sprintf(" %s %s", e.Name, strings.Join(KeyValToString(e.Keywords), " "))
}
// AllKeys returns the full set of KeyVal for the given entry, based on the
// /set keys as well as the entry-local keys. Entry-local keys always take
// precedence.
func (e Entry) AllKeys() []KeyVal {
if e.Set != nil {
return MergeKeyValSet(e.Set.Keywords, e.Keywords)
}
return e.Keywords
}
// IsDir checks the type= value for this entry on whether it is a directory
func (e Entry) IsDir() bool {
for _, kv := range e.AllKeys() {
if kv.Keyword().Prefix() == "type" {
return kv.Value() == "dir"
}
}
return false
}
// EntryType are the formats of lines in an mtree spec file
type EntryType int
// The types of lines to be found in an mtree spec file
const (
SignatureType EntryType = iota // first line of the file, like `#mtree v2.0`
BlankType // blank lines are ignored
CommentType // Lines beginning with `#` are ignored
SpecialType // lines with a `/` prefix issue a "special" command (currently only /set and /unset)
RelativeType // if the first white-space delimited word does not have a '/' in it. Options/keywords are applied.
DotDotType // .. - A relative path step. keywords/options are ignored
FullType // if the first word on the line has a `/` after the first character, it is interpreted as a file pathname with options
)
// String returns the name of the EntryType
func (et EntryType) String() string {
return typeNames[et]
}
var typeNames = map[EntryType]string{
SignatureType: "SignatureType",
BlankType: "BlankType",
CommentType: "CommentType",
SpecialType: "SpecialType",
RelativeType: "RelativeType",
DotDotType: "DotDotType",
FullType: "FullType",
}

vendor/github.com/vbatts/go-mtree/fseval.go generated vendored Normal file

@@ -0,0 +1,54 @@
package mtree
import "os"
// FsEval is a mock-friendly method of specifying to go-mtree how to carry out
// filesystem operations such as opening files and the like. The semantics of
// all of these wrappers MUST be identical to the semantics described here.
type FsEval interface {
// Open must have the same semantics as os.Open.
Open(path string) (*os.File, error)
// Lstat must have the same semantics as os.Lstat.
Lstat(path string) (os.FileInfo, error)
// Readdir must have the same semantics as calling os.Open on the given
// path and then returning the result of (*os.File).Readdir(-1).
Readdir(path string) ([]os.FileInfo, error)
// KeywordFunc must return a wrapper around the provided function (in other
// words, the returned function must refer to the same keyword).
KeywordFunc(fn KeywordFunc) KeywordFunc
}
// DefaultFsEval is the default implementation of FsEval (and is the default
// used if a nil interface is passed to any mtree function). It does not modify
// or wrap any of the methods (they all just call out to os.*).
type DefaultFsEval struct{}
// Open must have the same semantics as os.Open.
func (fs DefaultFsEval) Open(path string) (*os.File, error) {
return os.Open(path)
}
// Lstat must have the same semantics as os.Lstat.
func (fs DefaultFsEval) Lstat(path string) (os.FileInfo, error) {
return os.Lstat(path)
}
// Readdir must have the same semantics as calling os.Open on the given
// path and then returning the result of (*os.File).Readdir(-1).
func (fs DefaultFsEval) Readdir(path string) ([]os.FileInfo, error) {
fh, err := os.Open(path)
if err != nil {
return nil, err
}
defer fh.Close()
return fh.Readdir(-1)
}
// KeywordFunc must return a wrapper around the provided function (in other
// words, the returned function must refer to the same keyword).
func (fs DefaultFsEval) KeywordFunc(fn KeywordFunc) KeywordFunc {
return fn
}

vendor/github.com/vbatts/go-mtree/go.mod generated vendored Normal file

@@ -0,0 +1,11 @@
module github.com/vbatts/go-mtree
go 1.13
require (
github.com/davecgh/go-spew v1.1.1
github.com/fatih/color v1.9.0 // indirect
github.com/sirupsen/logrus v1.3.0
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037
)

vendor/github.com/vbatts/go-mtree/go.sum generated vendored Normal file

@@ -0,0 +1,31 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME=
github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

vendor/github.com/vbatts/go-mtree/hierarchy.go generated vendored Normal file

@@ -0,0 +1,48 @@
package mtree
import (
"io"
"sort"
)
// DirectoryHierarchy is the mapped structure for an mtree directory hierarchy
// spec
type DirectoryHierarchy struct {
Entries []Entry
}
// WriteTo simplifies the output of the resulting hierarchy spec
func (dh DirectoryHierarchy) WriteTo(w io.Writer) (n int64, err error) {
sort.Sort(byPos(dh.Entries))
var sum int64
for _, e := range dh.Entries {
str := e.String()
i, err := io.WriteString(w, str+"\n")
if err != nil {
return sum, err
}
sum += int64(i)
}
return sum, nil
}
// UsedKeywords collects and returns all the keywords used in a
// DirectoryHierarchy
func (dh DirectoryHierarchy) UsedKeywords() []Keyword {
usedkeywords := []Keyword{}
for _, e := range dh.Entries {
switch e.Type {
case FullType, RelativeType, SpecialType:
if e.Type != SpecialType || e.Name == "/set" {
kvs := e.Keywords
for _, kv := range kvs {
kw := KeyVal(kv).Keyword().Prefix()
if !InKeywordSlice(kw, usedkeywords) {
usedkeywords = append(usedkeywords, KeywordSynonym(string(kw)))
}
}
}
}
}
return usedkeywords
}

vendor/github.com/vbatts/go-mtree/keywordfunc.go generated vendored Normal file

@@ -0,0 +1,172 @@
package mtree
import (
"archive/tar"
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"fmt"
"hash"
"io"
"os"
"github.com/vbatts/go-mtree/pkg/govis"
"golang.org/x/crypto/ripemd160"
)
// KeywordFunc is the type of a function called on each file to be included in
// a DirectoryHierarchy, that will produce the string output of the keyword to
// be included for the file entry. Otherwise, empty string.
// io.Reader `r` is the file stream for the file payload. While this
// function takes an io.Reader, the caller needs to reset it to the beginning
// for each new KeywordFunc
type KeywordFunc func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error)
var (
// KeywordFuncs is the map of all keywords (and the functions to produce them)
KeywordFuncs = map[Keyword]KeywordFunc{
"size": sizeKeywordFunc, // The size, in bytes, of the file
"type": typeKeywordFunc, // The type of the file
"time": timeKeywordFunc, // The last modification time of the file
"link": linkKeywordFunc, // The target of the symbolic link when type=link
"uid": uidKeywordFunc, // The file owner as a numeric value
"gid": gidKeywordFunc, // The file group as a numeric value
"nlink": nlinkKeywordFunc, // The number of hard links the file is expected to have
"uname": unameKeywordFunc, // The file owner as a symbolic name
"gname": gnameKeywordFunc, // The file group as a symbolic name
"mode": modeKeywordFunc, // The current file's permissions as a numeric (octal) or symbolic value
"cksum": cksumKeywordFunc, // The checksum of the file using the default algorithm specified by the cksum(1) utility
"md5": hasherKeywordFunc("md5digest", md5.New), // The MD5 message digest of the file
"md5digest": hasherKeywordFunc("md5digest", md5.New), // A synonym for `md5`
"rmd160": hasherKeywordFunc("ripemd160digest", ripemd160.New), // The RIPEMD160 message digest of the file
"rmd160digest": hasherKeywordFunc("ripemd160digest", ripemd160.New), // A synonym for `rmd160`
"ripemd160digest": hasherKeywordFunc("ripemd160digest", ripemd160.New), // A synonym for `rmd160`
"sha1": hasherKeywordFunc("sha1digest", sha1.New), // The SHA1 message digest of the file
"sha1digest": hasherKeywordFunc("sha1digest", sha1.New), // A synonym for `sha1`
"sha256": hasherKeywordFunc("sha256digest", sha256.New), // The SHA256 message digest of the file
"sha256digest": hasherKeywordFunc("sha256digest", sha256.New), // A synonym for `sha256`
"sha384": hasherKeywordFunc("sha384digest", sha512.New384), // The SHA384 message digest of the file
"sha384digest": hasherKeywordFunc("sha384digest", sha512.New384), // A synonym for `sha384`
"sha512": hasherKeywordFunc("sha512digest", sha512.New), // The SHA512 message digest of the file
"sha512digest": hasherKeywordFunc("sha512digest", sha512.New), // A synonym for `sha512`
"sha512256": hasherKeywordFunc("sha512digest", sha512.New512_256), // The SHA512/256 message digest of the file
"sha512256digest": hasherKeywordFunc("sha512digest", sha512.New512_256), // A synonym for `sha512256`
"flags": flagsKeywordFunc, // NOTE: this is a noop, but here to support the presence of the "flags" keyword.
// This is not an upstreamed keyword, but used to vary from "time", as tar
// archives do not store nanosecond precision. So comparing on "time" will
// only be accurate to the second.
"tar_time": tartimeKeywordFunc, // The last modification time of the file, from a tar archive mtime
// This is not an upstreamed keyword, but a needed attribute for file validation.
// The keyword key is "xattr." followed by the extended attribute's "namespace.key".
// The keyword value is the base64 encoding of the extended attribute's value
// (matching the Linux implementation below), so the order of the keys does not matter.
"xattr": xattrKeywordFunc,
"xattrs": xattrKeywordFunc,
}
)
var (
modeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
permissions := info.Mode().Perm()
if os.ModeSetuid&info.Mode() > 0 {
permissions |= (1 << 11)
}
if os.ModeSetgid&info.Mode() > 0 {
permissions |= (1 << 10)
}
if os.ModeSticky&info.Mode() > 0 {
permissions |= (1 << 9)
}
return []KeyVal{KeyVal(fmt.Sprintf("mode=%#o", permissions))}, nil
}
sizeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if sys, ok := info.Sys().(*tar.Header); ok {
if sys.Typeflag == tar.TypeSymlink {
return []KeyVal{KeyVal(fmt.Sprintf("size=%d", len(sys.Linkname)))}, nil
}
}
return []KeyVal{KeyVal(fmt.Sprintf("size=%d", info.Size()))}, nil
}
cksumKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if !info.Mode().IsRegular() {
return nil, nil
}
sum, _, err := cksum(r)
if err != nil {
return nil, err
}
return []KeyVal{KeyVal(fmt.Sprintf("cksum=%d", sum))}, nil
}
hasherKeywordFunc = func(name string, newHash func() hash.Hash) KeywordFunc {
return func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if !info.Mode().IsRegular() {
return nil, nil
}
h := newHash()
if _, err := io.Copy(h, r); err != nil {
return nil, err
}
return []KeyVal{KeyVal(fmt.Sprintf("%s=%x", KeywordSynonym(name), h.Sum(nil)))}, nil
}
}
tartimeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
return []KeyVal{KeyVal(fmt.Sprintf("tar_time=%d.%9.9d", info.ModTime().Unix(), 0))}, nil
}
timeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
tSec := info.ModTime().Unix()
tNano := info.ModTime().Nanosecond()
return []KeyVal{KeyVal(fmt.Sprintf("time=%d.%9.9d", tSec, tNano))}, nil
}
linkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if sys, ok := info.Sys().(*tar.Header); ok {
if sys.Linkname != "" {
linkname, err := govis.Vis(sys.Linkname, DefaultVisFlags)
if err != nil {
return nil, nil
}
return []KeyVal{KeyVal(fmt.Sprintf("link=%s", linkname))}, nil
}
return nil, nil
}
if info.Mode()&os.ModeSymlink != 0 {
str, err := os.Readlink(path)
if err != nil {
return nil, nil
}
linkname, err := govis.Vis(str, DefaultVisFlags)
if err != nil {
return nil, nil
}
return []KeyVal{KeyVal(fmt.Sprintf("link=%s", linkname))}, nil
}
return nil, nil
}
typeKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if info.Mode().IsDir() {
return []KeyVal{"type=dir"}, nil
}
if info.Mode().IsRegular() {
return []KeyVal{"type=file"}, nil
}
if info.Mode()&os.ModeSocket != 0 {
return []KeyVal{"type=socket"}, nil
}
if info.Mode()&os.ModeSymlink != 0 {
return []KeyVal{"type=link"}, nil
}
if info.Mode()&os.ModeNamedPipe != 0 {
return []KeyVal{"type=fifo"}, nil
}
if info.Mode()&os.ModeDevice != 0 {
if info.Mode()&os.ModeCharDevice != 0 {
return []KeyVal{"type=char"}, nil
}
return []KeyVal{"type=block"}, nil
}
return nil, nil
}
)
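The map above, together with the reset-the-reader contract on KeywordFunc, implies a calling pattern like the following. A minimal sketch (not part of the vendored file), assuming only the exported go-mtree API and a hypothetical input file named `testfile`:

package main

import (
	"fmt"
	"os"

	mtree "github.com/vbatts/go-mtree"
)

func main() {
	path := "testfile" // hypothetical input file
	f, err := os.Open(path)
	if err != nil {
		panic(err)
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		panic(err)
	}
	// Each KeywordFunc consumes the payload reader, so rewind between calls.
	for _, kw := range []mtree.Keyword{"size", "sha256digest"} {
		kvs, err := mtree.KeywordFuncs[kw](path, info, f)
		if err != nil {
			panic(err)
		}
		fmt.Println(kvs) // e.g. [size=12], then [sha256digest=...]
		if _, err := f.Seek(0, 0); err != nil {
			panic(err)
		}
	}
}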

69
vendor/github.com/vbatts/go-mtree/keywordfuncs_bsd.go generated vendored Normal file
View File

@@ -0,0 +1,69 @@
// +build darwin freebsd netbsd openbsd
package mtree
import (
"archive/tar"
"fmt"
"io"
"os"
"os/user"
"syscall"
)
var (
flagsKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
// ideally this will pull in from here https://www.freebsd.org/cgi/man.cgi?query=chflags&sektion=2
return nil, nil
}
unameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", hdr.Uname))}, nil
}
stat := info.Sys().(*syscall.Stat_t)
u, err := user.LookupId(fmt.Sprintf("%d", stat.Uid))
if err != nil {
return nil, err
}
return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", u.Username))}, nil
}
gnameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", hdr.Gname))}, nil
}
stat := info.Sys().(*syscall.Stat_t)
g, err := lookupGroupID(fmt.Sprintf("%d", stat.Gid))
if err != nil {
return nil, err
}
return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", g.Name))}, nil
}
uidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", hdr.Uid))}, nil
}
stat := info.Sys().(*syscall.Stat_t)
return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", stat.Uid))}, nil
}
gidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", hdr.Gid))}, nil
}
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", stat.Gid))}, nil
}
return nil, nil
}
nlinkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
return []KeyVal{KeyVal(fmt.Sprintf("nlink=%d", stat.Nlink))}, nil
}
return nil, nil
}
xattrKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
return nil, nil
}
)

107
vendor/github.com/vbatts/go-mtree/keywordfuncs_linux.go generated vendored Normal file
View File

@@ -0,0 +1,107 @@
// +build linux
package mtree
import (
"archive/tar"
"encoding/base64"
"fmt"
"io"
"os"
"os/user"
"syscall"
"github.com/vbatts/go-mtree/pkg/govis"
"github.com/vbatts/go-mtree/xattr"
)
var (
// this is bsd specific https://www.freebsd.org/cgi/man.cgi?query=chflags&sektion=2
flagsKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
return nil, nil
}
unameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", hdr.Uname))}, nil
}
stat := info.Sys().(*syscall.Stat_t)
u, err := user.LookupId(fmt.Sprintf("%d", stat.Uid))
if err != nil {
return nil, nil
}
return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", u.Username))}, nil
}
gnameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", hdr.Gname))}, nil
}
stat := info.Sys().(*syscall.Stat_t)
g, err := lookupGroupID(fmt.Sprintf("%d", stat.Gid))
if err != nil {
return nil, nil
}
return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", g.Name))}, nil
}
uidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", hdr.Uid))}, nil
}
stat := info.Sys().(*syscall.Stat_t)
return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", stat.Uid))}, nil
}
gidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", hdr.Gid))}, nil
}
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", stat.Gid))}, nil
}
return nil, nil
}
nlinkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if stat, ok := info.Sys().(*syscall.Stat_t); ok {
return []KeyVal{KeyVal(fmt.Sprintf("nlink=%d", stat.Nlink))}, nil
}
return nil, nil
}
xattrKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
if len(hdr.Xattrs) == 0 {
return nil, nil
}
klist := []KeyVal{}
for k, v := range hdr.Xattrs {
encKey, err := govis.Vis(k, DefaultVisFlags)
if err != nil {
return nil, nil
}
klist = append(klist, KeyVal(fmt.Sprintf("xattr.%s=%s", encKey, base64.StdEncoding.EncodeToString([]byte(v)))))
}
return klist, nil
}
if !info.Mode().IsRegular() && !info.Mode().IsDir() {
return nil, nil
}
xlist, err := xattr.List(path)
if err != nil {
return nil, nil
}
klist := make([]KeyVal, len(xlist))
for i := range xlist {
data, err := xattr.Get(path, xlist[i])
if err != nil {
return nil, nil
}
encKey, err := govis.Vis(xlist[i], DefaultVisFlags)
if err != nil {
return nil, nil
}
klist[i] = KeyVal(fmt.Sprintf("xattr.%s=%s", encKey, base64.StdEncoding.EncodeToString(data)))
}
return klist, nil
}
)
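Since this Linux implementation base64-encodes each extended attribute's value, a consumer must decode the value to recover the raw bytes. A minimal sketch (the KeyVal below is hypothetical):

package main

import (
	"encoding/base64"
	"fmt"

	mtree "github.com/vbatts/go-mtree"
)

func main() {
	kv := mtree.KeyVal("xattr.user.comment=aGVsbG8=") // hypothetical entry
	raw, err := base64.StdEncoding.DecodeString(kv.Value())
	if err != nil {
		panic(err)
	}
	// Suffix() strips the leading "xattr." prefix from the keyword.
	fmt.Printf("%s => %s\n", kv.Keyword().Suffix(), raw) // user.comment => hello
}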

View File

@@ -0,0 +1,47 @@
// +build !linux,!darwin,!freebsd,!netbsd,!openbsd
package mtree
import (
"archive/tar"
"fmt"
"io"
"os"
)
var (
// this is bsd specific https://www.freebsd.org/cgi/man.cgi?query=chflags&sektion=2
flagsKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
return nil, nil
}
unameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("uname=%s", hdr.Uname))}, nil
}
return nil, nil
}
gnameKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("gname=%s", hdr.Gname))}, nil
}
return nil, nil
}
uidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("uid=%d", hdr.Uid))}, nil
}
return nil, nil
}
gidKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
if hdr, ok := info.Sys().(*tar.Header); ok {
return []KeyVal{KeyVal(fmt.Sprintf("gid=%d", hdr.Gid))}, nil
}
return nil, nil
}
nlinkKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
return nil, nil
}
xattrKeywordFunc = func(path string, info os.FileInfo, r io.Reader) ([]KeyVal, error) {
return nil, nil
}
)

327
vendor/github.com/vbatts/go-mtree/keywords.go generated vendored Normal file
View File

@@ -0,0 +1,327 @@
package mtree
import (
"fmt"
"strings"
"github.com/vbatts/go-mtree/pkg/govis"
)
// DefaultVisFlags is the set of Vis flags used when encoding filenames and
// other similar entries.
const DefaultVisFlags govis.VisFlag = govis.VisWhite | govis.VisOctal | govis.VisGlob
// Keyword is the string name of a keyword, with some convenience functions for
// determining whether it is a default or bsd standard keyword.
// It is the portion of a "keyword=value" pair before the "=".
type Keyword string
// Prefix is the portion of the keyword before a first "." (if present).
//
// Primarily for the xattr use-case, where the keyword `xattr.security.selinux` would have a Prefix of `xattr`.
func (k Keyword) Prefix() Keyword {
if strings.Contains(string(k), ".") {
return Keyword(strings.SplitN(string(k), ".", 2)[0])
}
return k
}
// Suffix is the portion of the keyword after a first ".".
// This is an optional feature.
//
// Primarily for the xattr use-case, where the keyword `xattr.security.selinux` would have a Suffix of `security.selinux`.
func (k Keyword) Suffix() string {
if strings.Contains(string(k), ".") {
return strings.SplitN(string(k), ".", 2)[1]
}
return string(k)
}
// Default returns whether this keyword is in the default set of keywords
func (k Keyword) Default() bool {
return InKeywordSlice(k, DefaultKeywords)
}
// Bsd returns whether this keyword is in the upstream FreeBSD mtree(8)
func (k Keyword) Bsd() bool {
return InKeywordSlice(k, BsdKeywords)
}
// Synonym returns the canonical name for this keyword. This provides the
// same functionality as KeywordSynonym().
func (k Keyword) Synonym() Keyword {
return KeywordSynonym(string(k))
}
// InKeywordSlice checks for the presence of `a` in `list`
func InKeywordSlice(a Keyword, list []Keyword) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
func inKeyValSlice(a KeyVal, list []KeyVal) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
// ToKeywords makes a list of Keyword from a list of string
func ToKeywords(list []string) []Keyword {
ret := make([]Keyword, len(list))
for i := range list {
ret[i] = Keyword(list[i])
}
return ret
}
// FromKeywords makes a list of string from a list of Keyword
func FromKeywords(list []Keyword) []string {
ret := make([]string, len(list))
for i := range list {
ret[i] = string(list[i])
}
return ret
}
// KeyValToString constructs a list of string from the list of KeyVal
func KeyValToString(list []KeyVal) []string {
ret := make([]string, len(list))
for i := range list {
ret[i] = string(list[i])
}
return ret
}
// StringToKeyVals constructs a list of KeyVal from the list of strings, like "keyword=value"
func StringToKeyVals(list []string) []KeyVal {
ret := make([]KeyVal, len(list))
for i := range list {
ret[i] = KeyVal(list[i])
}
return ret
}
// KeyVal is a "keyword=value"
type KeyVal string
// Keyword is the mapping to the available keywords
func (kv KeyVal) Keyword() Keyword {
if !strings.Contains(string(kv), "=") {
return Keyword("")
}
return Keyword(strings.SplitN(strings.TrimSpace(string(kv)), "=", 2)[0])
}
// Value is the data/value portion of "keyword=value"
func (kv KeyVal) Value() string {
if !strings.Contains(string(kv), "=") {
return ""
}
return strings.SplitN(strings.TrimSpace(string(kv)), "=", 2)[1]
}
// NewValue returns a new KeyVal with the newval
func (kv KeyVal) NewValue(newval string) KeyVal {
return KeyVal(fmt.Sprintf("%s=%s", kv.Keyword(), newval))
}
// Equal returns whether two KeyVal are equivalent. This takes
// care of certain odd cases such as tar_mtime, and should be used over
// using == comparisons directly unless you really know what you're
// doing.
func (kv KeyVal) Equal(b KeyVal) bool {
// TODO: Implement handling of tar_mtime.
return kv.Keyword() == b.Keyword() && kv.Value() == b.Value()
}
func keywordPrefixes(kvset []Keyword) []Keyword {
kvs := []Keyword{}
for _, kv := range kvset {
kvs = append(kvs, kv.Prefix())
}
return kvs
}
// keyvalSelector takes a list of KeyVal ("keyword=value") and filters it down
// to only those whose keyword prefix appears in keyset
func keyvalSelector(keyval []KeyVal, keyset []Keyword) []KeyVal {
retList := []KeyVal{}
for _, kv := range keyval {
if InKeywordSlice(kv.Keyword().Prefix(), keywordPrefixes(keyset)) {
retList = append(retList, kv)
}
}
return retList
}
func keyValDifference(this, that []KeyVal) []KeyVal {
if len(this) == 0 {
return that
}
diff := []KeyVal{}
for _, kv := range this {
if !inKeyValSlice(kv, that) {
diff = append(diff, kv)
}
}
return diff
}
func keyValCopy(set []KeyVal) []KeyVal {
ret := make([]KeyVal, len(set))
for i := range set {
ret[i] = set[i]
}
return ret
}
// Has the "keyword" present in the list of KeyVal, and returns the
// corresponding KeyVal, else an empty string.
func Has(keyvals []KeyVal, keyword string) []KeyVal {
return HasKeyword(keyvals, Keyword(keyword))
}
// HasKeyword checks whether "keyword" is present in the list of KeyVal, and
// returns the corresponding KeyVals, else an empty list.
// This match is done on the Prefix of the keyword only.
func HasKeyword(keyvals []KeyVal, keyword Keyword) []KeyVal {
kvs := []KeyVal{}
for i := range keyvals {
if keyvals[i].Keyword().Prefix() == keyword.Prefix() {
kvs = append(kvs, keyvals[i])
}
}
return kvs
}
// MergeSet takes the current setKeyVals, and then applies the entryKeyVals
// such that the entry's values win. The union is returned.
func MergeSet(setKeyVals, entryKeyVals []string) []KeyVal {
retList := StringToKeyVals(setKeyVals)
eKVs := StringToKeyVals(entryKeyVals)
return MergeKeyValSet(retList, eKVs)
}
// MergeKeyValSet does a merge of the two sets of KeyVal, and the KeyVal of
// entryKeyVals win when there is a duplicate Keyword.
func MergeKeyValSet(setKeyVals, entryKeyVals []KeyVal) []KeyVal {
retList := keyValCopy(setKeyVals)
seenKeywords := []Keyword{}
for i := range retList {
word := retList[i].Keyword()
for _, kv := range HasKeyword(entryKeyVals, word) {
// match on the keyword prefix and suffix here
if kv.Keyword() == word {
retList[i] = kv
}
}
seenKeywords = append(seenKeywords, word)
}
for i := range entryKeyVals {
if !InKeywordSlice(entryKeyVals[i].Keyword(), seenKeywords) {
retList = append(retList, entryKeyVals[i])
}
}
return retList
}
var (
// DefaultKeywords has the several default keyword producers (uid, gid,
// mode, nlink, type, size, mtime)
DefaultKeywords = []Keyword{
"size",
"type",
"uid",
"gid",
"mode",
"link",
"nlink",
"time",
}
// DefaultTarKeywords has keywords that should be used when creating a manifest
// from an archive. Evaluating the number of hardlinks has not been implemented yet.
DefaultTarKeywords = []Keyword{
"size",
"type",
"uid",
"gid",
"mode",
"link",
"tar_time",
}
// BsdKeywords is the set of keywords that is only in the upstream FreeBSD mtree
BsdKeywords = []Keyword{
"cksum",
"flags", // this one is really mostly BSD specific ...
"ignore",
"gid",
"gname",
"link",
"md5",
"md5digest",
"mode",
"nlink",
"nochange",
"optional",
"ripemd160digest",
"rmd160",
"rmd160digest",
"sha1",
"sha1digest",
"sha256",
"sha256digest",
"sha384",
"sha384digest",
"sha512",
"sha512digest",
"size",
"tags",
"time",
"type",
"uid",
"uname",
}
// SetKeywords is the default set of keywords calculated for a `/set` SpecialType
SetKeywords = []Keyword{
"uid",
"gid",
}
)
// KeywordSynonym returns the canonical name for keywords that have synonyms,
// and just returns the name provided if there is no synonym. In this way it
// ought to be safe to wrap any keyword name.
func KeywordSynonym(name string) Keyword {
var retname string
switch name {
case "md5":
retname = "md5digest"
case "rmd160":
retname = "ripemd160digest"
case "rmd160digest":
retname = "ripemd160digest"
case "sha1":
retname = "sha1digest"
case "sha256":
retname = "sha256digest"
case "sha384":
retname = "sha384digest"
case "sha512":
retname = "sha512digest"
case "sha512256":
retname = "sha512256digest"
case "xattrs":
retname = "xattr"
default:
retname = name
}
return Keyword(retname)
}
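The Prefix/Suffix split, synonym canonicalization, and set merging defined above combine as in this minimal sketch (assuming only the exported go-mtree API; the values are illustrative):

package main

import (
	"fmt"

	mtree "github.com/vbatts/go-mtree"
)

func main() {
	kv := mtree.KeyVal("xattr.security.selinux=c3lzdGVtX3U=") // illustrative value
	fmt.Println(kv.Keyword().Prefix())          // xattr
	fmt.Println(kv.Keyword().Suffix())          // security.selinux
	fmt.Println(mtree.KeywordSynonym("rmd160")) // ripemd160digest

	// On duplicate keywords, the entry's values win over the /set values.
	set := mtree.StringToKeyVals([]string{"uid=0", "gid=0"})
	entry := mtree.StringToKeyVals([]string{"uid=1000", "mode=0644"})
	fmt.Println(mtree.MergeKeyValSet(set, entry)) // [uid=1000 gid=0 mode=0644]
}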

22
vendor/github.com/vbatts/go-mtree/lchtimes_unix.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
// +build darwin dragonfly freebsd openbsd linux netbsd solaris
package mtree
import (
"os"
"time"
"golang.org/x/sys/unix"
)
func lchtimes(name string, atime time.Time, mtime time.Time) error {
utimes := []unix.Timespec{
unix.NsecToTimespec(atime.UnixNano()),
unix.NsecToTimespec(mtime.UnixNano()),
}
if e := unix.UtimesNanoAt(unix.AT_FDCWD, name, utimes, unix.AT_SYMLINK_NOFOLLOW); e != nil {
return &os.PathError{Op: "chtimes", Path: name, Err: e}
}
return nil
}

View File

@@ -0,0 +1,11 @@
// +build windows
package mtree
import (
"time"
)
func lchtimes(name string, atime time.Time, mtime time.Time) error {
return nil
}

9
vendor/github.com/vbatts/go-mtree/lookup_new.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
// +build go1.7
package mtree
import (
"os/user"
)
var lookupGroupID = user.LookupGroupId

102
vendor/github.com/vbatts/go-mtree/lookup_old.go generated vendored Normal file
View File

@@ -0,0 +1,102 @@
// +build !go1.7
package mtree
import (
"bufio"
"bytes"
"io"
"os"
"strconv"
"strings"
)
const groupFile = "/etc/group"
var colon = []byte{':'}
// Group represents a grouping of users.
//
// On POSIX systems Gid contains a decimal number representing the group ID.
type Group struct {
Gid string // group ID
Name string // group name
}
func lookupGroupID(id string) (*Group, error) {
f, err := os.Open(groupFile)
if err != nil {
return nil, err
}
defer f.Close()
return findGroupID(id, f)
}
func findGroupID(id string, r io.Reader) (*Group, error) {
if v, err := readColonFile(r, matchGroupIndexValue(id, 2)); err != nil {
return nil, err
} else if v != nil {
return v.(*Group), nil
}
return nil, UnknownGroupIDError(id)
}
// lineFunc returns a value, an error, or (nil, nil) to skip the row.
type lineFunc func(line []byte) (v interface{}, err error)
// readColonFile parses r as an /etc/group or /etc/passwd style file, running
// fn for each row. readColonFile returns a value, an error, or (nil, nil) if
// the end of the file is reached without a match.
func readColonFile(r io.Reader, fn lineFunc) (v interface{}, err error) {
bs := bufio.NewScanner(r)
for bs.Scan() {
line := bs.Bytes()
// There's no spec for /etc/passwd or /etc/group, but we try to follow
// the same rules as the glibc parser, which allows comments and blank
// space at the beginning of a line.
line = bytes.TrimSpace(line)
if len(line) == 0 || line[0] == '#' {
continue
}
v, err = fn(line)
if v != nil || err != nil {
return
}
}
return nil, bs.Err()
}
func matchGroupIndexValue(value string, idx int) lineFunc {
var leadColon string
if idx > 0 {
leadColon = ":"
}
substr := []byte(leadColon + value + ":")
return func(line []byte) (v interface{}, err error) {
if !bytes.Contains(line, substr) || bytes.Count(line, colon) < 3 {
return
}
// wheel:*:0:root
parts := strings.SplitN(string(line), ":", 4)
if len(parts) < 4 || parts[0] == "" || parts[idx] != value ||
// If the file contains +foo and you search for "foo", glibc
// returns an "invalid argument" error. Similarly, if you search
// for a gid for a row where the group name starts with "+" or "-",
// glibc fails to find the record.
parts[0][0] == '+' || parts[0][0] == '-' {
return
}
if _, err := strconv.Atoi(parts[2]); err != nil {
return nil, nil
}
return &Group{Name: parts[0], Gid: parts[2]}, nil
}
}
// UnknownGroupIDError is returned by LookupGroupId when
// a group cannot be found.
type UnknownGroupIDError string
func (e UnknownGroupIDError) Error() string {
return "group: unknown groupid " + string(e)
}

105
vendor/github.com/vbatts/go-mtree/parse.go generated vendored Normal file
View File

@@ -0,0 +1,105 @@
package mtree
import (
"bufio"
"io"
"path/filepath"
"strings"
)
// ParseSpec reads a stream of an mtree specification, and returns the DirectoryHierarchy
func ParseSpec(r io.Reader) (*DirectoryHierarchy, error) {
s := bufio.NewScanner(r)
i := int(0)
creator := dhCreator{
DH: &DirectoryHierarchy{},
}
for s.Scan() {
str := s.Text()
trimmedStr := strings.TrimLeftFunc(str, func(c rune) bool {
return c == ' ' || c == '\t'
})
e := Entry{Pos: i}
switch {
case strings.HasPrefix(trimmedStr, "#"):
e.Raw = str
if strings.HasPrefix(trimmedStr, "#mtree") {
e.Type = SignatureType
} else {
e.Type = CommentType
// from here, the comment could be "# key: value" metadata
// or a relative path hint
}
case str == "":
e.Type = BlankType
// nothing else to do here
case strings.HasPrefix(str, "/"):
e.Type = SpecialType
// collapse any escaped newlines
for {
if strings.HasSuffix(str, `\`) {
str = str[:len(str)-1]
s.Scan()
str += s.Text()
} else {
break
}
}
// parse the options
f := strings.Fields(str)
e.Name = f[0]
e.Keywords = StringToKeyVals(f[1:])
if e.Name == "/set" {
creator.curSet = &e
} else if e.Name == "/unset" {
creator.curSet = nil
}
case len(strings.Fields(str)) > 0 && strings.Fields(str)[0] == "..":
e.Type = DotDotType
e.Raw = str
if creator.curDir != nil {
creator.curDir = creator.curDir.Parent
}
// nothing else to do here
case len(strings.Fields(str)) > 0:
// collapse any escaped newlines
for {
if strings.HasSuffix(str, `\`) {
str = str[:len(str)-1]
s.Scan()
str += s.Text()
} else {
break
}
}
// parse the options
f := strings.Fields(str)
e.Name = filepath.Clean(f[0])
if strings.Contains(e.Name, "/") {
e.Type = FullType
} else {
e.Type = RelativeType
}
e.Keywords = StringToKeyVals(f[1:])
// TODO: gather keywords if using tar stream
e.Parent = creator.curDir
for i := range e.Keywords {
kv := KeyVal(e.Keywords[i])
if kv.Keyword() == "type" {
if kv.Value() == "dir" {
creator.curDir = &e
} else {
creator.curEnt = &e
}
}
}
e.Set = creator.curSet
default:
// TODO(vbatts) log a warning?
continue
}
creator.DH.Entries = append(creator.DH.Entries, e)
i++
}
return creator.DH, s.Err()
}
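A minimal sketch of running a spec through ParseSpec (assuming only the exported go-mtree API; the spec body is illustrative):

package main

import (
	"fmt"
	"strings"

	mtree "github.com/vbatts/go-mtree"
)

func main() {
	spec := "#mtree\n" +
		"/set type=file uid=0 gid=0\n" +
		". type=dir\n" +
		"    foo size=3\n" +
		"..\n"
	dh, err := mtree.ParseSpec(strings.NewReader(spec))
	if err != nil {
		panic(err)
	}
	// The entries retain their order: signature, /set, ".", "foo", "..".
	for _, e := range dh.Entries {
		fmt.Printf("%q %v\n", e.Name, e.Keywords)
	}
}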

202
vendor/github.com/vbatts/go-mtree/pkg/govis/COPYING generated vendored Normal file
View File

@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

27
vendor/github.com/vbatts/go-mtree/pkg/govis/README.md generated vendored Normal file
View File

@@ -0,0 +1,27 @@
## `govis` ##
`govis` is a BSD-compatible `vis(3)` and `unvis(3)` encoding implementation
that is unicode aware and written in Go. None of this code comes from the
original BSD code, nor does it come from `go-mtree`'s port of said code,
because 80s BSD code is not very nice to read.
### License ###
`govis` is licensed under the Apache 2.0 license.
```
govis: unicode aware vis(3) encoding implementation
Copyright (C) 2017 SUSE LLC.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
```

39
vendor/github.com/vbatts/go-mtree/pkg/govis/govis.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
/*
* govis: unicode aware vis(3) encoding implementation
* Copyright (C) 2017 SUSE LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package govis
// VisFlag manipulates how the characters are encoded/decoded
type VisFlag uint
// vis() has a variety of flags when deciding what encodings to use. While
// mtree only uses one set of flags, implementing them all is necessary in
// order to have compatibility with BSD's vis() and unvis() commands.
const (
VisOctal VisFlag = (1 << iota) // VIS_OCTAL: Use octal \ddd format.
VisCStyle // VIS_CSTYLE: Use \[nrft0..] where appropriate.
VisSpace // VIS_SP: Also encode space.
VisTab // VIS_TAB: Also encode tab.
VisNewline // VIS_NL: Also encode newline.
VisSafe // VIS_SAFE: Encode unsafe characters.
VisNoSlash // VIS_NOSLASH: Inhibit printing '\'.
VisHTTPStyle // VIS_HTTPSTYLE: HTTP-style escape %xx.
VisGlob // VIS_GLOB: Encode glob(3) magics.
visMask VisFlag = (1 << iota) - 1 // Mask of all flags.
VisWhite VisFlag = (VisSpace | VisTab | VisNewline)
)
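A small sketch of how these bit flags compose; the union below mirrors the DefaultVisFlags constant defined in keywords.go earlier in this diff:

package main

import (
	"fmt"

	"github.com/vbatts/go-mtree/pkg/govis"
)

func main() {
	flags := govis.VisWhite | govis.VisOctal | govis.VisGlob
	// VisWhite is itself a union, so the space/tab/newline bits are all set.
	fmt.Println(flags&govis.VisSpace == govis.VisSpace)         // true
	fmt.Println(flags&govis.VisHTTPStyle == govis.VisHTTPStyle) // false
}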

294
vendor/github.com/vbatts/go-mtree/pkg/govis/unvis.go generated vendored Normal file
View File

@@ -0,0 +1,294 @@
/*
* govis: unicode aware vis(3) encoding implementation
* Copyright (C) 2017 SUSE LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package govis
import (
"fmt"
"strconv"
"unicode"
)
// unvisParser stores the current state of the token parser.
type unvisParser struct {
tokens []rune
idx int
flag VisFlag
}
// Next moves the index to the next character.
func (p *unvisParser) Next() {
p.idx++
}
// Peek gets the current token.
func (p *unvisParser) Peek() (rune, error) {
if p.idx >= len(p.tokens) {
return unicode.ReplacementChar, fmt.Errorf("tried to read past end of token list")
}
return p.tokens[p.idx], nil
}
// End returns whether all of the tokens have been consumed.
func (p *unvisParser) End() bool {
return p.idx >= len(p.tokens)
}
func newParser(input string, flag VisFlag) *unvisParser {
return &unvisParser{
tokens: []rune(input),
idx: 0,
flag: flag,
}
}
// While a recursive descent parser is overkill for parsing simple escape
// codes, this is IMO much easier to read than the ugly 80s coroutine code used
// by the original unvis(3) parser. Here's the EBNF for an unvis sequence:
//
// <input> ::= (<rune>)*
// <rune> ::= ("\" <escape-sequence>) | ("%" <escape-hex>) | <plain-rune>
// <plain-rune> ::= any rune
// <escape-sequence> ::= ("x" <escape-hex>) | ("M" <escape-meta>) | ("^" <escape-ctrl) | <escape-cstyle> | <escape-octal>
// <escape-meta> ::= ("-" <escape-meta1>) | ("^" <escape-ctrl>)
// <escape-meta1> ::= any rune
// <escape-ctrl> ::= "?" | any rune
// <escape-cstyle> ::= "\" | "n" | "r" | "b" | "a" | "v" | "t" | "f"
// <escape-hex> ::= [0-9a-f] [0-9a-f]
// <escape-octal> ::= [0-7] ([0-7] ([0-7])?)?
func unvisPlainRune(p *unvisParser) ([]byte, error) {
ch, err := p.Peek()
if err != nil {
return nil, fmt.Errorf("plain rune: %c", ch)
}
p.Next()
// XXX: Maybe we should not be converting to runes and then back to strings
// here. Are we sure that the byte-for-byte representation is the
// same? If the bytes change, then using these strings for paths will
// break...
str := string(ch)
return []byte(str), nil
}
func unvisEscapeCStyle(p *unvisParser) ([]byte, error) {
ch, err := p.Peek()
if err != nil {
return nil, fmt.Errorf("escape hex: %s", err)
}
output := ""
switch ch {
case 'n':
output = "\n"
case 'r':
output = "\r"
case 'b':
output = "\b"
case 'a':
output = "\x07"
case 'v':
output = "\v"
case 't':
output = "\t"
case 'f':
output = "\f"
case 's':
output = " "
case 'E':
output = "\x1b"
case '\n':
// Hidden newline.
case '$':
// Hidden marker.
default:
// XXX: We should probably allow falling through and return "\" here...
return nil, fmt.Errorf("escape cstyle: unknown escape character: %q", ch)
}
p.Next()
return []byte(output), nil
}
func unvisEscapeDigits(p *unvisParser, base int, force bool) ([]byte, error) {
var code int
for i := int(0xFF); i > 0; i /= base {
ch, err := p.Peek()
if err != nil {
if !force && i != 0xFF {
break
}
return nil, fmt.Errorf("escape base %d: %s", base, err)
}
digit, err := strconv.ParseInt(string(ch), base, 8)
if err != nil {
if !force && i != 0xFF {
break
}
return nil, fmt.Errorf("escape base %d: could not parse digit: %s", base, err)
}
code = (code * base) + int(digit)
p.Next()
}
if code > unicode.MaxLatin1 {
return nil, fmt.Errorf("escape base %d: code %q outside latin-1 encoding", base, code)
}
char := byte(code & 0xFF)
return []byte{char}, nil
}
func unvisEscapeCtrl(p *unvisParser, mask byte) ([]byte, error) {
ch, err := p.Peek()
if err != nil {
return nil, fmt.Errorf("escape ctrl: %s", err)
}
if ch > unicode.MaxLatin1 {
return nil, fmt.Errorf("escape ctrl: code %q outside latin-1 encoding", ch)
}
char := byte(ch) & 0x1f
if ch == '?' {
char = 0x7f
}
p.Next()
return []byte{mask | char}, nil
}
func unvisEscapeMeta(p *unvisParser) ([]byte, error) {
ch, err := p.Peek()
if err != nil {
return nil, fmt.Errorf("escape meta: %s", err)
}
mask := byte(0x80)
switch ch {
case '^':
// The same as "\^..." except we apply a mask.
p.Next()
return unvisEscapeCtrl(p, mask)
case '-':
p.Next()
ch, err := p.Peek()
if err != nil {
return nil, fmt.Errorf("escape meta1: %s", err)
}
if ch > unicode.MaxLatin1 {
return nil, fmt.Errorf("escape meta1: code %q outside latin-1 encoding", ch)
}
// Add mask to character.
p.Next()
return []byte{mask | byte(ch)}, nil
}
return nil, fmt.Errorf("escape meta: unknown escape char: %s", err)
}
func unvisEscapeSequence(p *unvisParser) ([]byte, error) {
ch, err := p.Peek()
if err != nil {
return nil, fmt.Errorf("escape sequence: %s", err)
}
switch ch {
case '\\':
p.Next()
return []byte("\\"), nil
case '0', '1', '2', '3', '4', '5', '6', '7':
return unvisEscapeDigits(p, 8, false)
case 'x':
p.Next()
return unvisEscapeDigits(p, 16, true)
case '^':
p.Next()
return unvisEscapeCtrl(p, 0x00)
case 'M':
p.Next()
return unvisEscapeMeta(p)
default:
return unvisEscapeCStyle(p)
}
}
func unvisRune(p *unvisParser) ([]byte, error) {
ch, err := p.Peek()
if err != nil {
return nil, fmt.Errorf("rune: %s", err)
}
switch ch {
case '\\':
p.Next()
return unvisEscapeSequence(p)
case '%':
// % HEX HEX only applies to HTTPStyle encodings.
if p.flag&VisHTTPStyle == VisHTTPStyle {
p.Next()
return unvisEscapeDigits(p, 16, true)
}
fallthrough
default:
return unvisPlainRune(p)
}
}
func unvis(p *unvisParser) (string, error) {
var output []byte
for !p.End() {
ch, err := unvisRune(p)
if err != nil {
return "", fmt.Errorf("input: %s", err)
}
output = append(output, ch...)
}
return string(output), nil
}
// Unvis takes a string formatted with the given Vis flags (though only the
// VisHTTPStyle flag is checked) and output the un-encoded version of the
// encoded string. An error is returned if any escape sequences in the input
// string were invalid.
func Unvis(input string, flag VisFlag) (string, error) {
// TODO: Check all of the VisFlag bits.
p := newParser(input, flag)
output, err := unvis(p)
if err != nil {
return "", fmt.Errorf("unvis: %s", err)
}
if !p.End() {
return "", fmt.Errorf("unvis: trailing characters at end of input")
}
return output, nil
}
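For example, with VisHTTPStyle set the %xx form decodes, and octal escapes decode regardless of flags. A minimal sketch:

package main

import (
	"fmt"

	"github.com/vbatts/go-mtree/pkg/govis"
)

func main() {
	dec, err := govis.Unvis("hello%20world", govis.VisHTTPStyle)
	if err != nil {
		panic(err)
	}
	fmt.Println(dec) // hello world

	dec, err = govis.Unvis(`with\040octal`, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(dec) // with octal
}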

177
vendor/github.com/vbatts/go-mtree/pkg/govis/vis.go generated vendored Normal file
View File

@@ -0,0 +1,177 @@
/*
* govis: unicode aware vis(3) encoding implementation
* Copyright (C) 2017 SUSE LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package govis
import (
"fmt"
"unicode"
)
func isunsafe(ch rune) bool {
return ch == '\b' || ch == '\007' || ch == '\r'
}
func isglob(ch rune) bool {
return ch == '*' || ch == '?' || ch == '[' || ch == '#'
}
// ishttp is defined by RFC 1808.
func ishttp(ch rune) bool {
// RFC1808 does not really consider characters outside of ASCII, so just to
// be safe always treat characters outside the ASCII character set as "not
// HTTP".
if ch > unicode.MaxASCII {
return false
}
return unicode.IsDigit(ch) || unicode.IsLetter(ch) ||
// Safe characters.
ch == '$' || ch == '-' || ch == '_' || ch == '.' || ch == '+' ||
// Extra characters.
ch == '!' || ch == '*' || ch == '\'' || ch == '(' ||
ch == ')' || ch == ','
}
func isgraph(ch rune) bool {
return unicode.IsGraphic(ch) && !unicode.IsSpace(ch) && ch <= unicode.MaxASCII
}
// vis converts a single *byte* into its encoding. While Go supports the
// concept of runes (and thus native utf-8 parsing), we operate on bytes to
// make sure the bit-stream is completely maintained through an Unvis(Vis(...))
// round-trip. The downside is that Vis() will never output unicode -- but on
// the plus side this is actually a benefit on the encoding side (it will
// always work with the simple unvis(3) implementation). It also means that we
// don't have to worry about different multi-byte encodings.
func vis(b byte, flag VisFlag) (string, error) {
// Treat the single-byte character as a rune.
ch := rune(b)
// XXX: This is quite a horrible thing to support.
if flag&VisHTTPStyle == VisHTTPStyle {
if !ishttp(ch) {
return "%" + fmt.Sprintf("%.2X", ch), nil
}
}
// Figure out if the character doesn't need to be encoded. Effectively, we
// encode most "normal" (graphical) characters as themselves unless we have
// been specifically asked not to. Note though that we *ALWAYS* encode
// everything outside ASCII.
// TODO: Switch this to much more logical code.
if ch > unicode.MaxASCII {
/* ... */
} else if flag&VisGlob == VisGlob && isglob(ch) {
/* ... */
} else if isgraph(ch) ||
(flag&VisSpace != VisSpace && ch == ' ') ||
(flag&VisTab != VisTab && ch == '\t') ||
(flag&VisNewline != VisNewline && ch == '\n') ||
(flag&VisSafe != 0 && isunsafe(ch)) {
encoded := string(ch)
if ch == '\\' && flag&VisNoSlash == 0 {
encoded += "\\"
}
return encoded, nil
}
// Try to use C-style escapes first.
if flag&VisCStyle == VisCStyle {
switch ch {
case ' ':
return "\\s", nil
case '\n':
return "\\n", nil
case '\r':
return "\\r", nil
case '\b':
return "\\b", nil
case '\a':
return "\\a", nil
case '\v':
return "\\v", nil
case '\t':
return "\\t", nil
case '\f':
return "\\f", nil
case '\x00':
// Output octal just to be safe.
return "\\000", nil
}
}
// For graphical characters we generate octal output (and also if it's
// being forced by the caller's flags). Also spaces should always be
// encoded as octal.
if flag&VisOctal == VisOctal || isgraph(ch) || ch&0x7f == ' ' {
// Always output three-character octal just to be safe.
return fmt.Sprintf("\\%.3o", ch), nil
}
// Now we have to output meta or ctrl escapes. As far as I can tell, this
// is not actually defined by any standard -- so this logic is basically
// copied from the original vis(3) implementation. Hopefully nobody
// actually relies on this (octal and hex are better).
encoded := ""
if flag&VisNoSlash == 0 {
encoded += "\\"
}
// Meta characters have 0x80 set, but are otherwise identical to control
// characters.
if b&0x80 != 0 {
b &= 0x7f
encoded += "M"
}
if unicode.IsControl(rune(b)) {
encoded += "^"
if b == 0x7f {
encoded += "?"
} else {
encoded += fmt.Sprintf("%c", b+'@')
}
} else {
encoded += fmt.Sprintf("-%c", b)
}
return encoded, nil
}
// Vis encodes the provided string to a BSD-compatible encoding using BSD's
// vis() flags. However, it will correctly handle multi-byte encoding (which is
// not done properly by BSD's vis implementation).
func Vis(src string, flag VisFlag) (string, error) {
if flag&visMask != flag {
return "", fmt.Errorf("vis: flag %q contains unknown or unsupported flags", flag)
}
output := ""
for _, ch := range []byte(src) {
encodedCh, err := vis(ch, flag)
if err != nil {
return "", err
}
output += encodedCh
}
return output, nil
}
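A round-trip sketch tying Vis and Unvis together under the whitespace and octal flags (the expected output follows from the encoding rules above):

package main

import (
	"fmt"

	"github.com/vbatts/go-mtree/pkg/govis"
)

func main() {
	flags := govis.VisWhite | govis.VisOctal
	enc, err := govis.Vis("file name\twith specials", flags)
	if err != nil {
		panic(err)
	}
	fmt.Println(enc) // file\040name\011with\040specials
	dec, err := govis.Unvis(enc, flags)
	if err != nil {
		panic(err)
	}
	fmt.Println(dec == "file name\twith specials") // true
}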

11
vendor/github.com/vbatts/go-mtree/releases.md generated vendored Normal file
View File

@@ -0,0 +1,11 @@
# How to do releases:
* Create a changeset with an update to `version.go`
- this commit will be tagged
- add another commit putting it back with '-dev' appended
* gpg sign the commit with an incremented version, like 'vX.Y.Z'
* Push the tag
* Create a "release" from the tag on github
- include the binaries from `make build.arches`
- write about notable changes, and their contributors
- PRs merged for the release

18
vendor/github.com/vbatts/go-mtree/stat_unix.go generated vendored Normal file
View File

@@ -0,0 +1,18 @@
// +build !windows
package mtree
import (
"os"
"syscall"
)
func statIsUID(stat os.FileInfo, uid int) bool {
statT := stat.Sys().(*syscall.Stat_t)
return statT.Uid == uint32(uid)
}
func statIsGID(stat os.FileInfo, gid int) bool {
statT := stat.Sys().(*syscall.Stat_t)
return statT.Gid == uint32(gid)
}

12
vendor/github.com/vbatts/go-mtree/stat_windows.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
// +build windows
package mtree
import "os"
func statIsUID(stat os.FileInfo, uid int) bool {
return false
}
func statIsGID(stat os.FileInfo, uid int) bool {
return false
}

461
vendor/github.com/vbatts/go-mtree/tar.go generated vendored Normal file
View File

@@ -0,0 +1,461 @@
package mtree
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"github.com/vbatts/go-mtree/pkg/govis"
)
// Streamer creates a file hierarchy out of a tar stream
type Streamer interface {
io.ReadCloser
Hierarchy() (*DirectoryHierarchy, error)
}
var tarDefaultSetKeywords = []KeyVal{
"type=file",
"flags=none",
"mode=0664",
}
// NewTarStreamer streams a tar archive and creates a file hierarchy based off
// of the tar metadata headers
func NewTarStreamer(r io.Reader, excludes []ExcludeFunc, keywords []Keyword) Streamer {
pR, pW := io.Pipe()
ts := &tarStream{
pipeReader: pR,
pipeWriter: pW,
creator: dhCreator{DH: &DirectoryHierarchy{}},
teeReader: io.TeeReader(r, pW),
tarReader: tar.NewReader(pR),
keywords: keywords,
hardlinks: map[string][]string{},
excludes: excludes,
}
go ts.readHeaders()
return ts
}
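// A usage sketch (not part of this file): the Streamer must be drained and
// closed before asking for the hierarchy. Assumes `f` is an io.Reader over a
// tar stream, and uses only the exported go-mtree API:
//
//	ts := mtree.NewTarStreamer(f, nil, mtree.DefaultTarKeywords)
//	if _, err := io.Copy(ioutil.Discard, ts); err != nil && err != io.EOF {
//		// handle err
//	}
//	if err := ts.Close(); err != nil {
//		// handle err
//	}
//	dh, err := ts.Hierarchy()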
type tarStream struct {
root *Entry
hardlinks map[string][]string
creator dhCreator
pipeReader *io.PipeReader
pipeWriter *io.PipeWriter
teeReader io.Reader
tarReader *tar.Reader
keywords []Keyword
excludes []ExcludeFunc
err error
}
func (ts *tarStream) readHeaders() {
// remove "time" keyword
notimekws := []Keyword{}
for _, kw := range ts.keywords {
if !InKeywordSlice(kw, notimekws) {
if kw == "time" {
if !InKeywordSlice("tar_time", ts.keywords) {
notimekws = append(notimekws, "tar_time")
}
} else {
notimekws = append(notimekws, kw)
}
}
}
ts.keywords = notimekws
// We have to start with the directory we're in, and anything beyond these
// items is determined at the time a tar is extracted.
ts.root = &Entry{
Name: ".",
Type: RelativeType,
Prev: &Entry{
Raw: "# .",
Type: CommentType,
},
Set: nil,
Keywords: []KeyVal{"type=dir"},
}
// insert signature and metadata comments first (user, machine, tree, date)
for _, e := range signatureEntries("<user specified tar archive>") {
e.Pos = len(ts.creator.DH.Entries)
ts.creator.DH.Entries = append(ts.creator.DH.Entries, e)
}
// insert keyword metadata next
for _, e := range keywordEntries(ts.keywords) {
e.Pos = len(ts.creator.DH.Entries)
ts.creator.DH.Entries = append(ts.creator.DH.Entries, e)
}
hdrloop:
for {
hdr, err := ts.tarReader.Next()
if err != nil {
ts.pipeReader.CloseWithError(err)
return
}
for _, ex := range ts.excludes {
if ex(hdr.Name, hdr.FileInfo()) {
continue hdrloop
}
}
// Because the content of the file may need to be read by several
// KeywordFuncs, it needs to be an io.Seeker as well. So, just reading from
// ts.tarReader is not enough.
tmpFile, err := ioutil.TempFile("", "ts.payload.")
if err != nil {
ts.pipeReader.CloseWithError(err)
return
}
// for good measure
if err := tmpFile.Chmod(0600); err != nil {
tmpFile.Close()
os.Remove(tmpFile.Name())
ts.pipeReader.CloseWithError(err)
return
}
if _, err := io.Copy(tmpFile, ts.tarReader); err != nil {
tmpFile.Close()
os.Remove(tmpFile.Name())
ts.pipeReader.CloseWithError(err)
return
}
// Alright, it's either file or directory
encodedName, err := govis.Vis(filepath.Base(hdr.Name), DefaultVisFlags)
if err != nil {
tmpFile.Close()
os.Remove(tmpFile.Name())
ts.pipeReader.CloseWithError(err)
return
}
e := Entry{
Name: encodedName,
Type: RelativeType,
}
// Keep track of which files are hardlinks so we can resolve them later
if hdr.Typeflag == tar.TypeLink {
keyFunc := KeywordFuncs["link"]
kvs, err := keyFunc(hdr.Name, hdr.FileInfo(), nil)
if err != nil {
logrus.Warn(err)
break // XXX is breaking an okay thing to do here?
}
linkname, err := govis.Unvis(KeyVal(kvs[0]).Value(), DefaultVisFlags)
if err != nil {
logrus.Warn(err)
break // XXX is breaking an okay thing to do here?
}
if _, ok := ts.hardlinks[linkname]; !ok {
ts.hardlinks[linkname] = []string{hdr.Name}
} else {
ts.hardlinks[linkname] = append(ts.hardlinks[linkname], hdr.Name)
}
}
// now collect keywords on the file
for _, keyword := range ts.keywords {
if keyFunc, ok := KeywordFuncs[keyword.Prefix()]; ok {
// We can't extract directories onto disk, so the "size" keyword
// is irrelevant for now
if hdr.FileInfo().IsDir() && keyword == "size" {
continue
}
kvs, err := keyFunc(hdr.Name, hdr.FileInfo(), tmpFile)
if err != nil {
ts.setErr(err)
}
// for good measure, check that we actually get a value for a keyword
if len(kvs) > 0 && kvs[0] != "" {
e.Keywords = append(e.Keywords, kvs[0])
}
// don't forget to reset the reader
if _, err := tmpFile.Seek(0, 0); err != nil {
tmpFile.Close()
os.Remove(tmpFile.Name())
ts.pipeReader.CloseWithError(err)
return
}
}
}
// collect meta-set keywords for a directory so that we can build the
// actual sets in `flatten`
if hdr.FileInfo().IsDir() {
s := Entry{
Name: "meta-set",
Type: SpecialType,
}
for _, setKW := range SetKeywords {
if keyFunc, ok := KeywordFuncs[setKW.Prefix()]; ok {
kvs, err := keyFunc(hdr.Name, hdr.FileInfo(), tmpFile)
if err != nil {
ts.setErr(err)
}
for _, kv := range kvs {
if kv != "" {
s.Keywords = append(s.Keywords, kv)
}
}
if _, err := tmpFile.Seek(0, 0); err != nil {
tmpFile.Close()
os.Remove(tmpFile.Name())
ts.pipeReader.CloseWithError(err)
}
}
}
e.Set = &s
}
err = populateTree(ts.root, &e, hdr)
if err != nil {
ts.setErr(err)
}
tmpFile.Close()
os.Remove(tmpFile.Name())
}
}
// populateTree creates a pseudo file tree hierarchy using an Entry's Parent and
// Children fields. When examining the Entry e to insert in the tree, we
// determine if the path to that Entry exists yet. If it does, insert it in the
// appropriate position in the tree. If not, create a path up until the Entry's
// directory that it is contained in. Then, insert the Entry.
// root: the "." Entry
// e: the Entry we are looking to insert
// hdr: the tar header struct associated with e
func populateTree(root, e *Entry, hdr *tar.Header) error {
if root == nil || e == nil {
return fmt.Errorf("cannot populate or insert nil Entry's")
} else if root.Prev == nil {
return fmt.Errorf("root needs to be an Entry associated with a directory")
}
isDir := hdr.FileInfo().IsDir()
wd := filepath.Clean(hdr.Name)
if !isDir {
// directory up until the actual file
wd = filepath.Dir(wd)
if wd == "." {
root.Children = append([]*Entry{e}, root.Children...)
e.Parent = root
return nil
}
}
dirNames := strings.Split(wd, "/")
parent := root
for _, name := range dirNames[:] {
encoded, err := govis.Vis(name, DefaultVisFlags)
if err != nil {
return err
}
if node := parent.Descend(encoded); node == nil {
// Entry for directory doesn't exist in tree relative to root.
// We don't know if this directory is an actual tar header (because a
// user could have just specified a path to a deep file), so we must
// specify this placeholder directory as a "type=dir", and Set=nil.
newEntry := Entry{
Name: encoded,
Type: RelativeType,
Parent: parent,
Keywords: []KeyVal{"type=dir"}, // temp data
Set: nil, // temp data
}
pathname, err := newEntry.Path()
if err != nil {
return err
}
newEntry.Prev = &Entry{
Type: CommentType,
Raw: "# " + pathname,
}
parent.Children = append(parent.Children, &newEntry)
parent = &newEntry
} else {
// Entry for directory exists in tree, just keep going
parent = node
}
}
if !isDir {
parent.Children = append([]*Entry{e}, parent.Children...)
e.Parent = parent
} else {
// fill in the actual data from e
parent.Keywords = e.Keywords
parent.Set = e.Set
}
return nil
}
// After constructing a pseudo file hierarchy tree, we want to "flatten" this
// tree by putting the Entries into a slice with appropriate positioning.
// root: the "head" of the sub-tree to flatten
// creator: a dhCreator that helps with the '/set' keyword
// keywords: keywords specified by the user that should be evaluated
func flatten(root *Entry, creator *dhCreator, keywords []Keyword) {
if root == nil || creator == nil {
return
}
if root.Prev != nil {
// root.Prev != nil implies root is a directory
creator.DH.Entries = append(creator.DH.Entries,
Entry{
Type: BlankType,
Pos: len(creator.DH.Entries),
})
root.Prev.Pos = len(creator.DH.Entries)
creator.DH.Entries = append(creator.DH.Entries, *root.Prev)
if root.Set != nil {
// Check if we need a new set
consolidatedKeys := keyvalSelector(append(tarDefaultSetKeywords, root.Set.Keywords...), keywords)
if creator.curSet == nil {
creator.curSet = &Entry{
Type: SpecialType,
Name: "/set",
Keywords: consolidatedKeys,
Pos: len(creator.DH.Entries),
}
creator.DH.Entries = append(creator.DH.Entries, *creator.curSet)
} else {
needNewSet := false
for _, k := range root.Set.Keywords {
if !inKeyValSlice(k, creator.curSet.Keywords) {
needNewSet = true
break
}
}
if needNewSet {
creator.curSet = &Entry{
Name: "/set",
Type: SpecialType,
Pos: len(creator.DH.Entries),
Keywords: consolidatedKeys,
}
creator.DH.Entries = append(creator.DH.Entries, *creator.curSet)
}
}
} else if creator.curSet != nil {
// Reaching this point implies that the Entry's set has not been (and was
// not supposed to be) evaluated, so we need to reset curSet
creator.DH.Entries = append(creator.DH.Entries, Entry{
Name: "/unset",
Type: SpecialType,
Pos: len(creator.DH.Entries),
})
creator.curSet = nil
}
}
root.Set = creator.curSet
if creator.curSet != nil {
root.Keywords = keyValDifference(root.Keywords, creator.curSet.Keywords)
}
root.Pos = len(creator.DH.Entries)
creator.DH.Entries = append(creator.DH.Entries, *root)
for _, c := range root.Children {
flatten(c, creator, keywords)
}
if root.Prev != nil {
// Show a comment when stepping out
root.Prev.Pos = len(creator.DH.Entries)
creator.DH.Entries = append(creator.DH.Entries, *root.Prev)
dotEntry := Entry{
Type: DotDotType,
Name: "..",
Pos: len(creator.DH.Entries),
}
creator.DH.Entries = append(creator.DH.Entries, dotEntry)
}
return
}
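As a rough sketch of flatten's output (an editor's illustration; the exact keywords depend on the selected keyword set), a tree holding one directory a that contains one file f is emitted in this order, with the trailing comment and ".." marking the step back out of the directory:

(blank line)
# a
/set <consolidated set keywords>
a type=dir <keywords not covered by the set>
f <keywords not covered by the set>
# a
..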
// resolveHardlinks walks an Entry tree, finds the Entries associated with
// hardlinks, and fills them in with the actual data from the base file.
func resolveHardlinks(root *Entry, hardlinks map[string][]string, countlinks bool) {
originals := make(map[string]*Entry)
for base, links := range hardlinks {
var basefile *Entry
if seen, ok := originals[base]; !ok {
basefile = root.Find(base)
if basefile == nil {
logrus.Printf("%s does not exist in this tree\n", base)
continue
}
originals[base] = basefile
} else {
basefile = seen
}
for _, link := range links {
linkfile := root.Find(link)
if linkfile == nil {
logrus.Printf("%s does not exist in this tree\n", link)
continue
}
linkfile.Keywords = basefile.Keywords
if countlinks {
linkfile.Keywords = append(linkfile.Keywords, KeyVal(fmt.Sprintf("nlink=%d", len(links)+1)))
}
}
if countlinks {
basefile.Keywords = append(basefile.Keywords, KeyVal(fmt.Sprintf("nlink=%d", len(links)+1)))
}
}
}
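A tiny stand-alone demo of the nlink arithmetic above (editor's illustration, stdlib only; the map shape matches the hardlinks argument): a base file plus its collected links all share nlink = len(links) + 1, since the base file itself counts as one link.

package main

import "fmt"

func main() {
	// hardlinks maps a base file to the paths that link to it,
	// as collected while reading the tar stream.
	hardlinks := map[string][]string{
		"a/file": {"a/link1", "a/link2"},
	}
	for base, links := range hardlinks {
		nlink := len(links) + 1 // the base file itself counts too
		fmt.Printf("%s nlink=%d\n", base, nlink)
		for _, l := range links {
			fmt.Printf("%s nlink=%d (keywords copied from %s)\n", l, nlink, base)
		}
	}
}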
// filter takes a pointer to an Entry and returns a slice of Entries that
// satisfy the predicate p.
func filter(root *Entry, p func(*Entry) bool) []Entry {
if root != nil {
var validEntrys []Entry
if len(root.Children) > 0 || root.Prev != nil {
for _, c := range root.Children {
// filter the sub-directory
if c.Prev != nil {
validEntrys = append(validEntrys, filter(c, p)...)
}
if p(c) {
if c.Prev == nil {
validEntrys = append([]Entry{*c}, validEntrys...)
} else {
validEntrys = append(validEntrys, *c)
}
}
}
return validEntrys
}
}
return nil
}
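A minimal stand-alone analogue of this predicate filter (editor's illustration; it uses an isDir flag where the package keys recursion on c.Prev != nil): recurse into subdirectories, then keep every entry satisfying the predicate.

package main

import "fmt"

type entry struct {
	name     string
	isDir    bool
	children []*entry
}

func filterEntries(root *entry, p func(*entry) bool) []*entry {
	var out []*entry
	for _, c := range root.children {
		if c.isDir {
			// filter the sub-directory first
			out = append(out, filterEntries(c, p)...)
		}
		if p(c) {
			out = append(out, c)
		}
	}
	return out
}

func main() {
	root := &entry{name: ".", isDir: true, children: []*entry{
		{name: "a.txt"},
		{name: "sub", isDir: true, children: []*entry{{name: "b.txt"}}},
	}}
	for _, e := range filterEntries(root, func(e *entry) bool { return !e.isDir }) {
		fmt.Println(e.name) // a.txt, b.txt
	}
}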
func (ts *tarStream) setErr(err error) {
ts.err = err
}
func (ts *tarStream) Read(p []byte) (n int, err error) {
return ts.teeReader.Read(p)
}
func (ts *tarStream) Close() error {
return ts.pipeReader.Close()
}
// Hierarchy returns the DirectoryHierarchy of the archive. It flattens the
// Entry tree before returning the DirectoryHierarchy
func (ts *tarStream) Hierarchy() (*DirectoryHierarchy, error) {
if ts.err != nil && ts.err != io.EOF {
return nil, ts.err
}
if ts.root == nil {
return nil, fmt.Errorf("root Entry not found, nothing to flatten")
}
resolveHardlinks(ts.root, ts.hardlinks, InKeywordSlice(Keyword("nlink"), ts.keywords))
flatten(ts.root, &ts.creator, ts.keywords)
return ts.creator.DH, nil
}
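Putting the streamer together, a hedged usage sketch (it assumes go-mtree's exported NewTarStreamer constructor and DefaultTarKeywords, which are part of the package's public API though not shown in this hunk; "archive.tar" is a hypothetical path): drain the stream, close it, then ask for the hierarchy.

package main

import (
	"io"
	"os"

	"github.com/vbatts/go-mtree"
)

func main() {
	fh, err := os.Open("archive.tar") // hypothetical archive path
	if err != nil {
		panic(err)
	}
	defer fh.Close()
	str := mtree.NewTarStreamer(fh, nil, mtree.DefaultTarKeywords)
	// The stream must be fully drained before a hierarchy is available.
	if _, err := io.Copy(io.Discard, str); err != nil && err != io.EOF {
		panic(err)
	}
	if err := str.Close(); err != nil {
		panic(err)
	}
	dh, err := str.Hierarchy()
	if err != nil {
		panic(err)
	}
	dh.WriteTo(os.Stdout) // print the manifest derived from the archive
}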

154
vendor/github.com/vbatts/go-mtree/update.go generated vendored Normal file

@@ -0,0 +1,154 @@
package mtree
import (
"container/heap"
"os"
"sort"
"github.com/sirupsen/logrus"
)
// DefaultUpdateKeywords is the default set of keywords that can be applied as updates to files on disk
var DefaultUpdateKeywords = []Keyword{
"uid",
"gid",
"mode",
"xattr",
"link",
"time",
}
// Update attempts to set, under the root directory path, the attributes named by `keywords` to the values recorded in the dh DirectoryHierarchy.
func Update(root string, dh *DirectoryHierarchy, keywords []Keyword, fs FsEval) ([]InodeDelta, error) {
creator := dhCreator{DH: dh}
curDir, err := os.Getwd()
if err == nil {
defer os.Chdir(curDir)
}
if err := os.Chdir(root); err != nil {
return nil, err
}
sort.Sort(byPos(creator.DH.Entries))
// Directory mtime updates are deferred on a heap, so they can be unwound
// most-specific (deepest) path first
h := &pathUpdateHeap{}
heap.Init(h)
results := []InodeDelta{}
for i, e := range creator.DH.Entries {
switch e.Type {
case SpecialType:
if e.Name == "/set" {
creator.curSet = &creator.DH.Entries[i]
} else if e.Name == "/unset" {
creator.curSet = nil
}
logrus.Debugf("%#v", e)
continue
case RelativeType, FullType:
e.Set = creator.curSet
pathname, err := e.Path()
if err != nil {
return nil, err
}
// filter the keywords to update on the file, from the keywords available for this entry:
var kvToUpdate []KeyVal
kvToUpdate = keyvalSelector(e.AllKeys(), keywords)
logrus.Debugf("kvToUpdate(%q): %#v", pathname, kvToUpdate)
for _, kv := range kvToUpdate {
if !InKeywordSlice(kv.Keyword().Prefix(), keywordPrefixes(keywords)) {
continue
}
logrus.Debugf("finding function for %q (%q)", kv.Keyword(), kv.Keyword().Prefix())
ukFunc, ok := UpdateKeywordFuncs[kv.Keyword().Prefix()]
if !ok {
logrus.Debugf("no UpdateKeywordFunc for %s; skipping", kv.Keyword())
continue
}
// TODO check for the type=dir of the entry as well
if kv.Keyword().Prefix() == "time" && e.IsDir() {
heap.Push(h, pathUpdate{
Path: pathname,
E: e,
KV: kv,
Func: ukFunc,
})
continue
}
if _, err := ukFunc(pathname, kv); err != nil {
results = append(results, InodeDelta{
diff: ErrorDifference,
path: pathname,
old: e,
keys: []KeyDelta{
{
diff: ErrorDifference,
name: kv.Keyword(),
err: err,
},
}})
}
// XXX really would be great to have a Check() or Compare() right here,
// to compare each entry as it is encountered, rather than just running
// Check() on this path after the whole update is finished.
}
}
}
for h.Len() > 0 {
pu := heap.Pop(h).(pathUpdate)
if _, err := pu.Func(pu.Path, pu.KV); err != nil {
results = append(results, InodeDelta{
diff: ErrorDifference,
path: pu.Path,
old: pu.E,
keys: []KeyDelta{
{
diff: ErrorDifference,
name: pu.KV.Keyword(),
err: err,
},
}})
}
}
return results, nil
}
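A hedged usage sketch for Update (it assumes go-mtree's exported ParseSpec helper and the InodeDelta.Path accessor from the package's compare code; "manifest.mtree" and "rootfs" are hypothetical paths): parse a spec, then restore the default update keywords onto the tree.

package main

import (
	"fmt"
	"os"

	"github.com/vbatts/go-mtree"
)

func main() {
	fh, err := os.Open("manifest.mtree") // hypothetical spec path
	if err != nil {
		panic(err)
	}
	defer fh.Close()
	dh, err := mtree.ParseSpec(fh)
	if err != nil {
		panic(err)
	}
	// Restore uid/gid/mode/xattr/link/time under ./rootfs. The FsEval
	// argument is not consulted by this version of Update, so nil is fine.
	deltas, err := mtree.Update("rootfs", dh, mtree.DefaultUpdateKeywords, nil)
	if err != nil {
		panic(err)
	}
	for _, d := range deltas {
		fmt.Println("failed to update:", d.Path())
	}
}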
type pathUpdateHeap []pathUpdate
func (h pathUpdateHeap) Len() int { return len(h) }
func (h pathUpdateHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
// This may look backwards, but container/heap pops the least element first.
// For this use-case we want the most-qualified name (the longest path name)
// popped first, so that "." is the last entry popped.
func (h pathUpdateHeap) Less(i, j int) bool {
return len(h[i].Path) > len(h[j].Path)
}
func (h *pathUpdateHeap) Push(x interface{}) {
*h = append(*h, x.(pathUpdate))
}
func (h *pathUpdateHeap) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[0 : n-1]
return x
}
type pathUpdate struct {
Path string
E Entry
KV KeyVal
Func UpdateKeywordFunc
}
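To see the inverted Less in action, a self-contained demo (editor's illustration mirroring pathUpdateHeap with plain strings): deeper paths pop first, so a directory's mtime is restored only after everything beneath it has been touched, and "." comes out last.

package main

import (
	"container/heap"
	"fmt"
)

// strHeap pops the longest string first, mirroring pathUpdateHeap.Less.
type strHeap []string

func (h strHeap) Len() int            { return len(h) }
func (h strHeap) Less(i, j int) bool  { return len(h[i]) > len(h[j]) }
func (h strHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *strHeap) Push(x interface{}) { *h = append(*h, x.(string)) }
func (h *strHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

func main() {
	h := &strHeap{}
	heap.Init(h)
	for _, p := range []string{".", "a", "a/b", "a/b/c"} {
		heap.Push(h, p)
	}
	for h.Len() > 0 {
		// a/b/c, then a/b, then "a" and "." (order among equal lengths
		// is unspecified, but both come after all deeper paths)
		fmt.Println(heap.Pop(h))
	}
}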

201
vendor/github.com/vbatts/go-mtree/updatefuncs.go generated vendored Normal file

@@ -0,0 +1,201 @@
package mtree
import (
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/vbatts/go-mtree/pkg/govis"
)
// UpdateKeywordFunc is the signature for a function that will restore a file's
// attributes, where path is the relative path to the file and kv carries the
// keyword and the value to be restored.
type UpdateKeywordFunc func(path string, kv KeyVal) (os.FileInfo, error)
// UpdateKeywordFuncs is the registered list of functions used to update file
// attributes, keyed by the keyword as it appears in the manifest
var UpdateKeywordFuncs = map[Keyword]UpdateKeywordFunc{
"mode": modeUpdateKeywordFunc,
"time": timeUpdateKeywordFunc,
"tar_time": tartimeUpdateKeywordFunc,
"uid": uidUpdateKeywordFunc,
"gid": gidUpdateKeywordFunc,
"xattr": xattrUpdateKeywordFunc,
"link": linkUpdateKeywordFunc,
}
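A minimal sketch of this prefix-keyed dispatch (editor's illustration with local types, not the package's own): keywords like "xattr.user.comment=..." are routed by the prefix before the first dot, and keywords without a registered updater are skipped, as Update does.

package main

import (
	"fmt"
	"strings"
)

type updateFunc func(path, value string) error

var updateFuncs = map[string]updateFunc{
	"mode": func(path, value string) error {
		fmt.Printf("chmod %s %s\n", value, path)
		return nil
	},
}

func apply(path, kv string) error {
	parts := strings.SplitN(kv, "=", 2)
	// xattr keywords look like "xattr.user.comment=...", so dispatch
	// on the prefix before the first dot.
	prefix := strings.SplitN(parts[0], ".", 2)[0]
	fn, ok := updateFuncs[prefix]
	if !ok {
		return nil // no updater registered; skip, as Update does
	}
	return fn(path, parts[1])
}

func main() {
	_ = apply("./file", "mode=0644") // prints: chmod 0644 ./file
}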
func uidUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
uid, err := strconv.Atoi(kv.Value())
if err != nil {
return nil, err
}
stat, err := os.Lstat(path)
if err != nil {
return nil, err
}
if statIsUID(stat, uid) {
return stat, nil
}
if err := os.Lchown(path, uid, -1); err != nil {
return nil, err
}
return os.Lstat(path)
}
func gidUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
gid, err := strconv.Atoi(kv.Value())
if err != nil {
return nil, err
}
stat, err := os.Lstat(path)
if err != nil {
return nil, err
}
if statIsGID(stat, gid) {
return stat, nil
}
if err := os.Lchown(path, -1, gid); err != nil {
return nil, err
}
return os.Lstat(path)
}
func modeUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
info, err := os.Lstat(path)
if err != nil {
return nil, err
}
// don't set mode on symlinks, as it passes through to the backing file
if info.Mode()&os.ModeSymlink != 0 {
return info, nil
}
vmode, err := strconv.ParseInt(kv.Value(), 8, 32)
if err != nil {
return nil, err
}
stat, err := os.Lstat(path)
if err != nil {
return nil, err
}
if stat.Mode() == os.FileMode(vmode) {
return stat, nil
}
logrus.Debugf("path: %q, kv.Value(): %q, vmode: %o", path, kv.Value(), vmode)
if err := os.Chmod(path, os.FileMode(vmode)); err != nil {
return nil, err
}
return os.Lstat(path)
}
// tar_time has only second-level precision, so when restoring a file's
// timestamp from a tar_time, compare the seconds first and only call
// Chtimes if the seconds value differs.
func tartimeUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
info, err := os.Lstat(path)
if err != nil {
return nil, err
}
v := strings.SplitN(kv.Value(), ".", 2)
if len(v) != 2 {
return nil, fmt.Errorf("expected a number like 1469104727.000000000")
}
sec, err := strconv.ParseInt(v[0], 10, 64)
if err != nil {
return nil, fmt.Errorf("expected seconds, but got %q", v[0])
}
// if the seconds are the same, don't do anything: the file might have a
// nanosecond value, and restoring the tar_time would zero it out.
if info.ModTime().Unix() == sec {
return info, nil
}
vtime := time.Unix(sec, 0)
// if times are same then don't modify anything
// comparing Unix, since it does not include Nano seconds
if info.ModTime().Unix() == vtime.Unix() {
return info, nil
}
// symlinks are strange and most of the time passes through to the backing file
if info.Mode()&os.ModeSymlink != 0 {
if err := lchtimes(path, vtime, vtime); err != nil {
return nil, err
}
} else if err := os.Chtimes(path, vtime, vtime); err != nil {
return nil, err
}
return os.Lstat(path)
}
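A quick stand-alone demo of that second-granularity comparison (editor's illustration, stdlib only; the timestamp string follows the tar_time format shown above): when the on-disk mtime already matches at second granularity, the nanoseconds are deliberately left alone.

package main

import (
	"fmt"
	"strconv"
	"strings"
	"time"
)

func main() {
	// tar_time values look like "1469104727.000000000": only the seconds
	// are meaningful, so compare at second granularity before touching mtime.
	v := strings.SplitN("1469104727.000000000", ".", 2)
	sec, _ := strconv.ParseInt(v[0], 10, 64)
	onDisk := time.Unix(sec, 123456789) // same second, extra nanoseconds
	if onDisk.Unix() == sec {
		fmt.Println("seconds match; keep the nanoseconds, skip Chtimes")
	}
}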
// timeUpdateKeywordFunc restores the modification time with nanosecond precision
func timeUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
info, err := os.Lstat(path)
if err != nil {
return nil, err
}
v := strings.SplitN(kv.Value(), ".", 2)
if len(v) != 2 {
return nil, fmt.Errorf("expected a number like 1469104727.871937272")
}
nsec, err := strconv.ParseInt(v[0]+v[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("expected nano seconds, but got %q", v[0]+v[1])
}
logrus.Debugf("arg: %q; nsec: %d", v[0]+v[1], nsec)
vtime := time.Unix(0, nsec)
// if times are same then don't modify anything
if info.ModTime().Equal(vtime) {
return info, nil
}
// symlinks are strange and most of the time passes through to the backing file
if info.Mode()&os.ModeSymlink != 0 {
if err := lchtimes(path, vtime, vtime); err != nil {
return nil, err
}
} else if err := os.Chtimes(path, vtime, vtime); err != nil {
return nil, err
}
return os.Lstat(path)
}
func linkUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
linkname, err := govis.Unvis(kv.Value(), DefaultVisFlags)
if err != nil {
return nil, err
}
got, err := os.Readlink(path)
if err != nil {
return nil, err
}
if got == linkname {
return os.Lstat(path)
}
logrus.Debugf("linkUpdateKeywordFunc: removing %q to link to %q", path, linkname)
if err := os.Remove(path); err != nil {
return nil, err
}
if err := os.Symlink(linkname, path); err != nil {
return nil, err
}
return os.Lstat(path)
}

21
vendor/github.com/vbatts/go-mtree/updatefuncs_linux.go generated vendored Normal file

@@ -0,0 +1,21 @@
// +build linux
package mtree
import (
"encoding/base64"
"os"
"github.com/vbatts/go-mtree/xattr"
)
func xattrUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
buf, err := base64.StdEncoding.DecodeString(kv.Value())
if err != nil {
return nil, err
}
if err := xattr.Set(path, kv.Keyword().Suffix(), buf); err != nil {
return nil, err
}
return os.Lstat(path)
}
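For context on the base64 round-trip above, a stdlib-only sketch of how an xattr keyval decomposes (editor's illustration; the example keyword "xattr.user.comment" is hypothetical): the attribute name is the suffix after "xattr." and the value is base64-encoded in the manifest.

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// A manifest keyval such as "xattr.user.comment=aGVsbG8=" encodes the
	// attribute name after the "xattr." prefix and the value in base64.
	kv := "xattr.user.comment=aGVsbG8="
	parts := strings.SplitN(kv, "=", 2)
	name := strings.TrimPrefix(parts[0], "xattr.")
	buf, err := base64.StdEncoding.DecodeString(parts[1])
	if err != nil {
		panic(err)
	}
	fmt.Printf("set %s to %q\n", name, buf) // set user.comment to "hello"
}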


@@ -0,0 +1,11 @@
// +build !linux
package mtree
import (
"os"
)
func xattrUpdateKeywordFunc(path string, kv KeyVal) (os.FileInfo, error) {
return os.Lstat(path)
}

23
vendor/github.com/vbatts/go-mtree/version.go generated vendored Normal file

@@ -0,0 +1,23 @@
package mtree
import "fmt"
const (
// AppName is the name ... of this library/application
AppName = "gomtree"
)
const (
// VersionMajor is for API-incompatible changes
VersionMajor = 0
// VersionMinor is for functionality added in a backwards-compatible manner
VersionMinor = 5
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
)
// Version is the specification version that the package types support.
var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)

385
vendor/github.com/vbatts/go-mtree/walk.go generated vendored Normal file

@@ -0,0 +1,385 @@
package mtree
import (
"fmt"
"io"
"os"
"os/user"
"path/filepath"
"sort"
"strings"
"time"
"github.com/vbatts/go-mtree/pkg/govis"
)
// ExcludeFunc is the type of function called on each walked path to determine
// whether it should be excluded from the assembled DirectoryHierarchy. If the
// func returns true, then the path is not included in the spec.
type ExcludeFunc func(path string, info os.FileInfo) bool
// ExcludeNonDirectories is an ExcludeFunc for excluding all paths that are not directories
var ExcludeNonDirectories = func(path string, info os.FileInfo) bool {
return !info.IsDir()
}
var defaultSetKeyVals = []KeyVal{"type=file", "nlink=1", "flags=none", "mode=0664"}
// Walk from root directory and assemble the DirectoryHierarchy
// * `excludes` provided are used to skip paths
// * `keywords` are the set to collect from the walked paths. The recommended default list is DefaultKeywords.
// * `fsEval` is the interface to use in evaluating files. If `nil`, then DefaultFsEval is used.
func Walk(root string, excludes []ExcludeFunc, keywords []Keyword, fsEval FsEval) (*DirectoryHierarchy, error) {
if fsEval == nil {
fsEval = DefaultFsEval{}
}
creator := dhCreator{DH: &DirectoryHierarchy{}, fs: fsEval}
// insert signature and metadata comments first (user, machine, tree, date)
for _, e := range signatureEntries(root) {
e.Pos = len(creator.DH.Entries)
creator.DH.Entries = append(creator.DH.Entries, e)
}
// insert keyword metadata next
for _, e := range keywordEntries(keywords) {
e.Pos = len(creator.DH.Entries)
creator.DH.Entries = append(creator.DH.Entries, e)
}
// walk the directory and add entries
err := startWalk(&creator, root, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
for _, ex := range excludes {
if ex(path, info) {
return nil
}
}
entryPathName := filepath.Base(path)
if info.IsDir() {
creator.DH.Entries = append(creator.DH.Entries, Entry{
Type: BlankType,
Pos: len(creator.DH.Entries),
})
// Insert a comment of the full path of the directory's name
if creator.curDir != nil {
dirname, err := creator.curDir.Path()
if err != nil {
return err
}
creator.DH.Entries = append(creator.DH.Entries, Entry{
Pos: len(creator.DH.Entries),
Raw: "# " + filepath.Join(dirname, entryPathName),
Type: CommentType,
})
} else {
entryPathName = "."
creator.DH.Entries = append(creator.DH.Entries, Entry{
Pos: len(creator.DH.Entries),
Raw: "# .",
Type: CommentType,
})
}
// set the initial /set keywords
if creator.curSet == nil {
e := Entry{
Name: "/set",
Type: SpecialType,
Pos: len(creator.DH.Entries),
Keywords: keyvalSelector(defaultSetKeyVals, keywords),
}
for _, keyword := range SetKeywords {
err := func() error {
var r io.Reader
if info.Mode().IsRegular() {
fh, err := creator.fs.Open(path)
if err != nil {
return err
}
defer fh.Close()
r = fh
}
keyFunc, ok := KeywordFuncs[keyword.Prefix()]
if !ok {
return fmt.Errorf("Unknown keyword %q for file %q", keyword.Prefix(), path)
}
kvs, err := creator.fs.KeywordFunc(keyFunc)(path, info, r)
if err != nil {
return err
}
for _, kv := range kvs {
if kv != "" {
e.Keywords = append(e.Keywords, kv)
}
}
return nil
}()
if err != nil {
return err
}
}
creator.curSet = &e
creator.DH.Entries = append(creator.DH.Entries, e)
} else if creator.curSet != nil {
// check the attributes of the /set keywords and re-set if changed
klist := []KeyVal{}
for _, keyword := range SetKeywords {
err := func() error {
var r io.Reader
if info.Mode().IsRegular() {
fh, err := creator.fs.Open(path)
if err != nil {
return err
}
defer fh.Close()
r = fh
}
keyFunc, ok := KeywordFuncs[keyword.Prefix()]
if !ok {
return fmt.Errorf("Unknown keyword %q for file %q", keyword.Prefix(), path)
}
kvs, err := creator.fs.KeywordFunc(keyFunc)(path, info, r)
if err != nil {
return err
}
for _, kv := range kvs {
if kv != "" {
klist = append(klist, kv)
}
}
return nil
}()
if err != nil {
return err
}
}
needNewSet := false
for _, k := range klist {
if !inKeyValSlice(k, creator.curSet.Keywords) {
needNewSet = true
}
}
if needNewSet {
e := Entry{
Name: "/set",
Type: SpecialType,
Pos: len(creator.DH.Entries),
Keywords: keyvalSelector(append(defaultSetKeyVals, klist...), keywords),
}
creator.curSet = &e
creator.DH.Entries = append(creator.DH.Entries, e)
}
}
}
encodedEntryName, err := govis.Vis(entryPathName, DefaultVisFlags)
if err != nil {
return err
}
e := Entry{
Name: encodedEntryName,
Pos: len(creator.DH.Entries),
Type: RelativeType,
Set: creator.curSet,
Parent: creator.curDir,
}
for _, keyword := range keywords {
err := func() error {
var r io.Reader
if info.Mode().IsRegular() {
fh, err := creator.fs.Open(path)
if err != nil {
return err
}
defer fh.Close()
r = fh
}
keyFunc, ok := KeywordFuncs[keyword.Prefix()]
if !ok {
return fmt.Errorf("Unknown keyword %q for file %q", keyword.Prefix(), path)
}
kvs, err := creator.fs.KeywordFunc(keyFunc)(path, info, r)
if err != nil {
return err
}
for _, kv := range kvs {
if kv != "" && !inKeyValSlice(kv, creator.curSet.Keywords) {
e.Keywords = append(e.Keywords, kv)
}
}
return nil
}()
if err != nil {
return err
}
}
if info.IsDir() {
if creator.curDir != nil {
creator.curDir.Next = &e
}
e.Prev = creator.curDir
creator.curDir = &e
} else {
if creator.curEnt != nil {
creator.curEnt.Next = &e
}
e.Prev = creator.curEnt
creator.curEnt = &e
}
creator.DH.Entries = append(creator.DH.Entries, e)
return nil
})
return creator.DH, err
}
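A hedged usage sketch for Walk (it assumes the package's exported DefaultKeywords set and DirectoryHierarchy.WriteTo, both public go-mtree API though not shown in this hunk; "rootfs" is a hypothetical path): generate a manifest for a directory tree and print it.

package main

import (
	"os"

	"github.com/vbatts/go-mtree"
)

func main() {
	// Generate a manifest for ./rootfs with the default keyword set;
	// a nil FsEval selects DefaultFsEval, as documented above.
	dh, err := mtree.Walk("rootfs", nil, mtree.DefaultKeywords, nil)
	if err != nil {
		panic(err)
	}
	if _, err := dh.WriteTo(os.Stdout); err != nil {
		panic(err)
	}
}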
// startWalk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by walkFn. The files are walked in lexical
// order, which makes the output deterministic but means that for very
// large directories Walk can be inefficient.
// Walk does not follow symbolic links.
func startWalk(c *dhCreator, root string, walkFn filepath.WalkFunc) error {
info, err := c.fs.Lstat(root)
if err != nil {
return walkFn(root, nil, err)
}
return walk(c, root, info, walkFn)
}
// walk recursively descends path, calling w.
func walk(c *dhCreator, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
err := walkFn(path, info, nil)
if err != nil {
if info.IsDir() && err == filepath.SkipDir {
return nil
}
return err
}
if !info.IsDir() {
return nil
}
names, err := readOrderedDirNames(c, path)
if err != nil {
return walkFn(path, info, err)
}
for _, name := range names {
filename := filepath.Join(path, name)
fileInfo, err := c.fs.Lstat(filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walk(c, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
c.DH.Entries = append(c.DH.Entries, Entry{
Name: "..",
Type: DotDotType,
Pos: len(c.DH.Entries),
})
if c.curDir != nil {
c.curDir = c.curDir.Parent
}
return nil
}
// readOrderedDirNames reads the directory and returns a sorted list of all
// entries with non-directories first, followed by directories.
func readOrderedDirNames(c *dhCreator, dirname string) ([]string, error) {
infos, err := c.fs.Readdir(dirname)
if err != nil {
return nil, err
}
names := []string{}
dirnames := []string{}
for _, info := range infos {
if info.IsDir() {
dirnames = append(dirnames, info.Name())
continue
}
names = append(names, info.Name())
}
sort.Strings(names)
sort.Strings(dirnames)
return append(names, dirnames...), nil
}
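The ordering is easy to reproduce in isolation (editor's illustration, stdlib only): non-directories come first, then directories, each group lexically sorted.

package main

import (
	"fmt"
	"sort"
)

func main() {
	names := []string{"zz.txt", "aa.txt"}
	dirs := []string{"sub", "etc"}
	sort.Strings(names)
	sort.Strings(dirs)
	// files first, then directories, each lexically sorted
	fmt.Println(append(names, dirs...)) // [aa.txt zz.txt etc sub]
}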
// signatureEntries is a simple helper function that returns a slice of Entries
// describing the metadata signature of the host: date, user, machine, and tree
// (which is specified by the argument `root`). These Entries become comments in
// the mtree specification, so if there is an error obtaining a particular piece
// of metadata, we simply don't construct that Entry.
func signatureEntries(root string) []Entry {
var sigEntries []Entry
user, err := user.Current()
if err == nil {
userEntry := Entry{
Type: CommentType,
Raw: fmt.Sprintf("#%16s%s", "user: ", user.Username),
}
sigEntries = append(sigEntries, userEntry)
}
hostname, err := os.Hostname()
if err == nil {
hostEntry := Entry{
Type: CommentType,
Raw: fmt.Sprintf("#%16s%s", "machine: ", hostname),
}
sigEntries = append(sigEntries, hostEntry)
}
if tree := filepath.Clean(root); tree == "." || tree == ".." {
root, err := os.Getwd()
if err == nil {
// use parent directory of current directory
if tree == ".." {
root = filepath.Dir(root)
}
treeEntry := Entry{
Type: CommentType,
Raw: fmt.Sprintf("#%16s%s", "tree: ", filepath.Clean(root)),
}
sigEntries = append(sigEntries, treeEntry)
}
} else {
treeEntry := Entry{
Type: CommentType,
Raw: fmt.Sprintf("#%16s%s", "tree: ", filepath.Clean(root)),
}
sigEntries = append(sigEntries, treeEntry)
}
dateEntry := Entry{
Type: CommentType,
Raw: fmt.Sprintf("#%16s%s", "date: ", time.Now().Format("Mon Jan 2 15:04:05 2006")),
}
sigEntries = append(sigEntries, dateEntry)
return sigEntries
}
// keywordEntries returns a slice of entries including a comment of the
// keywords requested when generating this manifest.
func keywordEntries(keywords []Keyword) []Entry {
// Convert all of the keywords to zero-value keyvals.
return []Entry{
{
Type: CommentType,
Raw: fmt.Sprintf("#%16s%s", "keywords: ", strings.Join(FromKeywords(keywords), ",")),
},
}
}

42
vendor/github.com/vbatts/go-mtree/xattr/xattr.go generated vendored Normal file

@@ -0,0 +1,42 @@
// +build linux
package xattr
import (
"strings"
"syscall"
)
// Get returns the extended attributes (xattr) on file `path`, for the given `name`.
func Get(path, name string) ([]byte, error) {
dest := make([]byte, 1024)
i, err := syscall.Getxattr(path, name, dest)
if err != nil {
return nil, err
}
return dest[:i], nil
}
// Set sets the extended attributes (xattr) on file `path`, for the given `name` and `value`
func Set(path, name string, value []byte) error {
return syscall.Setxattr(path, name, value, 0)
}
// List returns a list of all the extended attributes (xattr) for file `path`
func List(path string) ([]string, error) {
dest := make([]byte, 1024)
i, err := syscall.Listxattr(path, dest)
if err != nil {
return nil, err
}
// If the returned list is empty, return nil instead of []string{""}
str := string(dest[:i])
if str == "" {
return nil, nil
}
return strings.Split(strings.TrimRight(str, nilByte), nilByte), nil
}
const nilByte = "\x00"
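A hedged usage sketch for this package (editor's illustration; it requires Linux and a filesystem with user xattrs enabled, and uses a temporary file so it is self-contained):

package main

import (
	"fmt"
	"os"

	"github.com/vbatts/go-mtree/xattr"
)

func main() {
	fh, err := os.CreateTemp("", "xattr-demo")
	if err != nil {
		panic(err)
	}
	path := fh.Name()
	fh.Close()
	defer os.Remove(path)
	if err := xattr.Set(path, "user.comment", []byte("hello")); err != nil {
		panic(err) // e.g. a filesystem mounted without user xattr support
	}
	keys, err := xattr.List(path)
	if err != nil {
		panic(err)
	}
	fmt.Println(keys) // [user.comment]
	val, err := xattr.Get(path, "user.comment")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", val) // hello
}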


@@ -0,0 +1,21 @@
// +build !linux
package xattr
// Get would return the extended attributes, but this unsupported feature
// returns nil, nil
func Get(path, name string) ([]byte, error) {
return nil, nil
}
// Set would set the extended attributes, but this unsupported feature returns
// nil
func Set(path, name string, value []byte) error {
return nil
}
// List would return the keys of extended attributes, but this unsupported
// feature returns nil, nil
func List(path string) ([]string, error) {
return nil, nil
}

124
vendor/golang.org/x/crypto/ripemd160/ripemd160.go generated vendored Normal file

@@ -0,0 +1,124 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ripemd160 implements the RIPEMD-160 hash algorithm.
//
// Deprecated: RIPEMD-160 is a legacy hash and should not be used for new
// applications. Also, this package does not and will not provide an optimized
// implementation. Instead, use a modern hash like SHA-256 (from crypto/sha256).
package ripemd160 // import "golang.org/x/crypto/ripemd160"
// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart
// Preneel with specifications available at:
// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf.
import (
"crypto"
"hash"
)
func init() {
crypto.RegisterHash(crypto.RIPEMD160, New)
}
// The size of the checksum in bytes.
const Size = 20
// The block size of the hash algorithm in bytes.
const BlockSize = 64
const (
_s0 = 0x67452301
_s1 = 0xefcdab89
_s2 = 0x98badcfe
_s3 = 0x10325476
_s4 = 0xc3d2e1f0
)
// digest represents the partial evaluation of a checksum.
type digest struct {
s [5]uint32 // running context
x [BlockSize]byte // temporary buffer
nx int // index into x
tc uint64 // total count of bytes processed
}
func (d *digest) Reset() {
d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4
d.nx = 0
d.tc = 0
}
// New returns a new hash.Hash computing the checksum.
func New() hash.Hash {
result := new(digest)
result.Reset()
return result
}
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
nn = len(p)
d.tc += uint64(nn)
if d.nx > 0 {
n := len(p)
if n > BlockSize-d.nx {
n = BlockSize - d.nx
}
for i := 0; i < n; i++ {
d.x[d.nx+i] = p[i]
}
d.nx += n
if d.nx == BlockSize {
_Block(d, d.x[0:])
d.nx = 0
}
p = p[n:]
}
n := _Block(d, p)
p = p[n:]
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
func (d0 *digest) Sum(in []byte) []byte {
// Make a copy of d0 so that caller can keep writing and summing.
d := *d0
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
tc := d.tc
var tmp [64]byte
tmp[0] = 0x80
if tc%64 < 56 {
d.Write(tmp[0 : 56-tc%64])
} else {
d.Write(tmp[0 : 64+56-tc%64])
}
// Length in bits.
tc <<= 3
for i := uint(0); i < 8; i++ {
tmp[i] = byte(tc >> (8 * i))
}
d.Write(tmp[0:8])
if d.nx != 0 {
panic("d.nx != 0")
}
var digest [Size]byte
for i, s := range d.s {
digest[i*4] = byte(s)
digest[i*4+1] = byte(s >> 8)
digest[i*4+2] = byte(s >> 16)
digest[i*4+3] = byte(s >> 24)
}
return append(in, digest[:]...)
}
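Despite the deprecation note above, mtree manifests can still carry ripemd160digest keywords, which is presumably why this package is vendored. Standard hash.Hash usage, checked against the well-known RIPEMD-160 test vector:

package main

import (
	"fmt"

	"golang.org/x/crypto/ripemd160"
)

func main() {
	h := ripemd160.New()
	h.Write([]byte("The quick brown fox jumps over the lazy dog"))
	fmt.Printf("%x\n", h.Sum(nil)) // 37f332f68db77bd9d7edd4969571ad671cf9dd3b
}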

165
vendor/golang.org/x/crypto/ripemd160/ripemd160block.go generated vendored Normal file

@@ -0,0 +1,165 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// RIPEMD-160 block step.
// In its own file so that a faster assembly or C version
// can be substituted easily.
package ripemd160
import (
"math/bits"
)
// work buffer indices and roll amounts for one line
var _n = [80]uint{
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8,
3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12,
1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2,
4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13,
}
var _r = [80]uint{
11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8,
7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12,
11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5,
11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12,
9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6,
}
// same for the other parallel one
var n_ = [80]uint{
5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12,
6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2,
15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13,
8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14,
12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11,
}
var r_ = [80]uint{
8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6,
9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11,
9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5,
15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8,
8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11,
}
func _Block(md *digest, p []byte) int {
n := 0
var x [16]uint32
var alpha, beta uint32
for len(p) >= BlockSize {
a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4]
aa, bb, cc, dd, ee := a, b, c, d, e
j := 0
for i := 0; i < 16; i++ {
x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24
j += 4
}
// round 1
i := 0
for i < 16 {
alpha = a + (b ^ c ^ d) + x[_n[i]]
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 2
for i < 32 {
alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 3
for i < 48 {
alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 4
for i < 64 {
alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// round 5
for i < 80 {
alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e
s := int(_r[i])
alpha = bits.RotateLeft32(alpha, s) + e
beta = bits.RotateLeft32(c, 10)
a, b, c, d, e = e, alpha, b, beta, d
// parallel line
alpha = aa + (bb ^ cc ^ dd) + x[n_[i]]
s = int(r_[i])
alpha = bits.RotateLeft32(alpha, s) + ee
beta = bits.RotateLeft32(cc, 10)
aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd
i++
}
// combine results
dd += c + md.s[1]
md.s[1] = md.s[2] + d + ee
md.s[2] = md.s[3] + e + aa
md.s[3] = md.s[4] + a + bb
md.s[4] = md.s[0] + b + cc
md.s[0] = dd
p = p[BlockSize:]
n += BlockSize
}
return n
}

7
vendor/modules.txt vendored

@@ -139,7 +139,6 @@ github.com/docker/docker/api/types/versions
github.com/docker/docker/api/types/volume
github.com/docker/docker/client
github.com/docker/docker/errdefs
github.com/docker/docker/pkg/archive
github.com/docker/docker/pkg/fileutils
github.com/docker/docker/pkg/homedir
github.com/docker/docker/pkg/idtools
@@ -474,6 +473,11 @@ github.com/theupdateframework/notary/tuf/data
github.com/theupdateframework/notary/tuf/signed
github.com/theupdateframework/notary/tuf/utils
github.com/theupdateframework/notary/tuf/validation
# github.com/vbatts/go-mtree v0.5.0
## explicit
github.com/vbatts/go-mtree
github.com/vbatts/go-mtree/pkg/govis
github.com/vbatts/go-mtree/xattr
# github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f
github.com/xeipuuv/gojsonpointer
# github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415
@@ -511,6 +515,7 @@ golang.org/x/crypto/blowfish
golang.org/x/crypto/ed25519
golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/pbkdf2
golang.org/x/crypto/ripemd160
golang.org/x/crypto/scrypt
golang.org/x/crypto/ssh/terminal
# golang.org/x/mod v0.4.2