Merge pull request #7859 from yifan-gu/rkt_remove_store

kubelet/rkt: Remove dependencies on rkt.store
This commit is contained in:
Victor Marmol
2015-05-06 17:45:29 -07:00
327 changed files with 107 additions and 104226 deletions

102
Godeps/Godeps.json generated
View File

@@ -48,21 +48,6 @@
"ImportPath": "github.com/abbot/go-http-auth",
"Rev": "c0ef4539dfab4d21c8ef20ba2924f9fc6f186d35"
},
{
"ImportPath": "github.com/appc/spec/aci",
"Comment": "v0.5.1-55-g87808a3",
"Rev": "87808a37061a4a2e6204ccea5fd2fc930576db94"
},
{
"ImportPath": "github.com/appc/spec/pkg/acirenderer",
"Comment": "v0.5.1-55-g87808a3",
"Rev": "87808a37061a4a2e6204ccea5fd2fc930576db94"
},
{
"ImportPath": "github.com/appc/spec/pkg/tarheader",
"Comment": "v0.5.1-55-g87808a3",
"Rev": "87808a37061a4a2e6204ccea5fd2fc930576db94"
},
{
"ImportPath": "github.com/appc/spec/schema",
"Comment": "v0.5.1-55-g87808a3",
@@ -72,25 +57,11 @@
"ImportPath": "github.com/beorn7/perks/quantile",
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
},
{
"ImportPath": "github.com/camlistore/lock",
"Rev": "ae27720f340952636b826119b58130b9c1a847a0"
},
{
"ImportPath": "github.com/codegangsta/negroni",
"Comment": "v0.1-62-g8d75e11",
"Rev": "8d75e11374a1928608c906fe745b538483e7aeb2"
},
{
"ImportPath": "github.com/coreos/etcd/etcdserver/etcdhttp/httptypes",
"Comment": "v2.0.4-288-g866a9d4",
"Rev": "866a9d4e41401657ea44bf539b2c5561d6fdcd67"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.0.4-288-g866a9d4",
"Rev": "866a9d4e41401657ea44bf539b2c5561d6fdcd67"
},
{
"ImportPath": "github.com/coreos/go-etcd/etcd",
"Comment": "v2.0.0-3-g0424b5f",
@@ -110,68 +81,11 @@
"Comment": "v2-27-g97e243d",
"Rev": "97e243d21a8e232e9d8af38ba2366dfcfceebeba"
},
{
"ImportPath": "github.com/coreos/rkt/pkg/aci",
"Comment": "v0.5.4",
"Rev": "c8a7050a883653266137ae05f6e8f166db52eb67"
},
{
"ImportPath": "github.com/coreos/rkt/pkg/lock",
"Comment": "v0.5.4",
"Rev": "c8a7050a883653266137ae05f6e8f166db52eb67"
},
{
"ImportPath": "github.com/coreos/rkt/pkg/sys",
"Comment": "v0.5.4",
"Rev": "c8a7050a883653266137ae05f6e8f166db52eb67"
},
{
"ImportPath": "github.com/coreos/rkt/pkg/tar",
"Comment": "v0.5.4",
"Rev": "c8a7050a883653266137ae05f6e8f166db52eb67"
},
{
"ImportPath": "github.com/coreos/rkt/store",
"Comment": "v0.5.4",
"Rev": "c8a7050a883653266137ae05f6e8f166db52eb67"
},
{
"ImportPath": "github.com/cpuguy83/go-md2man/mangen",
"Comment": "v1.0.2-5-g2831f11",
"Rev": "2831f11f66ff4008f10e2cd7ed9a85e3d3fc2bed"
},
{
"ImportPath": "github.com/cznic/bufs",
"Rev": "3dcccbd7064a1689f9c093a988ea11ac00e21f51"
},
{
"ImportPath": "github.com/cznic/exp/lldb",
"Rev": "9b0e4be12fbdb7b843e0a658a04c35d160371789"
},
{
"ImportPath": "github.com/cznic/fileutil",
"Rev": "21ae57c9dce724a15e88bd9cd46d5668f3e880a5"
},
{
"ImportPath": "github.com/cznic/mathutil",
"Rev": "250d0b9d3304c5ea0c4cfc7d9efc7ee528b81f3b"
},
{
"ImportPath": "github.com/cznic/ql",
"Rev": "fc1b91b82089d3f132fbed8a7c9f349c3133eb96"
},
{
"ImportPath": "github.com/cznic/sortutil",
"Rev": "d4401851b4c370f979b842fa1e45e0b3b718b391"
},
{
"ImportPath": "github.com/cznic/strutil",
"Rev": "97bc31f80ac4c9fa9c5dc5fea74c383858988ea2"
},
{
"ImportPath": "github.com/cznic/zappy",
"Rev": "47331054e4f96186e3ff772877c0443909368a45"
},
{
"ImportPath": "github.com/davecgh/go-spew/spew",
"Rev": "3e6e67c4dcea3ac2f25fd4731abc0e1deaf36216"
@@ -431,14 +345,6 @@
"Comment": "v1.0-28-g8adf9e1730c5",
"Rev": "8adf9e1730c55cdc590de7d49766cb2acc88d8f2"
},
{
"ImportPath": "github.com/petar/GoLLRB/llrb",
"Rev": "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
},
{
"ImportPath": "github.com/peterbourgon/diskv",
"Rev": "508f5671a72eeaef05cf8c24abe7fbc1c07faf69"
},
{
"ImportPath": "github.com/prometheus/client_golang/model",
"Comment": "0.4.0-1-g692492e",
@@ -519,14 +425,6 @@
"Comment": "v1.0-13-g5292687",
"Rev": "5292687f5379e01054407da44d7c4590a61fd3de"
},
{
"ImportPath": "golang.org/x/crypto/cast5",
"Rev": "a7ead6ddf06233883deca151dffaef2effbf498f"
},
{
"ImportPath": "golang.org/x/crypto/openpgp",
"Rev": "a7ead6ddf06233883deca151dffaef2effbf498f"
},
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3"

View File

@@ -1,81 +0,0 @@
package aci
import (
"archive/tar"
"io"
"os"
"path/filepath"
"github.com/appc/spec/pkg/tarheader"
)
// BuildWalker creates a filepath.WalkFunc that walks over the given root
// (which should represent an ACI layout on disk) and adds the files in the
// rootfs/ subdirectory to the given ArchiveWriter.
func BuildWalker(root string, aw ArchiveWriter) filepath.WalkFunc {
	// cache of inode -> filepath, used to leverage hard links in the archive
	inos := map[uint64]string{}
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		relpath, err := filepath.Rel(root, path)
		if err != nil {
			return err
		}
		if relpath == "." {
			return nil
		}
		if relpath == ManifestFile {
			// ignore; this will be written by the archive writer
			// TODO(jonboulle): does this make sense? maybe just remove from archivewriter?
			return nil
		}
		link := ""
		var r io.Reader
		switch info.Mode() & os.ModeType {
		case os.ModeSocket:
			// sockets cannot be represented in a tar archive; skip them
			return nil
		case os.ModeNamedPipe:
		case os.ModeCharDevice:
		case os.ModeDevice:
		case os.ModeDir:
			// special files and directories get a header but no body
		case os.ModeSymlink:
			target, err := os.Readlink(path)
			if err != nil {
				return err
			}
			link = target
		default:
			// regular file: stream its contents into the archive
			file, err := os.Open(path)
			if err != nil {
				return err
			}
			defer file.Close()
			r = file
		}
		hdr, err := tar.FileInfoHeader(info, link)
		if err != nil {
			// Was panic(err): a library walk callback must not crash the
			// process; propagate the error to the filepath.Walk caller.
			return err
		}
		// Because os.FileInfo's Name method returns only the base
		// name of the file it describes, it may be necessary to
		// modify the Name field of the returned header to provide the
		// full path name of the file.
		hdr.Name = relpath
		tarheader.Populate(hdr, info, inos)
		// If the file is a hard link to a file we've already seen, we
		// don't need the contents
		if hdr.Typeflag == tar.TypeLink {
			hdr.Size = 0
			r = nil
		}
		return aw.AddFile(hdr, r)
	}
}

View File

@@ -1,2 +0,0 @@
// Package aci contains various functions for working with App Container Images.
package aci

View File

@@ -1,194 +0,0 @@
package aci
import (
"archive/tar"
"bytes"
"compress/bzip2"
"compress/gzip"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os/exec"
"path/filepath"
"github.com/appc/spec/schema"
)
type FileType string
const (
TypeGzip = FileType("gz")
TypeBzip2 = FileType("bz2")
TypeXz = FileType("xz")
TypeTar = FileType("tar")
TypeText = FileType("text")
TypeUnknown = FileType("unknown")
readLen = 512 // max bytes to sniff
hexHdrGzip = "1f8b"
hexHdrBzip2 = "425a68"
hexHdrXz = "fd377a585a00"
hexSigTar = "7573746172"
tarOffset = 257
textMime = "text/plain; charset=utf-8"
)
var (
hdrGzip []byte
hdrBzip2 []byte
hdrXz []byte
sigTar []byte
tarEnd int
)
func mustDecodeHex(s string) []byte {
b, err := hex.DecodeString(s)
if err != nil {
panic(err)
}
return b
}
func init() {
hdrGzip = mustDecodeHex(hexHdrGzip)
hdrBzip2 = mustDecodeHex(hexHdrBzip2)
hdrXz = mustDecodeHex(hexHdrXz)
sigTar = mustDecodeHex(hexSigTar)
tarEnd = tarOffset + len(sigTar)
}
// DetectFileType attempts to detect the type of file that the given reader
// represents by comparing it against known file signatures (magic numbers)
func DetectFileType(r io.Reader) (FileType, error) {
var b bytes.Buffer
n, err := io.CopyN(&b, r, readLen)
if err != nil && err != io.EOF {
return TypeUnknown, err
}
bs := b.Bytes()
switch {
case bytes.HasPrefix(bs, hdrGzip):
return TypeGzip, nil
case bytes.HasPrefix(bs, hdrBzip2):
return TypeBzip2, nil
case bytes.HasPrefix(bs, hdrXz):
return TypeXz, nil
case n > int64(tarEnd) && bytes.Equal(bs[tarOffset:tarEnd], sigTar):
return TypeTar, nil
case http.DetectContentType(bs) == textMime:
return TypeText, nil
default:
return TypeUnknown, nil
}
}
// XzReader shells out to a command line xz executable (if
// available) to decompress the given io.Reader using the xz
// compression format.
//
// If no xz executable can be found, or decompression fails, the failure is
// surfaced as the read error on the returned pipe. (The previous
// implementation called log.Fatalf here, which terminated the whole
// process from library code.)
func XzReader(r io.Reader) io.ReadCloser {
	rpipe, wpipe := io.Pipe()
	ex, err := exec.LookPath("xz")
	if err != nil {
		log.Printf("couldn't find xz executable: %v", err)
		// Deliver the error to whoever reads from the pipe instead of exiting.
		wpipe.CloseWithError(err)
		return rpipe
	}
	cmd := exec.Command(ex, "--decompress", "--stdout")
	cmd.Stdin = r
	cmd.Stdout = wpipe
	go func() {
		// Propagate the command's exit status to the read side of the pipe.
		wpipe.CloseWithError(cmd.Run())
	}()
	return rpipe
}
// ManifestFromImage extracts a new schema.ImageManifest from the given ACI image.
func ManifestFromImage(rs io.ReadSeeker) (*schema.ImageManifest, error) {
	tr, err := NewCompressedTarReader(rs)
	if err != nil {
		return nil, err
	}
	// Scan the archive entry by entry until the manifest file turns up.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil, errors.New("missing manifest")
		}
		if err != nil {
			return nil, fmt.Errorf("error extracting tarball: %v", err)
		}
		if filepath.Clean(hdr.Name) != ManifestFile {
			continue
		}
		raw, err := ioutil.ReadAll(tr)
		if err != nil {
			return nil, err
		}
		var im schema.ImageManifest
		if err := im.UnmarshalJSON(raw); err != nil {
			return nil, err
		}
		return &im, nil
	}
}
// NewCompressedTarReader creates a new tar.Reader reading from the given ACI
// image, transparently decompressing it first.
func NewCompressedTarReader(rs io.ReadSeeker) (*tar.Reader, error) {
	decompressed, err := NewCompressedReader(rs)
	if err == nil {
		return tar.NewReader(decompressed), nil
	}
	return nil, err
}
// NewCompressedReader creates a new io.Reader from the given ACI image,
// wrapping it in the appropriate decompressor for its detected file type.
func NewCompressedReader(rs io.ReadSeeker) (io.Reader, error) {
	if _, err := rs.Seek(0, 0); err != nil {
		return nil, err
	}
	ftype, err := DetectFileType(rs)
	if err != nil {
		return nil, err
	}
	// Detection consumed bytes from the stream; rewind before decoding.
	if _, err := rs.Seek(0, 0); err != nil {
		return nil, err
	}
	switch ftype {
	case TypeGzip:
		gz, err := gzip.NewReader(rs)
		if err != nil {
			return nil, err
		}
		return gz, nil
	case TypeBzip2:
		return bzip2.NewReader(rs), nil
	case TypeXz:
		return XzReader(rs), nil
	case TypeTar:
		// Already uncompressed; hand back the rewound stream as-is.
		return rs, nil
	case TypeUnknown:
		return nil, errors.New("error: unknown image filetype")
	}
	return nil, errors.New("no type returned from DetectFileType?")
}

View File

@@ -1,136 +0,0 @@
package aci
import (
"archive/tar"
"compress/gzip"
"io/ioutil"
"os"
"testing"
)
func newTestACI(usedotslash bool) (*os.File, error) {
tf, err := ioutil.TempFile("", "")
if err != nil {
return nil, err
}
manifestBody := `{"acKind":"ImageManifest","acVersion":"0.5.1","name":"example.com/app"}`
gw := gzip.NewWriter(tf)
tw := tar.NewWriter(gw)
manifestPath := "manifest"
if usedotslash {
manifestPath = "./" + manifestPath
}
hdr := &tar.Header{
Name: manifestPath,
Size: int64(len(manifestBody)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(manifestBody)); err != nil {
return nil, err
}
if err := tw.Close(); err != nil {
return nil, err
}
if err := gw.Close(); err != nil {
return nil, err
}
return tf, nil
}
func newEmptyTestACI() (*os.File, error) {
tf, err := ioutil.TempFile("", "")
if err != nil {
return nil, err
}
gw := gzip.NewWriter(tf)
tw := tar.NewWriter(gw)
if err := tw.Close(); err != nil {
return nil, err
}
if err := gw.Close(); err != nil {
return nil, err
}
return tf, nil
}
// TestManifestFromImage verifies the manifest is found whether or not its
// entry name carries a leading "./", and that an empty image is rejected.
func TestManifestFromImage(t *testing.T) {
	for _, usedotslash := range []bool{false, true} {
		img, err := newTestACI(usedotslash)
		if err != nil {
			t.Fatalf("newTestACI: unexpected error: %v", err)
		}
		// Deferred to function exit (not loop end), matching the original.
		defer img.Close()
		defer os.Remove(img.Name())
		im, err := ManifestFromImage(img)
		if err != nil {
			t.Fatalf("ManifestFromImage: unexpected error: %v", err)
		}
		if got := im.Name.String(); got != "example.com/app" {
			t.Errorf("expected %s, got %s", "example.com/app", got)
		}
		emptyImg, err := newEmptyTestACI()
		if err != nil {
			t.Fatalf("newEmptyTestACI: unexpected error: %v", err)
		}
		defer emptyImg.Close()
		defer os.Remove(emptyImg.Name())
		if _, err = ManifestFromImage(emptyImg); err == nil {
			t.Fatalf("ManifestFromImage: expected error")
		}
	}
}
// TestNewCompressedTarReader checks that reading through the tar reader of a
// gzipped test ACI yields file contents that sniff as plain text (the JSON
// manifest), i.e. both gzip and tar framing have been stripped.
func TestNewCompressedTarReader(t *testing.T) {
	aciFile, err := newTestACI(false)
	if err != nil {
		t.Fatalf("newTestACI: unexpected error: %v", err)
	}
	defer aciFile.Close()
	defer os.Remove(aciFile.Name())
	reader, err := NewCompressedTarReader(aciFile)
	if err != nil {
		t.Fatalf("NewCompressedTarReader: unexpected error: %v", err)
	}
	kind, err := DetectFileType(reader)
	if err != nil {
		t.Fatalf("DetectFileType: unexpected error: %v", err)
	}
	if kind != TypeText {
		t.Errorf("expected %v, got %v", TypeText, kind)
	}
}
// TestNewCompressedReader checks that decompressing a gzipped test ACI yields
// the underlying tar stream (detected via the tar magic number).
func TestNewCompressedReader(t *testing.T) {
	aciFile, err := newTestACI(false)
	if err != nil {
		t.Fatalf("newTestACI: unexpected error: %v", err)
	}
	defer aciFile.Close()
	defer os.Remove(aciFile.Name())
	reader, err := NewCompressedReader(aciFile)
	if err != nil {
		t.Fatalf("NewCompressedReader: unexpected error: %v", err)
	}
	kind, err := DetectFileType(reader)
	if err != nil {
		t.Fatalf("DetectFileType: unexpected error: %v", err)
	}
	if kind != TypeTar {
		t.Errorf("expected %v, got %v", TypeTar, kind)
	}
}

View File

@@ -1,159 +0,0 @@
package aci
/*
Image Layout
The on-disk layout of an app container is straightforward.
It includes a rootfs with all of the files that will exist in the root of the app and a manifest describing the image.
The layout MUST contain an image manifest.
/manifest
/rootfs/
/rootfs/usr/bin/mysql
*/
import (
"archive/tar"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/appc/spec/schema"
)
const (
// ManifestFile is the path to the manifest file inside the layout.
ManifestFile = "manifest"
// RootfsDir is the path to the rootfs directory inside the layout.
RootfsDir = "rootfs"
)
var (
// ErrNoRootFS is returned by validation when the layout has no rootfs directory.
ErrNoRootFS = errors.New("no rootfs found in layout")
// ErrNoManifest is returned by validation when the layout has no image manifest.
ErrNoManifest = errors.New("no image manifest found in layout")
)
// ValidateLayout takes a directory and validates that the layout of the directory
// matches that expected by the Application Container Image format.
// If any errors are encountered during the validation, it will abort and
// return the first one.
func ValidateLayout(dir string) error {
	fi, err := os.Stat(dir)
	if err != nil {
		return fmt.Errorf("error accessing layout: %v", err)
	}
	if !fi.IsDir() {
		return fmt.Errorf("given path %q is not a directory", dir)
	}
	var flist []string
	var imOK, rfsOK bool
	var im io.Reader
	walkLayout := func(fpath string, fi os.FileInfo, err error) error {
		// Propagate traversal errors first: fi may be nil when err is
		// non-nil, so using it below would panic. (The previous version
		// silently ignored this parameter.)
		if err != nil {
			return err
		}
		rpath, err := filepath.Rel(dir, fpath)
		if err != nil {
			return err
		}
		switch rpath {
		case ".":
			// the layout root itself: nothing to record
		case ManifestFile:
			// keep the manifest stream open; validate() closes it
			im, err = os.Open(fpath)
			if err != nil {
				return err
			}
			imOK = true
		case RootfsDir:
			if !fi.IsDir() {
				return errors.New("rootfs is not a directory")
			}
			rfsOK = true
		default:
			flist = append(flist, rpath)
		}
		return nil
	}
	if err := filepath.Walk(dir, walkLayout); err != nil {
		return err
	}
	return validate(imOK, im, rfsOK, flist)
}
// ValidateArchive takes a *tar.Reader and validates that the layout of the
// filesystem the reader encapsulates matches that expected by the
// Application Container Image format. If any errors are encountered during
// the validation, it will abort and return the first one.
func ValidateArchive(tr *tar.Reader) error {
	fseen := make(map[string]bool)
	var imOK, rfsOK bool
	var im bytes.Buffer
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		name := filepath.Clean(hdr.Name)
		switch name {
		case ".":
			// the archive root entry carries no information
		case ManifestFile:
			// buffer the manifest so validate() can parse it afterwards
			if _, err := io.Copy(&im, tr); err != nil {
				return err
			}
			imOK = true
		case RootfsDir:
			if !hdr.FileInfo().IsDir() {
				return fmt.Errorf("rootfs is not a directory")
			}
			rfsOK = true
		default:
			if fseen[name] {
				return fmt.Errorf("duplicate file entry in archive: %s", name)
			}
			fseen[name] = true
		}
	}
	flist := make([]string, 0, len(fseen))
	for name := range fseen {
		flist = append(flist, name)
	}
	return validate(imOK, &im, rfsOK, flist)
}
// validate performs the layout checks shared by ValidateLayout and
// ValidateArchive: a manifest must exist and parse as an ImageManifest, a
// rootfs directory must exist, and every other file must live under rootfs.
func validate(imOK bool, im io.Reader, rfsOK bool, files []string) error {
	defer func() {
		// Close the manifest stream if the caller handed us a Closer
		// (ValidateLayout passes an open *os.File).
		if c, ok := im.(io.Closer); ok {
			c.Close()
		}
	}()
	switch {
	case !imOK:
		return ErrNoManifest
	case !rfsOK:
		return ErrNoRootFS
	}
	raw, err := ioutil.ReadAll(im)
	if err != nil {
		return fmt.Errorf("error reading image manifest: %v", err)
	}
	var manifest schema.ImageManifest
	if err := manifest.UnmarshalJSON(raw); err != nil {
		return fmt.Errorf("image manifest validation failed: %v", err)
	}
	for _, f := range files {
		if !strings.HasPrefix(f, "rootfs") {
			return fmt.Errorf("unrecognized file path in layout: %q", f)
		}
	}
	return nil
}

View File

@@ -1,62 +0,0 @@
package aci
import (
"io/ioutil"
"os"
"path"
"testing"
)
func newValidateLayoutTest() (string, error) {
td, err := ioutil.TempDir("", "")
if err != nil {
return "", err
}
if err := os.MkdirAll(path.Join(td, "rootfs"), 0755); err != nil {
return "", err
}
if err := os.MkdirAll(path.Join(td, "rootfs", "dir", "rootfs"), 0755); err != nil {
return "", err
}
evilManifestBody := "malformedManifest"
manifestBody := `{"acKind":"ImageManifest","acVersion":"0.3.0","name":"example.com/app"}`
evilManifestPath := "rootfs/manifest"
evilManifestPath = path.Join(td, evilManifestPath)
em, err := os.Create(evilManifestPath)
if err != nil {
return "", err
}
em.WriteString(evilManifestBody)
em.Close()
manifestPath := path.Join(td, "manifest")
m, err := os.Create(manifestPath)
if err != nil {
return "", err
}
m.WriteString(manifestBody)
m.Close()
return td, nil
}
// TestValidateLayout checks that a well-formed on-disk layout validates.
func TestValidateLayout(t *testing.T) {
	layoutPath, err := newValidateLayoutTest()
	if err != nil {
		t.Fatalf("newValidateLayoutTest: unexpected error: %v", err)
	}
	defer os.RemoveAll(layoutPath)
	if err := ValidateLayout(layoutPath); err != nil {
		t.Fatalf("ValidateLayout: unexpected error: %v", err)
	}
}

View File

@@ -1,84 +0,0 @@
package aci
import (
"archive/tar"
"bytes"
"encoding/json"
"io"
"time"
"github.com/appc/spec/schema"
)
// ArchiveWriter writes App Container Images. Users wanting to create an ACI
// should create an ArchiveWriter and add files to it; the ACI will be written
// to the underlying tar.Writer
type ArchiveWriter interface {
// AddFile writes hdr to the image, followed by the contents of r when r is
// non-nil (entries such as directories and links carry no body).
AddFile(hdr *tar.Header, r io.Reader) error
// Close finalizes the image, appending the manifest last.
Close() error
}
// imageArchiveWriter generates an ACI by delegating to an embedded
// tar.Writer, holding the manifest aside so Close can append it.
type imageArchiveWriter struct {
*tar.Writer
// am is the manifest serialized into the archive on Close.
am *schema.ImageManifest
}
// NewImageWriter creates a new ArchiveWriter which will generate an App
// Container Image based on the given manifest and write it to the given
// tar.Writer
func NewImageWriter(am schema.ImageManifest, w *tar.Writer) ArchiveWriter {
aw := &imageArchiveWriter{
w,
&am,
}
return aw
}
// AddFile writes hdr to the underlying archive, followed by the contents of
// r when r is non-nil (directories, links and special files have no body).
func (aw *imageArchiveWriter) AddFile(hdr *tar.Header, r io.Reader) error {
	if err := aw.Writer.WriteHeader(hdr); err != nil {
		return err
	}
	if r == nil {
		return nil
	}
	_, err := io.Copy(aw.Writer, r)
	return err
}
// addFileNow writes a synthetic regular file owned by root with the given
// contents, stamped with the current time.
func (aw *imageArchiveWriter) addFileNow(path string, contents []byte) error {
	now := time.Now()
	body := bytes.NewBuffer(contents)
	hdr := tar.Header{
		Name:       path,
		Mode:       0644,
		Uid:        0,
		Gid:        0,
		Size:       int64(body.Len()),
		ModTime:    now,
		Typeflag:   tar.TypeReg,
		Uname:      "root",
		Gname:      "root",
		ChangeTime: now,
	}
	return aw.AddFile(&hdr, body)
}
// addManifest serializes m to JSON and stores it in the archive under name.
func (aw *imageArchiveWriter) addManifest(name string, m json.Marshaler) error {
	raw, err := m.MarshalJSON()
	if err != nil {
		return err
	}
	return aw.addFileNow(name, raw)
}
// Close appends the image manifest as the final archive entry and then
// closes the underlying tar.Writer.
func (aw *imageArchiveWriter) Close() error {
	if err := aw.addManifest(ManifestFile, aw.am); err != nil {
		return err
	}
	return aw.Writer.Close()
}

View File

@@ -1,223 +0,0 @@
package acirenderer
import (
"archive/tar"
"crypto/sha512"
"fmt"
"hash"
"io"
"io/ioutil"
"path/filepath"
"strings"
"github.com/appc/spec/schema"
"github.com/appc/spec/schema/types"
)
// An ACIRegistry provides all functions of an ACIProvider plus functions to
// search for an aci and get its contents
type ACIRegistry interface {
ACIProvider
// GetImageManifest returns the parsed manifest stored under key.
GetImageManifest(key string) (*schema.ImageManifest, error)
// GetACI returns the key of the ACI matching name and labels.
GetACI(name types.ACName, labels types.Labels) (string, error)
}
// An ACIProvider provides functions to get an ACI contents, to convert an
// ACI hash to the key under which the ACI is known to the provider and to resolve an
// ImageID to the key under which it's known to the provider.
type ACIProvider interface {
// Read the ACI contents stream given the key. Use ResolveKey to
// convert an ImageID to the relative provider's key.
ReadStream(key string) (io.ReadCloser, error)
// Converts an ImageID to the, if existent, key under which the
// ACI is known to the provider
ResolveKey(key string) (string, error)
// Converts a Hash to the provider's key
HashToKey(h hash.Hash) string
}
// An Image contains the ImageManifest, the ACIProvider's key and its Level in
// the dependency tree.
type Image struct {
// Im is the parsed image manifest.
Im *schema.ImageManifest
// Key identifies this image to the ACIProvider.
Key string
// Level is the image's depth in the flattened dependency tree (0 = top image).
Level uint16
}
// Images encapsulates an ordered slice of Image structs. It represents a flat
// dependency tree.
// The upper Image should be the first in the slice with a level of 0.
// For example if A is the upper image and has two deps (in order B and C). And C has one dep (D),
// the slice (reporting the app name and excluding im and Hash) should be:
// [{A, Level: 0}, {C, Level:1}, {D, Level: 2}, {B, Level: 1}]
type Images []Image
// ACIFiles represents which files to extract for every ACI
type ACIFiles struct {
// Key identifies the ACI the files belong to.
Key string
// FileMap is the set of (cleaned) entry paths to extract from that ACI.
FileMap map[string]struct{}
}
// RenderedACI is an (ordered) slice of ACIFiles
type RenderedACI []*ACIFiles
// GetRenderedACIWithImageID, given an imageID, starts with the matching image
// available in the store, creates the dependencies list and returns the
// RenderedACI list.
func GetRenderedACIWithImageID(imageID types.Hash, ap ACIRegistry) (RenderedACI, error) {
	deps, err := CreateDepListFromImageID(imageID, ap)
	if err != nil {
		return nil, err
	}
	return GetRenderedACIFromList(deps, ap)
}
// GetRenderedACI, given an image app name and optional labels, starts with the
// best matching image available in the store, creates the dependencies list
// and returns the RenderedACI list.
func GetRenderedACI(name types.ACName, labels types.Labels, ap ACIRegistry) (RenderedACI, error) {
	deps, err := CreateDepListFromNameLabels(name, labels, ap)
	if err != nil {
		return nil, err
	}
	return GetRenderedACIFromList(deps, ap)
}
// GetRenderedACIFromList returns the RenderedACI list. All files outside
// rootfs are excluded (at the moment only "manifest").
func GetRenderedACIFromList(imgs Images, ap ACIProvider) (RenderedACI, error) {
	if len(imgs) == 0 {
		return nil, fmt.Errorf("image list empty")
	}
	// Tracks every path already claimed by an earlier (higher-priority) image.
	seen := make(map[string]struct{})
	rendered := RenderedACI{}
	for i, img := range imgs {
		pwlm := getUpperPWLM(imgs, i)
		ra, err := getACIFiles(img, ap, seen, pwlm)
		if err != nil {
			return nil, err
		}
		if i == 0 {
			// Only the manifest of the top-level (first) ACI is kept.
			ra.FileMap["manifest"] = struct{}{}
		}
		rendered = append(rendered, ra)
	}
	return rendered, nil
}
// getUpperPWLM returns, as a set, the path whitelist defined at the lowest
// level above imgs[pos] on the branch where imgs[pos] lives, or nil if none.
func getUpperPWLM(imgs Images, pos int) map[string]struct{} {
	var whitelist map[string]struct{}
	prevLevel := imgs[pos].Level
	// Walk backwards from pos; only entries whose level is strictly lower
	// than the previous one are ancestors on this branch — sibling leaves
	// are skipped.
	for i := pos; i >= 0; i-- {
		cur := imgs[i]
		if cur.Level < prevLevel && len(cur.Im.PathWhitelist) > 0 {
			whitelist = pwlToMap(cur.Im.PathWhitelist)
		}
		prevLevel = cur.Level
	}
	return whitelist
}
// getACIFiles returns the ACIFiles struct for the given image. All files
// outside rootfs are excluded (at the moment only "manifest").
//
// allFiles is the accumulated set of paths already claimed by earlier
// images (entries are added for every path this image contributes); pwlm is
// the ancestor path whitelist for this image's branch, or nil.
func getACIFiles(img Image, ap ACIProvider, allFiles map[string]struct{}, pwlm map[string]struct{}) (*ACIFiles, error) {
rs, err := ap.ReadStream(img.Key)
if err != nil {
return nil, err
}
defer rs.Close()
// Every byte read from the stream is also folded into this SHA-512 via the
// TeeReader; the digest is checked against the store key below, so the
// stream must be consumed in order and in full.
hash := sha512.New()
r := io.TeeReader(rs, hash)
thispwlm := pwlToMap(img.Im.PathWhitelist)
ra := &ACIFiles{FileMap: make(map[string]struct{})}
if err = Walk(tar.NewReader(r), func(hdr *tar.Header) error {
name := hdr.Name
cleanName := filepath.Clean(name)
// Ignore files outside /rootfs/ (at the moment only "manifest")
if !strings.HasPrefix(cleanName, "rootfs/") {
return nil
}
// Is the file in our PathWhiteList?
// If the file is a directory continue also if not in PathWhiteList
if hdr.Typeflag != tar.TypeDir {
if len(img.Im.PathWhitelist) > 0 {
if _, ok := thispwlm[cleanName]; !ok {
return nil
}
}
}
// Is the file in the lower level PathWhiteList of this img branch?
if pwlm != nil {
if _, ok := pwlm[cleanName]; !ok {
return nil
}
}
// Is the file already provided by a previous image?
if _, ok := allFiles[cleanName]; ok {
return nil
}
ra.FileMap[cleanName] = struct{}{}
allFiles[cleanName] = struct{}{}
return nil
}); err != nil {
return nil, err
}
// Tar does not necessarily read the complete file, so ensure we read the entirety into the hash
if _, err := io.Copy(ioutil.Discard, r); err != nil {
return nil, fmt.Errorf("error reading ACI: %v", err)
}
// Integrity check: the computed digest must map back to the store key.
if g := ap.HashToKey(hash); g != img.Key {
return nil, fmt.Errorf("image hash does not match expected (%s != %s)", g, img.Key)
}
ra.Key = img.Key
return ra, nil
}
// pwlToMap converts a pathWhiteList slice to a set for faster lookups.
// Each entry is joined under "rootfs/" (and thereby made relative to "/")
// so it can be compared directly against tar.Header.Name values.
// A nil map is returned for an empty whitelist.
func pwlToMap(pwl []string) map[string]struct{} {
	if len(pwl) == 0 {
		return nil
	}
	set := make(map[string]struct{}, len(pwl))
	for _, p := range pwl {
		set[filepath.Join("rootfs", p)] = struct{}{}
	}
	return set
}
func Walk(tarReader *tar.Reader, walkFunc func(hdr *tar.Header) error) error {
for {
hdr, err := tarReader.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return fmt.Errorf("Error reading tar entry: %v", err)
}
if err := walkFunc(hdr); err != nil {
return err
}
}
return nil
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,74 +0,0 @@
package acirenderer
import (
"container/list"
"github.com/appc/spec/schema/types"
)
// CreateDepListFromImageID returns the flat dependency tree of the image with
// the provided imageID.
func CreateDepListFromImageID(imageID types.Hash, ap ACIRegistry) (Images, error) {
	storeKey, err := ap.ResolveKey(imageID.String())
	if err == nil {
		return createDepList(storeKey, ap)
	}
	return nil, err
}
// CreateDepListFromNameLabels returns the flat dependency tree of the image
// with the provided app name and optional labels.
func CreateDepListFromNameLabels(name types.ACName, labels types.Labels, ap ACIRegistry) (Images, error) {
	storeKey, err := ap.GetACI(name, labels)
	if err == nil {
		return createDepList(storeKey, ap)
	}
	return nil, err
}
// createDepList returns the flat dependency tree as a list of Image type
func createDepList(key string, ap ACIRegistry) (Images, error) {
imgsl := list.New()
im, err := ap.GetImageManifest(key)
if err != nil {
return nil, err
}
img := Image{Im: im, Key: key, Level: 0}
imgsl.PushFront(img)
// Create a flat dependency tree. Use a LinkedList to be able to
// insert elements in the list while working on it.
for el := imgsl.Front(); el != nil; el = el.Next() {
img := el.Value.(Image)
dependencies := img.Im.Dependencies
for _, d := range dependencies {
var depimg Image
var depKey string
// A dependency may be pinned by ImageID; otherwise resolve by
// name and labels.
if d.ImageID != nil && !d.ImageID.Empty() {
depKey, err = ap.ResolveKey(d.ImageID.String())
if err != nil {
return nil, err
}
} else {
var err error
depKey, err = ap.GetACI(d.App, d.Labels)
if err != nil {
return nil, err
}
}
im, err := ap.GetImageManifest(depKey)
if err != nil {
return nil, err
}
depimg = Image{Im: im, Key: depKey, Level: img.Level + 1}
// NOTE: each InsertAfter at el places later siblings *before*
// earlier ones directly after their parent; together with the
// forward scan this yields the branch ordering documented on
// the Images type (e.g. [A, C, D, B]).
imgsl.InsertAfter(depimg, el)
}
}
// Copy the linked list into the Images slice returned to callers.
imgs := Images{}
for el := imgsl.Front(); el != nil; el = el.Next() {
imgs = append(imgs, el.Value.(Image))
}
return imgs, nil
}

View File

@@ -1,91 +0,0 @@
package acirenderer
import (
"bytes"
"fmt"
"hash"
"io"
"io/ioutil"
"os"
"github.com/appc/spec/aci"
"github.com/appc/spec/schema"
"github.com/appc/spec/schema/types"
)
const (
// hashPrefix matches the key format produced by HashToKey below.
hashPrefix = "sha512-"
)
// TestStoreAci is one stored image: its raw bytes, store key and manifest.
type TestStoreAci struct {
data []byte
key string
ImageManifest *schema.ImageManifest
}
// TestStore is an in-memory ACIRegistry implementation for tests, keyed by
// the image's SHA-512-derived store key.
type TestStore struct {
acis map[string]*TestStoreAci
}
// NewTestStore returns an empty TestStore.
func NewTestStore() *TestStore {
return &TestStore{acis: make(map[string]*TestStoreAci)}
}
// WriteACI loads the ACI file at path into the store and returns its key.
func (ts *TestStore) WriteACI(path string) (string, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return "", err
}
imageID := types.NewHashSHA512(data)
rs, err := os.Open(path)
if err != nil {
return "", err
}
defer rs.Close()
im, err := aci.ManifestFromImage(rs)
if err != nil {
return "", fmt.Errorf("error retrieving ImageManifest: %v", err)
}
key := imageID.String()
ts.acis[key] = &TestStoreAci{data: data, key: key, ImageManifest: im}
return key, nil
}
// GetImageManifest returns the manifest stored under key.
func (ts *TestStore) GetImageManifest(key string) (*schema.ImageManifest, error) {
aci, ok := ts.acis[key]
if !ok {
return nil, fmt.Errorf("aci with key: %s not found", key)
}
return aci.ImageManifest, nil
}
// GetACI returns the key of the first stored ACI whose manifest name matches.
// Note: labels are ignored by this test implementation.
func (ts *TestStore) GetACI(name types.ACName, labels types.Labels) (string, error) {
for _, aci := range ts.acis {
if aci.ImageManifest.Name.String() == name.String() {
return aci.key, nil
}
}
return "", fmt.Errorf("aci not found")
}
// ReadStream returns a reader over the stored ACI bytes for key.
func (ts *TestStore) ReadStream(key string) (io.ReadCloser, error) {
aci, ok := ts.acis[key]
if !ok {
return nil, fmt.Errorf("stream for key: %s not found", key)
}
return ioutil.NopCloser(bytes.NewReader(aci.data)), nil
}
// ResolveKey is the identity in this test store: image IDs are used as keys.
func (ts *TestStore) ResolveKey(key string) (string, error) {
return key, nil
}
// HashToKey takes a hash.Hash (which currently _MUST_ represent a full SHA512),
// calculates its sum, and returns a string which should be used as the key to
// store the data matching the hash.
func (ts *TestStore) HashToKey(h hash.Hash) string {
s := h.Sum(nil)
return fmt.Sprintf("%s%x", hashPrefix, s)
}

View File

@@ -1,3 +0,0 @@
// Package tarheader contains a simple abstraction to accurately create
// tar.Headers on different operating systems.
package tarheader

View File

@@ -1,25 +0,0 @@
//+build darwin
package tarheader
import (
"archive/tar"
"os"
"syscall"
"time"
)
// init registers the darwin ctime populator with the shared
// populateHeaderStat hook list consumed by Populate.
func init() {
populateHeaderStat = append(populateHeaderStat, populateHeaderCtime)
}
// populateHeaderCtime copies the file's change time from the raw stat data
// into the tar header. Ctimespec is the darwin name of the stat field (this
// file is darwin-only per its build tag).
func populateHeaderCtime(h *tar.Header, fi os.FileInfo, _ map[uint64]string) {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
// Not an OS-native FileInfo (e.g. synthetic); leave ChangeTime unset.
return
}
sec, nsec := st.Ctimespec.Unix()
ctime := time.Unix(sec, nsec)
h.ChangeTime = ctime
}

View File

@@ -1,23 +0,0 @@
package tarheader
import (
"archive/tar"
"os"
"syscall"
"time"
)
// init registers the ctime populator with the shared populateHeaderStat
// hook list consumed by Populate.
func init() {
populateHeaderStat = append(populateHeaderStat, populateHeaderCtime)
}
// populateHeaderCtime copies the file's change time from the raw stat data
// into the tar header. Ctim is the Linux stat field name — presumably this
// is the linux build variant (no build tag is visible in this view; the
// darwin counterpart uses Ctimespec).
func populateHeaderCtime(h *tar.Header, fi os.FileInfo, _ map[uint64]string) {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
// Not an OS-native FileInfo; leave ChangeTime unset.
return
}
sec, nsec := st.Ctim.Unix()
ctime := time.Unix(sec, nsec)
h.ChangeTime = ctime
}

View File

@@ -1,51 +0,0 @@
package tarheader
/*
#define _BSD_SOURCE
#define _DEFAULT_SOURCE
#include <sys/types.h>
unsigned int
my_major(dev_t dev)
{
return major(dev);
}
unsigned int
my_minor(dev_t dev)
{
return minor(dev);
}
*/
import "C"
import (
"archive/tar"
"os"
"syscall"
)
// init registers the unix populator with the shared populateHeaderStat hook
// list consumed by Populate.
func init() {
populateHeaderStat = append(populateHeaderStat, populateHeaderUnix)
}
// populateHeaderUnix fills owner IDs, device numbers (via the cgo
// major/minor helpers declared in this file's preamble) and hard-link
// information from the raw stat data. seen maps inode -> first path, so
// repeat inodes are emitted as hard links.
func populateHeaderUnix(h *tar.Header, fi os.FileInfo, seen map[uint64]string) {
st, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
// Not an OS-native FileInfo; nothing to populate.
return
}
h.Uid = int(st.Uid)
h.Gid = int(st.Gid)
// Block/char devices additionally carry major/minor numbers.
if st.Mode&syscall.S_IFMT == syscall.S_IFBLK || st.Mode&syscall.S_IFMT == syscall.S_IFCHR {
h.Devminor = int64(C.my_minor(C.dev_t(st.Rdev)))
h.Devmajor = int64(C.my_major(C.dev_t(st.Rdev)))
}
// If we have already seen this inode, generate a hardlink
p, ok := seen[uint64(st.Ino)]
if ok {
h.Linkname = p
h.Typeflag = tar.TypeLink
} else {
seen[uint64(st.Ino)] = h.Name
}
}

View File

@@ -1,61 +0,0 @@
package tarheader
import (
"archive/tar"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"testing"
)
// mknod requires privilege ...
// TestHeaderUnixDev creates a real block device node under a temp dir (or
// skips when the process lacks CAP_MKNOD) and checks that populateHeaderUnix
// fills the expected major/minor numbers into the tar header.
func TestHeaderUnixDev(t *testing.T) {
hExpect := tar.Header{
Name: "./dev/test0",
Size: 0,
Typeflag: tar.TypeBlock,
Devminor: 5,
Devmajor: 233,
}
// make our test block device
// (scoped block so the setup locals don't leak into the assertions below)
var path string
{
var err error
path, err = ioutil.TempDir("", "tarheader-test-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(path)
if err := os.Mkdir(filepath.Join(path, "dev"), os.FileMode(0755)); err != nil {
t.Fatal(err)
}
mode := uint32(hExpect.Mode&07777) | syscall.S_IFBLK
// dev encoding: presumably the Linux dev_t major/minor packing — confirm
// against the kernel's MKDEV layout if this test is revived.
dev := uint32(((hExpect.Devminor & 0xfff00) << 12) | ((hExpect.Devmajor & 0xfff) << 8) | (hExpect.Devminor & 0xff))
if err := syscall.Mknod(filepath.Join(path, hExpect.Name), mode, int(dev)); err != nil {
if err == syscall.EPERM {
// unprivileged runner: cannot mknod, so the test is inconclusive
t.Skip("no permission to CAP_MKNOD")
}
t.Fatal(err)
}
}
fi, err := os.Stat(filepath.Join(path, hExpect.Name))
if err != nil {
t.Fatal(err)
}
hGot := tar.Header{
Name: "./dev/test0",
Size: 0,
Typeflag: tar.TypeBlock,
}
seen := map[uint64]string{}
populateHeaderUnix(&hGot, fi, seen)
if hGot.Devminor != hExpect.Devminor {
t.Errorf("dev minor: got %d, expected %d", hGot.Devminor, hExpect.Devminor)
}
if hGot.Devmajor != hExpect.Devmajor {
t.Errorf("dev major: got %d, expected %d", hGot.Devmajor, hExpect.Devmajor)
}
}

View File

@@ -1,14 +0,0 @@
package tarheader
import (
"archive/tar"
"os"
)
var populateHeaderStat []func(h *tar.Header, fi os.FileInfo, seen map[uint64]string)
func Populate(h *tar.Header, fi os.FileInfo, seen map[uint64]string) {
for _, pop := range populateHeaderStat {
pop(h, fi, seen)
}
}

View File

@@ -1 +0,0 @@
*~

View File

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,3 +0,0 @@
File locking library.
See http://godoc.org/github.com/camlistore/lock

View File

@@ -1,158 +0,0 @@
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import (
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"sync"
)
// Lock locks the given file, creating the file if necessary. If the
// file already exists, it must have zero size or an error is returned.
// The lock is an exclusive lock (a write lock), but locked files
// should neither be read from nor written to. Such files should have
// zero size and only exist to co-ordinate ownership across processes.
//
// A nil Closer is returned if an error occurred. Otherwise, close that
// Closer to release the lock.
//
// On Linux, FreeBSD and OSX, a lock has the same semantics as fcntl(2)'s
// advisory locks. In particular, closing any other file descriptor for the
// same file will release the lock prematurely.
//
// Attempting to lock a file that is already locked by the current process
// has undefined behavior.
//
// On other operating systems, lock will fallback to using the presence and
// content of a file named name + '.lock' to implement locking behavior.
//
// NOTE(review): the portable fallback (lockPortable) actually locks name
// itself, not name + ".lock" — the sentence above looks stale; confirm.
func Lock(name string) (io.Closer, error) {
	// lockFn is selected per-platform in init (fcntl, Plan 9 ModeExclusive,
	// App Engine stub, or the portable file-based fallback).
	return lockFn(name)
}
var lockFn = lockPortable
// Portable version not using fcntl. Doesn't handle crashes as gracefully,
// since it can leave stale lock files.
//
// lockPortable exclusively creates name and writes the owner's PID into it
// as JSON. A pre-existing non-empty file is treated as another process's
// lock unless that process is gone (isStaleLock), in which case the stale
// file is removed and locking proceeds.
// TODO: write pid of owner to lock file and on race see if pid is
// still alive?
func lockPortable(name string) (io.Closer, error) {
	absName, err := filepath.Abs(name)
	if err != nil {
		return nil, fmt.Errorf("can't Lock file %q: can't find abs path: %v", name, err)
	}
	fi, err := os.Stat(absName)
	if err == nil && fi.Size() > 0 {
		if isStaleLock(absName) {
			os.Remove(absName)
		} else {
			return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
		}
	}
	// O_EXCL guarantees we created the file, so a concurrent locker loses.
	f, err := os.OpenFile(absName, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_EXCL, 0666)
	if err != nil {
		return nil, fmt.Errorf("failed to create lock file %s %v", absName, err)
	}
	if err := json.NewEncoder(f).Encode(&pidLockMeta{OwnerPID: os.Getpid()}); err != nil {
		// Fix: previously the descriptor leaked and a half-written lock
		// file was left behind, blocking all future Lock attempts.
		f.Close()
		os.Remove(absName)
		return nil, err
	}
	return &lockCloser{f: f, abs: absName}, nil
}
// pidLockMeta is the JSON payload written into a portable lock file. It
// records which process owns the lock so isStaleLock can later detect
// locks abandoned by dead processes.
type pidLockMeta struct {
	OwnerPID int
}
// isStaleLock reports whether the lock file at path was left behind by a
// process that no longer exists. Every parse or probe failure is treated
// as "not stale", so a live lock is never stolen.
func isStaleLock(path string) bool {
	file, err := os.Open(path)
	if err != nil {
		return false
	}
	defer file.Close()

	var meta pidLockMeta
	if err := json.NewDecoder(file).Decode(&meta); err != nil {
		return false
	}
	if meta.OwnerPID == 0 {
		return false
	}

	owner, err := os.FindProcess(meta.OwnerPID)
	if err != nil {
		// e.g. on Windows, FindProcess fails for a dead PID.
		return true
	}
	// On unix, os.FindProcess always succeeds, so probe with signal zero
	// (set by lock_sigzero.go) to learn whether the owner is alive.
	if signalZero == nil {
		return false
	}
	return owner.Signal(signalZero) != nil
}
var signalZero os.Signal // nil or set by lock_sigzero.go
type lockCloser struct {
f *os.File
abs string
once sync.Once
err error
}
func (lc *lockCloser) Close() error {
lc.once.Do(lc.close)
return lc.err
}
func (lc *lockCloser) close() {
if err := lc.f.Close(); err != nil {
lc.err = err
}
if err := os.Remove(lc.abs); err != nil {
lc.err = err
}
}
// Process-wide registry of fcntl-style locks held by this process. fcntl
// locks are per-process, so a second in-process Lock on the same path must
// be refused here rather than relying on the kernel (which would grant it).
var (
	lockmu sync.Mutex
	locked = map[string]bool{} // abs path -> true
)
// unlocker is used by the darwin and linux implementations with fcntl
// advisory locks.
type unlocker struct {
	f   *os.File
	abs string
}

// Close releases the lock: it drops the in-process registration, removes
// the (empty) lock file, and closes the descriptor — closing is what
// releases the fcntl lock itself.
func (u *unlocker) Close() error {
	// Fix: previously, if u.f.Close() failed, the function returned with
	// lockmu still held (deadlocking all later Lock/Close calls) and with
	// u.abs still marked locked. Clean up the registry unconditionally.
	lockmu.Lock()
	delete(locked, u.abs)
	lockmu.Unlock()

	// Remove is not necessary but it's nice for us to clean up. It must
	// happen before u.f.Close below.
	os.Remove(u.abs)

	return u.f.Close()
}

View File

@@ -1,32 +0,0 @@
// +build appengine
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import (
"errors"
"io"
)
// init installs the App Engine stub as the Lock implementation.
func init() {
	lockFn = lockAppEngine
}

// lockAppEngine always fails: the App Engine sandbox provides no usable
// file-locking primitive.
func lockAppEngine(name string) (io.Closer, error) {
	return nil, errors.New("Lock not available on App Engine")
}

View File

@@ -1,80 +0,0 @@
// +build darwin,amd64
// +build !appengine
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import (
"fmt"
"io"
"os"
"path/filepath"
"syscall"
"unsafe"
)
func init() {
	lockFn = lockFcntl
}

// lockFcntl takes an exclusive fcntl(F_SETLK) write lock on name, creating
// the file if needed. The file must be empty; it exists only to carry the
// lock. The in-process registry (locked) refuses double-locking, since a
// second fcntl lock from the same process would silently succeed.
func lockFcntl(name string) (io.Closer, error) {
	abs, err := filepath.Abs(name)
	if err != nil {
		return nil, err
	}
	lockmu.Lock()
	if locked[abs] {
		lockmu.Unlock()
		return nil, fmt.Errorf("file %q already locked", abs)
	}
	locked[abs] = true
	lockmu.Unlock()

	// Fix: un-register abs on any failure below; previously the entry
	// leaked and every later Lock on abs failed as "already locked".
	release := func() {
		lockmu.Lock()
		delete(locked, abs)
		lockmu.Unlock()
	}

	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		release()
		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
	}
	f, err := os.Create(name)
	if err != nil {
		release()
		return nil, fmt.Errorf("Lock Create of %s (abs: %s) failed: %v", name, abs, err)
	}
	// This type matches C's "struct flock" defined in /usr/include/sys/fcntl.h.
	// TODO: move this into the standard syscall package.
	k := struct {
		Start  uint64 // sizeof(off_t): 8
		Len    uint64 // sizeof(off_t): 8
		Pid    uint32 // sizeof(pid_t): 4
		Type   uint16 // sizeof(short): 2
		Whence uint16 // sizeof(short): 2
	}{
		Type:   syscall.F_WRLCK,
		Whence: uint16(os.SEEK_SET),
		Start:  0,
		Len:    0, // 0 means to lock the entire file.
		Pid:    uint32(os.Getpid()),
	}
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
	if errno != 0 {
		f.Close()
		release()
		return nil, errno
	}
	return &unlocker{f, abs}, nil
}

View File

@@ -1,79 +0,0 @@
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import (
"fmt"
"io"
"os"
"path/filepath"
"syscall"
"unsafe"
)
func init() {
	lockFn = lockFcntl
}

// lockFcntl takes an exclusive fcntl(F_SETLK) write lock on name, creating
// the file if needed. The file must be empty; it exists only to carry the
// lock. The in-process registry (locked) refuses double-locking, since a
// second fcntl lock from the same process would silently succeed.
func lockFcntl(name string) (io.Closer, error) {
	abs, err := filepath.Abs(name)
	if err != nil {
		return nil, err
	}
	lockmu.Lock()
	if locked[abs] {
		lockmu.Unlock()
		return nil, fmt.Errorf("file %q already locked", abs)
	}
	locked[abs] = true
	lockmu.Unlock()

	// Fix: un-register abs on any failure below; previously the entry
	// leaked and every later Lock on abs failed as "already locked".
	release := func() {
		lockmu.Lock()
		delete(locked, abs)
		lockmu.Unlock()
	}

	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		release()
		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
	}
	f, err := os.Create(name)
	if err != nil {
		release()
		return nil, err
	}
	// This type matches C's "struct flock" defined in /usr/include/fcntl.h.
	// TODO: move this into the standard syscall package.
	k := struct {
		Start  int64 /* off_t starting offset */
		Len    int64 /* off_t len = 0 means until end of file */
		Pid    int32 /* pid_t lock owner */
		Type   int16 /* short lock type: read/write, etc. */
		Whence int16 /* short type of l_start */
		Sysid  int32 /* int remote system id or zero for local */
	}{
		Start:  0,
		Len:    0, // 0 means to lock the entire file.
		Pid:    int32(os.Getpid()),
		Type:   syscall.F_WRLCK,
		Whence: int16(os.SEEK_SET),
		Sysid:  0,
	}
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
	if errno != 0 {
		f.Close()
		release()
		return nil, errno
	}
	return &unlocker{f, abs}, nil
}

View File

@@ -1,80 +0,0 @@
// +build linux,amd64
// +build !appengine
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import (
"fmt"
"io"
"os"
"path/filepath"
"syscall"
"unsafe"
)
func init() {
	lockFn = lockFcntl
}

// lockFcntl takes an exclusive fcntl(F_SETLK) write lock on name, creating
// the file if needed. The file must be empty; it exists only to carry the
// lock. The in-process registry (locked) refuses double-locking, since a
// second fcntl lock from the same process would silently succeed.
func lockFcntl(name string) (io.Closer, error) {
	abs, err := filepath.Abs(name)
	if err != nil {
		return nil, err
	}
	lockmu.Lock()
	if locked[abs] {
		lockmu.Unlock()
		return nil, fmt.Errorf("file %q already locked", abs)
	}
	locked[abs] = true
	lockmu.Unlock()

	// Fix: un-register abs on any failure below; previously the entry
	// leaked and every later Lock on abs failed as "already locked".
	release := func() {
		lockmu.Lock()
		delete(locked, abs)
		lockmu.Unlock()
	}

	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		release()
		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
	}
	f, err := os.Create(name)
	if err != nil {
		release()
		return nil, err
	}
	// This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h.
	// TODO: move this into the standard syscall package.
	k := struct {
		Type   uint32
		Whence uint32
		Start  uint64
		Len    uint64
		Pid    uint32
	}{
		Type:   syscall.F_WRLCK,
		Whence: uint32(os.SEEK_SET),
		Start:  0,
		Len:    0, // 0 means to lock the entire file.
		Pid:    uint32(os.Getpid()),
	}
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_SETLK), uintptr(unsafe.Pointer(&k)))
	if errno != 0 {
		f.Close()
		release()
		return nil, errno
	}
	return &unlocker{f, abs}, nil
}

View File

@@ -1,81 +0,0 @@
// +build linux,arm
// +build !appengine
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import (
"fmt"
"io"
"os"
"path/filepath"
"syscall"
"unsafe"
)
func init() {
	lockFn = lockFcntl
}

// lockFcntl takes an exclusive fcntl(F_SETLK) write lock on name, creating
// the file if needed. The file must be empty; it exists only to carry the
// lock. The in-process registry (locked) refuses double-locking, since a
// second fcntl lock from the same process would silently succeed.
func lockFcntl(name string) (io.Closer, error) {
	abs, err := filepath.Abs(name)
	if err != nil {
		return nil, err
	}
	lockmu.Lock()
	if locked[abs] {
		lockmu.Unlock()
		return nil, fmt.Errorf("file %q already locked", abs)
	}
	locked[abs] = true
	lockmu.Unlock()

	// Fix: un-register abs on any failure below; previously the entry
	// leaked and every later Lock on abs failed as "already locked".
	release := func() {
		lockmu.Lock()
		delete(locked, abs)
		lockmu.Unlock()
	}

	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		release()
		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
	}
	f, err := os.Create(name)
	if err != nil {
		release()
		return nil, err
	}
	// This type matches C's "struct flock" defined in /usr/include/bits/fcntl.h.
	// TODO: move this into the standard syscall package.
	k := struct {
		Type   uint16
		Whence uint16
		Start  uint32
		Len    uint32
		Pid    uint32
	}{
		Type:   syscall.F_WRLCK,
		Whence: uint16(os.SEEK_SET),
		Start:  0,
		Len:    0, // 0 means to lock the entire file.
		Pid:    uint32(os.Getpid()),
	}
	const F_SETLK = 6 // actual value. syscall package is wrong: golang.org/issue/7059
	_, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(F_SETLK), uintptr(unsafe.Pointer(&k)))
	if errno != 0 {
		f.Close()
		release()
		return nil, errno
	}
	return &unlocker{f, abs}, nil
}

View File

@@ -1,55 +0,0 @@
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import (
"fmt"
"io"
"os"
"path/filepath"
)
func init() {
	lockFn = lockPlan9
}

// lockPlan9 locks name by opening it with Plan 9's ModeExclusive bit set,
// letting the file server enforce single-open. The file must be empty; it
// exists only to carry the lock.
func lockPlan9(name string) (io.Closer, error) {
	var f *os.File
	abs, err := filepath.Abs(name)
	if err != nil {
		return nil, err
	}
	lockmu.Lock()
	if locked[abs] {
		lockmu.Unlock()
		return nil, fmt.Errorf("file %q already locked", abs)
	}
	locked[abs] = true
	lockmu.Unlock()

	// Fix: un-register abs on any failure below; previously the entry
	// leaked and every later Lock on abs failed as "already locked".
	release := func() {
		lockmu.Lock()
		delete(locked, abs)
		lockmu.Unlock()
	}

	fi, err := os.Stat(name)
	if err == nil && fi.Size() > 0 {
		release()
		return nil, fmt.Errorf("can't Lock file %q: has non-zero size", name)
	}
	f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)
	if err != nil {
		release()
		return nil, fmt.Errorf("Lock Create of %s (abs: %s) failed: %v", name, abs, err)
	}
	return &unlocker{f, abs}, nil
}

View File

@@ -1,26 +0,0 @@
// +build !appengine
// +build linux darwin freebsd openbsd netbsd dragonfly
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import "syscall"
// init provides signal 0 ("probe only") so isStaleLock can check whether a
// lock-owning PID is still alive without affecting the process.
func init() {
	signalZero = syscall.Signal(0)
}

View File

@@ -1,131 +0,0 @@
/*
Copyright 2013 The Go Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lock
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"testing"
)
// TestLock exercises the platform-native lock implementation end to end.
func TestLock(t *testing.T) {
	testLock(t, false)
}

// TestLockPortable exercises the portable (lock-file based) fallback.
func TestLockPortable(t *testing.T) {
	testLock(t, true)
}
// TestLockInChild is not a standalone test: testLock re-executes the test
// binary with TEST_LOCK_FILE set, and this "test" then acts as the child
// process that takes (and optionally abandons) the lock. With the env var
// unset it is a no-op.
func TestLockInChild(t *testing.T) {
	f := os.Getenv("TEST_LOCK_FILE")
	if f == "" {
		// not child
		return
	}
	// The parent may ask the child to use the portable implementation.
	lock := Lock
	if v, _ := strconv.ParseBool(os.Getenv("TEST_LOCK_PORTABLE")); v {
		lock = lockPortable
	}
	lk, err := lock(f)
	if err != nil {
		log.Fatalf("Lock failed: %v", err)
	}
	if v, _ := strconv.ParseBool(os.Getenv("TEST_LOCK_CRASH")); v {
		// Simulate a crash, or at least not unlocking the
		// lock. We still exit 0 just to simplify the parent
		// process exec code.
		os.Exit(0)
	}
	lk.Close()
}
// testLock drives Lock (or lockPortable when portable is true) through a
// sequence of parent/child scenarios: a child that "crashes" while holding
// the lock, a child that then locks cleanly (stale-lock recovery), a
// double-lock in the parent, contention from a child while the parent
// holds the lock, and re-locking after release.
func testLock(t *testing.T, portable bool) {
	lock := Lock
	if portable {
		lock = lockPortable
	}
	td, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(td)
	path := filepath.Join(td, "foo.lock")

	// childLock re-runs this test binary restricted to TestLockInChild,
	// with env vars selecting the lock file, implementation, and whether
	// the child should exit without unlocking.
	childLock := func(crash bool) error {
		cmd := exec.Command(os.Args[0], "-test.run=LockInChild$")
		cmd.Env = []string{"TEST_LOCK_FILE=" + path}
		if portable {
			cmd.Env = append(cmd.Env, "TEST_LOCK_PORTABLE=1")
		}
		if crash {
			cmd.Env = append(cmd.Env, "TEST_LOCK_CRASH=1")
		}
		out, err := cmd.CombinedOutput()
		t.Logf("Child output: %q (err %v)", out, err)
		if err != nil {
			return fmt.Errorf("Child Process lock of %s failed: %v %s", path, err, out)
		}
		return nil
	}
	t.Logf("Locking in crashing child...")
	if err := childLock(true); err != nil {
		t.Fatalf("first lock in child process: %v", err)
	}
	// The crashed child left its lock behind; a fresh child must still
	// succeed (fcntl locks die with the process; the portable variant
	// relies on isStaleLock).
	t.Logf("Locking+unlocking in child...")
	if err := childLock(false); err != nil {
		t.Fatalf("lock in child process after crashing child: %v", err)
	}
	t.Logf("Locking in parent...")
	lk1, err := lock(path)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("Again in parent...")
	_, err = lock(path)
	if err == nil {
		t.Fatal("expected second lock to fail")
	}
	t.Logf("Locking in child...")
	if childLock(false) == nil {
		t.Fatalf("expected lock in child process to fail")
	}
	t.Logf("Unlocking lock in parent")
	if err := lk1.Close(); err != nil {
		t.Fatal(err)
	}
	lk3, err := lock(path)
	if err != nil {
		t.Fatal(err)
	}
	lk3.Close()
}

View File

@@ -1,19 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package httptypes defines how etcd's HTTP API entities are serialized to and deserialized from JSON.
*/
package httptypes

View File

@@ -1,49 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httptypes
import (
"encoding/json"
"log"
"net/http"
)
type HTTPError struct {
Message string `json:"message"`
// HTTP return code
Code int `json:"-"`
}
func (e HTTPError) Error() string {
return e.Message
}
// TODO(xiangli): handle http write errors
func (e HTTPError) WriteTo(w http.ResponseWriter) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(e.Code)
b, err := json.Marshal(e)
if err != nil {
log.Panicf("marshal HTTPError should never fail: %v", err)
}
w.Write(b)
}
func NewHTTPError(code int, m string) *HTTPError {
return &HTTPError{
Message: m,
Code: code,
}
}

View File

@@ -1,47 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httptypes
import (
"net/http"
"net/http/httptest"
"reflect"
"testing"
)
// TestHTTPErrorWriteTo checks that WriteTo emits the right status code,
// Content-Type header, and JSON body.
func TestHTTPErrorWriteTo(t *testing.T) {
	err := NewHTTPError(http.StatusBadRequest, "what a bad request you made!")
	rec := httptest.NewRecorder()
	err.WriteTo(rec)

	wantCode := http.StatusBadRequest
	wantHeader := http.Header{"Content-Type": []string{"application/json"}}
	wantBody := `{"message":"what a bad request you made!"}`

	if wantCode != rec.Code {
		t.Errorf("HTTP status code %d, want %d", rec.Code, wantCode)
	}
	if !reflect.DeepEqual(wantHeader, rec.HeaderMap) {
		t.Errorf("HTTP headers %v, want %v", rec.HeaderMap, wantHeader)
	}
	gotBody := rec.Body.String()
	if wantBody != gotBody {
		t.Errorf("HTTP body %q, want %q", gotBody, wantBody)
	}
}

View File

@@ -1,67 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httptypes
import (
"encoding/json"
"github.com/coreos/etcd/pkg/types"
)
// Member describes an etcd cluster member as exposed over the HTTP API.
type Member struct {
	ID         string   `json:"id"`
	Name       string   `json:"name"`
	PeerURLs   []string `json:"peerURLs"`
	ClientURLs []string `json:"clientURLs"`
}

// MemberCreateRequest is the body of a member-add request; its peer URLs
// are validated while unmarshaling.
type MemberCreateRequest struct {
	PeerURLs types.URLs
}

// MemberUpdateRequest shares the create request's wire format.
type MemberUpdateRequest struct {
	MemberCreateRequest
}

// UnmarshalJSON decodes {"peerURLs": [...]} and validates the URLs with
// types.NewURLs before storing them.
func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error {
	var raw struct {
		PeerURLs []string `json:"peerURLs"`
	}
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	urls, err := types.NewURLs(raw.PeerURLs)
	if err != nil {
		return err
	}
	m.PeerURLs = urls
	return nil
}

// MemberCollection marshals a member list as {"members": [...]}.
type MemberCollection []Member

// MarshalJSON wraps the slice in a "members" object.
func (c *MemberCollection) MarshalJSON() ([]byte, error) {
	wrapper := struct {
		Members []Member `json:"members"`
	}{
		Members: []Member(*c),
	}
	return json.Marshal(wrapper)
}

View File

@@ -1,135 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httptypes
import (
"encoding/json"
"net/url"
"reflect"
"testing"
"github.com/coreos/etcd/pkg/types"
)
// TestMemberUnmarshal exercises JSON decoding of Member, covering
// missing URL lists, both lists populated, multiple entries per list,
// and malformed JSON.
func TestMemberUnmarshal(t *testing.T) {
	tests := []struct {
		body       []byte
		wantMember Member
		wantError  bool
	}{
		// no URLs, just check ID & Name
		{
			body:       []byte(`{"id": "c", "name": "dungarees"}`),
			wantMember: Member{ID: "c", Name: "dungarees", PeerURLs: nil, ClientURLs: nil},
		},
		// both client and peer URLs
		{
			body: []byte(`{"peerURLs": ["http://127.0.0.1:4001"], "clientURLs": ["http://127.0.0.1:4001"]}`),
			wantMember: Member{
				PeerURLs: []string{
					"http://127.0.0.1:4001",
				},
				ClientURLs: []string{
					"http://127.0.0.1:4001",
				},
			},
		},
		// multiple peer URLs
		{
			body: []byte(`{"peerURLs": ["http://127.0.0.1:4001", "https://example.com"]}`),
			wantMember: Member{
				PeerURLs: []string{
					"http://127.0.0.1:4001",
					"https://example.com",
				},
				ClientURLs: nil,
			},
		},
		// multiple client URLs
		{
			body: []byte(`{"clientURLs": ["http://127.0.0.1:4001", "https://example.com"]}`),
			wantMember: Member{
				PeerURLs: nil,
				ClientURLs: []string{
					"http://127.0.0.1:4001",
					"https://example.com",
				},
			},
		},
		// invalid JSON
		{
			body:      []byte(`{"peerU`),
			wantError: true,
		},
	}
	for i, tt := range tests {
		got := Member{}
		err := json.Unmarshal(tt.body, &got)
		if tt.wantError != (err != nil) {
			t.Errorf("#%d: want error %t, got %v", i, tt.wantError, err)
			continue
		}
		if !reflect.DeepEqual(tt.wantMember, got) {
			t.Errorf("#%d: incorrect output: want=%#v, got=%#v", i, tt.wantMember, got)
		}
	}
}
// TestMemberCreateRequestUnmarshal checks that a valid peerURLs body
// decodes into parsed url.URL values via the custom UnmarshalJSON.
// Note the expected URLs are in types.NewURLs sorted order.
func TestMemberCreateRequestUnmarshal(t *testing.T) {
	body := []byte(`{"peerURLs": ["http://127.0.0.1:8081", "https://127.0.0.1:8080"]}`)
	want := MemberCreateRequest{
		PeerURLs: types.URLs([]url.URL{
			url.URL{Scheme: "http", Host: "127.0.0.1:8081"},
			url.URL{Scheme: "https", Host: "127.0.0.1:8080"},
		}),
	}
	var req MemberCreateRequest
	if err := json.Unmarshal(body, &req); err != nil {
		t.Fatalf("Unmarshal returned unexpected err=%v", err)
	}
	if !reflect.DeepEqual(want, req) {
		t.Fatalf("Failed to unmarshal MemberCreateRequest: want=%#v, got=%#v", want, req)
	}
}
// TestMemberCreateRequestUnmarshalFail ensures malformed JSON bodies and
// peer URL lists rejected by types.NewURLs produce unmarshal errors.
func TestMemberCreateRequestUnmarshalFail(t *testing.T) {
	cases := [][]byte{
		// invalid JSON
		[]byte(``),
		[]byte(`{`),
		// spot-check validation done in types.NewURLs
		[]byte(`{"peerURLs": "foo"}`),
		[]byte(`{"peerURLs": ["."]}`),
		[]byte(`{"peerURLs": []}`),
		[]byte(`{"peerURLs": ["http://127.0.0.1:4001/foo"]}`),
		[]byte(`{"peerURLs": ["http://127.0.0.1"]}`),
	}
	for i, body := range cases {
		var req MemberCreateRequest
		if err := json.Unmarshal(body, &req); err == nil {
			t.Errorf("#%d: expected err, got nil", i)
		}
	}
}

View File

@@ -1,41 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"strconv"
)
// ID represents a generic identifier which is canonically
// stored as a uint64 but is typically represented as a
// base-16 string for input/output
type ID uint64

// String renders the ID as a lowercase base-16 string.
func (id ID) String() string {
	return strconv.FormatUint(uint64(id), 16)
}

// IDFromString attempts to create an ID from a base-16 string.
func IDFromString(s string) (ID, error) {
	v, err := strconv.ParseUint(s, 16, 64)
	return ID(v), err
}

// IDSlice implements the sort interface
type IDSlice []ID

func (s IDSlice) Len() int           { return len(s) }
func (s IDSlice) Less(i, j int) bool { return uint64(s[i]) < uint64(s[j]) }
func (s IDSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

View File

@@ -1,95 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"reflect"
"sort"
"testing"
)
// TestIDString checks the base-16 rendering of ID for a small value and
// a large 64-bit value.
func TestIDString(t *testing.T) {
	tests := []struct {
		input ID
		want  string
	}{
		{
			input: 12,
			want:  "c",
		},
		{
			input: 4918257920282737594,
			want:  "444129853c343bba",
		},
	}
	for i, tt := range tests {
		got := tt.input.String()
		if tt.want != got {
			t.Errorf("#%d: ID.String failure: want=%v, got=%v", i, tt.want, got)
		}
	}
}

// TestIDFromString checks base-16 parsing of valid ID strings.
func TestIDFromString(t *testing.T) {
	tests := []struct {
		input string
		want  ID
	}{
		{
			input: "17",
			want:  23,
		},
		{
			input: "612840dae127353",
			want:  437557308098245459,
		},
	}
	for i, tt := range tests {
		got, err := IDFromString(tt.input)
		if err != nil {
			t.Errorf("#%d: IDFromString failure: err=%v", i, err)
			continue
		}
		if tt.want != got {
			t.Errorf("#%d: IDFromString failure: want=%v, got=%v", i, tt.want, got)
		}
	}
}
// TestIDFromStringFail ensures empty, non-hex, and out-of-range strings
// are rejected by IDFromString.
func TestIDFromStringFail(t *testing.T) {
	for i, in := range []string{
		"",
		"XXX",
		"612840dae127353612840dae127353",
	} {
		if _, err := IDFromString(in); err == nil {
			t.Fatalf("#%d: IDFromString expected error, but err=nil", i)
		}
	}
}

// TestIDSlice verifies IDSlice sorts ascending under sort.Sort.
func TestIDSlice(t *testing.T) {
	got := []ID{10, 500, 5, 1, 100, 25}
	want := []ID{1, 5, 10, 25, 100, 500}
	sort.Sort(IDSlice(got))
	if !reflect.DeepEqual(got, want) {
		t.Errorf("slice after sort = %#v, want %#v", got, want)
	}
}

View File

@@ -1,178 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"reflect"
"sort"
"sync"
)
// Set is a collection of unique string values supporting membership
// tests, equality, copying, and set difference (Sub). Whether an
// implementation is safe for concurrent use depends on the constructor:
// see NewUnsafeSet and NewThreadsafeSet.
type Set interface {
	Add(string)
	Remove(string)
	Contains(string) bool
	Equals(Set) bool
	Length() int
	Values() []string
	Copy() Set
	Sub(Set) Set
}
// NewUnsafeSet returns a set seeded with values that is NOT safe for
// concurrent use.
func NewUnsafeSet(values ...string) *unsafeSet {
	set := &unsafeSet{make(map[string]struct{})}
	for _, v := range values {
		set.Add(v)
	}
	return set
}

// NewThreadsafeSet returns a mutex-guarded set seeded with values.
func NewThreadsafeSet(values ...string) *tsafeSet {
	us := NewUnsafeSet(values...)
	return &tsafeSet{us, sync.RWMutex{}}
}
// unsafeSet is the map-backed Set implementation. It is NOT safe for
// concurrent use; wrap it via NewThreadsafeSet when goroutines share it.
type unsafeSet struct {
	d map[string]struct{}
}

// Add adds a new value to the set (no-op if the value is already present)
func (us *unsafeSet) Add(value string) {
	us.d[value] = struct{}{}
}

// Remove removes the given value from the set
func (us *unsafeSet) Remove(value string) {
	delete(us.d, value)
}

// Contains returns whether the set contains the given value
func (us *unsafeSet) Contains(value string) (exists bool) {
	_, exists = us.d[value]
	return
}

// ContainsAll returns whether the set contains all given values
func (us *unsafeSet) ContainsAll(values []string) bool {
	for _, s := range values {
		if !us.Contains(s) {
			return false
		}
	}
	return true
}

// Equals returns whether the contents of two sets are identical,
// comparing the sorted value lists.
func (us *unsafeSet) Equals(other Set) bool {
	v1 := sort.StringSlice(us.Values())
	v2 := sort.StringSlice(other.Values())
	v1.Sort()
	v2.Sort()
	return reflect.DeepEqual(v1, v2)
}

// Length returns the number of elements in the set
func (us *unsafeSet) Length() int {
	return len(us.d)
}

// Values returns the values of the Set in an unspecified order.
// The result is always non-nil, even for an empty set.
func (us *unsafeSet) Values() []string {
	// Pre-size to avoid repeated append growth; `for k := range` is the
	// idiomatic form (the blank value variable was redundant).
	values := make([]string, 0, len(us.d))
	for val := range us.d {
		values = append(values, val)
	}
	return values
}

// Copy creates a new Set containing the values of the first
func (us *unsafeSet) Copy() Set {
	// Pre-size the destination map to the source's length.
	cp := &unsafeSet{d: make(map[string]struct{}, len(us.d))}
	for val := range us.d {
		cp.d[val] = struct{}{}
	}
	return cp
}

// Sub returns a new Set with all elements of other removed; the
// receiver is left unmodified.
func (us *unsafeSet) Sub(other Set) Set {
	result := us.Copy().(*unsafeSet)
	for _, val := range other.Values() {
		// delete is a no-op for absent keys, so no existence check is needed.
		delete(result.d, val)
	}
	return result
}
// tsafeSet makes an unsafeSet safe for concurrent use by guarding every
// operation with an RWMutex: mutations take the write lock, reads take
// the read lock.
type tsafeSet struct {
	us *unsafeSet
	m  sync.RWMutex
}

func (ts *tsafeSet) Add(value string) {
	ts.m.Lock()
	defer ts.m.Unlock()
	ts.us.Add(value)
}

func (ts *tsafeSet) Remove(value string) {
	ts.m.Lock()
	defer ts.m.Unlock()
	ts.us.Remove(value)
}

func (ts *tsafeSet) Contains(value string) (exists bool) {
	ts.m.RLock()
	defer ts.m.RUnlock()
	return ts.us.Contains(value)
}

// NOTE(review): Equals calls other.Values() while holding this set's
// read lock; if other is another tsafeSet this acquires two locks in
// caller-determined order — verify callers never compare concurrently
// in both directions.
func (ts *tsafeSet) Equals(other Set) bool {
	ts.m.RLock()
	defer ts.m.RUnlock()
	return ts.us.Equals(other)
}

func (ts *tsafeSet) Length() int {
	ts.m.RLock()
	defer ts.m.RUnlock()
	return ts.us.Length()
}

func (ts *tsafeSet) Values() (values []string) {
	ts.m.RLock()
	defer ts.m.RUnlock()
	return ts.us.Values()
}

// Copy returns an independent thread-safe copy with its own mutex.
func (ts *tsafeSet) Copy() Set {
	ts.m.RLock()
	defer ts.m.RUnlock()
	usResult := ts.us.Copy().(*unsafeSet)
	return &tsafeSet{usResult, sync.RWMutex{}}
}

// Sub returns a new thread-safe set with other's elements removed.
func (ts *tsafeSet) Sub(other Set) Set {
	ts.m.RLock()
	defer ts.m.RUnlock()
	usResult := ts.us.Sub(other).(*unsafeSet)
	return &tsafeSet{usResult, sync.RWMutex{}}
}

View File

@@ -1,186 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"reflect"
"sort"
"testing"
)
// TestUnsafeSet runs the shared Set suite against the non-thread-safe
// implementation.
func TestUnsafeSet(t *testing.T) {
	driveSetTests(t, NewUnsafeSet())
}

// TestThreadsafeSet runs the shared Set suite against the mutex-guarded
// implementation.
func TestThreadsafeSet(t *testing.T) {
	driveSetTests(t, NewThreadsafeSet())
}
// equal reports whether two slices hold the same contents, ignoring
// order. Note: both input slices are sorted in place as a side effect.
func equal(a, b []string) bool {
	sort.Strings(a)
	sort.Strings(b)
	return reflect.DeepEqual(a, b)
}
// driveSetTests is the shared behavioral suite for Set implementations:
// it exercises an empty set, Add/Remove semantics, Copy independence,
// Equals, and Sub.
func driveSetTests(t *testing.T, s Set) {
	// Verify operations on an empty set
	eValues := []string{}
	values := s.Values()
	if !reflect.DeepEqual(values, eValues) {
		t.Fatalf("Expect values=%v got %v", eValues, values)
	}
	if l := s.Length(); l != 0 {
		t.Fatalf("Expected length=0, got %d", l)
	}
	for _, v := range []string{"foo", "bar", "baz"} {
		// Fixed typo in the failure message ("fale" -> "false").
		if s.Contains(v) {
			t.Fatalf("Expect s.Contains(%q) to be false, got true", v)
		}
	}
	// Add three items, ensure they show up
	s.Add("foo")
	s.Add("bar")
	s.Add("baz")
	eValues = []string{"foo", "bar", "baz"}
	values = s.Values()
	if !equal(values, eValues) {
		t.Fatalf("Expect values=%v got %v", eValues, values)
	}
	for _, v := range eValues {
		if !s.Contains(v) {
			t.Fatalf("Expect s.Contains(%q) to be true, got false", v)
		}
	}
	if l := s.Length(); l != 3 {
		t.Fatalf("Expected length=3, got %d", l)
	}
	// Add the same item a second time, ensuring it is not duplicated
	s.Add("foo")
	values = s.Values()
	if !equal(values, eValues) {
		t.Fatalf("Expect values=%v got %v", eValues, values)
	}
	if l := s.Length(); l != 3 {
		t.Fatalf("Expected length=3, got %d", l)
	}
	// Remove all items, ensure they are gone
	s.Remove("foo")
	s.Remove("bar")
	s.Remove("baz")
	eValues = []string{}
	values = s.Values()
	if !equal(values, eValues) {
		t.Fatalf("Expect values=%v got %v", eValues, values)
	}
	if l := s.Length(); l != 0 {
		t.Fatalf("Expected length=0, got %d", l)
	}
	// Create new copies of the set, and ensure they are unlinked to the
	// original Set by making modifications
	s.Add("foo")
	s.Add("bar")
	cp1 := s.Copy()
	cp2 := s.Copy()
	s.Remove("foo")
	cp3 := s.Copy()
	cp1.Add("baz")
	for i, tt := range []struct {
		want []string
		got  []string
	}{
		{[]string{"bar"}, s.Values()},
		{[]string{"foo", "bar", "baz"}, cp1.Values()},
		{[]string{"foo", "bar"}, cp2.Values()},
		{[]string{"bar"}, cp3.Values()},
	} {
		if !equal(tt.want, tt.got) {
			t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got)
		}
	}
	for i, tt := range []struct {
		want bool
		got  bool
	}{
		{true, s.Equals(cp3)},
		{true, cp3.Equals(s)},
		{false, s.Equals(cp2)},
		{false, s.Equals(cp1)},
		{false, cp1.Equals(s)},
		{false, cp2.Equals(s)},
		{false, cp2.Equals(cp1)},
	} {
		if tt.got != tt.want {
			t.Fatalf("case %d: want %t, got %t", i, tt.want, tt.got)
		}
	}
	// Subtract values from a Set, ensuring a new Set is created and
	// the original Sets are unmodified
	sub1 := cp1.Sub(s)
	sub2 := cp2.Sub(cp1)
	for i, tt := range []struct {
		want []string
		got  []string
	}{
		{[]string{"foo", "bar", "baz"}, cp1.Values()},
		{[]string{"foo", "bar"}, cp2.Values()},
		{[]string{"bar"}, s.Values()},
		{[]string{"foo", "baz"}, sub1.Values()},
		{[]string{}, sub2.Values()},
	} {
		if !equal(tt.want, tt.got) {
			t.Fatalf("case %d: expect values=%v got %v", i, tt.want, tt.got)
		}
	}
}
// TestUnsafeSetContainsAll checks ContainsAll over prefixes of the
// seeded values as well as values absent from the set.
func TestUnsafeSetContainsAll(t *testing.T) {
	vals := []string{"foo", "bar", "baz"}
	set := NewUnsafeSet(vals...)
	for i, tc := range []struct {
		strs     []string
		wcontain bool
	}{
		{[]string{}, true},
		{vals[:1], true},
		{vals[:2], true},
		{vals, true},
		{[]string{"cuz"}, false},
		{[]string{vals[0], "cuz"}, false},
	} {
		if got := set.ContainsAll(tc.strs); got != tc.wcontain {
			t.Errorf("#%d: ok = %v, want %v", i, got, tc.wcontain)
		}
	}
}

View File

@@ -1,22 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
// Uint64Slice implements sort.Interface for a slice of uint64,
// ordering ascending.
type Uint64Slice []uint64

func (s Uint64Slice) Len() int           { return len(s) }
func (s Uint64Slice) Less(i, j int) bool { return s[i] < s[j] }
func (s Uint64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

View File

@@ -1,30 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"reflect"
"sort"
"testing"
)
// TestUint64Slice verifies that sort.Sort orders a Uint64Slice ascending.
func TestUint64Slice(t *testing.T) {
	got := Uint64Slice{10, 500, 5, 1, 100, 25}
	want := Uint64Slice{1, 5, 10, 25, 100, 500}
	sort.Sort(got)
	if !reflect.DeepEqual(got, want) {
		t.Errorf("slice after sort = %#v, want %#v", got, want)
	}
}

View File

@@ -1,74 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"errors"
"fmt"
"net"
"net/url"
"sort"
"strings"
)
// URLs is a sortable collection of parsed URLs.
type URLs []url.URL

// NewURLs parses and validates strs into a sorted URLs collection.
// Each entry must be an http or https URL of the form
// "scheme://host:port" with no path; surrounding whitespace is trimmed.
// An error is returned for empty input or any invalid entry.
func NewURLs(strs []string) (URLs, error) {
	// Reject empty input up front, before allocating the result slice
	// (the original checked len(all) after the make, which obscured the
	// intent).
	if len(strs) == 0 {
		return nil, errors.New("no valid URLs given")
	}
	all := make([]url.URL, len(strs))
	for i, in := range strs {
		in = strings.TrimSpace(in)
		u, err := url.Parse(in)
		if err != nil {
			return nil, err
		}
		if u.Scheme != "http" && u.Scheme != "https" {
			return nil, fmt.Errorf("URL scheme must be http or https: %s", in)
		}
		if _, _, err := net.SplitHostPort(u.Host); err != nil {
			return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
		}
		if u.Path != "" {
			return nil, fmt.Errorf("URL must not contain a path: %s", in)
		}
		all[i] = *u
	}
	us := URLs(all)
	us.Sort()
	return us, nil
}

// String joins the URLs' string forms with commas, in current order.
func (us URLs) String() string {
	return strings.Join(us.StringSlice(), ",")
}

// Sort orders the URLs lexicographically by their string form.
func (us *URLs) Sort() {
	sort.Sort(us)
}

func (us URLs) Len() int           { return len(us) }
func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
func (us URLs) Swap(i, j int)      { us[i], us[j] = us[j], us[i] }

// StringSlice returns the string form of each URL, preserving order.
func (us URLs) StringSlice() []string {
	out := make([]string, len(us))
	for i := range us {
		out[i] = us[i].String()
	}
	return out
}

View File

@@ -1,169 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types
import (
"reflect"
"testing"
"github.com/coreos/etcd/pkg/testutil"
)
// TestNewURLs checks parsing, whitespace trimming, and sorting performed
// by NewURLs.
func TestNewURLs(t *testing.T) {
	tests := []struct {
		strs  []string
		wurls URLs
	}{
		{
			[]string{"http://127.0.0.1:4001"},
			testutil.MustNewURLs(t, []string{"http://127.0.0.1:4001"}),
		},
		// it can trim space
		{
			[]string{" http://127.0.0.1:4001 "},
			testutil.MustNewURLs(t, []string{"http://127.0.0.1:4001"}),
		},
		// it does sort
		{
			[]string{
				"http://127.0.0.2:4001",
				"http://127.0.0.1:4001",
			},
			testutil.MustNewURLs(t, []string{
				"http://127.0.0.1:4001",
				"http://127.0.0.2:4001",
			}),
		},
	}
	for i, tt := range tests {
		urls, _ := NewURLs(tt.strs)
		if !reflect.DeepEqual(urls, tt.wurls) {
			t.Errorf("#%d: urls = %+v, want %+v", i, urls, tt.wurls)
		}
	}
}

// TestURLsString verifies comma joining; note String does NOT re-sort,
// so pre-sorted and unsorted inputs render in their given order.
func TestURLsString(t *testing.T) {
	tests := []struct {
		us   URLs
		wstr string
	}{
		{
			URLs{},
			"",
		},
		{
			testutil.MustNewURLs(t, []string{"http://127.0.0.1:4001"}),
			"http://127.0.0.1:4001",
		},
		{
			testutil.MustNewURLs(t, []string{
				"http://127.0.0.1:4001",
				"http://127.0.0.2:4001",
			}),
			"http://127.0.0.1:4001,http://127.0.0.2:4001",
		},
		{
			testutil.MustNewURLs(t, []string{
				"http://127.0.0.2:4001",
				"http://127.0.0.1:4001",
			}),
			"http://127.0.0.2:4001,http://127.0.0.1:4001",
		},
	}
	for i, tt := range tests {
		g := tt.us.String()
		if g != tt.wstr {
			t.Errorf("#%d: string = %s, want %s", i, g, tt.wstr)
		}
	}
}

// TestURLsSort verifies URLs.Sort orders entries lexicographically.
func TestURLsSort(t *testing.T) {
	g := testutil.MustNewURLs(t, []string{
		"http://127.0.0.4:4001",
		"http://127.0.0.2:4001",
		"http://127.0.0.1:4001",
		"http://127.0.0.3:4001",
	})
	w := testutil.MustNewURLs(t, []string{
		"http://127.0.0.1:4001",
		"http://127.0.0.2:4001",
		"http://127.0.0.3:4001",
		"http://127.0.0.4:4001",
	})
	gurls := URLs(g)
	gurls.Sort()
	if !reflect.DeepEqual(g, w) {
		t.Errorf("URLs after sort = %#v, want %#v", g, w)
	}
}
// TestURLsStringSlice verifies StringSlice preserves the collection's
// current order without sorting.
func TestURLsStringSlice(t *testing.T) {
	tests := []struct {
		us   URLs
		wstr []string
	}{
		{
			URLs{},
			[]string{},
		},
		{
			testutil.MustNewURLs(t, []string{"http://127.0.0.1:4001"}),
			[]string{"http://127.0.0.1:4001"},
		},
		{
			testutil.MustNewURLs(t, []string{
				"http://127.0.0.1:4001",
				"http://127.0.0.2:4001",
			}),
			[]string{"http://127.0.0.1:4001", "http://127.0.0.2:4001"},
		},
		{
			testutil.MustNewURLs(t, []string{
				"http://127.0.0.2:4001",
				"http://127.0.0.1:4001",
			}),
			[]string{"http://127.0.0.2:4001", "http://127.0.0.1:4001"},
		},
	}
	for i, tt := range tests {
		g := tt.us.StringSlice()
		if !reflect.DeepEqual(g, tt.wstr) {
			t.Errorf("#%d: string slice = %+v, want %+v", i, g, tt.wstr)
		}
	}
}

// TestNewURLsFail spot-checks each rejection path in NewURLs.
func TestNewURLsFail(t *testing.T) {
	tests := [][]string{
		// no urls given
		{},
		// missing protocol scheme
		{"://127.0.0.1:4001"},
		// unsupported scheme
		{"mailto://127.0.0.1:4001"},
		// not conform to host:port
		{"http://127.0.0.1"},
		// contain a path
		{"http://127.0.0.1:4001/path"},
	}
	for i, tt := range tests {
		_, err := NewURLs(tt)
		if err == nil {
			t.Errorf("#%d: err = nil, but error", i)
		}
	}
}

View File

@@ -1,170 +0,0 @@
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package aci implements helper functions for working with ACIs
package aci
import (
"archive/tar"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"time"
"github.com/appc/spec/aci"
"github.com/appc/spec/schema"
"golang.org/x/crypto/openpgp"
)
// ACIEntry pairs a tar header with the file contents to place at that
// path when building a test ACI.
type ACIEntry struct {
	Header   *tar.Header
	Contents string
}

// imageArchiveWriter writes ACI entries to an underlying tar.Writer and
// appends the image manifest on Close.
type imageArchiveWriter struct {
	*tar.Writer
	am *schema.ImageManifest
}

// NewImageWriter creates a new ArchiveWriter which will generate an App
// Container Image based on the given manifest and write it to the given
// tar.Writer
// TODO(sgotti) this is a copy of appc/spec/aci.imageArchiveWriter with
// addFileNow changed to create the file with the current user. needed for
// testing as non root user.
func NewImageWriter(am schema.ImageManifest, w *tar.Writer) aci.ArchiveWriter {
	aw := &imageArchiveWriter{
		w,
		&am,
	}
	return aw
}

// AddFile writes hdr followed by the contents of r (if non-nil) to the
// archive.
func (aw *imageArchiveWriter) AddFile(hdr *tar.Header, r io.Reader) error {
	err := aw.Writer.WriteHeader(hdr)
	if err != nil {
		return err
	}
	if r != nil {
		_, err := io.Copy(aw.Writer, r)
		if err != nil {
			return err
		}
	}
	return nil
}

// addFileNow writes a regular file at path owned by the current uid/gid
// with mode 0644 and the current time — this ownership tweak is what
// lets the tests run as a non-root user (see the TODO above).
func (aw *imageArchiveWriter) addFileNow(path string, contents []byte) error {
	buf := bytes.NewBuffer(contents)
	now := time.Now()
	hdr := tar.Header{
		Name:       path,
		Mode:       0644,
		Uid:        os.Getuid(),
		Gid:        os.Getgid(),
		Size:       int64(buf.Len()),
		ModTime:    now,
		Typeflag:   tar.TypeReg,
		ChangeTime: now,
	}
	return aw.AddFile(&hdr, buf)
}

// addManifest serializes m and stores it in the archive under name.
func (aw *imageArchiveWriter) addManifest(name string, m json.Marshaler) error {
	out, err := m.MarshalJSON()
	if err != nil {
		return err
	}
	return aw.addFileNow(name, out)
}

// Close appends the image manifest as the final entry and closes the
// underlying tar.Writer.
func (aw *imageArchiveWriter) Close() error {
	if err := aw.addManifest(aci.ManifestFile, aw.am); err != nil {
		return err
	}
	return aw.Writer.Close()
}
// NewBasicACI creates a new ACI in the given directory with the given name.
// Used for testing.
func NewBasicACI(dir string, name string) (*os.File, error) {
	manifest := fmt.Sprintf(`{"acKind":"ImageManifest","acVersion":"0.5.4","name":"%s"}`, name)
	return NewACI(dir, manifest, nil)
}

// NewACI creates a new ACI in the given directory with the given image
// manifest and entries.
// Used for testing.
//
// NOTE(review): the temp file is unlinked (os.Remove) while the returned
// *os.File handle stays open, so the ACI exists only as long as the fd —
// presumably intentional test hygiene; confirm callers never need the
// path. The file offset is also left at the end of the archive after
// writing; callers likely need to Seek(0) before reading — verify.
func NewACI(dir string, manifest string, entries []*ACIEntry) (*os.File, error) {
	var im schema.ImageManifest
	if err := im.UnmarshalJSON([]byte(manifest)); err != nil {
		return nil, err
	}
	tf, err := ioutil.TempFile(dir, "")
	if err != nil {
		return nil, err
	}
	defer os.Remove(tf.Name())
	tw := tar.NewWriter(tf)
	aw := NewImageWriter(im, tw)
	for _, entry := range entries {
		// Add default mode
		if entry.Header.Mode == 0 {
			if entry.Header.Typeflag == tar.TypeDir {
				entry.Header.Mode = 0755
			} else {
				entry.Header.Mode = 0644
			}
		}
		// Add calling user uid and gid or tests will fail
		entry.Header.Uid = os.Getuid()
		entry.Header.Gid = os.Getgid()
		sr := strings.NewReader(entry.Contents)
		if err := aw.AddFile(entry.Header, sr); err != nil {
			return nil, err
		}
	}
	if err := aw.Close(); err != nil {
		return nil, err
	}
	return tf, nil
}
// NewDetachedSignature creates a new openpgp armored detached signature for the given ACI
// signed with armoredPrivateKey. The first entity in the keyring is used
// as the signer.
func NewDetachedSignature(armoredPrivateKey string, aci io.Reader) (io.Reader, error) {
	entityList, err := openpgp.ReadArmoredKeyRing(bytes.NewBufferString(armoredPrivateKey))
	if err != nil {
		return nil, err
	}
	if len(entityList) < 1 {
		return nil, errors.New("empty entity list")
	}
	signature := &bytes.Buffer{}
	if err := openpgp.ArmoredDetachSign(signature, entityList[0], aci, nil); err != nil {
		return nil, err
	}
	return signature, nil
}

View File

@@ -1,61 +0,0 @@
package aci
import (
"archive/tar"
"fmt"
"github.com/appc/spec/pkg/acirenderer"
"github.com/appc/spec/schema/types"
ptar "github.com/coreos/rkt/pkg/tar"
)
// Given an imageID, start with the matching image available in the store,
// build its dependency list and render it inside dir
func RenderACIWithImageID(imageID types.Hash, dir string, ap acirenderer.ACIRegistry) error {
	renderedACI, err := acirenderer.GetRenderedACIWithImageID(imageID, ap)
	if err != nil {
		return err
	}
	return renderImage(renderedACI, dir, ap)
}

// Given an image app name and optional labels, get the best matching image
// available in the store, build its dependency list and render it inside dir
func RenderACI(name types.ACName, labels types.Labels, dir string, ap acirenderer.ACIRegistry) error {
	renderedACI, err := acirenderer.GetRenderedACI(name, labels, ap)
	if err != nil {
		return err
	}
	return renderImage(renderedACI, dir, ap)
}

// Given an already populated dependency list, it will extract, under the provided
// directory, the rendered ACI
func RenderACIFromList(imgs acirenderer.Images, dir string, ap acirenderer.ACIProvider) error {
	renderedACI, err := acirenderer.GetRenderedACIFromList(imgs, ap)
	if err != nil {
		return err
	}
	return renderImage(renderedACI, dir, ap)
}
// Given a RenderedACI, it will extract, under the provided directory, the
// needed files from the right source ACI.
// The manifest will be extracted from the upper ACI.
// No file overwriting is done as it should usually be called
// providing an empty directory.
func renderImage(renderedACI acirenderer.RenderedACI, dir string, ap acirenderer.ACIProvider) error {
	for _, ra := range renderedACI {
		// Extract each layer inside a closure so the read stream is closed
		// at the end of every iteration. The original used `defer rs.Close()`
		// directly in the loop, which kept every stream open until the whole
		// function returned — one leaked descriptor per dependency layer.
		if err := func() error {
			rs, err := ap.ReadStream(ra.Key)
			if err != nil {
				return err
			}
			defer rs.Close()
			// Overwrite is not needed. If a file needs to be overwritten then the renderedACI builder has a bug
			if err := ptar.ExtractTar(tar.NewReader(rs), dir, false, ra.FileMap); err != nil {
				return fmt.Errorf("error extracting ACI: %v", err)
			}
			return nil
		}(); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -1,190 +0,0 @@
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package lock implements simple locking primitives on a
// regular file or directory using flock
package lock
import (
"errors"
"syscall"
)
var (
	ErrLocked     = errors.New("file already locked")
	ErrNotExist   = errors.New("file does not exist")
	ErrPermission = errors.New("permission denied")
	ErrNotRegular = errors.New("not a regular file")
)

// FileLock represents a lock on a regular file or a directory
type FileLock struct {
	path string
	fd   int
}

// LockType selects whether the locked path is a directory or a regular file.
type LockType int

const (
	Dir LockType = iota
	RegFile
)

// TryExclusiveLock takes an exclusive lock without blocking.
// This is idempotent when the Lock already represents an exclusive lock,
// and tries promote a shared lock to exclusive atomically.
// It will return ErrLocked if any lock is already held.
func (l *FileLock) TryExclusiveLock() error {
	err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB)
	if err == syscall.EWOULDBLOCK {
		err = ErrLocked
	}
	return err
}

// TryExclusiveLock takes an exclusive lock on a file/directory without blocking.
// It will return ErrLocked if any lock is already held on the file/directory.
func TryExclusiveLock(path string, lockType LockType) (*FileLock, error) {
	l, err := NewLock(path, lockType)
	if err != nil {
		return nil, err
	}
	if err := l.TryExclusiveLock(); err != nil {
		// Close the descriptor so a failed lock attempt does not leak it.
		l.Close()
		return nil, err
	}
	return l, nil
}

// ExclusiveLock takes an exclusive lock.
// This is idempotent when the Lock already represents an exclusive lock,
// and promotes a shared lock to exclusive atomically.
// It will block if an exclusive lock is already held.
func (l *FileLock) ExclusiveLock() error {
	return syscall.Flock(l.fd, syscall.LOCK_EX)
}

// ExclusiveLock takes an exclusive lock on a file/directory.
// It will block if an exclusive lock is already held on the file/directory.
func ExclusiveLock(path string, lockType LockType) (*FileLock, error) {
	l, err := NewLock(path, lockType)
	if err != nil {
		return nil, err
	}
	if err := l.ExclusiveLock(); err != nil {
		// Close the descriptor so a failed lock attempt does not leak it.
		l.Close()
		return nil, err
	}
	return l, nil
}

// TrySharedLock takes a co-operative (shared) lock without blocking.
// This is idempotent when the Lock already represents a shared lock,
// and tries demote an exclusive lock to shared atomically.
// It will return ErrLocked if an exclusive lock already exists.
func (l *FileLock) TrySharedLock() error {
	err := syscall.Flock(l.fd, syscall.LOCK_SH|syscall.LOCK_NB)
	if err == syscall.EWOULDBLOCK {
		err = ErrLocked
	}
	return err
}

// TrySharedLock takes a co-operative (shared) lock on a file/directory without blocking.
// It will return ErrLocked if an exclusive lock already exists on the file/directory.
func TrySharedLock(path string, lockType LockType) (*FileLock, error) {
	l, err := NewLock(path, lockType)
	if err != nil {
		return nil, err
	}
	if err := l.TrySharedLock(); err != nil {
		// Close the descriptor so a failed lock attempt does not leak it.
		l.Close()
		return nil, err
	}
	return l, nil
}

// SharedLock takes a co-operative (shared) lock on.
// This is idempotent when the Lock already represents a shared lock,
// and demotes an exclusive lock to shared atomically.
// It will block if an exclusive lock is already held.
func (l *FileLock) SharedLock() error {
	return syscall.Flock(l.fd, syscall.LOCK_SH)
}

// SharedLock takes a co-operative (shared) lock on a file/directory.
// It will block if an exclusive lock is already held on the file/directory.
func SharedLock(path string, lockType LockType) (*FileLock, error) {
	l, err := NewLock(path, lockType)
	if err != nil {
		return nil, err
	}
	if err := l.SharedLock(); err != nil {
		// Close the descriptor so a failed lock attempt does not leak it.
		l.Close()
		return nil, err
	}
	return l, nil
}

// Unlock unlocks the lock
func (l *FileLock) Unlock() error {
	return syscall.Flock(l.fd, syscall.LOCK_UN)
}

// Fd returns the lock's file descriptor, or an error if the lock is closed
func (l *FileLock) Fd() (int, error) {
	var err error
	if l.fd == -1 {
		err = errors.New("lock closed")
	}
	return l.fd, err
}

// Close closes the lock which implicitly unlocks it as well
func (l *FileLock) Close() error {
	fd := l.fd
	l.fd = -1
	return syscall.Close(fd)
}

// NewLock opens a new lock on a file without acquisition. The returned
// lock holds an open read-only, close-on-exec descriptor; callers must
// Close it.
func NewLock(path string, lockType LockType) (*FileLock, error) {
	l := &FileLock{path: path, fd: -1}

	mode := syscall.O_RDONLY | syscall.O_CLOEXEC
	if lockType == Dir {
		mode |= syscall.O_DIRECTORY
	}
	lfd, err := syscall.Open(l.path, mode, 0)
	if err != nil {
		// Map the common open errors onto the package's sentinel errors.
		if err == syscall.ENOENT {
			err = ErrNotExist
		} else if err == syscall.EACCES {
			err = ErrPermission
		}
		return nil, err
	}
	l.fd = lfd

	var stat syscall.Stat_t
	if err := syscall.Fstat(lfd, &stat); err != nil {
		// Close the descriptor on failure — the original returned here
		// without closing, leaking the fd.
		l.Close()
		return nil, err
	}
	// Check if the file is a regular file
	if lockType == RegFile && stat.Mode&syscall.S_IFMT != syscall.S_IFREG {
		// Also an fd leak in the original.
		l.Close()
		return nil, ErrNotRegular
	}
	return l, nil
}

View File

@@ -1,156 +0,0 @@
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import (
"io/ioutil"
"os"
"testing"
)
// TestNewLock verifies that NewLock opens (without acquiring) a lock on a
// regular file and on a directory, and fails on a nonexistent path.
func TestNewLock(t *testing.T) {
	f, err := ioutil.TempFile("", "")
	if err != nil {
		t.Fatalf("error creating tmpfile: %v", err)
	}
	defer os.Remove(f.Name())
	f.Close()
	l, err := NewLock(f.Name(), RegFile)
	if err != nil {
		t.Fatalf("error creating NewFileLock: %v", err)
	}
	l.Close()
	d, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("error creating tmpdir: %v", err)
	}
	defer os.Remove(d)
	l, err = NewLock(d, Dir)
	if err != nil {
		t.Fatalf("error creating NewLock: %v", err)
	}
	err = l.Close()
	if err != nil {
		t.Fatalf("error unlocking lock: %v", err)
	}
	// Remove the directory so the next NewLock has nothing to open.
	if err = os.Remove(d); err != nil {
		t.Fatalf("error removing tmpdir: %v", err)
	}
	l, err = NewLock(d, Dir)
	if err == nil {
		t.Fatalf("expected error creating lock on nonexistent path")
	}
}
// TestExclusiveLock verifies exclusive-lock semantics: reacquisition by
// the holder is idempotent, a second holder is rejected until the first
// releases, and the lock becomes available again after Close.
func TestExclusiveLock(t *testing.T) {
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("error creating tmpdir: %v", err)
	}
	defer os.Remove(dir)
	// Set up the initial exclusive lock
	l, err := ExclusiveLock(dir, Dir)
	if err != nil {
		t.Fatalf("error creating lock: %v", err)
	}
	// reacquire the exclusive lock using the receiver interface
	err = l.TryExclusiveLock()
	if err != nil {
		t.Fatalf("error reacquiring exclusive lock: %v", err)
	}
	// Now try another exclusive lock, should fail
	_, err = TryExclusiveLock(dir, Dir)
	if err == nil {
		t.Fatalf("expected err trying exclusive lock")
	}
	// Unlock the original lock
	err = l.Close()
	if err != nil {
		t.Fatalf("error closing lock: %v", err)
	}
	// Now another exclusive lock should succeed
	_, err = TryExclusiveLock(dir, Dir)
	if err != nil {
		t.Fatalf("error creating lock: %v", err)
	}
}
// TestSharedLock verifies shared-lock semantics: multiple shared holders
// coexist, an exclusive lock is refused while any shared lock is held,
// and releasing all shared holders (via Close or Unlock) lets an
// exclusive lock through.
func TestSharedLock(t *testing.T) {
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("error creating tmpdir: %v", err)
	}
	defer os.Remove(dir)
	// Set up the initial shared lock
	l1, err := SharedLock(dir, Dir)
	if err != nil {
		t.Fatalf("error creating new shared lock: %v", err)
	}
	err = l1.TrySharedLock()
	if err != nil {
		t.Fatalf("error reacquiring shared lock: %v", err)
	}
	// Subsequent shared locks should succeed
	l2, err := TrySharedLock(dir, Dir)
	if err != nil {
		t.Fatalf("error creating shared lock: %v", err)
	}
	l3, err := TrySharedLock(dir, Dir)
	if err != nil {
		t.Fatalf("error creating shared lock: %v", err)
	}
	// But an exclusive lock should fail
	_, err = TryExclusiveLock(dir, Dir)
	if err == nil {
		t.Fatal("expected exclusive lock to fail")
	}
	// Close the locks
	err = l1.Close()
	if err != nil {
		t.Fatalf("error closing lock: %v", err)
	}
	err = l2.Close()
	if err != nil {
		t.Fatalf("error closing lock: %v", err)
	}
	// Only unlock one of them
	err = l3.Unlock()
	if err != nil {
		t.Fatalf("error unlocking lock: %v", err)
	}
	// Now try an exclusive lock, should succeed
	_, err = TryExclusiveLock(dir, Dir)
	if err != nil {
		t.Fatalf("error creating lock: %v", err)
	}
}

View File

@@ -1,274 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import (
"fmt"
"os"
"path/filepath"
"syscall"
)
const (
	// Permissions for the lock directory and per-key lock files.
	// NOTE(review): 0660 on a directory lacks the execute (search) bit
	// normally required to traverse it — confirm this is intended.
	defaultDirPerm  os.FileMode = 0660
	defaultFilePerm os.FileMode = 0660
	// How many times lock() re-opens and retries when the lock file is
	// replaced underneath it (see KeyLock.lock).
	defaultLockRetries = 3
)
// keyLockMode is a bit mask describing how a key lock is acquired:
// exclusive or shared, optionally non-blocking.
type keyLockMode uint

const (
	keyLockExclusive keyLockMode = 1 << iota // take an exclusive lock
	keyLockShared                            // take a shared (co-operative) lock
	keyLockNonBlocking                       // fail with ErrLocked instead of blocking
)
// KeyLock is a lock for a specific key. The lock file is created inside a
// directory using the key name.
// This is useful when multiple processes want to take a lock but cannot use
// FileLock as they don't have a well defined file on the filesystem.
// key value must be a valid file name (as the lock file is named after the key
// value).
type KeyLock struct {
	// lockDir is the directory holding the per-key lock files.
	lockDir string
	// key names the lock file inside lockDir.
	key string
	// The lock on the key
	keyLock *FileLock
}
// NewKeyLock returns a KeyLock for the specified key without acquiring it.
// lockDir is the directory where the lock file is created; it is created
// itself if it does not exist. key must be a valid file name, as the lock
// file is named after it.
func NewKeyLock(lockDir string, key string) (*KeyLock, error) {
	if err := os.MkdirAll(lockDir, defaultDirPerm); err != nil {
		return nil, err
	}
	lockFile := filepath.Join(lockDir, key)
	// Make sure the lock file exists before opening a lock on it.
	f, err := os.OpenFile(lockFile, os.O_RDONLY|os.O_CREATE, defaultFilePerm)
	if err != nil {
		return nil, fmt.Errorf("error creating key lock file: %v", err)
	}
	f.Close()
	fl, err := NewLock(lockFile, RegFile)
	if err != nil {
		return nil, fmt.Errorf("error opening key lock file: %v", err)
	}
	return &KeyLock{lockDir: lockDir, key: key, keyLock: fl}, nil
}
// Close closes the key lock which implicitly unlocks it as well.
// NOTE(review): the error from the underlying FileLock.Close is
// discarded — confirm callers never need it.
func (l *KeyLock) Close() {
	l.keyLock.Close()
}
// TryExclusiveKeyLock takes an exclusive lock on a key without blocking.
// This is idempotent when the KeyLock already represents an exclusive lock,
// and tries to promote a shared lock to exclusive atomically.
// It will return ErrLocked if any lock is already held on the key.
func (l *KeyLock) TryExclusiveKeyLock() error {
	return l.lock(keyLockExclusive|keyLockNonBlocking, defaultLockRetries)
}
// TryExclusiveKeyLock takes an exclusive lock on the key without blocking.
// lockDir is the directory where the lock file will be created.
// It will return ErrLocked if any lock is already held.
func TryExclusiveKeyLock(lockDir string, key string) (*KeyLock, error) {
	return createAndLock(lockDir, key, keyLockExclusive|keyLockNonBlocking)
}
// ExclusiveKeyLock takes an exclusive lock on a key.
// This is idempotent when the KeyLock already represents an exclusive lock,
// and promotes a shared lock to exclusive atomically.
// It will block if an exclusive lock is already held on the key.
func (l *KeyLock) ExclusiveKeyLock() error {
	return l.lock(keyLockExclusive, defaultLockRetries)
}
// ExclusiveKeyLock takes an exclusive lock on a key.
// lockDir is the directory where the lock file will be created.
// It will block if an exclusive lock is already held on the key.
func ExclusiveKeyLock(lockDir string, key string) (*KeyLock, error) {
	return createAndLock(lockDir, key, keyLockExclusive)
}
// TrySharedKeyLock takes a co-operative (shared) lock on the key without
// blocking.
// This is idempotent when the KeyLock already represents a shared lock,
// and tries to demote an exclusive lock to shared atomically.
// It will return ErrLocked if an exclusive lock already exists on the key.
func (l *KeyLock) TrySharedKeyLock() error {
	return l.lock(keyLockShared|keyLockNonBlocking, defaultLockRetries)
}
// TrySharedKeyLock takes a co-operative (shared) lock on a key without
// blocking.
// lockDir is the directory where the lock file will be created.
// It will return ErrLocked if an exclusive lock already exists on the key.
func TrySharedKeyLock(lockDir string, key string) (*KeyLock, error) {
	return createAndLock(lockDir, key, keyLockShared|keyLockNonBlocking)
}
// SharedKeyLock takes a co-operative (shared) lock on a key.
// This is idempotent when the KeyLock already represents a shared lock,
// and demotes an exclusive lock to shared atomically.
// It will block if an exclusive lock is already held on the key.
func (l *KeyLock) SharedKeyLock() error {
	return l.lock(keyLockShared, defaultLockRetries)
}
// SharedKeyLock takes a co-operative (shared) lock on a key.
// lockDir is the directory where the lock file will be created.
// It will block if an exclusive lock is already held on the key.
func SharedKeyLock(lockDir string, key string) (*KeyLock, error) {
	return createAndLock(lockDir, key, keyLockShared)
}
// createAndLock creates/opens the lock file for key inside lockDir and
// acquires it in the given mode, closing it again if acquisition fails.
func createAndLock(lockDir string, key string, mode keyLockMode) (*KeyLock, error) {
	kl, err := NewKeyLock(lockDir, key)
	if err != nil {
		return nil, err
	}
	if err := kl.lock(mode, defaultLockRetries); err != nil {
		kl.Close()
		return nil, err
	}
	return kl, nil
}
// lock is the base function to take a lock and handle changed lock files
// As there's the need to remove unused (see CleanKeyLocks) lock files without
// races, a changed file detection is needed.
//
// Without changed file detection this can happen:
//
// Process A takes exclusive lock on file01
// Process B waits for exclusive lock on file01.
// Process A deletes file01 and then releases the lock.
// Process B takes the lock on the removed file01 as it has the fd opened
// Process C comes, creates the file as it doesn't exists, and it also takes an exclusive lock.
// Now B and C thinks to own an exclusive lock.
//
// maxRetries can be passed, useful for testing.
func (l *KeyLock) lock(mode keyLockMode, maxRetries int) error {
	retries := 0
	for {
		var err error
		var isExclusive bool
		var isNonBlocking bool
		if mode&keyLockExclusive != 0 {
			isExclusive = true
		}
		if mode&keyLockNonBlocking != 0 {
			isNonBlocking = true
		}
		// Dispatch to the matching FileLock primitive.
		switch {
		case isExclusive && !isNonBlocking:
			err = l.keyLock.ExclusiveLock()
		case isExclusive && isNonBlocking:
			err = l.keyLock.TryExclusiveLock()
		case !isExclusive && !isNonBlocking:
			err = l.keyLock.SharedLock()
		case !isExclusive && isNonBlocking:
			err = l.keyLock.TrySharedLock()
		}
		if err != nil {
			return err
		}
		// Check that the file referenced by the lock fd is the same as
		// the current file on the filesystem
		var lockStat, curStat syscall.Stat_t
		lfd, err := l.keyLock.Fd()
		if err != nil {
			return err
		}
		err = syscall.Fstat(lfd, &lockStat)
		if err != nil {
			return err
		}
		keyLockFile := filepath.Join(l.lockDir, l.key)
		fd, err := syscall.Open(keyLockFile, syscall.O_RDONLY, 0)
		// If there's an error opening the file return an error
		if err != nil {
			return err
		}
		if err := syscall.Fstat(fd, &curStat); err != nil {
			syscall.Close(fd)
			return err
		}
		syscall.Close(fd)
		// Same inode and device: the lock we hold is on the live file.
		if lockStat.Ino == curStat.Ino && lockStat.Dev == curStat.Dev {
			return nil
		}
		if retries >= maxRetries {
			return fmt.Errorf("cannot acquire lock after %d retries", retries)
		}
		// If the file has changed discard this lock and try to take another lock.
		l.keyLock.Close()
		nl, err := NewKeyLock(l.lockDir, l.key)
		if err != nil {
			return err
		}
		l.keyLock = nl.keyLock
		retries++
	}
}
// Unlock unlocks the key lock. The lock file stays open so the key can
// be re-locked later via the *KeyLock methods.
func (l *KeyLock) Unlock() error {
	// Return the underlying error directly instead of the redundant
	// "if err != nil { return err }; return nil" plumbing.
	return l.keyLock.Unlock()
}
// CleanKeyLocks removes lock files from lockDir. For every key it tries
// to take an exclusive lock and skips the key if that fails with
// ErrLocked (i.e. the key is in use).
func CleanKeyLocks(lockDir string) error {
	dir, err := os.Open(lockDir)
	if err != nil {
		return fmt.Errorf("error opening lockDir: %v", err)
	}
	defer dir.Close()
	infos, err := dir.Readdir(0)
	if err != nil {
		return fmt.Errorf("error getting lock files list: %v", err)
	}
	for _, info := range infos {
		name := info.Name()
		keyLock, err := TryExclusiveKeyLock(lockDir, name)
		if err == ErrLocked {
			// Someone is holding this key; leave its file alone.
			continue
		}
		if err != nil {
			return err
		}
		if err := os.Remove(filepath.Join(lockDir, name)); err != nil {
			keyLock.Close()
			return fmt.Errorf("error removing lock file: %v", err)
		}
		keyLock.Close()
	}
	return nil
}

View File

@@ -1,203 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lock
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
)
// TestExclusiveKeyLock checks that a held exclusive key lock makes a
// second non-blocking exclusive attempt on the same key fail.
func TestExclusiveKeyLock(t *testing.T) {
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("error creating tmpdir: %v", err)
	}
	defer os.RemoveAll(dir)
	l1, err := ExclusiveKeyLock(dir, "key01")
	if err != nil {
		t.Fatalf("error creating key lock: %v", err)
	}
	_, err = TryExclusiveKeyLock(dir, "key01")
	if err == nil {
		t.Fatalf("expected err trying exclusive key lock")
	}
	l1.Close()
}
// TestCleanKeyLocks checks that CleanKeyLocks keeps lock files whose key
// is still locked and removes them once all holders have closed.
func TestCleanKeyLocks(t *testing.T) {
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("error creating tmpdir: %v", err)
	}
	defer os.RemoveAll(dir)
	l1, err := ExclusiveKeyLock(dir, "key01")
	if err != nil {
		t.Fatalf("error creating keyLock: %v", err)
	}
	// key01 is held: its lock file must survive cleaning.
	err = CleanKeyLocks(dir)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	filesnum, err := countFiles(dir)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if filesnum != 1 {
		t.Fatalf("expected 1 file in lock dir. found %d files", filesnum)
	}
	l2, err := SharedKeyLock(dir, "key02")
	if err != nil {
		t.Fatalf("error creating keyLock: %v", err)
	}
	l1.Close()
	l2.Close()
	// Nothing is held any more: cleaning must empty the directory.
	err = CleanKeyLocks(dir)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	filesnum, err = countFiles(dir)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if filesnum != 0 {
		t.Fatalf("expected empty lock dir. found %d files", filesnum)
	}
}
// TestFileChangedLock exercises KeyLock.lock's changed-file detection:
// a lock taken on a removed or replaced lock file must be rejected when
// no retries are allowed, and must succeed when a retry is permitted.
func TestFileChangedLock(t *testing.T) {
	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("error creating tmpdir: %v", err)
	}
	defer os.RemoveAll(dir)
	l1, err := ExclusiveKeyLock(dir, "key01")
	if err != nil {
		t.Fatalf("error creating keyLock: %v", err)
	}
	l2, err := NewKeyLock(dir, "key01")
	if err != nil {
		t.Fatalf("error creating keyLock: %v", err)
	}
	// Simulate that l1 owner removes the actual key1 lock file
	err = os.Remove(filepath.Join(dir, "key01"))
	if err != nil {
		t.Fatalf("error removing lock file: %v", err)
	}
	l1.Close()
	// Now l2 owner takes a lock, using the fd of the removed file
	err = l2.lock(keyLockShared, 0)
	if err == nil {
		t.Fatalf("expected error")
	}
	l2.Close()
	// Do the same with a new file created after removal
	dir, err = ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("error creating tmpdir: %v", err)
	}
	defer os.RemoveAll(dir)
	l1, err = ExclusiveKeyLock(dir, "key01")
	if err != nil {
		t.Fatalf("error creating keyLock: %v", err)
	}
	l2, err = NewKeyLock(dir, "key01")
	if err != nil {
		t.Fatalf("error creating keyLock: %v", err)
	}
	// Simulate that l1 owner removes the actual key1 lock file
	err = os.Remove(filepath.Join(dir, "key01"))
	if err != nil {
		t.Fatalf("error removing lock file: %v", err)
	}
	l1.Close()
	// Simulate that another user comes and takes a lock, this will create
	// a new lock file as it was removed.
	l3, err := ExclusiveKeyLock(dir, "key01")
	if err != nil {
		// Previously unchecked: a failure here would nil-deref l3 below.
		t.Fatalf("error creating keyLock: %v", err)
	}
	l3.Close()
	// Now l2 owner takes a lock, using the fd of the old file
	err = l2.lock(keyLockShared, 0)
	if err == nil {
		t.Fatalf("expected error")
	}
	// Do the same but with a retry so it should work.
	dir, err = ioutil.TempDir("", "")
	if err != nil {
		t.Fatalf("error creating tmpdir: %v", err)
	}
	defer os.RemoveAll(dir)
	l1, err = ExclusiveKeyLock(dir, "key01")
	if err != nil {
		t.Fatalf("error creating keyLock: %v", err)
	}
	l2, err = NewKeyLock(dir, "key01")
	if err != nil {
		t.Fatalf("error creating keyLock: %v", err)
	}
	// Simulate that l1 owner removes the actual key1 lock file
	err = os.Remove(filepath.Join(dir, "key01"))
	if err != nil {
		t.Fatalf("error removing lock file: %v", err)
	}
	l1.Close()
	// Simulate that another user comes and takes a lock, this will create
	// a new lock file as it was removed.
	l3, err = ExclusiveKeyLock(dir, "key01")
	if err != nil {
		// Previously unchecked: a failure here would nil-deref l3 below.
		t.Fatalf("error creating keyLock: %v", err)
	}
	l3.Close()
	// Now l2 owner takes a lock, using the fd of the old file
	err = l2.lock(keyLockShared, 1)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
func countFiles(dir string) (int, error) {
f, err := os.Open(dir)
if err != nil {
return -1, err
}
defer f.Close()
files, err := f.Readdir(0)
if err != nil {
return -1, err
}
return len(files), nil
}

View File

@@ -1,18 +0,0 @@
package sys
import (
"syscall"
)
// CloseOnExec sets or clears FD_CLOEXEC flag on a file descriptor
func CloseOnExec(fd int, set bool) error {
flag := uintptr(0)
if set {
flag = syscall.FD_CLOEXEC
}
_, _, err := syscall.RawSyscall(syscall.SYS_FCNTL, uintptr(fd), syscall.F_SETFD, flag)
if err != 0 {
return syscall.Errno(err)
}
return nil
}

View File

@@ -1,11 +0,0 @@
package sys
import "syscall"
// Syncfs flushes all pending changes of the filesystem containing the
// open file descriptor fd, via the Linux syncfs(2) syscall (SYS_SYNCFS
// is defined per-architecture in sibling files).
func Syncfs(fd int) error {
	_, _, err := syscall.RawSyscall(SYS_SYNCFS, uintptr(fd), 0, 0)
	if err != 0 {
		return syscall.Errno(err)
	}
	return nil
}

View File

@@ -1,5 +0,0 @@
package sys
const (
	// SYS_SYNCFS is the syncfs(2) syscall number for this build's
	// architecture (344 — presumably linux/386; the file's build tags
	// are not visible here, so confirm against the original file name).
	SYS_SYNCFS = 344
)

View File

@@ -1,5 +0,0 @@
package sys
const (
	// SYS_SYNCFS is the syncfs(2) syscall number for this build's
	// architecture (306 — presumably linux/amd64; the file's build tags
	// are not visible here, so confirm against the original file name).
	SYS_SYNCFS = 306
)

View File

@@ -1,5 +0,0 @@
package sys
const (
	// SYS_SYNCFS is the syncfs(2) syscall number for this build's
	// architecture (373 — presumably linux/arm; the file's build tags
	// are not visible here, so confirm against the original file name).
	SYS_SYNCFS = 373
)

View File

@@ -1,251 +0,0 @@
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package tar contains helper functions for working with tar files
package tar
import (
"archive/tar"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"syscall"
"time"
)
// DEFAULT_DIR_MODE is the mode used for parent directories that have to
// be created implicitly while extracting an entry.
const DEFAULT_DIR_MODE os.FileMode = 0755

// insecureLinkError marks hard/symlinks whose target would escape the
// extraction directory.
type insecureLinkError error

// ErrNotSupportedPlatform is returned by helpers (e.g. LUtimesNano) that
// have no implementation for the current platform.
var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")

// Map of paths that should be whitelisted. The paths should be relative to the
// root of the tar file and should be cleaned (for example using filepath.Clean)
type PathWhitelistMap map[string]struct{}
// ExtractTar extracts a tarball (from a tar.Reader) into the given directory
// if pwl is not nil, only the paths in the map are extracted.
// If overwrite is true, existing files will be overwritten.
func ExtractTar(tr *tar.Reader, dir string, overwrite bool, pwl PathWhitelistMap) error {
	// Neutralize the umask so header modes are applied exactly.
	um := syscall.Umask(0)
	defer syscall.Umask(um)
	dirhdrs := []*tar.Header{}
Tar:
	for {
		hdr, err := tr.Next()
		switch err {
		case io.EOF:
			break Tar
		case nil:
			if pwl != nil {
				relpath := filepath.Clean(hdr.Name)
				if _, ok := pwl[relpath]; !ok {
					continue
				}
			}
			err = ExtractFile(tr, hdr, dir, overwrite)
			if err != nil {
				return fmt.Errorf("error extracting tarball: %v", err)
			}
			// Remember directory headers so their times can be fixed
			// up after all contents are extracted.
			if hdr.Typeflag == tar.TypeDir {
				dirhdrs = append(dirhdrs, hdr)
			}
		default:
			return fmt.Errorf("error extracting tarball: %v", err)
		}
	}
	// Restore dirs atime and mtime. This has to be done after extracting
	// as a file extraction will change its parent directory's times.
	for _, hdr := range dirhdrs {
		p := filepath.Join(dir, hdr.Name)
		if err := syscall.UtimesNano(p, HdrToTimespec(hdr)); err != nil {
			return err
		}
	}
	return nil
}
// ExtractFile extracts the file described by hdr from the given tarball into
// the provided directory.
// If overwrite is true, existing files will be overwritten.
func ExtractFile(tr *tar.Reader, hdr *tar.Header, dir string, overwrite bool) error {
	p := filepath.Join(dir, hdr.Name)
	fi := hdr.FileInfo()
	typ := hdr.Typeflag
	if overwrite {
		info, err := os.Lstat(p)
		switch {
		case os.IsNotExist(err):
		case err == nil:
			// If the old and new paths are both dirs do nothing or
			// RemoveAll will remove all dir's contents
			if !info.IsDir() || typ != tar.TypeDir {
				err := os.RemoveAll(p)
				if err != nil {
					return err
				}
			}
		default:
			return err
		}
	}
	// insideDir reports whether dest is dir itself or lies strictly below
	// it. A bare strings.HasPrefix(dest, dir) is not enough: with
	// dir="/x/foo", a link target resolving to "/x/foobar" would pass and
	// let a link escape the extraction root ("zip slip").
	cleanDir := filepath.Clean(dir)
	insideDir := func(dest string) bool {
		return dest == cleanDir || strings.HasPrefix(dest, cleanDir+string(os.PathSeparator))
	}
	// Create parent dir if it doesn't exist
	if err := os.MkdirAll(filepath.Dir(p), DEFAULT_DIR_MODE); err != nil {
		return err
	}
	switch {
	case typ == tar.TypeReg || typ == tar.TypeRegA:
		f, err := os.OpenFile(p, os.O_CREATE|os.O_RDWR, fi.Mode())
		if err != nil {
			return err
		}
		_, err = io.Copy(f, tr)
		if err != nil {
			f.Close()
			return err
		}
		f.Close()
	case typ == tar.TypeDir:
		if err := os.MkdirAll(p, fi.Mode()); err != nil {
			return err
		}
		// MkdirAll may leave a pre-existing dir's mode alone; force the
		// exact header mode.
		dir, err := os.Open(p)
		if err != nil {
			return err
		}
		if err := dir.Chmod(fi.Mode()); err != nil {
			dir.Close()
			return err
		}
		dir.Close()
	case typ == tar.TypeLink:
		dest := filepath.Join(dir, hdr.Linkname)
		if !insideDir(dest) {
			return insecureLinkError(fmt.Errorf("insecure link %q -> %q", p, hdr.Linkname))
		}
		if err := os.Link(dest, p); err != nil {
			return err
		}
	case typ == tar.TypeSymlink:
		// NOTE(review): this checks where the target resolves relative to
		// the link's directory, but the raw (possibly absolute) linkname
		// is what gets written — confirm absolute targets are acceptable.
		dest := filepath.Join(filepath.Dir(p), hdr.Linkname)
		if !insideDir(dest) {
			return insecureLinkError(fmt.Errorf("insecure symlink %q -> %q", p, hdr.Linkname))
		}
		if err := os.Symlink(hdr.Linkname, p); err != nil {
			return err
		}
	case typ == tar.TypeChar:
		dev := makedev(int(hdr.Devmajor), int(hdr.Devminor))
		mode := uint32(fi.Mode()) | syscall.S_IFCHR
		if err := syscall.Mknod(p, mode, dev); err != nil {
			return err
		}
	case typ == tar.TypeBlock:
		dev := makedev(int(hdr.Devmajor), int(hdr.Devminor))
		mode := uint32(fi.Mode()) | syscall.S_IFBLK
		if err := syscall.Mknod(p, mode, dev); err != nil {
			return err
		}
	case typ == tar.TypeFifo:
		if err := syscall.Mkfifo(p, uint32(fi.Mode())); err != nil {
			return err
		}
	// TODO(jonboulle): implement other modes
	default:
		return fmt.Errorf("unsupported type: %v", typ)
	}
	if err := os.Lchown(p, hdr.Uid, hdr.Gid); err != nil {
		return err
	}
	// lchown(2) says that, depending on the linux kernel version, it
	// can change the file's mode also if executed as root. So call
	// os.Chmod after it.
	if typ != tar.TypeSymlink {
		if err := os.Chmod(p, fi.Mode()); err != nil {
			return err
		}
	}
	// Restore entry atime and mtime.
	// Use special function LUtimesNano not available on go's syscall package because we
	// have to restore symlink's times and not the referenced file times.
	ts := HdrToTimespec(hdr)
	if hdr.Typeflag != tar.TypeSymlink {
		if err := syscall.UtimesNano(p, ts); err != nil {
			return err
		}
	} else {
		if err := LUtimesNano(p, ts); err != nil && err != ErrNotSupportedPlatform {
			return err
		}
	}
	return nil
}
// ExtractFileFromTar extracts a regular file from the given tar, returning its
// contents as a byte slice
func ExtractFileFromTar(tr *tar.Reader, file string) ([]byte, error) {
for {
hdr, err := tr.Next()
switch err {
case io.EOF:
return nil, fmt.Errorf("file not found")
case nil:
if filepath.Clean(hdr.Name) != filepath.Clean(file) {
continue
}
switch hdr.Typeflag {
case tar.TypeReg:
case tar.TypeRegA:
default:
return nil, fmt.Errorf("requested file not a regular file")
}
buf, err := ioutil.ReadAll(tr)
if err != nil {
return nil, fmt.Errorf("error extracting tarball: %v", err)
}
return buf, nil
default:
return nil, fmt.Errorf("error extracting tarball: %v", err)
}
}
}
// makedev mimics glibc's gnu_dev_makedev: it packs major/minor device
// numbers into a single device number, splitting each across its low and
// high bit ranges.
func makedev(major, minor int) int {
	lowMinor := minor & 0xff
	highMinor := int(uint64(minor & ^0xff) << 12)
	lowMajor := (major & 0xfff) << 8
	highMajor := int(uint64(major & ^0xfff) << 32)
	return lowMinor | lowMajor | highMinor | highMajor
}
// HdrToTimespec returns hdr's access and modification times as a
// two-element slice suitable for syscall.UtimesNano.
func HdrToTimespec(hdr *tar.Header) []syscall.Timespec {
	return []syscall.Timespec{
		timeToTimespec(hdr.AccessTime),
		timeToTimespec(hdr.ModTime),
	}
}
// TODO(sgotti) use UTIMES_OMIT on linux if Time.IsZero ?
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
nsec := int64(0)
if !time.IsZero() {
nsec = time.UnixNano()
}
return syscall.NsecToTimespec(nsec)
}

View File

@@ -1,732 +0,0 @@
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tar
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
"time"
)
// testTarEntry pairs a tar header with the file contents to write for it
// when building a test tarball.
type testTarEntry struct {
	header   *tar.Header
	contents string
}
// newTestTar writes the given entries to a temporary tar file and
// returns its path. Entries without an explicit mode get 0755 (dirs) or
// 0644 (everything else), and all entries are owned by the calling user.
func newTestTar(entries []*testTarEntry) (string, error) {
	f, err := ioutil.TempFile("", "test-tar")
	if err != nil {
		return "", err
	}
	defer f.Close()
	tw := tar.NewWriter(f)
	for _, e := range entries {
		// Fill in a default mode when none was specified.
		if e.header.Mode == 0 {
			switch e.header.Typeflag {
			case tar.TypeDir:
				e.header.Mode = 0755
			default:
				e.header.Mode = 0644
			}
		}
		// Use the calling user's uid/gid or extraction in tests fails.
		e.header.Uid = os.Getuid()
		e.header.Gid = os.Getgid()
		if err := tw.WriteHeader(e.header); err != nil {
			return "", err
		}
		if _, err := io.WriteString(tw, e.contents); err != nil {
			return "", err
		}
	}
	if err := tw.Close(); err != nil {
		return "", err
	}
	return f.Name(), nil
}
// fileInfo describes one expected (or observed) filesystem entry used by
// checkExpectedFiles to compare an extraction result against a tarball.
type fileInfo struct {
	path     string      // path relative to the extraction dir
	typeflag byte        // tar type flag (TypeReg, TypeDir, TypeSymlink, ...)
	size     int64       // regular-file size; ignored for other types
	contents string      // expected contents; "" means don't check
	mode     os.FileMode // permission bits; 0 means use the default
}
// fileInfoSliceToMap indexes a slice of fileInfo entries by their path.
func fileInfoSliceToMap(slice []*fileInfo) map[string]*fileInfo {
	byPath := make(map[string]*fileInfo, len(slice))
	for _, info := range slice {
		byPath[info.path] = info
	}
	return byPath
}
// checkExpectedFiles walks dir and verifies that exactly the entries in
// expectedFiles exist, with matching type, size, contents (when given)
// and mode (modes are not checked for symlinks).
func checkExpectedFiles(dir string, expectedFiles map[string]*fileInfo) error {
	files := make(map[string]*fileInfo)
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		fm := info.Mode()
		if path == dir {
			return nil
		}
		relpath, err := filepath.Rel(dir, path)
		if err != nil {
			return err
		}
		switch {
		case fm.IsRegular():
			files[relpath] = &fileInfo{path: relpath, typeflag: tar.TypeReg, size: info.Size(), mode: info.Mode().Perm()}
		case info.IsDir():
			files[relpath] = &fileInfo{path: relpath, typeflag: tar.TypeDir, mode: info.Mode().Perm()}
		case fm&os.ModeSymlink != 0:
			files[relpath] = &fileInfo{path: relpath, typeflag: tar.TypeSymlink, mode: info.Mode()}
		default:
			return fmt.Errorf("file mode not handled: %v", fm)
		}
		return nil
	})
	if err != nil {
		return err
	}
	// Set defaults for not specified expected file mode
	for _, ef := range expectedFiles {
		if ef.mode == 0 {
			if ef.typeflag == tar.TypeDir {
				ef.mode = 0755
			} else {
				ef.mode = 0644
			}
		}
	}
	// Every expected entry must have been found on disk...
	for _, ef := range expectedFiles {
		_, ok := files[ef.path]
		if !ok {
			return fmt.Errorf("Expected file %q not in files", ef.path)
		}
	}
	// ...and every entry on disk must match its expectation.
	for _, file := range files {
		ef, ok := expectedFiles[file.path]
		if !ok {
			return fmt.Errorf("file %q not in expectedFiles", file.path)
		}
		if ef.typeflag != file.typeflag {
			return fmt.Errorf("file %q: file type differs: wanted: %d, got: %d", file.path, ef.typeflag, file.typeflag)
		}
		if ef.typeflag == tar.TypeReg {
			if ef.size != file.size {
				return fmt.Errorf("file %q: size differs: wanted %d, wanted: %d", file.path, ef.size, file.size)
			}
			if ef.contents != "" {
				buf, err := ioutil.ReadFile(filepath.Join(dir, file.path))
				if err != nil {
					return fmt.Errorf("unexpected error: %v", err)
				}
				if string(buf) != ef.contents {
					return fmt.Errorf("unexpected contents, wanted: %s, got: %s", ef.contents, buf)
				}
			}
		}
		// Check modes but ignore symlinks
		if ef.mode != file.mode && ef.typeflag != tar.TypeSymlink {
			return fmt.Errorf("file %q: mode differs: wanted %#o, got: %#o", file.path, ef.mode, file.mode)
		}
	}
	return nil
}
// TestExtractTarInsecureSymlink checks that ExtractTar rejects tarballs
// containing hard/symlinks whose targets would escape the extraction
// directory, returning an insecureLinkError.
func TestExtractTarInsecureSymlink(t *testing.T) {
	entries := []*testTarEntry{
		{
			contents: "hello",
			header: &tar.Header{
				Name: "hello.txt",
				Size: 5,
			},
		},
		{
			header: &tar.Header{
				Name:     "link.txt",
				Linkname: "hello.txt",
				Typeflag: tar.TypeSymlink,
			},
		},
	}
	// Two malicious variants: an escaping symlink and an escaping hardlink.
	insecureSymlinkEntries := append(entries, &testTarEntry{
		header: &tar.Header{
			Name:     "../etc/secret.conf",
			Linkname: "secret.conf",
			Typeflag: tar.TypeSymlink,
		},
	})
	insecureHardlinkEntries := append(entries, &testTarEntry{
		header: &tar.Header{
			Name:     "../etc/secret.conf",
			Linkname: "secret.conf",
			Typeflag: tar.TypeLink,
		},
	})
	for _, entries := range [][]*testTarEntry{insecureSymlinkEntries, insecureHardlinkEntries} {
		testTarPath, err := newTestTar(entries)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		defer os.Remove(testTarPath)
		containerTar, err := os.Open(testTarPath)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		tr := tar.NewReader(containerTar)
		tmpdir, err := ioutil.TempDir("", "rkt-temp-dir")
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		os.RemoveAll(tmpdir)
		err = os.MkdirAll(tmpdir, 0755)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		defer os.RemoveAll(tmpdir)
		err = ExtractTar(tr, tmpdir, false, nil)
		if _, ok := err.(insecureLinkError); !ok {
			t.Errorf("expected insecureSymlinkError error")
		}
	}
}
// TestExtractTarFolders checks that directory entries are created with
// their exact header modes — including when the directory entry appears
// after files inside it, or when nested directories share a prefix.
func TestExtractTarFolders(t *testing.T) {
	entries := []*testTarEntry{
		{
			contents: "foo",
			header: &tar.Header{
				Name: "deep/folder/foo.txt",
				Size: 3,
			},
		},
		{
			// Directory entry arrives AFTER a file inside it.
			header: &tar.Header{
				Name:     "deep/folder/",
				Typeflag: tar.TypeDir,
				Mode:     int64(0747),
			},
		},
		{
			contents: "bar",
			header: &tar.Header{
				Name: "deep/folder/bar.txt",
				Size: 3,
			},
		},
		{
			header: &tar.Header{
				Name:     "deep/folder2/symlink.txt",
				Typeflag: tar.TypeSymlink,
				Linkname: "deep/folder/foo.txt",
			},
		},
		{
			header: &tar.Header{
				Name:     "deep/folder2/",
				Typeflag: tar.TypeDir,
				Mode:     int64(0747),
			},
		},
		{
			contents: "bar",
			header: &tar.Header{
				Name: "deep/folder2/bar.txt",
				Size: 3,
			},
		},
		{
			header: &tar.Header{
				Name:     "deep/deep/folder",
				Typeflag: tar.TypeDir,
				Mode:     int64(0755),
			},
		},
		{
			header: &tar.Header{
				Name:     "deep/deep/",
				Typeflag: tar.TypeDir,
				Mode:     int64(0747),
			},
		},
	}
	testTarPath, err := newTestTar(entries)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	defer os.Remove(testTarPath)
	containerTar, err := os.Open(testTarPath)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	tr := tar.NewReader(containerTar)
	tmpdir, err := ioutil.TempDir("", "rkt-temp-dir")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	os.RemoveAll(tmpdir)
	err = os.MkdirAll(tmpdir, 0755)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	defer os.RemoveAll(tmpdir)
	err = ExtractTar(tr, tmpdir, false, nil)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	matches, err := filepath.Glob(filepath.Join(tmpdir, "deep/folder/*.txt"))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(matches) != 2 {
		t.Errorf("unexpected number of files found: %d, wanted 2", len(matches))
	}
	matches, err = filepath.Glob(filepath.Join(tmpdir, "deep/folder2/*.txt"))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(matches) != 2 {
		t.Errorf("unexpected number of files found: %d, wanted 2", len(matches))
	}
	dirInfo, err := os.Lstat(filepath.Join(tmpdir, "deep/folder"))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	} else if dirInfo.Mode().Perm() != os.FileMode(0747) {
		t.Errorf("unexpected dir mode: %s", dirInfo.Mode())
	}
	dirInfo, err = os.Lstat(filepath.Join(tmpdir, "deep/deep"))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	} else if dirInfo.Mode().Perm() != os.FileMode(0747) {
		t.Errorf("unexpected dir mode: %s", dirInfo.Mode())
	}
}
// TestExtractTarFileToBuf checks that ExtractFileFromTar returns the
// contents of a regular file and refuses to extract a symlink entry.
func TestExtractTarFileToBuf(t *testing.T) {
	entries := []*testTarEntry{
		{
			header: &tar.Header{
				Name:     "folder/",
				Typeflag: tar.TypeDir,
				Mode:     int64(0747),
			},
		},
		{
			contents: "foo",
			header: &tar.Header{
				Name: "folder/foo.txt",
				Size: 3,
			},
		},
		{
			contents: "bar",
			header: &tar.Header{
				Name: "folder/bar.txt",
				Size: 3,
			},
		},
		{
			header: &tar.Header{
				Name:     "folder/symlink.txt",
				Typeflag: tar.TypeSymlink,
				Linkname: "folder/foo.txt",
			},
		},
	}
	testTarPath, err := newTestTar(entries)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	defer os.Remove(testTarPath)
	containerTar, err := os.Open(testTarPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	tr := tar.NewReader(containerTar)
	buf, err := ExtractFileFromTar(tr, "folder/foo.txt")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if string(buf) != "foo" {
		t.Errorf("unexpected contents, wanted: %s, got: %s", "foo", buf)
	}
	containerTar.Close()
	// Reopen the tar and ask for the symlink entry; only regular files
	// can be extracted to a buffer, so this must fail.
	containerTar, err = os.Open(testTarPath)
	if err != nil {
		// Fatal (was Errorf): continuing with a nil file would make the
		// symlink check below meaningless.
		t.Fatalf("unexpected error: %v", err)
	}
	tr = tar.NewReader(containerTar)
	if _, err = ExtractFileFromTar(tr, "folder/symlink.txt"); err == nil {
		t.Errorf("expected error")
	}
	containerTar.Close()
}
// TestExtractTarPWL checks that ExtractTar honors a path whitelist:
// only whitelisted entries are extracted.
func TestExtractTarPWL(t *testing.T) {
	entries := []*testTarEntry{
		{
			header: &tar.Header{
				Name:     "folder/",
				Typeflag: tar.TypeDir,
				Mode:     int64(0747),
			},
		},
		{
			contents: "foo",
			header: &tar.Header{
				Name: "folder/foo.txt",
				Size: 3,
			},
		},
		{
			contents: "bar",
			header: &tar.Header{
				Name: "folder/bar.txt",
				Size: 3,
			},
		},
		{
			header: &tar.Header{
				Name:     "folder/symlink.txt",
				Typeflag: tar.TypeSymlink,
				Linkname: "folder/foo.txt",
			},
		},
	}
	testTarPath, err := newTestTar(entries)
	if err != nil {
		// Setup failures are fatal (they were t.Errorf before, which
		// fell through and used nil fixtures).
		t.Fatalf("unexpected error: %v", err)
	}
	defer os.Remove(testTarPath)
	containerTar, err := os.Open(testTarPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// The original leaked this file handle.
	defer containerTar.Close()
	tr := tar.NewReader(containerTar)
	tmpdir, err := ioutil.TempDir("", "rkt-temp-dir")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	defer os.RemoveAll(tmpdir)
	// Whitelist only folder/foo.txt: bar.txt and symlink.txt must be skipped.
	pwl := make(PathWhitelistMap)
	pwl["folder/foo.txt"] = struct{}{}
	if err := ExtractTar(tr, tmpdir, false, pwl); err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	matches, err := filepath.Glob(filepath.Join(tmpdir, "folder/*.txt"))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if len(matches) != 1 {
		t.Errorf("unexpected number of files found: %d, wanted 1", len(matches))
	}
}
// TestExtractTarOverwrite extracts one tar, then extracts a second tar
// over the same directory with overwrite enabled, and checks that files
// replace dirs/symlinks (and vice versa) while symlink targets and
// pre-existing directory contents survive.
func TestExtractTarOverwrite(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "rkt-temp-dir")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	defer os.RemoveAll(tmpdir)
	entries := []*testTarEntry{
		{
			contents: "hello",
			header: &tar.Header{
				Name: "hello.txt",
				Size: 5,
			},
		},
		{
			header: &tar.Header{
				Name:     "afolder",
				Typeflag: tar.TypeDir,
			},
		},
		{
			contents: "hello",
			header: &tar.Header{
				Name: "afolder/hello.txt",
				Size: 5,
			},
		},
		{
			contents: "hello",
			header: &tar.Header{
				Name: "afile",
				Size: 5,
			},
		},
		{
			header: &tar.Header{
				Name:     "folder01",
				Typeflag: tar.TypeDir,
			},
		},
		{
			contents: "hello",
			header: &tar.Header{
				Name: "folder01/file01",
				Size: 5,
			},
		},
		{
			contents: "hello",
			header: &tar.Header{
				Name: "filesymlinked",
				Size: 5,
			},
		},
		{
			header: &tar.Header{
				Name:     "linktofile",
				Linkname: "filesymlinked",
				Typeflag: tar.TypeSymlink,
			},
		},
		{
			header: &tar.Header{
				Name:     "dirsymlinked",
				Typeflag: tar.TypeDir,
			},
		},
		{
			header: &tar.Header{
				Name:     "linktodir",
				Linkname: "dirsymlinked",
				Typeflag: tar.TypeSymlink,
			},
		},
	}
	testTarPath, err := newTestTar(entries)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	defer os.Remove(testTarPath)
	containerTar, err := os.Open(testTarPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	tr := tar.NewReader(containerTar)
	err = ExtractTar(tr, tmpdir, false, nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Now overwrite:
	// a file with a new file
	// a dir with a file
	entries = []*testTarEntry{
		{
			contents: "newhello",
			header: &tar.Header{
				Name: "hello.txt",
				Size: 8,
			},
		},
		// Now this is a file
		{
			contents: "nowafile",
			header: &tar.Header{
				Name:     "afolder",
				Typeflag: tar.TypeReg,
				Size:     8,
			},
		},
		// Now this is a dir
		{
			header: &tar.Header{
				Name:     "afile",
				Typeflag: tar.TypeDir,
			},
		},
		// Overwrite symlink to a file with a regular file
		// the linked file shouldn't be removed
		{
			contents: "filereplacingsymlink",
			header: &tar.Header{
				Name:     "linktofile",
				Typeflag: tar.TypeReg,
				Size:     20,
			},
		},
		// Overwrite symlink to a dir with a regular file
		// the linked directory and all its contents shouldn't be
		// removed
		{
			contents: "filereplacingsymlink",
			header: &tar.Header{
				Name:     "linktodir",
				Typeflag: tar.TypeReg,
				Size:     20,
			},
		},
		// folder01 already exists and shouldn't be removed (keeping folder01/file01)
		{
			header: &tar.Header{
				Name:     "folder01",
				Typeflag: tar.TypeDir,
				Mode:     int64(0755),
			},
		},
		{
			contents: "hello",
			header: &tar.Header{
				Name: "folder01/file02",
				Size: 5,
				Mode: int64(0644),
			},
		},
	}
	testTarPath, err = newTestTar(entries)
	if err != nil {
		// Fatal (was Errorf): the second extraction is the whole point
		// of this test.
		t.Fatalf("unexpected error: %v", err)
	}
	defer os.Remove(testTarPath)
	containerTar, err = os.Open(testTarPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	tr = tar.NewReader(containerTar)
	// BUG FIX: the error from this overwriting extraction was never
	// checked, so a failed ExtractTar went unnoticed.
	if err := ExtractTar(tr, tmpdir, true, nil); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	expectedFiles := []*fileInfo{
		&fileInfo{path: "hello.txt", typeflag: tar.TypeReg, size: 8, contents: "newhello"},
		&fileInfo{path: "linktofile", typeflag: tar.TypeReg, size: 20},
		&fileInfo{path: "linktodir", typeflag: tar.TypeReg, size: 20},
		&fileInfo{path: "afolder", typeflag: tar.TypeReg, size: 8},
		&fileInfo{path: "dirsymlinked", typeflag: tar.TypeDir},
		&fileInfo{path: "afile", typeflag: tar.TypeDir},
		&fileInfo{path: "filesymlinked", typeflag: tar.TypeReg, size: 5},
		&fileInfo{path: "folder01", typeflag: tar.TypeDir},
		&fileInfo{path: "folder01/file01", typeflag: tar.TypeReg, size: 5},
		&fileInfo{path: "folder01/file02", typeflag: tar.TypeReg, size: 5},
	}
	err = checkExpectedFiles(tmpdir, fileInfoSliceToMap(expectedFiles))
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
// TestExtractTarTimes checks that ExtractTar restores the ModTime of
// dirs, files and (on linux) symlinks.
func TestExtractTarTimes(t *testing.T) {
	// Do not set ns as tar has second precision
	time1 := time.Unix(100000, 0)
	time2 := time.Unix(200000, 0)
	time3 := time.Unix(300000, 0)
	entries := []*testTarEntry{
		{
			header: &tar.Header{
				Name:     "folder/",
				Typeflag: tar.TypeDir,
				Mode:     int64(0747),
				ModTime:  time1,
			},
		},
		{
			contents: "foo",
			header: &tar.Header{
				Name:    "folder/foo.txt",
				Size:    3,
				ModTime: time2,
			},
		},
		{
			header: &tar.Header{
				Name:     "folder/symlink.txt",
				Typeflag: tar.TypeSymlink,
				Linkname: "folder/foo.txt",
				ModTime:  time3,
			},
		},
	}
	testTarPath, err := newTestTar(entries)
	if err != nil {
		// Setup failures are fatal (they were t.Errorf before).
		t.Fatalf("unexpected error: %v", err)
	}
	defer os.Remove(testTarPath)
	containerTar, err := os.Open(testTarPath)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	defer containerTar.Close()
	tr := tar.NewReader(containerTar)
	tmpdir, err := ioutil.TempDir("", "rkt-temp-dir")
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	os.RemoveAll(tmpdir)
	err = os.MkdirAll(tmpdir, 0755)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// BUG FIX: the original never removed tmpdir, leaking a temp dir on
	// every run (sibling tests all defer this).
	defer os.RemoveAll(tmpdir)
	err = ExtractTar(tr, tmpdir, false, nil)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	err = checkTime(filepath.Join(tmpdir, "folder/"), time1)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	err = checkTime(filepath.Join(tmpdir, "folder/foo.txt"), time2)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	//Check only (by now) on linux
	if runtime.GOOS == "linux" {
		err = checkTime(filepath.Join(tmpdir, "folder/symlink.txt"), time3)
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
	}
}
func checkTime(path string, time time.Time) error {
info, err := os.Lstat(path)
if err != nil {
return err
}
if info.ModTime() != time {
return fmt.Errorf("%s: info.ModTime: %s, different from expected time: %s", path, info.ModTime(), time)
}
return nil
}

View File

@@ -1,43 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// These functions are from github.com/docker/docker/pkg/system
// TODO(sgotti) waiting for a utimensat functions accepting flags and a
// LUtimesNano using it in https://github.com/golang/sys/
package tar
import (
"syscall"
"unsafe"
)
// LUtimesNano sets the access and modification times (ts[0] and ts[1])
// of path without following a final symlink, via a direct utimensat(2)
// syscall. ENOSYS (kernel without utimensat) is deliberately ignored,
// turning the call into a no-op on very old kernels.
func LUtimesNano(path string, ts []syscall.Timespec) error {
	// These are not currently available in syscall
	AT_FDCWD := -100             // resolve path relative to the current working directory
	AT_SYMLINK_NOFOLLOW := 0x100 // operate on the symlink itself, not its target
	var _path *byte
	_path, err := syscall.BytePtrFromString(path)
	if err != nil {
		return err
	}
	// err here is a syscall.Errno; 0 means success. NOTE(review): ts is
	// assumed to have at least one element — &ts[0] panics otherwise.
	if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS {
		return err
	}
	return nil
}

View File

@@ -1,25 +0,0 @@
// +build !linux
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// These functions are from github.com/docker/docker/pkg/system
package tar
import "syscall"
// LUtimesNano is the non-linux stub: changing symlink timestamps is not
// implemented here, so it always returns ErrNotSupportedPlatform
// (declared elsewhere in this package).
func LUtimesNano(path string, ts []syscall.Timespec) error {
	return ErrNotSupportedPlatform
}

View File

@@ -1,87 +0,0 @@
package store
import (
"database/sql"
"time"
)
// ACIInfo is used to store information about an ACI.
type ACIInfo struct {
	// BlobKey is the key in the blob/imageManifest store of the related
	// ACI file and is the db primary key.
	BlobKey string
	// AppName is the app name provided by the ACI.
	AppName string
	// ImportTime is the time this ACI was imported in the store.
	ImportTime time.Time
	// Latest defines if the ACI was imported using the latest pattern (no
	// version label was provided on ACI discovery)
	Latest bool
}

// NewACIInfo returns an ACIInfo for blobKey. AppName is left empty and
// is expected to be filled in by the caller before writing the record.
func NewACIInfo(blobKey string, latest bool, t time.Time) *ACIInfo {
	return &ACIInfo{
		BlobKey:    blobKey,
		Latest:     latest,
		ImportTime: t,
	}
}
// GetACIInfosWithKeyPrefix returns all the ACIInfos with a blobkey starting with the given prefix.
func GetACIInfosWithKeyPrefix(tx *sql.Tx, prefix string) ([]*ACIInfo, error) {
	aciinfos := []*ACIInfo{}
	rows, err := tx.Query("SELECT * from aciinfo WHERE hasPrefix(blobkey, $1)", prefix)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the result set was never closed; release it even on the
	// early-return Scan error path.
	defer rows.Close()
	for rows.Next() {
		aciinfo := &ACIInfo{}
		if err := rows.Scan(&aciinfo.BlobKey, &aciinfo.AppName, &aciinfo.ImportTime, &aciinfo.Latest); err != nil {
			return nil, err
		}
		aciinfos = append(aciinfos, aciinfo)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	// Return an explicit nil error (the original returned a stale err).
	return aciinfos, nil
}
// GetACIInfosWithAppName returns all the ACIInfos for a given appname. found will be
// false if no aciinfo exists.
func GetACIInfosWithAppName(tx *sql.Tx, appname string) ([]*ACIInfo, bool, error) {
	aciinfos := []*ACIInfo{}
	found := false
	rows, err := tx.Query("SELECT * from aciinfo WHERE appname == $1", appname)
	if err != nil {
		return nil, false, err
	}
	// BUG FIX: the result set was never closed; release it even on the
	// early-return Scan error path.
	defer rows.Close()
	for rows.Next() {
		found = true
		aciinfo := &ACIInfo{}
		if err := rows.Scan(&aciinfo.BlobKey, &aciinfo.AppName, &aciinfo.ImportTime, &aciinfo.Latest); err != nil {
			return nil, false, err
		}
		aciinfos = append(aciinfos, aciinfo)
	}
	if err := rows.Err(); err != nil {
		return nil, false, err
	}
	// Return an explicit nil error (the original returned a stale err).
	return aciinfos, found, nil
}
// WriteACIInfo adds or updates the provided aciinfo.
// ql lacks an INSERT OR UPDATE statement, so the row keyed by BlobKey
// is deleted first and then reinserted with the new values.
func WriteACIInfo(tx *sql.Tx, aciinfo *ACIInfo) error {
	if _, err := tx.Exec("DELETE from aciinfo where blobkey == $1", aciinfo.BlobKey); err != nil {
		return err
	}
	_, err := tx.Exec("INSERT into aciinfo values ($1, $2, $3, $4)", aciinfo.BlobKey, aciinfo.AppName, aciinfo.ImportTime, aciinfo.Latest)
	return err
}

View File

@@ -1,79 +0,0 @@
package store
import (
"database/sql"
"io/ioutil"
"os"
"testing"
)
// TestWriteACIInfo exercises WriteACIInfo/GetACIInfosWithAppName against
// a real store db: writing the same BlobKey twice must leave a single
// row, while a second BlobKey under the same AppName yields two rows.
func TestWriteACIInfo(t *testing.T) {
	dir, err := ioutil.TempDir("", tstprefix)
	if err != nil {
		t.Fatalf("error creating tempdir: %v", err)
	}
	defer os.RemoveAll(dir)
	s, err := NewStore(dir)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	if err = s.db.Do(func(tx *sql.Tx) error {
		aciinfo := &ACIInfo{
			BlobKey: "key01",
			AppName: "name01",
		}
		if err := WriteACIInfo(tx, aciinfo); err != nil {
			return err
		}
		// Insert it another time to check that is should be overwritten
		if err := WriteACIInfo(tx, aciinfo); err != nil {
			return err
		}
		return nil
	}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	aciinfos := []*ACIInfo{}
	ok := false
	if err = s.db.Do(func(tx *sql.Tx) error {
		// Deliberately assigns the outer aciinfos/ok/err so the results
		// survive the closure.
		aciinfos, ok, err = GetACIInfosWithAppName(tx, "name01")
		return err
	}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !ok {
		t.Fatalf("expected some records but none found")
	}
	if len(aciinfos) != 1 {
		t.Fatalf("wrong number of records returned, wanted: 1, got: %d", len(aciinfos))
	}
	// Add another ACIInfo for the same app name
	if err = s.db.Do(func(tx *sql.Tx) error {
		aciinfo := &ACIInfo{
			BlobKey: "key02",
			AppName: "name01",
		}
		if err := WriteACIInfo(tx, aciinfo); err != nil {
			return err
		}
		return nil
	}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err = s.db.Do(func(tx *sql.Tx) error {
		aciinfos, ok, err = GetACIInfosWithAppName(tx, "name01")
		return err
	}); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !ok {
		t.Fatalf("expected some records but none found")
	}
	if len(aciinfos) != 2 {
		t.Fatalf("wrong number of records returned, wanted: 2, got: %d", len(aciinfos))
	}
}

View File

@@ -1,114 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"database/sql"
"fmt"
"os"
"path/filepath"
"github.com/coreos/rkt/pkg/lock"
_ "github.com/cznic/ql/driver"
)
const (
	// DbFilename is the name of the ql database file inside the db dir.
	DbFilename = "ql.db"
)

// DB wraps a ql database stored in dbdir, guarded by an exclusive file
// lock on the directory while open.
type DB struct {
	dbdir string
	// lock is non-nil only while the db is open (see Open/Close).
	lock  *lock.FileLock
	sqldb *sql.DB
}

// NewDB creates dbdir if needed and returns an unopened DB for it.
func NewDB(dbdir string) (*DB, error) {
	if err := os.MkdirAll(dbdir, defaultPathPerm); err != nil {
		return nil, err
	}
	return &DB{dbdir: dbdir}, nil
}
// Open takes an exclusive file lock on the db directory and opens the
// ql database inside it. Open/Close must be paired; calling Open on an
// already-open DB panics.
func (db *DB) Open() error {
	// take a lock on db dir
	if db.lock != nil {
		panic("cas db lock already gained")
	}
	dl, err := lock.ExclusiveLock(db.dbdir, lock.Dir)
	if err != nil {
		return err
	}
	sqldb, err := sql.Open("ql", filepath.Join(db.dbdir, DbFilename))
	if err != nil {
		// BUG FIX: the original stored dl in db.lock before this point,
		// so a failed sql.Open left a stale closed lock behind and any
		// retried Open panicked on the check above. Only publish the
		// lock once the db is actually open.
		dl.Close()
		return err
	}
	db.lock = dl
	db.sqldb = sqldb
	return nil
}
// Close closes the ql database and releases the directory lock taken
// by Open. It panics when called on an unopened DB.
// NOTE(review): if sqldb.Close fails, the function returns early and
// the dir lock stays held — confirm callers treat that as fatal.
func (db *DB) Close() error {
	if db.lock == nil {
		panic("cas db, Close called without lock")
	}
	if db.sqldb == nil {
		panic("cas db, Close called without an open sqldb")
	}
	if err := db.sqldb.Close(); err != nil {
		return fmt.Errorf("cas db close failed: %v", err)
	}
	db.sqldb = nil
	db.lock.Close()
	db.lock = nil
	return nil
}

// Begin starts a new transaction on the underlying sql DB.
func (db *DB) Begin() (*sql.Tx, error) {
	return db.sqldb.Begin()
}
// txfunc is a function executed inside a db transaction.
type txfunc func(*sql.Tx) error

// Do Opens the db, executes DoTx and then Closes the DB
// NOTE(review): the deferred Close discards its error; callers only see
// errors from Open or from the transaction itself.
func (db *DB) Do(fns ...txfunc) error {
	err := db.Open()
	if err != nil {
		return err
	}
	defer db.Close()
	return db.DoTx(fns...)
}
// DoTx executes the provided txfuncs inside a unique transaction.
// If one of the functions returns an error the whole transaction is rolled back.
func (db *DB) DoTx(fns ...txfunc) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	for _, fn := range fns {
		if err := fn(tx); err != nil {
			tx.Rollback()
			return err
		}
	}
	// BUG FIX: the Commit error was silently discarded, so a failed
	// commit looked like success to the caller.
	return tx.Commit()
}

View File

@@ -1,59 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"database/sql"
"fmt"
)
// migrateFunc upgrades the db schema by one version inside tx.
type migrateFunc func(*sql.Tx) error

var (
	// migrateTable is a map of migrate functions. The key is the db
	// version to migrate to.
	migrateTable = map[int]migrateFunc{
		1: migrateToV1,
	}
)
// migrate upgrades the db schema, one version at a time, from its
// current version up to finalVersion, recording each step in the
// version table.
func migrate(tx *sql.Tx, finalVersion int) error {
	if finalVersion > dbVersion {
		return fmt.Errorf("required migrate final version greater than the last supported db version")
	}
	version, err := getDBVersion(tx)
	if err != nil {
		return err
	}
	for v := version + 1; v <= finalVersion; v++ {
		f, ok := migrateTable[v]
		if !ok {
			return fmt.Errorf("missing migrate function for version %d", v)
		}
		if err := f(tx); err != nil {
			return fmt.Errorf("failed to migrate db to version %d: %v", v, err)
		}
		// BUG FIX: the error from updateDBVersion was silently ignored,
		// which could leave a migrated db still recorded at the old
		// version.
		if err := updateDBVersion(tx, v); err != nil {
			return fmt.Errorf("failed to update db version to %d: %v", v, err)
		}
	}
	return nil
}
// migrateToV1 upgrades the db from schema version 0 to version 1.
// The two versions share the same table layout, so no data changes are
// needed; the version bump itself is recorded by the caller.
func migrateToV1(tx *sql.Tx) error {
	return nil
}

View File

@@ -1,396 +0,0 @@
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"database/sql"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
"time"
)
// testdb abstracts one schema version of the store db for migration
// tests: it can populate a fresh db, load itself back, and compare.
type testdb interface {
	version() int
	populate(db *DB) error
	load(db *DB) error
	compare(db testdb) bool
}

// DBV0 models a version-0 db as plain slices of its two tables.
type DBV0 struct {
	aciinfos []*ACIInfoV0_1
	remotes  []*RemoteV0_1
}

func (d *DBV0) version() int {
	return 0
}

func (d *DBV0) populate(db *DB) error {
	// As DBV0 and DBV1 have the same schema use a common populate
	// function.
	return populateDBV0_1(db, d.version(), d.aciinfos, d.remotes)
}

// load populates the given struct with the data in db.
// the given struct d should be empty
func (d *DBV0) load(db *DB) error {
	fn := func(tx *sql.Tx) error {
		var err error
		d.aciinfos, err = getAllACIInfosV0_1(tx)
		if err != nil {
			return err
		}
		d.remotes, err = getAllRemoteV0_1(tx)
		if err != nil {
			return err
		}
		return nil
	}
	if err := db.Do(fn); err != nil {
		return err
	}
	return nil
}

// compare reports whether td is a DBV0 with the same rows, ignoring
// row order.
func (d *DBV0) compare(td testdb) bool {
	d2, ok := td.(*DBV0)
	if !ok {
		return false
	}
	if !compareSlicesNoOrder(d.aciinfos, d2.aciinfos) {
		return false
	}
	if !compareSlicesNoOrder(d.remotes, d2.remotes) {
		return false
	}
	return true
}

// DBV1 models a version-1 db; the schema is identical to V0, only the
// recorded version number differs.
type DBV1 struct {
	aciinfos []*ACIInfoV0_1
	remotes  []*RemoteV0_1
}

func (d *DBV1) version() int {
	return 1
}

func (d *DBV1) populate(db *DB) error {
	return populateDBV0_1(db, d.version(), d.aciinfos, d.remotes)
}

// load populates the given struct with the data in db.
func (d *DBV1) load(db *DB) error {
	fn := func(tx *sql.Tx) error {
		var err error
		d.aciinfos, err = getAllACIInfosV0_1(tx)
		if err != nil {
			return err
		}
		d.remotes, err = getAllRemoteV0_1(tx)
		if err != nil {
			return err
		}
		return nil
	}
	if err := db.Do(fn); err != nil {
		return err
	}
	return nil
}

// compare reports whether td is a DBV1 with the same rows, ignoring
// row order.
func (d *DBV1) compare(td testdb) bool {
	d2, ok := td.(*DBV1)
	if !ok {
		return false
	}
	if !compareSlicesNoOrder(d.aciinfos, d2.aciinfos) {
		return false
	}
	if !compareSlicesNoOrder(d.remotes, d2.remotes) {
		return false
	}
	return true
}
// The ACIInfo struct for different db versions. The suffix VX_Y names
// the first and last version where the format is unchanged.
// When bumping the db version without changing the struct format,
// update the latest existing struct's suffix (ex. V0_1 to V0_2);
// when the format changes, add a new struct and accessor function.
// The same applies to all of the other versioned structs.
type ACIInfoV0_1 struct {
	BlobKey    string
	AppName    string
	ImportTime time.Time
	Latest     bool
}
// getAllACIInfosV0_1 reads every row of the aciinfo table in the
// V0/V1 column layout.
func getAllACIInfosV0_1(tx *sql.Tx) ([]*ACIInfoV0_1, error) {
	aciinfos := []*ACIInfoV0_1{}
	rows, err := tx.Query("SELECT * from aciinfo")
	if err != nil {
		return nil, err
	}
	// BUG FIX: close the result set even on the Scan error path.
	defer rows.Close()
	for rows.Next() {
		aciinfo := &ACIInfoV0_1{}
		if err := rows.Scan(&aciinfo.BlobKey, &aciinfo.AppName, &aciinfo.ImportTime, &aciinfo.Latest); err != nil {
			return nil, err
		}
		aciinfos = append(aciinfos, aciinfo)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return aciinfos, nil
}
// RemoteV0_1 mirrors the remote table's columns in the V0/V1 layout.
type RemoteV0_1 struct {
	ACIURL  string
	SigURL  string
	ETag    string
	BlobKey string
}
// getAllRemoteV0_1 reads every row of the remote table in the V0/V1
// column layout.
func getAllRemoteV0_1(tx *sql.Tx) ([]*RemoteV0_1, error) {
	remotes := []*RemoteV0_1{}
	rows, err := tx.Query("SELECT * from remote")
	if err != nil {
		return nil, err
	}
	// BUG FIX: close the result set even on the Scan error path.
	defer rows.Close()
	for rows.Next() {
		remote := &RemoteV0_1{}
		if err := rows.Scan(&remote.ACIURL, &remote.SigURL, &remote.ETag, &remote.BlobKey); err != nil {
			return nil, err
		}
		remotes = append(remotes, remote)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return remotes, nil
}
// populateDBV0_1 creates the V0/V1 schema (recording dbVersion in the
// version table) and inserts the given aciinfo and remote rows.
// Each db.Do below opens and closes the db separately; the three steps
// are intentionally sequential, not one transaction.
func populateDBV0_1(db *DB, dbVersion int, aciInfos []*ACIInfoV0_1, remotes []*RemoteV0_1) error {
	var dbCreateStmts = [...]string{
		// version table
		"CREATE TABLE IF NOT EXISTS version (version int);",
		fmt.Sprintf("INSERT INTO version VALUES (%d)", dbVersion),

		// remote table. The primary key is "aciurl".
		"CREATE TABLE IF NOT EXISTS remote (aciurl string, sigurl string, etag string, blobkey string);",
		"CREATE UNIQUE INDEX IF NOT EXISTS aciurlidx ON remote (aciurl)",

		// aciinfo table. The primary key is "blobkey" and it matches the key used to save that aci in the blob store
		"CREATE TABLE IF NOT EXISTS aciinfo (blobkey string, appname string, importtime time, latest bool);",
		"CREATE UNIQUE INDEX IF NOT EXISTS blobkeyidx ON aciinfo (blobkey)",
		"CREATE INDEX IF NOT EXISTS appnameidx ON aciinfo (appname)",
	}
	// Step 1: create tables and indexes.
	fn := func(tx *sql.Tx) error {
		for _, stmt := range dbCreateStmts {
			_, err := tx.Exec(stmt)
			if err != nil {
				return err
			}
		}
		return nil
	}
	if err := db.Do(fn); err != nil {
		return err
	}
	// Step 2: insert the aciinfo rows.
	fn = func(tx *sql.Tx) error {
		for _, aciinfo := range aciInfos {
			_, err := tx.Exec("INSERT into aciinfo values ($1, $2, $3, $4)", aciinfo.BlobKey, aciinfo.AppName, aciinfo.ImportTime, aciinfo.Latest)
			if err != nil {
				return err
			}
		}
		return nil
	}
	if err := db.Do(fn); err != nil {
		return err
	}
	// Step 3: insert the remote rows.
	fn = func(tx *sql.Tx) error {
		for _, remote := range remotes {
			_, err := tx.Exec("INSERT into remote values ($1, $2, $3, $4)", remote.ACIURL, remote.SigURL, remote.ETag, remote.BlobKey)
			if err != nil {
				return err
			}
		}
		return nil
	}
	if err := db.Do(fn); err != nil {
		return err
	}
	return nil
}
// migrateTest describes one migration scenario: the db to start from,
// the expected result, and an empty value of the result's concrete
// type to load into.
type migrateTest struct {
	predb  testdb
	postdb testdb
	// Needed to have the right DB type to load from
	curdb testdb
}

// testMigrate populates a fresh db from tt.predb, migrates it to
// tt.postdb's version, and verifies both the recorded version and the
// surviving row data.
func testMigrate(tt migrateTest) error {
	dir, err := ioutil.TempDir("", tstprefix)
	if err != nil {
		return fmt.Errorf("error creating tempdir: %v", err)
	}
	defer os.RemoveAll(dir)

	casDir := filepath.Join(dir, "cas")
	db, err := NewDB(filepath.Join(casDir, "db"))
	if err != nil {
		return err
	}
	if err = tt.predb.populate(db); err != nil {
		return err
	}

	// Run the migration itself.
	fn := func(tx *sql.Tx) error {
		err := migrate(tx, tt.postdb.version())
		if err != nil {
			return err
		}
		return nil
	}
	if err = db.Do(fn); err != nil {
		return err
	}

	// The version table must now record the target version.
	var curDBVersion int
	fn = func(tx *sql.Tx) error {
		var err error
		curDBVersion, err = getDBVersion(tx)
		if err != nil {
			return err
		}
		return nil
	}
	if err = db.Do(fn); err != nil {
		return err
	}
	if curDBVersion != tt.postdb.version() {
		return fmt.Errorf("wrong db version: got %#v, want %#v", curDBVersion, tt.postdb.version())
	}

	// Row data must match the expected post-migration db.
	if err := tt.curdb.load(db); err != nil {
		return err
	}
	if !tt.curdb.compare(tt.postdb) {
		// TODO(sgotti) not very useful as these are pointers.
		// Use something like go-spew to write the full data?
		return fmt.Errorf("got %#v, want %#v", tt.curdb, tt.postdb)
	}
	return nil
}
// TestMigrate runs the V0->V1 migration over an empty db and over a
// populated one, checking that all rows survive unchanged.
// (The original also created — and deferred removal of — a tempdir it
// never used; testMigrate makes its own, so that dead setup is gone.)
func TestMigrate(t *testing.T) {
	now := time.Now()
	tests := []migrateTest{
		// Test migration from V0 to V1
		// Empty db
		{
			&DBV0{},
			&DBV1{},
			&DBV1{},
		},
		{
			&DBV0{
				[]*ACIInfoV0_1{
					{"sha512-aaaaaaaa", "example.com/app01", now, false},
					{"sha512-bbbbbbbb", "example.com/app02", now, true},
				},
				[]*RemoteV0_1{
					{"http://example.com/app01.aci", "http://example.com/app01.aci.asc", "", "sha512-aaaaaaaa"},
					{"http://example.com/app02.aci", "http://example.com/app02.aci.asc", "", "sha512-bbbbbbbb"},
				},
			},
			&DBV1{
				[]*ACIInfoV0_1{
					{"sha512-aaaaaaaa", "example.com/app01", now, false},
					{"sha512-bbbbbbbb", "example.com/app02", now, true},
				},
				[]*RemoteV0_1{
					{"http://example.com/app01.aci", "http://example.com/app01.aci.asc", "", "sha512-aaaaaaaa"},
					{"http://example.com/app02.aci", "http://example.com/app02.aci.asc", "", "sha512-bbbbbbbb"},
				},
			},
			&DBV1{},
		},
	}
	for i, tt := range tests {
		if err := testMigrate(tt); err != nil {
			t.Errorf("#%d: unexpected error: %v", i, err)
		}
	}
}
// compareSlicesNoOrder reports whether the two slices (or arrays)
// contain deeply-equal elements regardless of order. Each element of
// i2 is matched at most once, so duplicates must occur the same number
// of times on both sides.
func compareSlicesNoOrder(i1 interface{}, i2 interface{}) bool {
	s1 := interfaceToSlice(i1)
	s2 := interfaceToSlice(i2)

	if len(s1) != len(s2) {
		return false
	}

	// seen marks indexes of s2 already matched against an element of s1.
	seen := map[int]bool{}
	for _, v1 := range s1 {
		found := false
		// j renamed from i2, which shadowed the parameter of the same name.
		for j, v2 := range s2 {
			if seen[j] {
				continue
			}
			if reflect.DeepEqual(v1, v2) {
				found = true
				seen[j] = true
				// BUG FIX: the original used continue here and kept
				// scanning the rest of s2 after a match; break stops as
				// soon as v1 is paired.
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}
// interfaceToSlice converts a slice or array held in an empty interface
// into a []interface{} of its elements. It panics when s is neither a
// slice nor an array.
func interfaceToSlice(s interface{}) []interface{} {
	v := reflect.ValueOf(s)
	switch v.Kind() {
	case reflect.Slice, reflect.Array:
		// supported kinds
	default:
		panic(fmt.Errorf("Expected slice or array, got %T", s))
	}
	out := make([]interface{}, v.Len())
	for i := range out {
		out[i] = v.Index(i).Interface()
	}
	return out
}

View File

@@ -1,72 +0,0 @@
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package store implements a content-addressable-store on disk.
// It leverages the `diskv` package to store items in a simple
// key-value blob store: https://github.com/peterbourgon/diskv
package store
import "database/sql"
// NewRemote returns a Remote for the given ACI and signature URLs,
// with ETag and BlobKey left empty.
func NewRemote(aciurl, sigurl string) *Remote {
	r := &Remote{
		ACIURL: aciurl,
		SigURL: sigurl,
	}
	return r
}

// Remote records where an ACI was downloaded from and where its
// content lives in the blob store.
type Remote struct {
	ACIURL string
	SigURL string
	ETag   string
	// The key in the blob store under which the ACI has been saved.
	BlobKey string
}
// GetRemote tries to retrieve a remote with the given aciURL. found will be
// false if remote doesn't exist. Note that the returned Remote's ACIURL
// field is left empty (only sigurl/etag/blobkey are selected).
func GetRemote(tx *sql.Tx, aciURL string) (remote *Remote, found bool, err error) {
	remote = &Remote{}
	rows, err := tx.Query("SELECT sigurl, etag, blobkey FROM remote WHERE aciurl == $1", aciURL)
	if err != nil {
		return nil, false, err
	}
	// BUG FIX: close the result set even on the Scan error path.
	defer rows.Close()
	for rows.Next() {
		found = true
		if err := rows.Scan(&remote.SigURL, &remote.ETag, &remote.BlobKey); err != nil {
			return nil, false, err
		}
	}
	if err := rows.Err(); err != nil {
		return nil, false, err
	}
	// Return an explicit nil error (the original returned a stale err).
	return remote, found, nil
}
// WriteRemote adds or updates the provided Remote.
// ql lacks an INSERT OR UPDATE statement, so the row keyed by ACIURL
// is deleted first and then reinserted with the new values.
func WriteRemote(tx *sql.Tx, remote *Remote) error {
	if _, err := tx.Exec("DELETE FROM remote WHERE aciurl == $1", remote.ACIURL); err != nil {
		return err
	}
	_, err := tx.Exec("INSERT INTO remote VALUES ($1, $2, $3, $4)", remote.ACIURL, remote.SigURL, remote.ETag, remote.BlobKey)
	return err
}

View File

@@ -1,66 +0,0 @@
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"io/ioutil"
"os"
"testing"
)
// TestNewRemote checks that a written Remote row can be read back, and
// that looking up an unknown URL reports found == false with an empty
// Remote.
func TestNewRemote(t *testing.T) {
	const (
		u1   = "https://example.com"
		u2   = "https://foo.com"
		data = "asdf"
	)

	dir, err := ioutil.TempDir("", "")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)
	s, err := NewStore(dir)
	if err != nil {
		t.Fatal(err)
	}

	// Create our first Remote, and simulate Store() to create row in the table
	na := NewRemote(u1, "")
	na.BlobKey = data
	// BUG FIX: the WriteRemote error was silently discarded.
	if err := s.WriteRemote(na); err != nil {
		t.Fatalf("unexpected error writing remote: %v", err)
	}

	// Get a new remote w the same parameters, reading from table should be fine
	nb, ok, err := s.GetRemote(u1)
	if err != nil {
		t.Fatalf("unexpected error reading index: %v", err)
	}
	if !ok {
		t.Fatalf("unexpected index not found")
	}
	if nb.BlobKey != data {
		t.Fatalf("bad data returned from store: got %v, want %v", nb.BlobKey, data)
	}

	// Get a remote with a different URI: the lookup itself must succeed
	// but report not-found.
	nc, ok, err := s.GetRemote(u2)
	// BUG FIX: err was declared but never checked; an actual error
	// would have made nc nil and the dereference below panic.
	if err != nil {
		t.Fatalf("unexpected error reading index: %v", err)
	}
	if ok {
		t.Fatalf("unexpected index found")
	}
	// Remote shouldn't be populated
	if nc.BlobKey != "" {
		t.Errorf("unexpected blob: got %v", nc.BlobKey)
	}
}

View File

@@ -1,87 +0,0 @@
package store
import (
"database/sql"
"fmt"
)
const (
	// Incremental db version at the current code revision.
	dbVersion = 1
)

// Statement to run when creating a db. These are the statements to create the
// db at the latest db version (dbVersion) provided by this rkt version.
// If the db already exists migration statements should be executed
var dbCreateStmts = [...]string{
	// version table
	"CREATE TABLE IF NOT EXISTS version (version int);",
	fmt.Sprintf("INSERT INTO version VALUES (%d)", dbVersion),

	// remote table. The primary key is "aciurl".
	"CREATE TABLE IF NOT EXISTS remote (aciurl string, sigurl string, etag string, blobkey string);",
	"CREATE UNIQUE INDEX IF NOT EXISTS aciurlidx ON remote (aciurl)",

	// aciinfo table. The primary key is "blobkey" and it matches the key used to save that aci in the blob store
	"CREATE TABLE IF NOT EXISTS aciinfo (blobkey string, appname string, importtime time, latest bool);",
	"CREATE UNIQUE INDEX IF NOT EXISTS blobkeyidx ON aciinfo (blobkey)",
	"CREATE INDEX IF NOT EXISTS appnameidx ON aciinfo (appname)",
}
// dbIsPopulated checks if the db is already populated (at any version)
// by verifying that the "version" table exists in ql's __Table catalog.
func dbIsPopulated(tx *sql.Tx) (bool, error) {
	rows, err := tx.Query("SELECT Name FROM __Table where Name == $1", "version")
	if err != nil {
		return false, err
	}
	// BUG FIX: the result set was never closed.
	defer rows.Close()
	count := 0
	for rows.Next() {
		count++
	}
	if err := rows.Err(); err != nil {
		return false, err
	}
	return count > 0, nil
}
// getDBVersion retrieves the current db version from the version table,
// or an error if the table is empty.
func getDBVersion(tx *sql.Tx) (int, error) {
	var version int
	rows, err := tx.Query("SELECT version FROM version")
	if err != nil {
		return -1, err
	}
	// BUG FIX: the result set was never closed, and the early break
	// below left it open even on success.
	defer rows.Close()
	found := false
	for rows.Next() {
		if err := rows.Scan(&version); err != nil {
			return -1, err
		}
		found = true
		break
	}
	if err := rows.Err(); err != nil {
		return -1, err
	}
	if !found {
		return -1, fmt.Errorf("db version table empty")
	}
	return version, nil
}
// updateDBVersion records version as the current db schema version.
// ql has no INSERT OR UPDATE, so the single version row is deleted and
// re-inserted, which is faster than probing first.
func updateDBVersion(tx *sql.Tx, version int) error {
	if _, err := tx.Exec("DELETE FROM version"); err != nil {
		return err
	}
	_, err := tx.Exec("INSERT INTO version VALUES ($1)", version)
	return err
}

View File

@@ -1,547 +0,0 @@
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"bufio"
"bytes"
"crypto/sha512"
"database/sql"
"encoding/json"
"fmt"
"hash"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/coreos/rkt/pkg/lock"
"github.com/appc/spec/aci"
"github.com/appc/spec/schema"
"github.com/appc/spec/schema/types"
"github.com/peterbourgon/diskv"
)
const (
	// Indices into Store.stores; must match the order of diskvStores below.
	blobType int64 = iota
	imageManifestType

	defaultPathPerm os.FileMode = 0777
	defaultFilePerm os.FileMode = 0660

	// To ameliorate excessively long paths, keys for the (blob)store use
	// only the first half of a sha512 rather than the entire sum
	hashPrefix = "sha512-"
	lenHash    = sha512.Size       // raw byte size
	lenHashKey = (lenHash / 2) * 2 // half length, in hex characters
	lenKey     = len(hashPrefix) + lenHashKey
	minlenKey  = len(hashPrefix) + 2 // at least sha512-aa
)

// diskvStores names the diskv-backed sub-stores, indexed by blobType and
// imageManifestType above.
var diskvStores = [...]string{
	"blob",
	"imageManifest",
}
// Store encapsulates a content-addressable-storage for storing ACIs on disk.
type Store struct {
	base      string         // root directory of the store
	stores    []*diskv.Diskv // backing diskv stores, indexed by blobType/imageManifestType
	db        *DB            // metadata db (version, remote, aciinfo tables)
	treestore *TreeStore     // store of rendered ACI trees
	// storeLock is a lock on the whole store. It's used for store migration. If
	// a previous version of rkt is using the store and in the meantime a
	// new version is installed and executed it will try migrate the store
	// during NewStore. This means that the previous running rkt will fail
	// or behave badly after the migration as it's expecting another db format.
	// For this reason, before executing migration, an exclusive lock must
	// be taken on the whole store.
	storeLock        *lock.FileLock
	imageLockDir     string // directory holding per-image lock files
	treeStoreLockDir string // directory holding per-treestore lock files
}
// NewStore opens (creating it if necessary) the store rooted at base: it
// sets up the lock directories, the diskv-backed blob/manifest stores, the
// metadata db and the treestore, then creates or migrates the db schema.
func NewStore(base string) (*Store, error) {
	casDir := filepath.Join(base, "cas")
	s := &Store{
		base:   base,
		stores: make([]*diskv.Diskv, len(diskvStores)),
	}
	s.imageLockDir = filepath.Join(casDir, "imagelocks")
	err := os.MkdirAll(s.imageLockDir, defaultPathPerm)
	if err != nil {
		return nil, err
	}
	s.treeStoreLockDir = filepath.Join(casDir, "treestorelocks")
	err = os.MkdirAll(s.treeStoreLockDir, defaultPathPerm)
	if err != nil {
		return nil, err
	}
	// Take a shared cas lock
	s.storeLock, err = lock.NewLock(casDir, lock.Dir)
	if err != nil {
		return nil, err
	}
	for i, p := range diskvStores {
		s.stores[i] = diskv.New(diskv.Options{
			BasePath:  filepath.Join(casDir, p),
			Transform: blockTransform,
		})
	}
	db, err := NewDB(filepath.Join(casDir, "db"))
	if err != nil {
		return nil, err
	}
	s.db = db
	s.treestore = &TreeStore{path: filepath.Join(base, "cas", "tree")}
	needsMigrate := false
	// First transaction: create the schema when the db is empty, otherwise
	// read its version and decide whether a migration is required.
	fn := func(tx *sql.Tx) error {
		var err error
		ok, err := dbIsPopulated(tx)
		if err != nil {
			return err
		}
		// populate the db
		if !ok {
			for _, stmt := range dbCreateStmts {
				_, err = tx.Exec(stmt)
				if err != nil {
					return err
				}
			}
			return nil
		}
		// if db is populated check its version
		version, err := getDBVersion(tx)
		if err != nil {
			return err
		}
		if version < dbVersion {
			needsMigrate = true
		}
		if version > dbVersion {
			return fmt.Errorf("Current store db version: %d greater than the current rkt expected version: %d", version, dbVersion)
		}
		return nil
	}
	if err = db.Do(fn); err != nil {
		return nil, err
	}
	// migration is done in another transaction as it must take an exclusive
	// store lock. If, in the meantime, another process has already done the
	// migration, between the previous db version check and the below
	// migration code, the migration will do nothing as it'll start
	// migration from the current version.
	if needsMigrate {
		// Take an exclusive store lock
		err := s.storeLock.ExclusiveLock()
		if err != nil {
			return nil, err
		}
		// TODO(sgotti) take a db backup (for debugging and last resort rollback?)
		fn := func(tx *sql.Tx) error {
			return migrate(tx, dbVersion)
		}
		if err = db.Do(fn); err != nil {
			return nil, err
		}
	}
	return s, nil
}
// TmpFile returns an *os.File created in the store's scratch directory, so
// it lives on the same filesystem as the Store, or any error encountered.
func (s Store) TmpFile() (*os.File, error) {
	tmpdir, err := s.TmpDir()
	if err != nil {
		return nil, err
	}
	return ioutil.TempFile(tmpdir, "")
}
// TmpDir creates (if necessary) and returns a scratch directory that lives
// on the same filesystem as the Store, or any error encountered.
func (s Store) TmpDir() (string, error) {
	tmpdir := filepath.Join(s.base, "tmp")
	err := os.MkdirAll(tmpdir, defaultPathPerm)
	if err != nil {
		return "", err
	}
	return tmpdir, nil
}
// ResolveKey resolves a partial key (of format `sha512-0c45e8c0ab2`) to a
// full key by treating it as a prefix and asking the store's db for the
// single ACI whose blob key starts with it. Keys longer than the canonical
// key length are first truncated.
func (s Store) ResolveKey(key string) (string, error) {
	switch {
	case !strings.HasPrefix(key, hashPrefix):
		return "", fmt.Errorf("wrong key prefix")
	case len(key) < minlenKey:
		return "", fmt.Errorf("key too short")
	case len(key) > lenKey:
		key = key[:lenKey]
	}
	var matches []*ACIInfo
	err := s.db.Do(func(tx *sql.Tx) error {
		var qerr error
		matches, qerr = GetACIInfosWithKeyPrefix(tx, key)
		return qerr
	})
	if err != nil {
		return "", fmt.Errorf("error retrieving ACI Infos: %v", err)
	}
	switch len(matches) {
	case 0:
		return "", fmt.Errorf("no keys found")
	case 1:
		return matches[0].BlobKey, nil
	default:
		return "", fmt.Errorf("ambiguous key: %q", key)
	}
}
// ReadStream resolves the (possibly partial) key and returns a reader over
// the stored blob's contents. A shared per-image lock is held while the
// stream is opened.
func (s Store) ReadStream(key string) (io.ReadCloser, error) {
	fullKey, err := s.ResolveKey(key)
	if err != nil {
		return nil, fmt.Errorf("error resolving key: %v", err)
	}
	keyLock, err := lock.SharedKeyLock(s.imageLockDir, fullKey)
	if err != nil {
		return nil, fmt.Errorf("error locking image: %v", err)
	}
	defer keyLock.Close()
	return s.stores[blobType].ReadStream(fullKey, false)
}
// WriteACI takes an ACI encapsulated in an io.Reader, decompresses it if
// necessary, and then stores it in the store under a key based on the image ID
// (i.e. the hash of the uncompressed ACI).
// latest defines if the aci has to be marked as the latest. For example an ACI
// discovered without asking for a specific version (latest pattern).
// It returns the key the image was stored under.
func (s Store) WriteACI(r io.Reader, latest bool) (string, error) {
	// Peek at the first 512 bytes of the reader to detect filetype
	br := bufio.NewReaderSize(r, 32768)
	hd, err := br.Peek(512)
	switch err {
	case nil:
	case io.EOF: // We may have still peeked enough to guess some types, so fall through
	default:
		return "", fmt.Errorf("error reading image header: %v", err)
	}
	typ, err := aci.DetectFileType(bytes.NewBuffer(hd))
	if err != nil {
		return "", fmt.Errorf("error detecting image type: %v", err)
	}
	dr, err := decompress(br, typ)
	if err != nil {
		return "", fmt.Errorf("error decompressing image: %v", err)
	}
	// Write the decompressed image (tar) to a temporary file on disk, and
	// tee so we can generate the hash
	h := sha512.New()
	tr := io.TeeReader(dr, h)
	fh, err := s.TmpFile()
	if err != nil {
		return "", fmt.Errorf("error creating image: %v", err)
	}
	// Clean the temporary file up on every exit path. The original leaked
	// it on each error return below. After a successful (moving) Import the
	// file is already gone and this Remove is a harmless no-op, so the
	// error is deliberately ignored.
	defer os.Remove(fh.Name())
	if _, err := io.Copy(fh, tr); err != nil {
		return "", fmt.Errorf("error copying image: %v", err)
	}
	im, err := aci.ManifestFromImage(fh)
	if err != nil {
		return "", fmt.Errorf("error extracting image manifest: %v", err)
	}
	if err := fh.Close(); err != nil {
		return "", fmt.Errorf("error closing image: %v", err)
	}
	// Import the uncompressed image into the store at the real key,
	// holding an exclusive per-image lock.
	key := s.HashToKey(h)
	keyLock, err := lock.ExclusiveKeyLock(s.imageLockDir, key)
	if err != nil {
		return "", fmt.Errorf("error locking image: %v", err)
	}
	defer keyLock.Close()
	if err = s.stores[blobType].Import(fh.Name(), key, true); err != nil {
		return "", fmt.Errorf("error importing image: %v", err)
	}
	// Save the imagemanifest using the same key used for the image
	imj, err := json.Marshal(im)
	if err != nil {
		return "", fmt.Errorf("error marshalling image manifest: %v", err)
	}
	if err = s.stores[imageManifestType].Write(key, imj); err != nil {
		return "", fmt.Errorf("error importing image manifest: %v", err)
	}
	// Save aciinfo
	if err = s.db.Do(func(tx *sql.Tx) error {
		aciinfo := &ACIInfo{
			BlobKey:    key,
			AppName:    im.Name.String(),
			ImportTime: time.Now(),
			Latest:     latest,
		}
		return WriteACIInfo(tx, aciinfo)
	}); err != nil {
		return "", fmt.Errorf("error writing ACI Info: %v", err)
	}
	// The treestore for this ACI is not written here as ACIs downloaded as
	// dependencies of another ACI will be exploded also if never directly used.
	// Users of treestore should call s.RenderTreeStore before using it.
	return key, nil
}
// RenderTreeStore renders a treestore for the given image key if it's not
// already fully rendered.
// Users of treestore should call s.RenderTreeStore before using it to ensure
// that the treestore is completely rendered.
// If rebuild is true the tree is removed and rendered again even when it is
// already fully rendered.
func (s Store) RenderTreeStore(key string, rebuild bool) error {
	// this lock references the treestore dir for the specified key. This
	// is different from a lock on an image key as internally
	// treestore.Write calls the acirenderer functions that use GetACI and
	// GetImageManifest which are taking the image(s) lock.
	treeStoreKeyLock, err := lock.ExclusiveKeyLock(s.treeStoreLockDir, key)
	if err != nil {
		return fmt.Errorf("error locking tree store: %v", err)
	}
	defer treeStoreKeyLock.Close()
	if !rebuild {
		rendered, err := s.treestore.IsRendered(key)
		if err != nil {
			return fmt.Errorf("cannot determine if tree is already rendered: %v", err)
		}
		if rendered {
			return nil
		}
	}
	// Firstly remove a possible partial treestore if existing.
	// This is needed as a previous ACI removal operation could have failed
	// cleaning the tree store leaving some stale files.
	err = s.treestore.Remove(key)
	if err != nil {
		return err
	}
	err = s.treestore.Write(key, &s)
	if err != nil {
		return err
	}
	return nil
}
// CheckTreeStore verifies the treestore consistency for the specified key,
// holding a shared per-treestore lock for the duration of the check.
func (s Store) CheckTreeStore(key string) error {
	tsLock, err := lock.SharedKeyLock(s.treeStoreLockDir, key)
	if err != nil {
		return fmt.Errorf("error locking tree store: %v", err)
	}
	defer tsLock.Close()
	return s.treestore.Check(key)
}
// GetTreeStorePath returns the absolute path of the treestore for the specified key.
// It doesn't ensure that the path exists and is fully rendered. This should
// be done calling IsRendered()
func (s Store) GetTreeStorePath(key string) string {
	// Pure delegation to the underlying TreeStore.
	return s.treestore.GetPath(key)
}
// GetTreeStoreRootFS returns the absolute path of the rootfs in the treestore
// for specified key.
// It doesn't ensure that the rootfs exists and is fully rendered. This should
// be done calling IsRendered()
func (s Store) GetTreeStoreRootFS(key string) string {
	// Pure delegation to the underlying TreeStore.
	return s.treestore.GetRootFS(key)
}
// GetRemote tries to retrieve a remote with the given ACIURL. found will be
// false if the remote doesn't exist.
func (s Store) GetRemote(aciURL string) (*Remote, bool, error) {
	var (
		remote *Remote
		found  bool
	)
	err := s.db.Do(func(tx *sql.Tx) error {
		var qerr error
		remote, found, qerr = GetRemote(tx, aciURL)
		return qerr
	})
	return remote, found, err
}
// WriteRemote adds or updates the provided Remote in the store's db.
func (s Store) WriteRemote(remote *Remote) error {
	return s.db.Do(func(tx *sql.Tx) error {
		return WriteRemote(tx, remote)
	})
}
// GetImageManifest returns the ImageManifest stored alongside the blob with
// the specified (possibly partial) key. A shared per-image lock is held
// while reading.
func (s Store) GetImageManifest(key string) (*schema.ImageManifest, error) {
	fullKey, err := s.ResolveKey(key)
	if err != nil {
		return nil, fmt.Errorf("error resolving key: %v", err)
	}
	keyLock, err := lock.SharedKeyLock(s.imageLockDir, fullKey)
	if err != nil {
		return nil, fmt.Errorf("error locking image: %v", err)
	}
	defer keyLock.Close()
	imj, err := s.stores[imageManifestType].Read(fullKey)
	if err != nil {
		return nil, fmt.Errorf("error retrieving image manifest: %v", err)
	}
	var im *schema.ImageManifest
	if err := json.Unmarshal(imj, &im); err != nil {
		return nil, fmt.Errorf("error unmarshalling image manifest: %v", err)
	}
	return im, nil
}
// GetACI retrieves the ACI that best matches the provided app name and labels.
// The returned value is the blob store key of the retrieved ACI.
// If there are multiple matching ACIs choose the latest one (defined as the
// last one imported in the store).
// If no version label is requested, ACIs marked as latest in the ACIInfo are
// preferred.
func (s Store) GetACI(name types.ACName, labels types.Labels) (string, error) {
	var curaciinfo *ACIInfo
	versionRequested := false
	if _, ok := labels.Get("version"); ok {
		versionRequested = true
	}
	// Fetch every ACIInfo registered under this app name; they are then
	// filtered by label and ranked below.
	var aciinfos []*ACIInfo
	err := s.db.Do(func(tx *sql.Tx) error {
		var err error
		aciinfos, _, err = GetACIInfosWithAppName(tx, name.String())
		return err
	})
	if err != nil {
		return "", err
	}
nextKey:
	for _, aciinfo := range aciinfos {
		im, err := s.GetImageManifest(aciinfo.BlobKey)
		if err != nil {
			return "", fmt.Errorf("error getting image manifest: %v", err)
		}
		// The image manifest must have all the requested labels
		for _, l := range labels {
			ok := false
			for _, rl := range im.Labels {
				if l.Name == rl.Name && l.Value == rl.Value {
					ok = true
					break
				}
			}
			if !ok {
				continue nextKey
			}
		}
		if curaciinfo != nil {
			// If no version is requested prefer the acis marked as latest
			if !versionRequested {
				if !curaciinfo.Latest && aciinfo.Latest {
					curaciinfo = aciinfo
					continue nextKey
				}
				if curaciinfo.Latest && !aciinfo.Latest {
					continue nextKey
				}
			}
			// If multiple matching image manifests are found, choose the latest imported in the cas.
			if aciinfo.ImportTime.After(curaciinfo.ImportTime) {
				curaciinfo = aciinfo
			}
		} else {
			curaciinfo = aciinfo
		}
	}
	if curaciinfo != nil {
		return curaciinfo.BlobKey, nil
	}
	return "", fmt.Errorf("aci not found")
}
// Dump prints every key in each of the store's diskv backends together with
// a truncated (128-byte) view of its value, followed by a per-backend key
// count. When hex is true values are printed as hexadecimal.
func (s Store) Dump(hex bool) {
	for _, ds := range s.stores {
		count := 0
		for key := range ds.Keys(nil) {
			val, err := ds.Read(key)
			if err != nil {
				panic(fmt.Sprintf("key %s had no value", key))
			}
			if len(val) > 128 {
				val = val[:128]
			}
			out := string(val)
			if hex {
				out = fmt.Sprintf("%x", val)
			}
			fmt.Printf("%s/%s: %s\n", ds.BasePath, key, out)
			count++
		}
		fmt.Printf("%d total keys\n", count)
	}
}
// HashToKey takes a hash.Hash (which currently _MUST_ represent a full SHA512),
// calculates its sum, and returns a string which should be used as the key to
// store the data matching the hash.
func (s Store) HashToKey(h hash.Hash) string {
	return hashToKey(h)
}

// hashToKey folds the current state of h into a store key string.
func hashToKey(h hash.Hash) string {
	return keyToString(h.Sum(nil))
}

// keyToString takes a raw sha512 sum and returns the shortened, prefixed
// hexadecimal key form used throughout the store.
func keyToString(k []byte) string {
	if len(k) != lenHash {
		panic(fmt.Sprintf("bad hash passed to hashToKey: %x", k))
	}
	full := fmt.Sprintf("%s%x", hashPrefix, k)
	return full[:lenKey]
}

View File

@@ -1,453 +0,0 @@
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"archive/tar"
"bytes"
"database/sql"
"encoding/hex"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/appc/spec/schema/types"
"github.com/coreos/rkt/pkg/aci"
)
// tstprefix is the prefix used for all per-test temp directories.
const tstprefix = "store-test"

// TestBlobStore writes a couple of manually keyed objects into the blob
// diskv store and dumps the store contents.
func TestBlobStore(t *testing.T) {
	dir, err := ioutil.TempDir("", tstprefix)
	if err != nil {
		t.Fatalf("error creating tempdir: %v", err)
	}
	defer os.RemoveAll(dir)
	s, err := NewStore(dir)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	for _, valueStr := range []string{
		"I am a manually placed object",
	} {
		// NOTE(review): the Write error is ignored here; consider checking it.
		s.stores[blobType].Write(types.NewHashSHA512([]byte(valueStr)).String(), []byte(valueStr))
	}
	s.Dump(false)
}
// TestResolveKey checks full-key, short-key, unambiguous-prefix and
// ambiguous-prefix resolution, plus the error cases for a bad prefix and a
// too-short key.
func TestResolveKey(t *testing.T) {
	dir, err := ioutil.TempDir("", tstprefix)
	if err != nil {
		t.Fatalf("error creating tempdir: %v", err)
	}
	defer os.RemoveAll(dir)
	s, err := NewStore(dir)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	// Return a hash key buffer from a hex string
	str2key := func(s string) *bytes.Buffer {
		k, _ := hex.DecodeString(s)
		return bytes.NewBufferString(keyToString(k))
	}
	// Set up store (use key == data for simplicity)
	data := []*bytes.Buffer{
		str2key("12345678900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
		str2key("abcdefabc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
		str2key("abcabcabc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
		str2key("abc01234500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"),
		str2key("67147019a5b56f5e2ee01e989a8aa4787f56b8445960be2d8678391cf111009bc0780f31001fd181a2b61507547aee4caa44cda4b8bdb238d0e4ba830069ed2c"),
	}
	for _, d := range data {
		// Save aciinfo
		err := s.db.Do(func(tx *sql.Tx) error {
			aciinfo := &ACIInfo{
				BlobKey:    d.String(),
				AppName:    "example.com/app",
				ImportTime: time.Now(),
			}
			return WriteACIInfo(tx, aciinfo)
		})
		if err != nil {
			t.Fatalf("error writing to store: %v", err)
		}
	}
	// Full key already - should return short version of the full key
	fkl := "sha512-67147019a5b56f5e2ee01e989a8aa4787f56b8445960be2d8678391cf111009bc0780f31001fd181a2b61507547aee4caa44cda4b8bdb238d0e4ba830069ed2c"
	fks := "sha512-67147019a5b56f5e2ee01e989a8aa4787f56b8445960be2d8678391cf111009b"
	for _, k := range []string{fkl, fks} {
		key, err := s.ResolveKey(k)
		if key != fks {
			t.Errorf("expected ResolveKey to return unaltered short key, but got %q", key)
		}
		if err != nil {
			t.Errorf("expected err=nil, got %v", err)
		}
	}
	// Unambiguous prefix match
	k, err := s.ResolveKey("sha512-123")
	if k != "sha512-1234567890000000000000000000000000000000000000000000000000000000" {
		t.Errorf("expected %q, got %q", "sha512-1234567890000000000000000000000000000000000000000000000000000000", k)
	}
	if err != nil {
		t.Errorf("expected err=nil, got %v", err)
	}
	// Ambiguous prefix match
	k, err = s.ResolveKey("sha512-abc")
	if k != "" {
		t.Errorf("expected %q, got %q", "", k)
	}
	if err == nil {
		t.Errorf("expected non-nil error!")
	}
	// wrong key prefix
	k, err = s.ResolveKey("badprefix-1")
	expectedErr := "wrong key prefix"
	if err == nil {
		// Fatalf (not Errorf): the err.Error() call below would panic on a
		// nil error if the test were allowed to continue.
		t.Fatalf("expected non-nil error!")
	}
	if err.Error() != expectedErr {
		t.Errorf("expected err=%q, got %q", expectedErr, err)
	}
	// key too short
	k, err = s.ResolveKey("sha512-1")
	expectedErr = "key too short"
	if err == nil {
		// Fatalf for the same nil-dereference reason as above.
		t.Fatalf("expected non-nil error!")
	}
	if err.Error() != expectedErr {
		t.Errorf("expected err=%q, got %q", expectedErr, err)
	}
}
// TestGetImageManifest writes an ACI and checks that its image manifest can
// be read back by key, and that an unknown key yields an error.
func TestGetImageManifest(t *testing.T) {
	dir, err := ioutil.TempDir("", tstprefix)
	if err != nil {
		t.Fatalf("error creating tempdir: %v", err)
	}
	defer os.RemoveAll(dir)
	s, err := NewStore(dir)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	imj := `{
"acKind": "ImageManifest",
"acVersion": "0.5.4",
"name": "example.com/test01"
}`
	// NOTE(review): this variable shadows the imported aci package from here on.
	aci, err := aci.NewACI(dir, imj, nil)
	if err != nil {
		t.Fatalf("error creating test tar: %v", err)
	}
	// Rewind the ACI
	if _, err := aci.Seek(0, 0); err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	key, err := s.WriteACI(aci, false)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	wanted := "example.com/test01"
	im, err := s.GetImageManifest(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if im.Name.String() != wanted {
		t.Errorf("expected im with name: %s, got: %s", wanted, im.Name.String())
	}
	// test unexistent key
	im, err = s.GetImageManifest("sha512-aaaaaaaaaaaaaaaaa")
	if err == nil {
		t.Fatalf("expected non-nil error!")
	}
}
// TestGetAci stores a table of ACIs and checks that GetACI resolves each
// name+labels query to the expected blob key (preferring images marked as
// latest when no version label is requested).
func TestGetAci(t *testing.T) {
	type test struct {
		name     types.ACName
		labels   types.Labels
		expected int // the aci index to expect or -1 if not result expected,
	}
	type acidef struct {
		imj    string
		latest bool
	}
	dir, err := ioutil.TempDir("", tstprefix)
	if err != nil {
		t.Fatalf("error creating tempdir: %v", err)
	}
	defer os.RemoveAll(dir)
	s, err := NewStore(dir)
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	tests := []struct {
		acidefs []acidef
		tests   []test
	}{
		{
			[]acidef{
				{
					`{
"acKind": "ImageManifest",
"acVersion": "0.1.1",
"name": "example.com/test01"
}`,
					false,
				},
				{
					`{
"acKind": "ImageManifest",
"acVersion": "0.1.1",
"name": "example.com/test02",
"labels": [
{
"name": "version",
"value": "1.0.0"
}
]
}`,
					true,
				},
				{
					`{
"acKind": "ImageManifest",
"acVersion": "0.1.1",
"name": "example.com/test02",
"labels": [
{
"name": "version",
"value": "2.0.0"
}
]
}`,
					false,
				},
			},
			[]test{
				{
					"example.com/unexistentaci",
					types.Labels{},
					-1,
				},
				{
					"example.com/test01",
					types.Labels{},
					0,
				},
				{
					"example.com/test02",
					types.Labels{
						{
							Name:  "version",
							Value: "1.0.0",
						},
					},
					1,
				},
				{
					"example.com/test02",
					types.Labels{
						{
							Name:  "version",
							Value: "2.0.0",
						},
					},
					2,
				},
				// No version requested: the image marked latest (index 1) wins.
				{
					"example.com/test02",
					types.Labels{},
					1,
				},
			},
		},
	}
	for _, tt := range tests {
		keys := []string{}
		// Create ACIs
		for _, ad := range tt.acidefs {
			aci, err := aci.NewACI(dir, ad.imj, nil)
			if err != nil {
				t.Fatalf("error creating test tar: %v", err)
			}
			// Rewind the ACI
			if _, err := aci.Seek(0, 0); err != nil {
				t.Fatalf("unexpected error %v", err)
			}
			key, err := s.WriteACI(aci, ad.latest)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			keys = append(keys, key)
		}
		for _, test := range tt.tests {
			key, err := s.GetACI(test.name, test.labels)
			if test.expected == -1 {
				if err == nil {
					t.Fatalf("Expected no key for appName %s, got %s", test.name, key)
				}
			} else {
				if err != nil {
					t.Fatalf("unexpected error on GetACI for name %s, labels: %v: %v", test.name, test.labels, err)
				}
				if keys[test.expected] != key {
					t.Errorf("expected key: %s, got %s. GetACI with name: %s, labels: %v", key, keys[test.expected], test.name, test.labels)
				}
			}
		}
	}
}
// TestTreeStore renders an ACI into the treestore, verifies the stored tree
// hash, then mutates the rendered tree (chmod, new file) and checks that the
// consistency check fails, and that a rebuild restores consistency.
func TestTreeStore(t *testing.T) {
	dir, err := ioutil.TempDir("", tstprefix)
	if err != nil {
		t.Fatalf("error creating tempdir: %v", err)
	}
	defer os.RemoveAll(dir)
	s, err := NewStore(dir)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	imj := `
{
"acKind": "ImageManifest",
"acVersion": "0.5.4",
"name": "example.com/test01"
}
`
	entries := []*aci.ACIEntry{
		// An empty dir
		{
			Header: &tar.Header{
				Name:     "rootfs/a",
				Typeflag: tar.TypeDir,
			},
		},
		{
			Contents: "hello",
			Header: &tar.Header{
				Name: "hello.txt",
				Size: 5,
			},
		},
		{
			Header: &tar.Header{
				Name:     "rootfs/link.txt",
				Linkname: "rootfs/hello.txt",
				Typeflag: tar.TypeSymlink,
			},
		},
		// dangling symlink
		{
			Header: &tar.Header{
				Name:     "rootfs/link2.txt",
				Linkname: "rootfs/missingfile.txt",
				Typeflag: tar.TypeSymlink,
			},
		},
		{
			Header: &tar.Header{
				Name:     "rootfs/fifo",
				Typeflag: tar.TypeFifo,
			},
		},
	}
	aci, err := aci.NewACI(dir, imj, entries)
	if err != nil {
		t.Fatalf("error creating test tar: %v", err)
	}
	defer aci.Close()
	// Rewind the ACI
	if _, err := aci.Seek(0, 0); err != nil {
		t.Fatalf("unexpected error %v", err)
	}
	// Import the new ACI
	key, err := s.WriteACI(aci, false)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Ask the store to render the treestore
	err = s.RenderTreeStore(key, false)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Verify image Hash. Should be the same.
	err = s.CheckTreeStore(key)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Change a file permission
	rootfs := s.GetTreeStoreRootFS(key)
	err = os.Chmod(filepath.Join(rootfs, "a"), 0600)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Verify image Hash. Should be different.
	// Message fixed: the original logged "unexpected error: <nil>" here,
	// which hid what actually went wrong (the check unexpectedly passed).
	err = s.CheckTreeStore(key)
	if err == nil {
		t.Fatalf("expected non-nil error!")
	}
	// rebuild the tree
	err = s.RenderTreeStore(key, true)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Add a file
	rootfs = s.GetTreeStoreRootFS(key)
	err = ioutil.WriteFile(filepath.Join(rootfs, "newfile"), []byte("newfile"), 0644)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Verify image Hash. Should be different.
	err = s.CheckTreeStore(key)
	if err == nil {
		t.Fatalf("expected non-nil error!")
	}
}

View File

@@ -1,337 +0,0 @@
package store
import (
"archive/tar"
"crypto/sha512"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"syscall"
specaci "github.com/appc/spec/aci"
"github.com/appc/spec/pkg/tarheader"
"github.com/appc/spec/schema/types"
"github.com/coreos/rkt/pkg/aci"
"github.com/coreos/rkt/pkg/sys"
)
const (
	// hashfilename holds the tree hash computed at render time;
	// renderedfilename is a flag file marking a complete, synced render.
	hashfilename     = "hash"
	renderedfilename = "rendered"
)

// TreeStore represents a store of rendered ACIs
// The image's key becomes the name of the directory containing the rendered aci.
type TreeStore struct {
	path string
}
// Write renders the ACI with the provided key in the treestore
// Write, to avoid having a rendered ACI with old stale files, requires that
// the destination directory doesn't exist (usually Remove should be called
// before Write)
func (ts *TreeStore) Write(key string, s *Store) error {
	treepath := filepath.Join(ts.path, key)
	fi, _ := os.Stat(treepath)
	if fi != nil {
		return fmt.Errorf("treestore: path %s already exists", treepath)
	}
	imageID, err := types.NewHash(key)
	if err != nil {
		return fmt.Errorf("treestore: cannot convert key to imageID: %v", err)
	}
	err = aci.RenderACIWithImageID(*imageID, treepath, s)
	if err != nil {
		return fmt.Errorf("treestore: cannot render aci: %v", err)
	}
	hash, err := ts.Hash(key)
	if err != nil {
		return fmt.Errorf("treestore: cannot calculate tree hash: %v", err)
	}
	err = ioutil.WriteFile(filepath.Join(treepath, hashfilename), []byte(hash), 0644)
	if err != nil {
		return fmt.Errorf("treestore: cannot write hash file: %v", err)
	}
	// before creating the "rendered" flag file we need to ensure that all data is fsynced
	dfd, err := syscall.Open(treepath, syscall.O_RDONLY, 0)
	if err != nil {
		return err
	}
	defer syscall.Close(dfd)
	if err := sys.Syncfs(dfd); err != nil {
		return fmt.Errorf("treestore: failed to sync data: %v", err)
	}
	// Create rendered file
	f, err := os.Create(filepath.Join(treepath, renderedfilename))
	if err != nil {
		return fmt.Errorf("treestore: failed to write rendered file: %v", err)
	}
	f.Close()
	// fsync the directory so the new flag file's entry is durable too.
	if err := syscall.Fsync(dfd); err != nil {
		return fmt.Errorf("treestore: failed to sync tree store directory: %v", err)
	}
	return nil
}
// Remove cleans the treestore directory for the specified key. It is a
// no-op when the tree path doesn't exist.
func (ts *TreeStore) Remove(key string) error {
	treepath := filepath.Join(ts.path, key)
	// If tree path doesn't exist we're done
	_, err := os.Stat(treepath)
	if err != nil && os.IsNotExist(err) {
		return nil
	}
	if err != nil {
		return fmt.Errorf("treestore: failed to open tree store directory: %v", err)
	}
	renderedFilePath := filepath.Join(treepath, renderedfilename)
	// The "rendered" flag file should be the firstly removed file. So if
	// the removal ends with some error leaving some stale files IsRendered()
	// will return false.
	_, err = os.Stat(renderedFilePath)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	if !os.IsNotExist(err) {
		// Check the Remove result: the original silently dropped this
		// error, so a still-present flag file could leave a partially
		// deleted tree looking fully rendered.
		if err := os.Remove(renderedFilePath); err != nil {
			return fmt.Errorf("treestore: failed to remove rendered file: %v", err)
		}
		// Ensure that the treepath directory is fsynced after removing the
		// "rendered" flag file
		f, err := os.Open(treepath)
		if err != nil {
			return fmt.Errorf("treestore: failed to open tree store directory: %v", err)
		}
		defer f.Close()
		if err := f.Sync(); err != nil {
			return fmt.Errorf("treestore: failed to sync tree store directory: %v", err)
		}
	}
	return os.RemoveAll(treepath)
}
// IsRendered reports whether the tree store for key is fully rendered.
func (ts *TreeStore) IsRendered(key string) (bool, error) {
	// The "rendered" flag file is created only after a complete render,
	// so its presence implies the store is fully rendered.
	flag := filepath.Join(ts.path, key, renderedfilename)
	switch _, err := os.Stat(flag); {
	case os.IsNotExist(err):
		return false, nil
	case err != nil:
		return false, err
	default:
		return true, nil
	}
}
// GetPath returns the absolute path of the treestore for the specified key.
// It doesn't ensure that the path exists and is fully rendered. This should
// be done calling IsRendered()
func (ts *TreeStore) GetPath(key string) string {
	return filepath.Join(ts.path, key)
}
// GetRootFS returns the absolute path of the rootfs for the specified key.
// It doesn't ensure that the rootfs exists and is fully rendered. This should
// be done calling IsRendered()
func (ts *TreeStore) GetRootFS(key string) string {
	return filepath.Join(ts.GetPath(key), "rootfs")
}
// Hash calculates a hash of the rendered ACI. It uses the same functions
// used to create a tar, but instead of writing a full archive it only
// folds the sha512 sum of the file infos and contents.
func (ts *TreeStore) Hash(key string) (string, error) {
	treepath := filepath.Join(ts.path, key)
	h := sha512.New()
	if err := filepath.Walk(treepath, buildWalker(treepath, NewHashWriter(h))); err != nil {
		return "", fmt.Errorf("treestore: error walking rootfs: %v", err)
	}
	return hashToKey(h), nil
}
// Check recalculates the rendered ACI's hash and verifies that it matches
// the value saved at render time.
func (ts *TreeStore) Check(key string) error {
	treepath := filepath.Join(ts.path, key)
	saved, err := ioutil.ReadFile(filepath.Join(treepath, hashfilename))
	if err != nil {
		return fmt.Errorf("treestore: cannot read hash file: %v", err)
	}
	curhash, err := ts.Hash(key)
	if err != nil {
		return fmt.Errorf("treestore: cannot calculate tree hash: %v", err)
	}
	if curhash != string(saved) {
		return fmt.Errorf("treestore: wrong tree hash: %s, expected: %s", curhash, saved)
	}
	return nil
}
type xattr struct {
Name string
Value string
}
// Like tar Header but, to keep json output reproducible:
// * Xattrs as a slice
// * Skip Uname and Gname
// TODO. Should ModTime/AccessTime/ChangeTime be saved? For validation its
// probably enough to hash the file contents and the other infos and avoid
// problems due to them changing.
// TODO(sgotti) Is it possible that json output will change between go
// versions? Use another or our own Marshaller?
type fileInfo struct {
Name string // name of header file entry
Mode int64 // permission and mode bits
Uid int // user id of owner
Gid int // group id of owner
Size int64 // length in bytes
Typeflag byte // type of header entry
Linkname string // target name of link
Devmajor int64 // major number of character or block device
Devminor int64 // minor number of character or block device
Xattrs []xattr
}
func FileInfoFromHeader(hdr *tar.Header) *fileInfo {
fi := &fileInfo{
Name: hdr.Name,
Mode: hdr.Mode,
Uid: hdr.Uid,
Gid: hdr.Gid,
Size: hdr.Size,
Typeflag: hdr.Typeflag,
Linkname: hdr.Linkname,
Devmajor: hdr.Devmajor,
Devminor: hdr.Devminor,
}
keys := make([]string, len(hdr.Xattrs))
for k := range hdr.Xattrs {
keys = append(keys, k)
}
sort.Strings(keys)
xattrs := make([]xattr, 0)
for _, k := range keys {
xattrs = append(xattrs, xattr{Name: k, Value: hdr.Xattrs[k]})
}
fi.Xattrs = xattrs
return fi
}
// buildWalker returns a filepath.WalkFunc that feeds every entry under root
// into aw, skipping the manifest, hash and rendered flag files.
// TODO(sgotti) this func is copied from appcs/spec/aci/build.go but also
// removes the hashfile and the renderedfile. Find a way to reuse it.
func buildWalker(root string, aw specaci.ArchiveWriter) filepath.WalkFunc {
	// cache of inode -> filepath, used to leverage hard links in the archive
	inos := map[uint64]string{}
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		relpath, err := filepath.Rel(root, path)
		if err != nil {
			return err
		}
		if relpath == "." {
			return nil
		}
		if relpath == specaci.ManifestFile || relpath == hashfilename || relpath == renderedfilename {
			// ignore; this will be written by the archive writer
			// TODO(jonboulle): does this make sense? maybe just remove from archivewriter?
			return nil
		}
		link := ""
		var r io.Reader
		switch info.Mode() & os.ModeType {
		case os.ModeSocket:
			return nil
		case os.ModeNamedPipe:
		case os.ModeCharDevice:
		case os.ModeDevice:
		case os.ModeDir:
		case os.ModeSymlink:
			target, err := os.Readlink(path)
			if err != nil {
				return err
			}
			link = target
		default:
			file, err := os.Open(path)
			if err != nil {
				return err
			}
			defer file.Close()
			r = file
		}
		hdr, err := tar.FileInfoHeader(info, link)
		if err != nil {
			// Return the error instead of panicking (as the original did):
			// a WalkFunc error aborts the walk and is reported to the
			// filepath.Walk caller, which is the appropriate behavior for
			// library code.
			return err
		}
		// Because os.FileInfo's Name method returns only the base
		// name of the file it describes, it may be necessary to
		// modify the Name field of the returned header to provide the
		// full path name of the file.
		hdr.Name = relpath
		tarheader.Populate(hdr, info, inos)
		// If the file is a hard link to a file we've already seen, we
		// don't need the contents
		if hdr.Typeflag == tar.TypeLink {
			hdr.Size = 0
			r = nil
		}
		if err := aw.AddFile(hdr, r); err != nil {
			return err
		}
		return nil
	}
}
// imageHashWriter is an ArchiveWriter that, instead of building an archive,
// streams a reproducible byte representation of each entry (the JSON-encoded
// header followed by the file data) into the wrapped io.Writer — presumably a
// hash.Hash, given the constructor name.
type imageHashWriter struct {
	io.Writer
}
// NewHashWriter wraps w in an ArchiveWriter whose AddFile feeds entry
// metadata and contents to w instead of producing a tar archive.
func NewHashWriter(w io.Writer) specaci.ArchiveWriter {
	hw := imageHashWriter{Writer: w}
	return &hw
}
// AddFile writes the JSON encoding of the entry's header metadata, followed
// by the entry's contents when present, into the underlying writer.
func (aw *imageHashWriter) AddFile(hdr *tar.Header, r io.Reader) error {
	hdrj, err := json.Marshal(FileInfoFromHeader(hdr))
	if err != nil {
		return err
	}
	if _, err = aw.Writer.Write(hdrj); err != nil {
		return err
	}
	if r == nil {
		// entries such as hard links carry no data
		return nil
	}
	_, err = io.Copy(aw.Writer, r)
	return err
}
// Close implements ArchiveWriter; there is no buffered state to flush or
// release, so it always succeeds.
func (aw *imageHashWriter) Close() error {
	return nil
}

View File

@@ -1,138 +0,0 @@
package store
import (
"archive/tar"
"io/ioutil"
"os"
"testing"
"github.com/coreos/rkt/pkg/aci"
)
// treeStoreWriteACI builds a small test ACI (directory, file, valid and
// dangling symlinks, fifo) in dir, imports it into s, and returns the
// resulting image key.
func treeStoreWriteACI(dir string, s *Store) (string, error) {
	imj := `
{
"acKind": "ImageManifest",
"acVersion": "0.5.4",
"name": "example.com/test01"
}
`
	entries := []*aci.ACIEntry{
		// An empty dir
		{
			Header: &tar.Header{
				Name:     "rootfs/a",
				Typeflag: tar.TypeDir,
			},
		},
		{
			Contents: "hello",
			Header: &tar.Header{
				Name: "hello.txt",
				Size: 5,
			},
		},
		{
			Header: &tar.Header{
				Name:     "rootfs/link.txt",
				Linkname: "rootfs/hello.txt",
				Typeflag: tar.TypeSymlink,
			},
		},
		// dangling symlink
		{
			Header: &tar.Header{
				Name:     "rootfs/link2.txt",
				Linkname: "rootfs/missingfile.txt",
				Typeflag: tar.TypeSymlink,
			},
		},
		{
			Header: &tar.Header{
				Name:     "rootfs/fifo",
				Typeflag: tar.TypeFifo,
			},
		},
	}
	// Renamed from `aci` to avoid shadowing the imported aci package.
	testACI, err := aci.NewACI(dir, imj, entries)
	if err != nil {
		return "", err
	}
	defer testACI.Close()
	// Rewind the ACI
	if _, err := testACI.Seek(0, 0); err != nil {
		return "", err
	}
	// Import the new ACI
	key, err := s.WriteACI(testACI, false)
	if err != nil {
		return "", err
	}
	return key, nil
}
// TestTreeStoreWrite renders an imported ACI into the tree store and checks
// that the rendered tree's hash verifies.
func TestTreeStoreWrite(t *testing.T) {
	dir, err := ioutil.TempDir("", tstprefix)
	if err != nil {
		t.Fatalf("error creating tempdir: %v", err)
	}
	defer os.RemoveAll(dir)

	s, err := NewStore(dir)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	key, err := treeStoreWriteACI(dir, s)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Ask the store to render the treestore, then verify the image hash is
	// unchanged.
	if err := s.treestore.Write(key, s); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := s.treestore.Check(key); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
// TestTreeStoreRemove checks that removing a tree succeeds both when the
// rendered directory does not exist yet and after the tree has been rendered.
func TestTreeStoreRemove(t *testing.T) {
	dir, err := ioutil.TempDir("", tstprefix)
	if err != nil {
		t.Fatalf("error creating tempdir: %v", err)
	}
	defer os.RemoveAll(dir)

	s, err := NewStore(dir)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	key, err := treeStoreWriteACI(dir, s)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Removing a tree that was never rendered must not fail.
	if err := s.treestore.Remove(key); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// Render the tree, then remove it.
	if err := s.treestore.Write(key, s); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if err := s.treestore.Remove(key); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}

View File

@@ -1,74 +0,0 @@
// Copyright 2014 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package store
import (
"compress/bzip2"
"compress/gzip"
"errors"
"fmt"
"io"
"net/url"
"strings"
"github.com/appc/spec/aci"
)
// blockTransform creates a path slice from the given string to use as a
// directory prefix. The string must be in hash format:
//
//	"sha256-abcdefgh"... -> []{"sha256", "ab"}
//
// Right now it just copies the default of git which is a two byte prefix. We
// will likely want to add re-sharding later.
func blockTransform(s string) []string {
	// TODO(philips): use spec/types.Hash after export typ field
	sep := strings.Index(s, "-")
	if sep < 0 {
		panic(fmt.Errorf("blockTransform should never receive non-hash, got %v", s))
	}
	return []string{s[:sep], s[sep+1 : sep+3]}
}
// parseAlways parses s as a URL, discarding any parse error; on failure it
// returns nil, exactly as url.Parse would have.
func parseAlways(s string) *url.URL {
	if u, err := url.Parse(s); err == nil {
		return u
	}
	return nil
}
// decompress wraps rs in a reader that undoes the compression indicated by
// typ; plain tar input is returned unchanged.
func decompress(rs io.Reader, typ aci.FileType) (io.Reader, error) {
	switch typ {
	case aci.TypeGzip:
		gz, err := gzip.NewReader(rs)
		if err != nil {
			return nil, err
		}
		return gz, nil
	case aci.TypeBzip2:
		return bzip2.NewReader(rs), nil
	case aci.TypeXz:
		return aci.XzReader(rs), nil
	case aci.TypeTar:
		return rs, nil
	case aci.TypeUnknown:
		return nil, errors.New("error: unknown image filetype")
	default:
		return nil, errors.New("no type returned from DetectFileType?")
	}
}

View File

@@ -1,59 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package store
import (
"runtime"
"sort"
"strings"
"testing"
)
func interestingGoroutines() (gs []string) {
buf := make([]byte, 2<<20)
buf = buf[:runtime.Stack(buf, true)]
for _, g := range strings.Split(string(buf), "\n\n") {
sl := strings.SplitN(g, "\n", 2)
if len(sl) != 2 {
continue
}
stack := strings.TrimSpace(sl[1])
if stack == "" ||
strings.Contains(stack, "testing.RunTests") ||
strings.Contains(stack, "testing.Main(") ||
strings.Contains(stack, "runtime.goexit") ||
strings.Contains(stack, "created by runtime.gc") ||
strings.Contains(stack, "runtime.MHeap_Scavenger") {
continue
}
gs = append(gs, stack)
}
sort.Strings(gs)
return
}
// Verify the other tests didn't leave any goroutines running.
// This is in a file named z_last_test.go so it sorts at the end.
func TestGoroutinesRunning(t *testing.T) {
	if testing.Short() {
		t.Skip("not counting goroutines for leakage in -short mode")
	}
	// Group identical stacks and count them for a readable report.
	stackCount := make(map[string]int)
	total := 0
	for _, g := range interestingGoroutines() {
		stackCount[g]++
		total++
	}
	t.Logf("num goroutines = %d", total)
	if total == 0 {
		return
	}
	t.Error("Too many goroutines.")
	for stack, count := range stackCount {
		t.Logf("%d instances of:\n%s", count, stack)
	}
}

View File

@@ -1,11 +0,0 @@
# This file lists authors for copyright purposes. This file is distinct from
# the CONTRIBUTORS files. See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
#
# The email address is not required for organizations.
#
# Please keep the list sorted.
Jan Mercl <0xjnml@gmail.com>

View File

@@ -1,9 +0,0 @@
# This file lists people who contributed code to this repository. The AUTHORS
# file lists the copyright holders; this file lists people.
#
# Names should be added to this file like so:
# Name <email address>
#
# Please keep the list sorted.
Jan Mercl <0xjnml@gmail.com>

View File

@@ -1,27 +0,0 @@
Copyright (c) 2014 The bufs Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,31 +0,0 @@
# Copyright 2014 The bufs Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
all: clean
go fmt
go test -i
go test
go build
go vet
golint .
go install
make todo
todo:
@grep -n ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alnum:]] *.go || true
@grep -n TODO *.go || true
@grep -n FIXME *.go || true
@grep -n BUG *.go || true
clean:
rm -f bufs.test mem.out *~
demo:
go test -bench . -benchmem
go test -c
./bufs.test -test.v -test.run Foo -test.memprofile mem.out \
-test.memprofilerate 1
go tool pprof bufs.test mem.out --alloc_space --nodefraction 0.0001 \
--edgefraction 0 -web
@echo "Note: Foo vs FooBufs allocated memory is in hundreds of MBs vs 8 kB."

View File

@@ -1,8 +0,0 @@
bufs
====
Package bufs implements a simple buffer cache.
installation: go get github.com/cznic/bufs
documentation: http://godoc.org/github.com/cznic/bufs

View File

@@ -1,391 +0,0 @@
// Copyright 2014 The bufs Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package bufs implements a simple buffer cache.
//
// The intended use scheme is like:
//
// type Foo struct {
// buffers bufs.Buffers
// ...
// }
//
// // Bar can call Qux, but not the other way around (in this example).
// const maxFooDepth = 2
//
// func NewFoo() *Foo {
// return &Foo{buffers: bufs.New(maxFooDepth), ...}
// }
//
// func (f *Foo) Bar(n int) {
// buf := f.buffers.Alloc(n) // needed locally for computation and/or I/O
// defer f.buffers.Free()
// ...
// f.Qux(whatever)
// }
//
// func (f *Foo) Qux(n int) {
// buf := f.buffers.Alloc(n) // needed locally for computation and/or I/O
// defer f.buffers.Free()
// ...
// }
//
// The whole idea behind 'bufs' is that when calling e.g. Foo.Bar N times, then
// normally, without using 'bufs', there will be 2*N (in this example) []byte
// buffers allocated. While using 'bufs', only 2 buffers (in this example)
// will ever be created. For large N it can be a substantial difference.
//
// It's not a good idea to use Buffers to cache too big buffers. The cost of
// having a cached buffer is that the buffer is naturally not eligible for
// garbage collection. Of course, that holds only while the Foo instance is
// reachable, in the above example.
//
// The buffer count limit is intentionally "hard" (read panicking), although
// configurable in New(). The rationale is to prevent recursive calls, using
// Alloc, to cause excessive, "static" memory consumption. Tune the limit
// carefully or do not use Buffers from within [mutually] recursive functions
// where the nesting depth is not realistically bounded to some rather small
// number.
//
// Buffers cannot guarantee improvements to you program performance. There may
// be a gain in case where they fit well. Firm grasp on what your code is
// actually doing, when and in what order is essential to proper use of
// Buffers. It's _highly_ recommended to first do profiling and memory
// profiling before even thinking about using 'bufs'. The real world example,
// and cause for this package, was a first correct, yet no optimizations done
// version of a program; producing few MB of useful data while allocating 20+GB
// of memory. Of course the garbage collector properly kicked in, yet the
// memory abuse caused ~80+% of run time to be spent memory management. The
// program _was_ expected to be slow in its still development phase, but the
// bottleneck was guessed to be in I/O. Actually the hard disk was waiting for
// the billions bytes being allocated and zeroed. Garbage collect on low
// memory, rinse and repeat.
//
// In the provided tests, TestFoo and TestFooBufs do the same simulated work,
// except the later uses Buffers while the former does not. Suggested test runs
// which show the differences:
//
// $ go test -bench . -benchmem
//
// or
//
// $ go test -c
// $ ./bufs.test -test.v -test.run Foo -test.memprofile mem.out -test.memprofilerate 1
// $ go tool pprof bufs.test mem.out --alloc_space --nodefraction 0.0001 --edgefraction 0 -web
// $ # Note: Foo vs FooBufs allocated memory is in hundreds of MBs vs 8 kB.
//
// or
//
// $ make demo # same as all of the above
//
//
// NOTE: Alloc/Free calls must be properly nested in the same way as in for
// example BeginTransaction/EndTransaction pairs. If your code can panic then
// the pairing should be enforced by deferred calls.
//
// NOTE: Buffers objects do not allocate any space until requested by Alloc,
// the mechanism works on demand only.
//
// FAQ: Why the 'bufs' package name?
//
// Package name 'bufs' was intentionally chosen instead of the perhaps more
// conventional 'buf'. There are already too many 'buf' named things in the
// code out there and that'll be a source of a lot of trouble. It's a bit
// similar situation as in the case of package "strings" (not "string").
package bufs
import (
"errors"
"sort"
"sync"
)
// Buffers type represents a buffer ([]byte) cache. The slice length is the
// number of currently free slots; Alloc shrinks it by one and Free grows it
// back, reusing the backing array's hidden tail as the allocation stack.
//
// NOTE: Do not modify Buffers directly, use only its methods. Do not create
// additional values (copies) of Buffers, that'll break its functionality. Use
// a pointer instead to refer to a single instance from different
// places/scopes.
type Buffers [][]byte
// New returns a newly created instance of Buffers with a maximum capacity of n
// buffers.
//
// NOTE: 'bufs.New(n)' is the same as 'make(bufs.Buffers, n)'.
func New(n int) Buffers {
	b := make(Buffers, n)
	return b
}
// Alloc will return a buffer such that len(r) == n. It will firstly try to
// find an existing and unused buffer of big enough size. Only when there is no
// such, then one of the buffer slots is reallocated to a bigger size.
//
// It's okay to use append with buffers returned by Alloc. But it can cause
// allocation in that case and will again be producing load for the garbage
// collector. The best use of Alloc is for I/O buffers where the needed size of
// the buffer is figured out at some point of the code path in a 'final size'
// sense. Another real world example are compression/decompression buffers.
//
// NOTE: The buffer returned by Alloc _is not_ zeroed. That's okay for e.g.
// passing a buffer to io.Reader. If you need a zeroed buffer use Calloc.
//
// NOTE: Buffers returned from Alloc _must not_ be exposed/returned to your
// clients. Those buffers are intended to be used strictly internally, within
// the methods of some "object".
//
// NOTE: Alloc will panic if there are no buffers (buffer slots) left.
func (p *Buffers) Alloc(n int) (r []byte) {
	b := *p
	if len(b) == 0 {
		panic(errors.New("Buffers.Alloc: out of buffers"))
	}
	// One pass tracks both the overall biggest slot (reallocation victim)
	// and the smallest slot whose capacity already satisfies n (best fit).
	biggest, best, biggestI, bestI := -1, -1, -1, -1
	for i, v := range b {
		//ln := len(v)
		// The above was correct, buts it's just confusing. It worked
		// because not the buffers, but slices of them are returned in
		// the 'if best >= n' code path.
		ln := cap(v)
		if ln >= biggest {
			biggest, biggestI = ln, i
		}
		if ln >= n && (bestI < 0 || best > ln) {
			best, bestI = ln, i
			if ln == n {
				// Exact fit; no better candidate can exist.
				break
			}
		}
	}
	last := len(b) - 1
	if best >= n {
		// Best fit found: hand out its first n bytes and pop the slot
		// by swapping it to the end and shortening the slice.
		r = b[bestI]
		b[last], b[bestI] = b[bestI], b[last]
		*p = b[:last]
		return r[:n]
	}
	// No slot is big enough: replace the biggest one with a fresh,
	// over-committed allocation, then pop it the same way.
	r = make([]byte, n, overCommit(n))
	b[biggestI] = r
	b[last], b[biggestI] = b[biggestI], b[last]
	*p = b[:last]
	return
}
// Calloc will acquire a buffer using Alloc and then clears it to zeros. The
// zeroing goes up to n, not cap(r).
func (p *Buffers) Calloc(n int) (r []byte) {
	r = p.Alloc(n)
	for i := 0; i < len(r); i++ {
		r[i] = 0
	}
	return r
}
// Free makes the lastly allocated by Alloc buffer free (available) again for
// Alloc.
//
// NOTE: Improper Free invocations, like in the sequence {New, Alloc, Free,
// Free}, will panic.
func (p *Buffers) Free() {
	// Re-grow the slice by one slot; exceeding the original capacity
	// (more Frees than Allocs) panics by design.
	*p = (*p)[:len(*p)+1]
}
// Stats reports memory consumed by Buffers, without accounting for some
// (smallish) additional overhead.
func (p *Buffers) Stats() (bytes int) {
	// Inspect the full capacity, including slots currently handed out.
	for _, v := range (*p)[:cap(*p)] {
		bytes += cap(v)
	}
	return
}
// Cache caches buffers ([]byte). A zero value of Cache is ready for use.
// The slice is kept sorted by buffer length (Put inserts in order, get uses
// binary search), which is the invariant the whole cache relies on.
//
// NOTE: Do not modify a Cache directly, use only its methods. Do not create
// additional values (copies) of a Cache, that'll break its functionality. Use
// a pointer instead to refer to a single instance from different
// places/scopes.
type Cache [][]byte
// Get returns a buffer ([]byte) of length n. If no such buffer is cached then
// a biggest cached buffer is resized to have length n and returned. If there
// are no cached items at all, Get returns a newly allocated buffer.
//
// The cache policy: an empty cache yields a fresh buffer; otherwise the
// smallest sufficient cached buffer is removed and returned; if none is big
// enough the biggest one is enlarged, removed and returned. A non-empty
// cache therefore always shrinks by exactly one item, which prevents
// uncontrolled cache growth and auto-tunes cached buffer sizes.
//
// NOTE: The buffer returned by Get _is not guaranteed_ to be zeroed. That's
// okay for e.g. passing a buffer to io.Reader. If you need a zeroed buffer
// use Cget.
func (c *Cache) Get(n int) []byte {
	buf, _ := c.get(n)
	return buf
}
// get returns a buffer of length n, plus whether the caller may rely on it
// being all zeros (true only for the fresh allocation in the empty-cache
// path; the enlarge path below also allocates but reports false, so Cget
// re-zeroes it — harmless, just redundant work).
func (c *Cache) get(n int) (r []byte, isZeroed bool) {
	s := *c
	lens := len(s)
	if lens == 0 {
		// Empty cache: hand out a fresh (zeroed by make) buffer.
		r, isZeroed = make([]byte, n, overCommit(n)), true
		return
	}
	// s is sorted by length (Put maintains this), so binary-search for the
	// smallest cached buffer with len >= n.
	i := sort.Search(lens, func(x int) bool { return len(s[x]) >= n })
	if i == lens {
		// No cached buffer is big enough: replace the biggest (last)
		// one with a fresh allocation of the requested size.
		i--
		s[i] = make([]byte, n, overCommit(n))
	}
	r = s[i][:n]
	// Remove element i while keeping the slice sorted; nil the freed tail
	// slot so the buffer can be garbage collected.
	copy(s[i:], s[i+1:])
	s[lens-1] = nil
	s = s[:lens-1]
	*c = s
	return r, false
}
// Cget will acquire a buffer using Get and then clears it to zeros. The
// zeroing goes up to n, not cap(r).
func (c *Cache) Cget(n int) (r []byte) {
	r, zeroed := c.get(n)
	if zeroed {
		// Freshly allocated buffers are already all zeros.
		return r
	}
	for i := range r {
		r[i] = 0
	}
	return r
}
// Put caches b for possible later reuse (via Get). No other references to b's
// backing array may exist. Otherwise a big mess is sooner or later inevitable.
func (c *Cache) Put(b []byte) {
	// Cache the full backing array, not just the current view.
	b = b[:cap(b)]
	if len(b) == 0 {
		return
	}
	s := *c
	// Insert at the position that keeps s sorted by buffer length.
	i := sort.Search(len(s), func(x int) bool { return len(s[x]) >= len(b) })
	s = append(s, nil)
	copy(s[i+1:], s[i:])
	s[i] = b
	*c = s
}
// Stats reports memory consumed by a Cache, without accounting for some
// (smallish) additional overhead. 'n' is the number of cached buffers, bytes
// is their combined capacity.
func (c Cache) Stats() (n, bytes int) {
	for _, v := range c {
		bytes += cap(v)
	}
	return len(c), bytes
}
// CCache is a Cache which is safe for concurrent use by multiple goroutines.
// All methods simply take mu around the corresponding Cache operation.
type CCache struct {
	c  Cache
	mu sync.Mutex // guards c
}
// Get returns a buffer ([]byte) of length n. If no such buffer is cached then
// a biggest cached buffer is resized to have length n and returned. If there
// are no cached items at all, Get returns a newly allocated buffer. It is the
// concurrency-safe counterpart of Cache.Get; see that method for the full
// cache policy.
//
// NOTE: The buffer returned by Get _is not guaranteed_ to be zeroed. That's
// okay for e.g. passing a buffer to io.Reader. If you need a zeroed buffer
// use Cget.
func (c *CCache) Get(n int) []byte {
	c.mu.Lock()
	defer c.mu.Unlock()
	r, _ := c.c.get(n)
	return r
}
// Cget will acquire a buffer using Get and then clears it to zeros. The
// zeroing goes up to n, not cap(r).
func (c *CCache) Cget(n int) []byte {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.c.Cget(n)
}
// Put caches b for possible later reuse (via Get). No other references to b's
// backing array may exist. Otherwise a big mess is sooner or later inevitable.
func (c *CCache) Put(b []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.c.Put(b)
}
// Stats reports memory consumed by a Cache, without accounting for some
// (smallish) additional overhead. 'n' is the number of cached buffers, bytes
// is their combined capacity.
func (c *CCache) Stats() (n, bytes int) {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.c.Stats()
}
// GCache is a ready to use global instance of a CCache.
var GCache CCache
// overCommit returns the capacity to allocate for a buffer of length n,
// over-allocating smaller requests to reduce reallocation churn: at least 8
// bytes, 2x below 1e5, 1.5x below 1e6, and the exact size above that.
func overCommit(n int) int {
	if n < 8 {
		return 8
	}
	if n < 1e5 {
		return 2 * n
	}
	if n < 1e6 {
		return 3 * n / 2
	}
	return n
}

View File

@@ -1,174 +0,0 @@
// Copyright 2014 The bufs Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bufs
import (
"fmt"
"path"
"runtime"
"testing"
)
// dbg prints a printf-style debug message prefixed with the caller's file
// name and line number.
var dbg = func(s string, va ...interface{}) {
	_, file, line, _ := runtime.Caller(1)
	fmt.Printf("%s:%d: ", path.Base(file), line)
	fmt.Printf(s, va...)
	fmt.Println()
}
// Test0 verifies that allocating from a zero-capacity Buffers panics.
func Test0(t *testing.T) {
	b := New(0)
	defer func() {
		_ = recover() // the panic is the expected outcome
	}()
	b.Alloc(1)
	t.Fatal("unexpected success")
}
// Test1 verifies that a one-slot Buffers allows a single Alloc and panics on
// the second.
func Test1(t *testing.T) {
	b := New(1)
	wantPanic := false
	defer func() {
		if e := recover(); e != nil && !wantPanic {
			t.Fatal(fmt.Errorf("%v", e))
		}
	}()
	b.Alloc(1)
	wantPanic = true
	b.Alloc(1)
	t.Fatal("unexpected success")
}
// Test2 verifies that Free returns a slot to the pool (Alloc works again
// after Free) and that exceeding the capacity afterwards still panics.
func Test2(t *testing.T) {
	b := New(1)
	expected := false
	defer func() {
		if e := recover(); e != nil && !expected {
			t.Fatal(fmt.Errorf("%v", e))
		}
	}()
	b.Alloc(1)
	b.Free()
	b.Alloc(1)
	expected = true
	b.Alloc(1)
	t.Fatal("unexpected success")
}
// Test3 verifies that calling Free when every slot is already free panics
// (unbalanced Alloc/Free pairing).
func Test3(t *testing.T) {
	b := New(1)
	expected := false
	defer func() {
		if e := recover(); e != nil && !expected {
			t.Fatal(fmt.Errorf("%v", e))
		}
	}()
	b.Alloc(1)
	b.Free()
	expected = true
	b.Free()
	t.Fatal("unexpected success")
}
const (
N = 1e5
bufSize = 1 << 12
)
// Foo is the no-pooling benchmark fixture: every Bar/Qux call allocates a
// fresh scratch buffer (contrast with FooBufs below).
type Foo struct {
	result []byte // one summary byte appended per Bar/Qux call
}
// NewFoo returns an empty Foo fixture.
func NewFoo() *Foo {
	return new(Foo)
}
// Bar allocates an n-byte scratch buffer, folds its (zero) contents into
// f.result, and calls Qux so two buffers are live per call chain.
func (f *Foo) Bar(n int) {
	buf := make([]byte, n)
	total := 0
	for i := range buf {
		total += int(buf[i])
	}
	f.result = append(f.result, byte(total))
	f.Qux(n)
}
// Qux allocates a second n-byte scratch buffer and folds it into f.result.
func (f *Foo) Qux(n int) {
	buf := make([]byte, n)
	total := 0
	for i := range buf {
		total += int(buf[i])
	}
	f.result = append(f.result, byte(total))
}
// FooBufs mirrors Foo but draws its scratch buffers from a shared Buffers
// pool instead of allocating fresh ones on every call.
type FooBufs struct {
	buffers Buffers // pool of at most maxFooDepth scratch buffers
	result  []byte  // one summary byte appended per Bar/Qux call
}
const maxFooDepth = 2
// NewFooBufs returns a FooBufs whose pool holds maxFooDepth buffer slots —
// enough for the Bar -> Qux call chain.
func NewFooBufs() *FooBufs {
	return &FooBufs{buffers: New(maxFooDepth)}
}
// Bar takes an n-byte scratch buffer from the pool (returned on exit), folds
// it into f.result, and calls Qux — the pooled counterpart of Foo.Bar.
func (f *FooBufs) Bar(n int) {
	buf := f.buffers.Alloc(n)
	defer f.buffers.Free()
	sum := 0
	for _, v := range buf {
		sum += int(v)
	}
	f.result = append(f.result, byte(sum))
	f.Qux(n)
}
// Qux takes a second pooled scratch buffer and folds it into f.result — the
// pooled counterpart of Foo.Qux.
func (f *FooBufs) Qux(n int) {
	buf := f.buffers.Alloc(n)
	defer f.buffers.Free()
	sum := 0
	for _, v := range buf {
		sum += int(v)
	}
	f.result = append(f.result, byte(sum))
}
// TestFoo drives the non-pooled fixture N times; its memory profile is the
// baseline that TestFooBufs is compared against.
func TestFoo(t *testing.T) {
	foo := NewFoo()
	for i := 0; i < N; i++ {
		foo.Bar(bufSize)
	}
}
// TestFooBufs drives the pooled fixture N times and logs how much memory the
// pool ended up holding.
func TestFooBufs(t *testing.T) {
	foo := NewFooBufs()
	for i := 0; i < N; i++ {
		foo.Bar(bufSize)
	}
	t.Log("buffers.Stats()", foo.buffers.Stats())
}
// BenchmarkFoo measures the non-pooled fixture; 2*bufSize bytes are touched
// per iteration (Bar's buffer plus Qux's).
func BenchmarkFoo(b *testing.B) {
	b.SetBytes(2 * bufSize)
	foo := NewFoo()
	for i := 0; i < b.N; i++ {
		foo.Bar(bufSize)
	}
}
// BenchmarkFooBufs measures the pooled fixture for comparison with
// BenchmarkFoo.
func BenchmarkFooBufs(b *testing.B) {
	b.SetBytes(2 * bufSize)
	foo := NewFooBufs()
	for i := 0; i < b.N; i++ {
		foo.Bar(bufSize)
	}
}

View File

@@ -1,324 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Two Phase Commit & Structural ACID
package lldb
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"os"
"github.com/cznic/fileutil"
"github.com/cznic/mathutil"
)
var _ Filer = &ACIDFiler0{} // Ensure ACIDFiler0 is a Filer
// acidWrite records one pending WriteAt (buffer and offset) so it can be
// replayed against the real DB during phase 2 of the commit.
type acidWrite struct {
	b   []byte
	off int64
}

// acidWriter0 gives ACIDFiler0 a second method set for its WAL-writing role
// (it is passed to NewRollbackFiler as the writer).
type acidWriter0 ACIDFiler0
// WriteAt appends the write as a packet to the write-ahead log and remembers
// it in memory (f.data) so it can be replayed against the real DB when the
// transaction checkpoints. It never touches the DB itself.
func (a *acidWriter0) WriteAt(b []byte, off int64) (n int, err error) {
	f := (*ACIDFiler0)(a)
	if f.bwal == nil { // new epoch
		// Lazily open a new WAL epoch: clear the pending-write list
		// and emit the WAL header packet first.
		f.data = f.data[:0]
		f.bwal = bufio.NewWriter(f.wal)
		if err = a.writePacket([]interface{}{wpt00Header, walTypeACIDFiler0, ""}); err != nil {
			return
		}
	}
	if err = a.writePacket([]interface{}{wpt00WriteData, b, off}); err != nil {
		return
	}
	f.data = append(f.data, acidWrite{b, off})
	return len(b), nil
}
// writePacket gb-encodes items and appends them to the buffered WAL as one
// packet: a 4-byte big-endian length, the payload, then zero padding so the
// whole packet size is a multiple of 16 (required by the WAL format).
func (a *acidWriter0) writePacket(items []interface{}) (err error) {
	f := (*ACIDFiler0)(a)
	b, err := EncodeScalars(items...)
	if err != nil {
		return
	}
	var b4 [4]byte
	binary.BigEndian.PutUint32(b4[:], uint32(len(b)))
	if _, err = f.bwal.Write(b4[:]); err != nil {
		return
	}
	if _, err = f.bwal.Write(b); err != nil {
		return
	}
	// Pad with zeros up to the next 16-byte boundary.
	if m := (4 + len(b)) % 16; m != 0 {
		var pad [15]byte
		_, err = f.bwal.Write(pad[:16-m])
	}
	return
}
// WAL Packet Tags
const (
wpt00Header = iota
wpt00WriteData
wpt00Checkpoint
)
const (
walTypeACIDFiler0 = iota
)
// ACIDFiler0 is a very simple, synchronous implementation of 2PC. It uses a
// single write ahead log file to provide the structural atomicity
// (BeginUpdate/EndUpdate/Rollback) and durability (DB can be recovered from
// WAL if a crash occurred).
//
// ACIDFiler0 is a Filer.
//
// NOTE: Durable synchronous 2PC involves three fsyncs in this implementation
// (WAL, DB, zero truncated WAL). Where possible, it's recommended to collect
// transactions for, say one second before performing the two phase commit as
// the typical performance for rotational hard disks is about few tens of
// fsyncs per second at most. For an example of such collective transaction
// approach please see the collecting FSM STT in Dbm's documentation[1].
//
//  [1]: http://godoc.org/github.com/cznic/exp/dbm
type ACIDFiler0 struct {
	*RollbackFiler
	wal               *os.File      // the write-ahead log file
	bwal              *bufio.Writer // buffered WAL writer; nil between epochs
	data              []acidWrite   // writes pending replay at checkpoint
	testHook          bool          // keeps WAL untruncated (once)
	peakWal           int64         // tracks WAL maximum used size
	peakBitFilerPages int           // track maximum transaction memory
}
// NewACIDFiler returns a newly created ACIDFiler0 with WAL in wal.
// (The doc comment previously named a non-existent NewACIDFiler0.)
//
// If the WAL is zero sized then a previous clean shutdown of db is taken for
// granted and no recovery procedure is taken.
//
// If the WAL is of non zero size then it is checked for having a
// committed/fully finished transaction not yet been reflected in db. If such
// transaction exists it's committed to db. If the recovery process finishes
// successfully, the WAL is truncated to zero size and fsync'ed prior to return
// from NewACIDFiler.
func NewACIDFiler(db Filer, wal *os.File) (r *ACIDFiler0, err error) {
	fi, err := wal.Stat()
	if err != nil {
		return
	}
	r = &ACIDFiler0{wal: wal}
	// A non-empty WAL means the last run did not shut down cleanly:
	// replay any fully committed transaction into db first.
	if fi.Size() != 0 {
		if err = r.recoverDb(db); err != nil {
			return
		}
	}
	acidWriter := (*acidWriter0)(r)
	if r.RollbackFiler, err = NewRollbackFiler(
		db,
		// The checkpoint callback runs the actual two phase commit.
		func(sz int64) (err error) {
			// Checkpoint
			if err = acidWriter.writePacket([]interface{}{wpt00Checkpoint, sz}); err != nil {
				return
			}
			if err = r.bwal.Flush(); err != nil {
				return
			}
			// nil bwal marks the end of this WAL epoch; the next
			// WriteAt starts a fresh one.
			r.bwal = nil
			if err = r.wal.Sync(); err != nil {
				return
			}
			wfi, err := r.wal.Stat()
			switch err != nil {
			case true:
				// unexpected, but ignored
			case false:
				r.peakWal = mathutil.MaxInt64(wfi.Size(), r.peakWal)
			}
			// Phase 1 commit complete
			// Replay the recorded writes against the real DB.
			for _, v := range r.data {
				if _, err := db.WriteAt(v.b, v.off); err != nil {
					return err
				}
			}
			if err = db.Truncate(sz); err != nil {
				return
			}
			if err = db.Sync(); err != nil {
				return
			}
			// Phase 2 commit complete
			// Discard the WAL; testHook skips this once so tests can
			// inspect/recover from an untruncated WAL.
			if !r.testHook {
				if err = r.wal.Truncate(0); err != nil {
					return
				}
				if _, err = r.wal.Seek(0, 0); err != nil {
					return
				}
			}
			r.testHook = false
			return r.wal.Sync()
		},
		acidWriter,
	); err != nil {
		return
	}
	return r, nil
}
// PeakWALSize reports the maximum size WAL has ever used (tracked at each
// checkpoint).
func (a ACIDFiler0) PeakWALSize() int64 {
	return a.peakWal
}
// readPacket reads one WAL packet from f — a 4-byte big-endian payload
// length, the payload, and zero padding up to a 16-byte boundary — and
// returns the decoded payload scalars.
func (a *ACIDFiler0) readPacket(f *bufio.Reader) (items []interface{}, err error) {
	var b4 [4]byte
	n, err := io.ReadAtLeast(f, b4[:], 4)
	if n != 4 {
		// err from ReadAtLeast is non-nil whenever n < 4.
		return
	}
	ln := int(binary.BigEndian.Uint32(b4[:]))
	// Consume the padding along with the payload so the reader stays
	// aligned on a 16-byte boundary.
	m := (4 + ln) % 16
	padd := (16 - m) % 16
	b := make([]byte, ln+padd)
	if n, err = io.ReadAtLeast(f, b, len(b)); n != len(b) {
		return
	}
	return DecodeScalars(b[:ln])
}
// recoverDb replays a committed-but-unapplied transaction from the WAL into
// db: it validates the WAL header, collects all wpt00WriteData packets into a
// BTree keyed by offset (so later writes to the same offset win), and on the
// terminating wpt00Checkpoint packet applies them to db, truncates and syncs
// db, then truncates and syncs the WAL. Any structural inconsistency yields
// an ErrILSEQ.
func (a *ACIDFiler0) recoverDb(db Filer) (err error) {
	fi, err := a.wal.Stat()
	if err != nil {
		return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: err}
	}
	// Every packet is 16-byte aligned, so the file size must be too.
	if sz := fi.Size(); sz%16 != 0 {
		return &ErrILSEQ{Type: ErrFileSize, Name: a.wal.Name(), Arg: sz}
	}
	f := bufio.NewReader(a.wal)
	items, err := a.readPacket(f)
	if err != nil {
		return
	}
	// The first packet must be the WAL header for this filer type.
	if len(items) != 3 || items[0] != int64(wpt00Header) || items[1] != int64(walTypeACIDFiler0) {
		return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid packet items %#v", items)}
	}
	tr := NewBTree(nil)
	for {
		items, err = a.readPacket(f)
		if err != nil {
			return
		}
		if len(items) < 2 {
			return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("too few packet items %#v", items)}
		}
		switch items[0] {
		case int64(wpt00WriteData):
			if len(items) != 3 {
				return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("invalid data packet items %#v", items)}
			}
			// Index the write by big-endian offset so BTree
			// iteration applies writes in ascending offset order and
			// duplicate offsets keep only the latest data.
			b, off := items[1].([]byte), items[2].(int64)
			var key [8]byte
			binary.BigEndian.PutUint64(key[:], uint64(off))
			if err = tr.Set(key[:], b); err != nil {
				return
			}
		case int64(wpt00Checkpoint):
			// The checkpoint must be the very last packet in the WAL.
			var b1 [1]byte
			if n, err := f.Read(b1[:]); n != 0 || err == nil {
				return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint n %d, err %v", n, err)}
			}
			if len(items) != 2 {
				return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("checkpoint packet invalid items %#v", items)}
			}
			sz := items[1].(int64)
			// Apply the collected writes to db in offset order.
			enum, err := tr.seekFirst()
			if err != nil {
				return err
			}
			for {
				k, v, err := enum.current()
				if err != nil {
					if fileutil.IsEOF(err) {
						break
					}
					return err
				}
				if _, err = db.WriteAt(v, int64(binary.BigEndian.Uint64(k))); err != nil {
					return err
				}
				if err = enum.next(); err != nil {
					if fileutil.IsEOF(err) {
						break
					}
					return err
				}
			}
			if err = db.Truncate(sz); err != nil {
				return err
			}
			if err = db.Sync(); err != nil {
				return err
			}
			// Recovery complete
			// Discard the now-applied WAL.
			if err = a.wal.Truncate(0); err != nil {
				return err
			}
			return a.wal.Sync()
		default:
			return &ErrILSEQ{Type: ErrInvalidWAL, Name: a.wal.Name(), More: fmt.Sprintf("packet tag %v", items[0])}
		}
	}
}

View File

@@ -1,44 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Anatomy of a WAL file
WAL file
A sequence of packets
WAL packet, parts in slice notation
[0:4], 4 bytes: N uint32 // network byte order
[4:4+N], N bytes: payload []byte // gb encoded scalars
Packets, including the 4 byte 'size' prefix, MUST BE padded to size == 0 (mod
16). The values of the padding bytes MUST BE zero.
Encoded scalars first item is a packet type number (packet tag). The meaning of
any other item(s) of the payload depends on the packet tag.
Packet definitions
{wpt00Header int, typ int, s string}
typ: Must be zero (ACIDFiler0 file).
s: Any comment string, empty string is okay.
This packet must be present only once - as the first packet of
a WAL file.
{wpt00WriteData int, b []byte, off int64}
Write data (WriteAt(b, off)).
{wpt00Checkpoint int, sz int64}
Checkpoint (Truncate(sz)).
This packet must be present only once - as the last packet of
a WAL file.
*/
package lldb
//TODO optimize bitfiler/wal/2pc data above final size

View File

@@ -1,285 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Two Phase Commit & Structural ACID
package lldb
import (
"bytes"
"encoding/binary"
"io/ioutil"
"math/rand"
"os"
"testing"
"github.com/cznic/mathutil"
)
// Compile-time check that truncFiler satisfies the Filer interface.
var _ Filer = &truncFiler{}

// truncFiler is a test helper Filer that mirrors every write into an
// in-memory MemFiler while silently dropping the part of the stream past
// a byte limit on the real Filer — used by the tests below to produce an
// incomplete (torn) on-disk image.
type truncFiler struct {
    f            Filer     // the real destination, possibly short-written
    fake         *MemFiler // complete in-memory copy of all writes
    totalWritten int       // Including silently dropped
    realWritten  int       // bytes actually forwarded to f
    limit        int       // -1: unlimited, n: silently stop writing after limit bytes
}

// NewTruncFiler returns a truncFiler wrapping f that stops forwarding
// writes to f after limit bytes; limit < 0 means unlimited.
func NewTruncFiler(f Filer, limit int) *truncFiler {
    return &truncFiler{f: f, fake: NewMemFiler(), limit: limit}
}
// Filer plumbing: reads and Size are served from the in-memory copy so
// callers always see the full logical content; Close/Sync/Truncate are
// forwarded to the real Filer; transactional methods and PunchHole are
// not expected to be called by these tests and panic.
func (f *truncFiler) BeginUpdate() error { panic("internal error") }
func (f *truncFiler) Close() error { return f.f.Close() }
func (f *truncFiler) EndUpdate() error { panic("internal error") }
func (f *truncFiler) Name() string { return f.f.Name() }
func (f *truncFiler) PunchHole(off, sz int64) error { panic("internal error") }
func (f *truncFiler) ReadAt(b []byte, off int64) (int, error) { return f.fake.ReadAt(b, off) }
func (f *truncFiler) Rollback() error { panic("internal error") }
func (f *truncFiler) Size() (int64, error) { return f.fake.Size() }
func (f *truncFiler) Sync() error { return f.f.Sync() }

// Truncate resizes both the in-memory copy and the real Filer.
// NOTE(review): the fake.Truncate error is intentionally ignored; only
// the real Filer's result is reported.
func (f *truncFiler) Truncate(sz int64) error {
    f.fake.Truncate(sz)
    return f.f.Truncate(sz)
}
// WriteAt writes b at off. The whole write always lands in the in-memory
// copy; only the prefix that keeps totalWritten under limit is forwarded
// to the real Filer, silently dropping the remainder.
func (f *truncFiler) WriteAt(b []byte, off int64) (n int, err error) {
    rq := len(b)
    n = f.totalWritten
    // Clamp rq so that at most limit bytes are ever really written.
    if lim := f.limit; lim >= 0 && n+rq > lim {
        over := n + rq - lim
        rq -= over
        rq = mathutil.Max(rq, 0)
    }
    if n, err = f.fake.WriteAt(b, off); err != nil {
        return
    }
    f.totalWritten += n
    if rq != 0 {
        // n, err here deliberately shadow the named results: on success
        // the caller still observes the full (fake) write count.
        n, err := f.f.WriteAt(b[:rq], off)
        if err != nil {
            return n, err
        }
        f.realWritten += n
    }
    return
}
// Verify memory BTrees don't have maxRq limits: a value twice the
// maximum request size must round-trip through Set/Get unchanged.
func TestACID0MemBTreeCaps(t *testing.T) {
    rng := rand.New(rand.NewSource(42))
    tr := NewBTree(nil)
    b := make([]byte, 2*maxRq)
    for i := range b {
        b[i] = byte(rng.Int())
    }

    if err := tr.Set(nil, b); err != nil {
        t.Fatal(len(b), err)
    }

    g, err := tr.Get(nil, nil)
    if err != nil {
        t.Fatal(err)
    }

    if !bytes.Equal(g, b) {
        t.Fatal("data mismatch") // fixed typo: was "data mismatach"
    }
}
// TestACIDFiler0 builds a DB through a WAL-backed ACIDFiler, verifies it,
// corrupts the DB file to simulate a crash, then reopens it with the kept
// WAL and checks the recovered image is byte-identical to the good one.
func TestACIDFiler0(t *testing.T) {
    const SZ = 1 << 17

    // Phase 1: Create a DB, fill it with data.
    wal, err := ioutil.TempFile("", "test-acidfiler0-wal-")
    if err != nil {
        t.Fatal(err)
    }

    if !*oKeep {
        defer os.Remove(wal.Name())
    }

    db, err := ioutil.TempFile("", "test-acidfiler0-db-")
    if err != nil {
        t.Fatal(err)
    }

    dbName := db.Name()
    if !*oKeep {
        defer os.Remove(db.Name())
    }

    realFiler := NewSimpleFileFiler(db)
    truncFiler := NewTruncFiler(realFiler, -1)
    acidFiler, err := NewACIDFiler(truncFiler, wal)
    if err != nil {
        t.Error(err)
        return
    }

    if err = acidFiler.BeginUpdate(); err != nil {
        t.Error(err)
        return
    }

    a, err := NewAllocator(acidFiler, &Options{})
    if err != nil {
        t.Error(err)
        return
    }

    a.Compress = true

    tr, h, err := CreateBTree(a, nil)
    if h != 1 || err != nil {
        t.Error(h, err)
        return
    }

    // Fill the tree with random key/value pairs until the filer exceeds
    // SZ bytes; ref remembers what was written for later verification.
    rng := rand.New(rand.NewSource(42))
    var key, val [8]byte
    ref := map[int64]int64{}

    for {
        sz, err := acidFiler.Size()
        if err != nil {
            t.Error(err)
            return
        }

        if sz > SZ {
            break
        }

        k, v := rng.Int63(), rng.Int63()
        ref[k] = v
        binary.BigEndian.PutUint64(key[:], uint64(k))
        binary.BigEndian.PutUint64(val[:], uint64(v))
        if err := tr.Set(key[:], val[:]); err != nil {
            t.Error(err)
            return
        }
    }

    acidFiler.testHook = true // keep WAL

    if err := acidFiler.EndUpdate(); err != nil {
        t.Error(err)
        return
    }

    if err := acidFiler.Close(); err != nil {
        t.Error(err)
        return
    }

    if err := wal.Sync(); err != nil {
        t.Error(err)
        return
    }

    // Rewind the WAL so it can be replayed in Phase 4.
    if _, err = wal.Seek(0, 0); err != nil {
        t.Error(err)
        return
    }

    // Phase 2: Reopen and verify structure and data.
    db, err = os.OpenFile(dbName, os.O_RDWR, 0666)
    if err != nil {
        t.Error(err)
        return
    }

    filer := NewSimpleFileFiler(db)
    a, err = NewAllocator(filer, &Options{})
    if err != nil {
        t.Error(err)
        return
    }

    if err = a.Verify(NewMemFiler(), nil, nil); err != nil {
        t.Error(err)
        return
    }

    tr, err = OpenBTree(a, nil, 1)
    for k, v := range ref {
        binary.BigEndian.PutUint64(key[:], uint64(k))
        binary.BigEndian.PutUint64(val[:], uint64(v))
        var b []byte
        b, err = tr.Get(b, key[:])
        if err != nil || b == nil || !bytes.Equal(b, val[:]) {
            t.Error(err, b, val[:])
            return
        }
    }

    // Snapshot the known-good on-disk image.
    okImage, err := ioutil.ReadFile(dbName)
    if err != nil {
        t.Error(err)
        return
    }

    // Phase 3: Simulate a crash
    sz, err := filer.Size()
    if err != nil {
        t.Error(err)
        return
    }

    // Halve the file and zero a third of what remains.
    sz /= 2
    if err := db.Truncate(sz); err != nil {
        t.Error(err)
        return
    }

    z := make([]byte, sz/3)
    n, err := db.WriteAt(z, sz/3)
    if n != len(z) {
        t.Error(n, err)
        return
    }

    if err := db.Sync(); err != nil {
        t.Error(err)
        return
    }

    // Phase 4: Open the corrupted DB
    // NOTE(review): WAL replay presumably happens inside NewACIDFiler /
    // Sync — confirm against the acidFiler implementation.
    filer = NewSimpleFileFiler(db)
    acidFiler, err = NewACIDFiler(filer, wal)
    if err != nil {
        t.Error(err)
        return
    }

    if err = acidFiler.Sync(); err != nil {
        t.Error(err)
        return
    }

    if err = acidFiler.Close(); err != nil {
        t.Error(err)
        return
    }

    // Phase 5: Verify DB was recovered.
    newImage, err := ioutil.ReadFile(dbName)
    if err != nil {
        t.Error(err)
        return
    }

    if !bytes.Equal(okImage, newImage) {
        // NOTE(review): err is nil here; the mismatch itself is the failure.
        t.Error(err)
        return
    }
}

View File

@@ -1,11 +0,0 @@
# This file lists authors for copyright purposes. This file is distinct from
# the CONTRIBUTORS files. See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
#
# The email address is not required for organizations.
#
# Please keep the list sorted.
Jan Mercl <0xjnml@gmail.com>

View File

@@ -1,9 +0,0 @@
# This file lists people who contributed code to this repository. The AUTHORS
# file lists the copyright holders; this file lists people.
#
# Names should be added to this file like so:
# Name <email address>
#
# Please keep the list sorted.
Jan Mercl <0xjnml@gmail.com>

View File

@@ -1,27 +0,0 @@
Copyright (c) 2014 The lldb Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the names of the authors nor the names of the
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,45 +0,0 @@
# Copyright 2014 The lldb Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
.PHONY: all editor clean cover nuke
testbin=lldb.test
grep=--include=*.go
all: editor
go build
go vet
golint .
go install
make todo
clean:
go clean
rm -f *~ cov cov.html bad-dump good-dump lldb.test old.txt new.txt \
test-acidfiler0-* _test.db _wal
cover:
t=$(shell tempfile) ; go test -coverprofile $$t && go tool cover -html $$t && unlink $$t
editor:
go fmt
go test -i
go test -timeout 1h
mem:
go test -c
./$(testbin) -test.bench . -test.memprofile mem.out -test.memprofilerate 1 -test.timeout 24h
go tool pprof --lines --web --alloc_space $(testbin) mem.out
nuke: clean
go clean -i
todo:
@grep -nr $(grep) BUG * || true
@grep -nr $(grep) LATER * || true
@grep -nr $(grep) MAYBE * || true
@grep -nr $(grep) TODO * || true
@grep -nr $(grep) FIXME * || true
@grep -nr $(grep) ^[[:space:]]*_[[:space:]]*=[[:space:]][[:alpha:]][[:alnum:]]* * || true
@grep -nr $(grep) println * || true

View File

@@ -1,8 +0,0 @@
lldb
====
Package lldb (WIP) implements a low level database engine.
Installation: $ go get github.com/cznic/exp/lldb
Documentation: [godoc.org/github.com/cznic/exp/lldb](http://godoc.org/github.com/cznic/exp/lldb)

View File

@@ -1,43 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lldb
import (
"encoding/hex"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"time"
)
// Shared fixture file names used by the tests in this package.
const (
    testDbName = "_test.db" // scratch database file
    walName    = "_wal"     // scratch write ahead log file
)
func now() time.Time { return time.Now() }
func hdump(b []byte) string {
return hex.Dump(b)
}
// die terminates the test binary immediately with a failure exit code.
// Deferred functions do NOT run.
func die() {
    os.Exit(1)
}
// stack formats the calling goroutine's stack trace (up to 64 KiB).
func stack() string {
    b := make([]byte, 1<<16)
    n := runtime.Stack(b, false)
    return string(b[:n])
}
func temp() (dir, name string) {
dir, err := ioutil.TempDir("", "test-lldb-")
if err != nil {
panic(err)
}
return dir, filepath.Join(dir, "test.tmp")
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,170 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Some errors returned by this package.
//
// Note that this package can return more errors than declared here, for
// example io.EOF from Filer.ReadAt().
package lldb
import (
"fmt"
)
// ErrDecodeScalars is possibly returned from DecodeScalars when the
// encoded input is corrupted.
type ErrDecodeScalars struct {
    B []byte // Data being decoded
    I int    // offending offset
}

// Error implements the built in error type.
func (d *ErrDecodeScalars) Error() string {
    total := len(d.B)
    return fmt.Sprintf("DecodeScalars: corrupted data @ %d/%d", d.I, total)
}
// ErrINVAL reports invalid values passed as parameters, for example
// negative offsets where only non-negative ones are allowed or read from
// the DB.
type ErrINVAL struct {
    Src string      // operation that rejected the value
    Val interface{} // the offending value
}

// Error implements the built in error type.
func (err *ErrINVAL) Error() string {
    msg := fmt.Sprintf("%s: %+v", err.Src, err.Val)
    return msg
}
// ErrPERM is for example reported when a Filer is closed while BeginUpdate(s)
// are not balanced with EndUpdate(s)/Rollback(s) or when EndUpdate or Rollback
// is invoked which is not paired with a BeginUpdate.
type ErrPERM struct {
    Src string // operation that was not permitted
}

// Error implements the built in error type.
func (e *ErrPERM) Error() string {
    // e.Src is already a string; the previous string(e.Src) conversion
    // was a redundant no-op and has been removed.
    return fmt.Sprintf("%s: Operation not permitted", e.Src)
}
// ErrType represents an ErrILSEQ kind. (The comment previously named the
// non-existent "ErrTag".)
type ErrType int

// ErrILSEQ types
const (
    ErrOther                 ErrType = iota
    ErrAdjacentFree          // Adjacent free blocks (.Off and .Arg)
    ErrDecompress            // Used compressed block: corrupted compression
    ErrExpFreeTag            // Expected a free block tag, got .Arg
    ErrExpUsedTag            // Expected a used block tag, got .Arg
    ErrFLT                   // Free block is invalid or referenced multiple times
    ErrFLTLoad               // FLT truncated to .Off, need size >= .Arg
    ErrFLTSize               // Free block size (.Arg) doesn't belong to its list min size: .Arg2
    ErrFileSize              // File .Name size (.Arg) != 0 (mod 16)
    ErrFreeChaining          // Free block, .prev.next doesn't point back to this block
    ErrFreeTailBlock         // Last block is free
    ErrHead                  // Head of a free block list has non zero Prev (.Arg)
    ErrInvalidRelocTarget    // Reloc doesn't target (.Arg) a short or long used block
    ErrInvalidWAL            // Corrupted write ahead log. .Name: file name, .More: more
    ErrLongFreeBlkTooLong    // Long free block spans beyond EOF, size .Arg
    ErrLongFreeBlkTooShort   // Long free block must have at least 2 atoms, got only .Arg
    ErrLongFreeNextBeyondEOF // Long free block .Next (.Arg) spans beyond EOF
    ErrLongFreePrevBeyondEOF // Long free block .Prev (.Arg) spans beyond EOF
    ErrLongFreeTailTag       // Expected a long free block tail tag, got .Arg
    ErrLostFreeBlock         // Free block is not in any FLT list
    ErrNullReloc             // Used reloc block with nil target
    ErrRelocBeyondEOF        // Used reloc points (.Arg) beyond EOF
    ErrShortFreeTailTag      // Expected a short free block tail tag, got .Arg
    ErrSmall                 // Request for a free block (.Arg) returned a too small one (.Arg2) at .Off
    ErrTailTag               // Block at .Off has invalid tail CC (compression code) tag, got .Arg
    ErrUnexpReloc            // Unexpected reloc block referred to from reloc block .Arg
    ErrVerifyPadding         // Used block has nonzero padding
    ErrVerifyTailSize        // Long free block size .Arg but tail size .Arg2
    ErrVerifyUsedSpan        // Used block size (.Arg) spans beyond EOF
)

// ErrILSEQ reports a corrupted file format. Details in fields according to Type.
type ErrILSEQ struct {
    Type ErrType
    Off  int64
    Arg  int64
    Arg2 int64
    Arg3 int64
    Name string
    More interface{}
}

// Error implements the built in error type. Message typos fixed:
// "ofset" -> "offset", "doesn point" -> "doesn't point",
// "Request for of free block" -> "Request for a free block".
func (e *ErrILSEQ) Error() string {
    switch e.Type {
    case ErrAdjacentFree:
        return fmt.Sprintf("Adjacent free blocks at offset %#x and %#x", e.Off, e.Arg)
    case ErrDecompress:
        return fmt.Sprintf("Compressed block at offset %#x: Corrupted compressed content", e.Off)
    case ErrExpFreeTag:
        return fmt.Sprintf("Block at offset %#x: Expected a free block tag, got %#2x", e.Off, e.Arg)
    case ErrExpUsedTag:
        return fmt.Sprintf("Block at offset %#x: Expected a used block tag, got %#2x", e.Off, e.Arg)
    case ErrFLT:
        return fmt.Sprintf("Free block at offset %#x is invalid or referenced multiple times", e.Off)
    case ErrFLTLoad:
        return fmt.Sprintf("FLT truncated to size %d, expected at least %d", e.Off, e.Arg)
    case ErrFLTSize:
        return fmt.Sprintf("Free block at offset %#x has size (%#x) should be at least (%#x)", e.Off, e.Arg, e.Arg2)
    case ErrFileSize:
        return fmt.Sprintf("File %q size (%#x) != 0 (mod 16)", e.Name, e.Arg)
    case ErrFreeChaining:
        return fmt.Sprintf("Free block at offset %#x: .prev.next doesn't point back here.", e.Off)
    case ErrFreeTailBlock:
        return fmt.Sprintf("Free block at offset %#x: Cannot be last file block", e.Off)
    case ErrHead:
        return fmt.Sprintf("Block at offset %#x: Head of free block list has non zero .prev %#x", e.Off, e.Arg)
    case ErrInvalidRelocTarget:
        return fmt.Sprintf("Used reloc block at offset %#x: Target (%#x) is not a short or long used block", e.Off, e.Arg)
    case ErrInvalidWAL:
        return fmt.Sprintf("Corrupted write ahead log file: %q %v", e.Name, e.More)
    case ErrLongFreeBlkTooLong:
        return fmt.Sprintf("Long free block at offset %#x: Size (%#x) beyond EOF", e.Off, e.Arg)
    case ErrLongFreeBlkTooShort:
        return fmt.Sprintf("Long free block at offset %#x: Size (%#x) too small", e.Off, e.Arg)
    case ErrLongFreeNextBeyondEOF:
        return fmt.Sprintf("Long free block at offset %#x: Next (%#x) points beyond EOF", e.Off, e.Arg)
    case ErrLongFreePrevBeyondEOF:
        return fmt.Sprintf("Long free block at offset %#x: Prev (%#x) points beyond EOF", e.Off, e.Arg)
    case ErrLongFreeTailTag:
        return fmt.Sprintf("Block at offset %#x: Expected long free tail tag, got %#2x", e.Off, e.Arg)
    case ErrLostFreeBlock:
        return fmt.Sprintf("Free block at offset %#x: not in any FLT list", e.Off)
    case ErrNullReloc:
        return fmt.Sprintf("Used reloc block at offset %#x: Nil target", e.Off)
    case ErrRelocBeyondEOF:
        return fmt.Sprintf("Used reloc block at offset %#x: Link (%#x) points beyond EOF", e.Off, e.Arg)
    case ErrShortFreeTailTag:
        return fmt.Sprintf("Block at offset %#x: Expected short free tail tag, got %#2x", e.Off, e.Arg)
    case ErrSmall:
        return fmt.Sprintf("Request for a free block of size %d returned a too small (%d) one at offset %#x", e.Arg, e.Arg2, e.Off)
    case ErrTailTag:
        return fmt.Sprintf("Block at offset %#x: Invalid tail CC tag, got %#2x", e.Off, e.Arg)
    case ErrUnexpReloc:
        return fmt.Sprintf("Block at offset %#x: Unexpected reloc block. Referred to from reloc block at offset %#x", e.Off, e.Arg)
    case ErrVerifyPadding:
        return fmt.Sprintf("Used block at offset %#x: Nonzero padding", e.Off)
    case ErrVerifyTailSize:
        return fmt.Sprintf("Long free block at offset %#x: Size %#x, but tail size %#x", e.Off, e.Arg, e.Arg2)
    case ErrVerifyUsedSpan:
        return fmt.Sprintf("Used block at offset %#x: Size %#x spans beyond EOF", e.Off, e.Arg)
    }

    // Fallback for ErrOther / unknown types: generic message with
    // whatever detail fields are populated.
    more := ""
    if e.More != nil {
        more = fmt.Sprintf(", %v", e.More)
    }
    off := ""
    if e.Off != 0 {
        off = fmt.Sprintf(", off: %#x", e.Off)
    }

    return fmt.Sprintf("Error%s%s", off, more)
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,192 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// An abstraction of file like (persistent) storage with optional (abstracted)
// support for structural integrity.
package lldb
import (
"fmt"
"github.com/cznic/mathutil"
)
func doubleTrouble(first, second error) error {
return fmt.Errorf("%q. Additionally, while attempting to recover (rollback): %q", first, second)
}
// A Filer is a []byte-like model of a file or similar entity. It may
// optionally implement support for structural transaction safety. In contrast
// to a file stream, a Filer is not sequentially accessible. ReadAt and WriteAt
// are always "addressed" by an offset and are assumed to perform atomically.
// A Filer is not safe for concurrent access, it's designed for consumption by
// the other objects in package, which should use a Filer from one goroutine
// only or via a mutex. BeginUpdate, EndUpdate and Rollback must be either all
// implemented by a Filer for structural integrity - or they should be all
// no-ops; where/if that requirement is relaxed.
//
// If a Filer wraps another Filer implementation, it usually invokes the same
// methods on the "inner" one, after some possible argument translations etc.
// If a Filer implements the structural transactions handling methods
// (BeginUpdate, EndUpdate and Rollback) as no-ops _and_ wraps another Filer:
// it then still MUST invoke those methods on the inner Filer. This is
// important for the case where a RollbackFiler exists somewhere down the
// chain. It's also important for an Allocator - to know when it must
// invalidate its FLT cache.
type Filer interface {
    // BeginUpdate increments the "nesting" counter (initially zero). Every
    // call to BeginUpdate must be eventually "balanced" by exactly one of
    // EndUpdate or Rollback. Calls to BeginUpdate may nest.
    BeginUpdate() error

    // Analogous to os.File.Close().
    Close() error

    // EndUpdate decrements the "nesting" counter. If it's zero after that
    // then assume the "storage" has reached structural integrity (after a
    // batch of partial updates). If a Filer implements some support for
    // that (write ahead log, journal, etc.) then the appropriate actions
    // are to be taken for nesting == 0. Invocation of an unbalanced
    // EndUpdate is an error.
    EndUpdate() error

    // Analogous to os.File.Name().
    Name() string

    // PunchHole deallocates space inside a "file" in the byte range
    // starting at off and continuing for size bytes. The actual hole
    // created by PunchHole may be smaller than requested. The Filer size
    // (as reported by `Size()`) does not change when hole punching, even
    // when punching the end of a file off. In contrast to the Linux
    // implementation of FALLOC_FL_PUNCH_HOLE in `fallocate`(2); a Filer is
    // free not only to ignore `PunchHole()` (implement it as a nop), but
    // additionally no guarantees about the content of the hole, when
    // eventually read back, are required, i.e. any data, not only zeros,
    // can be read from the "hole", including just anything what was left
    // there - with all of the possible security problems.
    PunchHole(off, size int64) error

    // As os.File.ReadAt. Note: `off` is an absolute "file pointer"
    // address and cannot be negative even when a Filer is a InnerFiler.
    ReadAt(b []byte, off int64) (n int, err error)

    // Rollback cancels and undoes the innermost pending update level.
    // Rollback decrements the "nesting" counter. If a Filer implements
    // some support for keeping structural integrity (write ahead log,
    // journal, etc.) then the appropriate actions are to be taken.
    // Invocation of an unbalanced Rollback is an error.
    Rollback() error

    // Analogous to os.File.FileInfo().Size().
    Size() (int64, error)

    // Analogous to os.Sync().
    Sync() (err error)

    // Analogous to os.File.Truncate().
    Truncate(size int64) error

    // Analogous to os.File.WriteAt(). Note: `off` is an absolute "file
    // pointer" address and cannot be negative even when a Filer is a
    // InnerFiler.
    WriteAt(b []byte, off int64) (n int, err error)
}
var _ Filer = &InnerFiler{} // Ensure InnerFiler is a Filer.

// An InnerFiler is a Filer with added addressing/size translation.
type InnerFiler struct {
    outer Filer // the wrapped Filer
    off   int64 // offset added to every outer access
}
// NewInnerFiler returns a new InnerFiler wrapping `outer` in a way which
// adds `off` to every access.
//
// For example, considering:
//
//	inner := NewInnerFiler(outer, 10)
//
// then
//
//	inner.WriteAt([]byte{42}, 4)
//
// translates to
//
//	outer.WriteAt([]byte{42}, 14)
//
// But an attempt to emulate
//
//	outer.WriteAt([]byte{17}, 9)
//
// by
//
//	inner.WriteAt([]byte{17}, -1)
//
// will fail as the `off` parameter can never be < 0. Also note that
//
//	inner.Size() == outer.Size() - off,
//
// i.e. `inner` pretends no `outer` exists. Finally, after e.g.
//
//	inner.Truncate(7)
//	outer.Size() == 17
//
// will be true.
func NewInnerFiler(outer Filer, off int64) *InnerFiler { return &InnerFiler{outer, off} }
// BeginUpdate implements Filer.
func (f *InnerFiler) BeginUpdate() error { return f.outer.BeginUpdate() }

// Close implements Filer.
func (f *InnerFiler) Close() (err error) { return f.outer.Close() }

// EndUpdate implements Filer.
func (f *InnerFiler) EndUpdate() error { return f.outer.EndUpdate() }

// Name implements Filer.
func (f *InnerFiler) Name() string { return f.outer.Name() }

// PunchHole implements Filer. `off`, `size` must be >= 0.
func (f *InnerFiler) PunchHole(off, size int64) error { return f.outer.PunchHole(f.off+off, size) }

// ReadAt implements Filer. `off` must be >= 0.
func (f *InnerFiler) ReadAt(b []byte, off int64) (n int, err error) {
    if off < 0 {
        return 0, &ErrINVAL{f.outer.Name() + ":ReadAt invalid off", off}
    }

    return f.outer.ReadAt(b, f.off+off)
}

// Rollback implements Filer.
func (f *InnerFiler) Rollback() error { return f.outer.Rollback() }

// Size implements Filer. The inner view never reports a negative size,
// even when the outer file is shorter than the translation offset.
func (f *InnerFiler) Size() (int64, error) {
    sz, err := f.outer.Size()
    if err != nil {
        return 0, err
    }

    return mathutil.MaxInt64(sz-f.off, 0), nil
}

// Sync implements Filer.
func (f *InnerFiler) Sync() (err error) {
    return f.outer.Sync()
}

// Truncate implements Filer.
func (f *InnerFiler) Truncate(size int64) error { return f.outer.Truncate(size + f.off) }

// WriteAt implements Filer. `off` must be >= 0.
func (f *InnerFiler) WriteAt(b []byte, off int64) (n int, err error) {
    if off < 0 {
        return 0, &ErrINVAL{f.outer.Name() + ":WriteAt invalid off", off}
    }

    return f.outer.WriteAt(b, f.off+off)
}

View File

@@ -1,764 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lldb
import (
"bytes"
"encoding/hex"
"io/ioutil"
"math/rand"
"os"
"runtime"
"testing"
"github.com/cznic/fileutil"
)
// Bench knobs.
const (
    filerTestChunkSize = 32e3 // bytes written/read per benchmark iteration
    filerTotalSize     = 10e6 // window size the benchmarks cycle over
)
// newFunc constructs a fresh Filer for a test to exercise.
type newFunc func() Filer

// testFileFiler wraps a file-backed Filer and removes the underlying
// file when the Filer is closed.
type testFileFiler struct {
    Filer
}

func (t *testFileFiler) Close() (err error) {
    n := t.Name()
    err = t.Filer.Close()
    // Report the removal error only when Close itself succeeded.
    if errDel := os.Remove(n); errDel != nil && err == nil {
        err = errDel
    }

    return
}
// Constructors for every Filer flavor exercised by the tests below.
var (
    newFileFiler = func() Filer {
        file, err := ioutil.TempFile("", "lldb-test-file")
        if err != nil {
            panic(err)
        }

        return &testFileFiler{NewSimpleFileFiler(file)}
    }

    newOSFileFiler = func() Filer {
        file, err := ioutil.TempFile("", "lldb-test-osfile")
        if err != nil {
            panic(err)
        }

        return &testFileFiler{NewOSFiler(file)}
    }

    newMemFiler = func() Filer {
        return NewMemFiler()
    }

    nwBitFiler = func() Filer {
        f, err := newBitFiler(NewMemFiler())
        if err != nil {
            panic(err)
        }

        return f
    }

    newRollbackFiler = func() Filer {
        f := NewMemFiler()

        var r Filer

        // checkpoint truncates the backing MemFiler on commit.
        checkpoint := func(sz int64) (err error) {
            return f.Truncate(sz)
        }

        r, err := NewRollbackFiler(f, checkpoint, f)
        if err != nil {
            panic(err)
        }

        return r
    }
)
// TestFilerNesting runs the update-nesting contract checks against each
// Filer flavor.
func TestFilerNesting(t *testing.T) {
    testFilerNesting(t, newFileFiler)
    testFilerNesting(t, newOSFileFiler)
    testFilerNesting(t, newMemFiler)
    testFilerNesting(t, newRollbackFiler)
}
// testFilerNesting checks BeginUpdate/EndUpdate pairing rules across
// create/close cycles: unbalanced EndUpdate fails, Close inside an open
// update fails, and a balanced pair closes cleanly.
func testFilerNesting(t *testing.T, nf newFunc) {
    // Check {Create, Close} works.
    f := nf()
    t.Log(f.Name())
    if err := f.Close(); err != nil {
        t.Fatal(err)
    }

    // Check {Create, EndUpdate} doesn't work.
    f = nf()
    t.Log(f.Name())
    if err := f.EndUpdate(); err == nil {
        f.Close()
        t.Fatal("unexpected success")
    }

    if err := f.Close(); err != nil {
        t.Fatal(err)
    }

    // Check {Create, BeginUpdate, Close} doesn't work.
    f = nf()
    t.Log(f.Name())
    f.BeginUpdate()
    if err := f.Close(); err == nil {
        t.Fatal("unexpected success")
    }

    // Check {Create, BeginUpdate, EndUpdate, Close} works.
    f = nf()
    t.Log(f.Name())
    f.BeginUpdate()
    if err := f.EndUpdate(); err != nil {
        f.Close()
        t.Fatal(err)
    }

    if err := f.Close(); err != nil {
        t.Fatal(err)
    }
}
// TestFilerTruncate runs the Truncate/Size checks against each Filer
// flavor, including the bit filer.
func TestFilerTruncate(t *testing.T) {
    testFilerTruncate(t, newFileFiler)
    testFilerTruncate(t, newOSFileFiler)
    testFilerTruncate(t, newMemFiler)
    testFilerTruncate(t, nwBitFiler)
    testFilerTruncate(t, newRollbackFiler)
}
// testFilerTruncate checks Truncate/Size round-trips (grow, double,
// shrink to zero) and that a negative size is rejected.
func testFilerTruncate(t *testing.T, nf newFunc) {
    f := nf()
    t.Log(f.Name())
    defer func() {
        if err := f.Close(); err != nil {
            t.Error(err)
        }
    }()

    // A RollbackFiler requires an open update around mutating calls.
    if _, ok := f.(*RollbackFiler); ok {
        if err := f.BeginUpdate(); err != nil {
            t.Fatal(err)
        }

        defer func() {
            if err := f.EndUpdate(); err != nil {
                t.Error(err)
            }
        }()
    }

    // Check Truncate works.
    sz := int64(1e6)
    if err := f.Truncate(sz); err != nil {
        t.Error(err)
        return
    }

    fsz, err := f.Size()
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := fsz, sz; g != e {
        t.Error(g, e)
        return
    }

    sz *= 2
    if err := f.Truncate(sz); err != nil {
        t.Error(err)
        return
    }

    fsz, err = f.Size()
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := fsz, sz; g != e {
        t.Error(g, e)
        return
    }

    sz = 0
    if err := f.Truncate(sz); err != nil {
        t.Error(err)
        return
    }

    fsz, err = f.Size()
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := fsz, sz; g != e {
        t.Error(g, e)
        return
    }

    // Check Truncate(-1) doesn't work.
    sz = -1
    if err := f.Truncate(sz); err == nil {
        t.Error(err)
        return
    }
}
// TestFilerReadAtWriteAt runs the random read/write consistency checks
// against each Filer flavor.
func TestFilerReadAtWriteAt(t *testing.T) {
    testFilerReadAtWriteAt(t, newFileFiler)
    testFilerReadAtWriteAt(t, newOSFileFiler)
    testFilerReadAtWriteAt(t, newMemFiler)
    testFilerReadAtWriteAt(t, nwBitFiler)
    testFilerReadAtWriteAt(t, newRollbackFiler)
}
// testFilerReadAtWriteAt performs M random overlapping writes, verifies
// random reads against a shadow buffer, and (for MemFiler only) checks a
// WriteTo/ReadFrom round trip of the whole content.
func testFilerReadAtWriteAt(t *testing.T, nf newFunc) {
    f := nf()
    t.Log(f.Name())
    defer func() {
        if err := f.Close(); err != nil {
            t.Error(err)
        }
    }()

    if _, ok := f.(*RollbackFiler); ok {
        if err := f.BeginUpdate(); err != nil {
            t.Fatal(err)
        }

        defer func() {
            if err := f.EndUpdate(); err != nil {
                t.Error(err)
            }
        }()
    }

    const (
        N = 1 << 16 // address space exercised
        M = 2e2     // number of random writes / reads
    )

    // s is the write source, e holds the expected ("shadow") content.
    s := make([]byte, N)
    e := make([]byte, N)
    rnd := rand.New(rand.NewSource(42))
    for i := range e {
        s[i] = byte(rnd.Intn(256))
    }

    n2 := 0 // highest offset ever written, i.e. the expected file size
    for i := 0; i < M; i++ {
        var from, to int
        for {
            from = rnd.Intn(N)
            to = rnd.Intn(N)
            if from != to {
                break
            }
        }

        if from > to {
            from, to = to, from
        }

        for i := range s[from:to] {
            s[from+i] = byte(rnd.Intn(256))
        }

        copy(e[from:to], s[from:to])
        if to > n2 {
            n2 = to
        }

        n, err := f.WriteAt(s[from:to], int64(from))
        if err != nil {
            t.Error(err)
            return
        }

        if g, e := n, to-from; g != e {
            t.Error(g, e)
            return
        }
    }

    fsz, err := f.Size()
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := fsz, int64(n2); g != e {
        t.Error(g, e)
        return
    }

    // Random reads; the final iteration (i == M) reads the whole file.
    b := make([]byte, n2)
    for i := 0; i <= M; i++ {
        from := rnd.Intn(n2)
        to := rnd.Intn(n2)
        if from > to {
            from, to = to, from
        }

        if i == M {
            from, to = 0, n2
        }

        n, err := f.ReadAt(b[from:to], int64(from))
        if err != nil && (!fileutil.IsEOF(err) && n != 0) {
            fsz, err = f.Size()
            if err != nil {
                t.Error(err)
                return
            }

            t.Error(fsz, from, to, err)
            return
        }

        if g, e := n, to-from; g != e {
            t.Error(g, e)
            return
        }

        if g, e := b[from:to], e[from:to]; !bytes.Equal(g, e) {
            if x, ok := f.(*MemFiler); ok {
                for i := int64(0); i <= 3; i++ {
                    t.Logf("pg %d\n----\n%s", i, hex.Dump(x.m[i][:]))
                }
            }

            t.Errorf(
                "i %d from %d to %d len(g) %d len(e) %d\n---- got ----\n%s\n---- exp ----\n%s",
                i, from, to, len(g), len(e), hex.Dump(g), hex.Dump(e),
            )
            return
        }
    }

    // The remaining checks apply to MemFiler only.
    mf, ok := f.(*MemFiler)
    if !ok {
        return
    }

    buf := &bytes.Buffer{}
    if _, err := mf.WriteTo(buf); err != nil {
        t.Error(err)
        return
    }

    if g, e := buf.Bytes(), e[:n2]; !bytes.Equal(g, e) {
        t.Errorf("\nlen %d\n%s\nlen %d\n%s", len(g), hex.Dump(g), len(e), hex.Dump(e))
        return
    }

    if err := mf.Truncate(0); err != nil {
        t.Error(err)
        return
    }

    if _, err := mf.ReadFrom(buf); err != nil {
        t.Error(err)
        return
    }

    roundTrip := make([]byte, n2)
    if n, err := mf.ReadAt(roundTrip, 0); err != nil && n == 0 {
        t.Error(err)
        return
    }

    if g, e := roundTrip, e[:n2]; !bytes.Equal(g, e) {
        t.Errorf("\nlen %d\n%s\nlen %d\n%s", len(g), hex.Dump(g), len(e), hex.Dump(e))
        return
    }
}
// TestInnerFiler runs the InnerFiler offset-translation checks against
// each Filer flavor as the outer Filer.
func TestInnerFiler(t *testing.T) {
    testInnerFiler(t, newFileFiler)
    testInnerFiler(t, newOSFileFiler)
    testInnerFiler(t, newMemFiler)
    testInnerFiler(t, nwBitFiler)
    testInnerFiler(t, newRollbackFiler)
}
// testInnerFiler verifies InnerFiler's address translation: inner writes
// land HDR_SIZE bytes later in outer, sizes differ by HDR_SIZE, negative
// offsets are rejected, and Truncate is reflected in both views.
func testInnerFiler(t *testing.T, nf newFunc) {
    const (
        HDR_SIZE = 42   // translation offset of the inner view
        LONG_OFF = 3330 // a "far" offset used for the second write
    )

    outer := nf()
    t.Log(outer.Name())
    inner := NewInnerFiler(outer, HDR_SIZE)
    defer func() {
        if err := outer.Close(); err != nil {
            t.Error(err)
        }
    }()

    if _, ok := outer.(*RollbackFiler); ok {
        if err := outer.BeginUpdate(); err != nil {
            t.Fatal(err)
        }

        defer func() {
            if err := outer.EndUpdate(); err != nil {
                t.Error(err)
            }
        }()
    }

    // Negative offsets must be rejected by both WriteAt and ReadAt.
    b := []byte{2, 5, 11}
    n, err := inner.WriteAt(b, -1)
    if err == nil {
        t.Error("unexpected success")
        return
    }

    n, err = inner.ReadAt(make([]byte, 10), -1)
    if err == nil {
        t.Error("unexpected success")
        return
    }

    n, err = inner.WriteAt(b, 0)
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := n, len(b); g != e {
        t.Error(g, e)
        return
    }

    osz, err := outer.Size()
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := osz, int64(HDR_SIZE+3); g != e {
        t.Error(g, e)
        return
    }

    isz, err := inner.Size()
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := isz, int64(3); g != e {
        t.Error(g, e)
        return
    }

    // The outer header region must remain zeroed...
    rbuf := make([]byte, 3)
    if n, err = outer.ReadAt(rbuf, 0); err != nil && n == 0 {
        t.Error(err)
        return
    }

    if g, e := n, len(rbuf); g != e {
        t.Error(g, e)
        return
    }

    if g, e := rbuf, make([]byte, 3); !bytes.Equal(g, e) {
        t.Error(g, e)
    }

    // ...while the translated outer offset holds the written bytes.
    rbuf = make([]byte, 3)
    if n, err = outer.ReadAt(rbuf, HDR_SIZE); err != nil && n == 0 {
        t.Error(err)
        return
    }

    if g, e := n, len(rbuf); g != e {
        t.Error(g, e)
        return
    }

    if g, e := rbuf, []byte{2, 5, 11}; !bytes.Equal(g, e) {
        t.Error(g, e)
    }

    rbuf = make([]byte, 3)
    if n, err = inner.ReadAt(rbuf, 0); err != nil && n == 0 {
        t.Error(err)
        return
    }

    if g, e := n, len(rbuf); g != e {
        t.Error(g, e)
        return
    }

    if g, e := rbuf, []byte{2, 5, 11}; !bytes.Equal(g, e) {
        t.Error(g, e)
    }

    // Repeat the round trip at a far offset.
    b = []byte{22, 55, 111}
    if n, err = inner.WriteAt(b, LONG_OFF); err != nil {
        t.Error(err)
        return
    }

    if g, e := n, len(b); g != e {
        t.Error(g, e)
        return
    }

    osz, err = outer.Size()
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := osz, int64(HDR_SIZE+LONG_OFF+3); g != e {
        t.Error(g, e)
        return
    }

    isz, err = inner.Size()
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := isz, int64(LONG_OFF+3); g != e {
        t.Error(g, e)
        return
    }

    rbuf = make([]byte, 3)
    if n, err = outer.ReadAt(rbuf, HDR_SIZE+LONG_OFF); err != nil && n == 0 {
        t.Error(err)
        return
    }

    if g, e := n, len(rbuf); g != e {
        t.Error(g, e)
        return
    }

    if g, e := rbuf, []byte{22, 55, 111}; !bytes.Equal(g, e) {
        t.Error(g, e)
    }

    rbuf = make([]byte, 3)
    if n, err = inner.ReadAt(rbuf, LONG_OFF); err != nil && n == 0 {
        t.Error(err)
        return
    }

    if g, e := n, len(rbuf); g != e {
        t.Error(g, e)
        return
    }

    if g, e := rbuf, []byte{22, 55, 111}; !bytes.Equal(g, e) {
        t.Error(g, e)
        return
    }

    // Truncating the inner view must shrink both views consistently.
    if err = inner.Truncate(1); err != nil {
        t.Error(err)
        return
    }

    isz, err = inner.Size()
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := isz, int64(1); g != e {
        t.Error(g, e)
        return
    }

    osz, err = outer.Size()
    if err != nil {
        t.Error(err)
        return
    }

    if g, e := osz, int64(HDR_SIZE+1); g != e {
        t.Error(g, e)
        return
    }
}
func TestFileReadAtHole(t *testing.T) {
testFileReadAtHole(t, newFileFiler)
testFileReadAtHole(t, newOSFileFiler)
testFileReadAtHole(t, newMemFiler)
testFileReadAtHole(t, nwBitFiler)
testFileReadAtHole(t, newRollbackFiler)
}
// testFileReadAtHole writes a single byte far past the start of an empty
// Filer and then reads from the untouched gap, verifying that reading a
// "hole" succeeds and returns the full requested length.
func testFileReadAtHole(t *testing.T, nf newFunc) {
	filer := nf()
	t.Log(filer.Name())
	defer func() {
		if err := filer.Close(); err != nil {
			t.Error(err)
		}
	}()

	// A RollbackFiler needs an open update transaction around any writes.
	if _, ok := filer.(*RollbackFiler); ok {
		if err := filer.BeginUpdate(); err != nil {
			t.Fatal(err)
		}

		defer func() {
			if err := filer.EndUpdate(); err != nil {
				t.Error(err)
			}
		}()
	}

	n, err := filer.WriteAt([]byte{1}, 40000)
	if err != nil {
		t.Error(err)
		return
	}

	if n != 1 {
		t.Error(n)
		return
	}

	// Read entirely inside the hole preceding the written byte.
	if n, err = filer.ReadAt(make([]byte, 1000), 20000); err != nil {
		t.Error(err)
		return
	}

	if n != 1000 {
		t.Error(n)
		return
	}
}
// BenchmarkMemFilerWrSeq measures sequential WriteAt throughput of the
// in-memory Filer, cycling the write offset through filerTotalSize.
func BenchmarkMemFilerWrSeq(b *testing.B) {
	b.StopTimer()
	chunk := make([]byte, filerTestChunkSize)
	for i := range chunk {
		chunk[i] = byte(rand.Int())
	}
	f := newMemFiler()
	runtime.GC()
	b.StartTimer()
	var off int64
	for i := 0; i < b.N; i++ {
		if _, err := f.WriteAt(chunk, off); err != nil {
			b.Fatal(err)
		}

		off = (off + filerTestChunkSize) % filerTotalSize
	}
}
// BenchmarkMemFilerRdSeq measures sequential ReadAt throughput of the
// in-memory Filer after first populating it with b.N random chunks.
func BenchmarkMemFilerRdSeq(b *testing.B) {
	b.StopTimer()
	chunk := make([]byte, filerTestChunkSize)
	for i := range chunk {
		chunk[i] = byte(rand.Int())
	}
	f := newMemFiler()

	// Populate outside the timed region.
	var off int64
	for i := 0; i < b.N; i++ {
		if _, err := f.WriteAt(chunk, off); err != nil {
			b.Fatal(err)
		}

		off = (off + filerTestChunkSize) % filerTotalSize
	}
	runtime.GC()
	b.StartTimer()
	off = 0
	for i := 0; i < b.N; i++ {
		n, err := f.ReadAt(chunk, off)
		if err != nil && n == 0 {
			b.Fatal(err)
		}

		off = (off + filerTestChunkSize) % filerTotalSize
	}
}
// BenchmarkMemFilerWrRand measures random-offset WriteAt performance of the
// in-memory Filer, using a fixed-seed RNG so offsets and sizes are
// reproducible across runs.
func BenchmarkMemFilerWrRand(b *testing.B) {
	b.StopTimer()
	rng := rand.New(rand.NewSource(42))
	f := newMemFiler()
	var total int64
	var offsets, sizes []int
	for i := 0; i < b.N; i++ {
		offsets = append(offsets, rng.Intn(1<<31-1))
		sizes = append(sizes, rng.Intn(1<<31-1)%(2*pgSize))
	}
	data := make([]byte, 2*pgSize)
	for i := range data {
		data[i] = byte(rng.Int())
	}
	runtime.GC()
	b.StartTimer()
	for i, off := range offsets {
		n := sizes[i]
		total += int64(n)
		// Write errors are intentionally ignored: this is a pure
		// throughput measurement (as in the original benchmark).
		f.WriteAt(data[:n], int64(off))
	}
	b.StopTimer()
}
// BenchmarkMemFilerRdRand measures random-offset ReadAt performance of the
// in-memory Filer after seeding it with fixed-seed random writes.
func BenchmarkMemFilerRdRand(b *testing.B) {
	b.StopTimer()
	rng := rand.New(rand.NewSource(42))
	f := newMemFiler()
	var total int64
	var offsets, sizes []int
	for i := 0; i < b.N; i++ {
		offsets = append(offsets, rng.Intn(1<<31-1))
		sizes = append(sizes, rng.Intn(1<<31-1)%(2*pgSize))
	}
	data := make([]byte, 2*pgSize)
	for i := range data {
		data[i] = byte(rng.Int())
	}

	// Populate outside the timed region.
	for i, off := range offsets {
		n := sizes[i]
		total += int64(n)
		f.WriteAt(data[:n], int64(off))
	}
	runtime.GC()
	b.StartTimer()
	for _, off := range offsets {
		f.ReadAt(data, int64(off))
	}
	b.StopTimer()
}

View File

@@ -1,812 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Utilities to encode/decode and collate Go predeclared scalar types (and the
// typeless nil and []byte). The encoding format is a variation of the one
// used by the "encoding/gob" package.
package lldb
import (
"bytes"
"fmt"
"math"
"github.com/cznic/mathutil"
)
// Tag bytes of the gob-like scalar encoding. The first byte of every
// encoded item is one of these tags; bytes >= gbInt0 directly encode the
// small non-negative integer (value - gbInt0) in the tag itself.
const (
	gbNull     = iota // 0x00
	gbFalse           // 0x01
	gbTrue            // 0x02
	gbFloat0          // 0x03
	gbFloat1          // 0x04
	gbFloat2          // 0x05
	gbFloat3          // 0x06
	gbFloat4          // 0x07
	gbFloat5          // 0x08
	gbFloat6          // 0x09
	gbFloat7          // 0x0a
	gbFloat8          // 0x0b
	gbComplex0        // 0x0c
	gbComplex1        // 0x0d
	gbComplex2        // 0x0e
	gbComplex3        // 0x0f
	gbComplex4        // 0x10
	gbComplex5        // 0x11
	gbComplex6        // 0x12
	gbComplex7        // 0x13
	gbComplex8        // 0x14
	gbBytes00         // 0x15
	gbBytes01         // 0x16
	gbBytes02         // 0x17
	gbBytes03         // 0x18
	gbBytes04         // 0x19
	gbBytes05         // 0x1a
	gbBytes06         // 0x1b
	gbBytes07         // 0x1c
	gbBytes08         // 0x1d
	gbBytes09         // 0x1e
	gbBytes10         // 0x1f
	gbBytes11         // 0x20
	gbBytes12         // 0x21
	gbBytes13         // 0x22
	gbBytes14         // 0x23
	gbBytes15         // 0x24
	gbBytes16         // 0x25
	gbBytes17         // 0x26
	gbBytes1          // 0x27
	gbBytes2          // 0x28: Offset by one to allow 64kB sized []byte.
	gbString00        // 0x29
	gbString01        // 0x2a
	gbString02        // 0x2b
	gbString03        // 0x2c
	gbString04        // 0x2d
	gbString05        // 0x2e
	gbString06        // 0x2f
	gbString07        // 0x30
	gbString08        // 0x31
	gbString09        // 0x32
	gbString10        // 0x33
	gbString11        // 0x34
	gbString12        // 0x35
	gbString13        // 0x36
	gbString14        // 0x37
	gbString15        // 0x38
	gbString16        // 0x39
	gbString17        // 0x3a
	gbString1         // 0x3b
	gbString2         // 0x3c
	gbUintP1          // 0x3d
	gbUintP2          // 0x3e
	gbUintP3          // 0x3f
	gbUintP4          // 0x40
	gbUintP5          // 0x41
	gbUintP6          // 0x42
	gbUintP7          // 0x43
	gbUintP8          // 0x44
	gbIntM8           // 0x45
	gbIntM7           // 0x46
	gbIntM6           // 0x47
	gbIntM5           // 0x48
	gbIntM4           // 0x49
	gbIntM3           // 0x4a
	gbIntM2           // 0x4b
	gbIntM1           // 0x4c
	gbIntP1           // 0x4d
	gbIntP2           // 0x4e
	gbIntP3           // 0x4f
	gbIntP4           // 0x50
	gbIntP5           // 0x51
	gbIntP6           // 0x52
	gbIntP7           // 0x53
	gbIntP8           // 0x54
	gbInt0            // 0x55

	// Largest integer representable directly in a tag byte (0xff == 170).
	gbIntMax = 255 - gbInt0 // 0xff == 170
)
// EncodeScalars encodes a vector of predeclared scalar type values to a
// []byte, making it suitable to store it as a "record" in a DB or to use it as
// a key of a BTree.
//
// Returns an ErrINVAL for unsupported types and an error for strings longer
// than 65535 bytes or []byte longer than 65536 bytes.
func EncodeScalars(scalars ...interface{}) (b []byte, err error) {
	for _, scalar := range scalars {
		switch x := scalar.(type) {
		default:
			return nil, &ErrINVAL{"EncodeScalars: unsupported type", fmt.Sprintf("%T in `%#v`", x, scalars)}

		case nil:
			b = append(b, gbNull)

		case bool:
			switch x {
			case false:
				b = append(b, gbFalse)
			case true:
				b = append(b, gbTrue)
			}

		case float32:
			encFloat(float64(x), &b)
		case float64:
			encFloat(x, &b)

		case complex64:
			encComplex(complex128(x), &b)
		case complex128:
			encComplex(x, &b)

		case string:
			n := len(x)
			if n <= 17 {
				// Short strings: length folded into the tag byte.
				b = append(b, byte(gbString00+n))
				b = append(b, []byte(x)...)
				break
			}

			// gbString2 stores the length verbatim in two bytes, so the
			// maximum is 65535 (the message previously claimed 65536).
			if n > 65535 {
				return nil, fmt.Errorf("EncodeScalars: cannot encode string of length %d (limit 65535)", n)
			}

			pref := byte(gbString1)
			if n > 255 {
				pref++
			}
			b = append(b, pref)
			encUint0(uint64(n), &b)
			b = append(b, []byte(x)...)

		case int8:
			encInt(int64(x), &b)
		case int16:
			encInt(int64(x), &b)
		case int32:
			encInt(int64(x), &b)
		case int64:
			encInt(x, &b)
		case int:
			encInt(int64(x), &b)

		case uint8:
			encUint(uint64(x), &b)
		case uint16:
			encUint(uint64(x), &b)
		case uint32:
			encUint(uint64(x), &b)
		case uint64:
			encUint(x, &b)
		case uint:
			encUint(uint64(x), &b)

		case []byte:
			n := len(x)
			if n <= 17 {
				// Short slices: length folded into the tag byte.
				b = append(b, byte(gbBytes00+n))
				b = append(b, []byte(x)...)
				break
			}

			// BUG FIX: this check read `n > 655356` (a typo), letting
			// over-long slices corrupt the two-byte gbBytes2 length
			// encoding below. gbBytes2 stores n-1, so the true limit,
			// as documented on the gbBytes2 tag, is 65536.
			if n > 65536 {
				return nil, fmt.Errorf("EncodeScalars: cannot encode []byte of length %d (limit 65536)", n)
			}

			pref := byte(gbBytes1)
			if n > 255 {
				pref++
			}
			b = append(b, pref)
			if n <= 255 {
				b = append(b, byte(n))
			} else {
				n-- // offset by one so 65536 fits in two bytes
				b = append(b, byte(n>>8), byte(n))
			}
			b = append(b, x...)
		}
	}
	return
}
// encComplex appends a complex128 to *b as two consecutive floats, real
// part first, each carrying its own gbComplex0-based tag.
func encComplex(f complex128, b *[]byte) {
	encFloatPrefix(gbComplex0, real(f), b)
	encFloatPrefix(gbComplex0, imag(f), b)
}
// encFloatPrefix appends a float64 encoded relative to the given tag
// prefix. The IEEE-754 bits of f are byte-reversed first, so trailing zero
// bytes (common in ordinary values) become leading zeros and can be
// dropped from the encoding.
func encFloatPrefix(prefix byte, f float64, b *[]byte) {
	raw := math.Float64bits(f)
	var rev uint64
	for i := 0; i < 8; i++ {
		rev = rev<<8 | raw&0xFF
		raw >>= 8
	}
	bits := mathutil.BitLenUint64(rev)
	if bits == 0 {
		// All-zero bit pattern: the bare prefix alone encodes the value.
		*b = append(*b, prefix)
		return
	}

	// Bump the prefix by the payload byte count (1..8):
	// 0 1 2 3 4 5 6 7 8 9
	// . 1 1 1 1 1 1 1 1 2
	encUintPrefix(prefix+1+byte((bits-1)>>3), rev, b)
}
// encFloat appends a float64 to *b using the gbFloat0..gbFloat8 tag family.
func encFloat(f float64, b *[]byte) {
	encFloatPrefix(gbFloat0, f, b)
}
// encUint0 appends the big-endian bytes of n to *b using the minimal byte
// count; zero still emits a single 0x00 byte.
func encUint0(n uint64, b *[]byte) {
	// Count payload bytes (1..8) by finding the highest non-zero byte.
	size := 1
	for v := n; v > math.MaxUint8; v >>= 8 {
		size++
	}
	for i := size - 1; i >= 0; i-- {
		*b = append(*b, byte(n>>uint(8*i)))
	}
}
// encUintPrefix appends prefix followed by the minimal big-endian bytes of n.
func encUintPrefix(prefix byte, n uint64, b *[]byte) {
	*b = append(*b, prefix)
	encUint0(n, b)
}
// encUint appends n with a gbUintP1..gbUintP8 tag selecting the payload
// byte count (at least one byte, even for n == 0).
func encUint(n uint64, b *[]byte) {
	bits := mathutil.Max(1, mathutil.BitLenUint64(n))
	encUintPrefix(gbUintP1+byte((bits-1)>>3), n, b)
}
// encInt appends the minimal encoding of a signed integer. Negative values
// use gbIntM1..gbIntM8 tags with the low two's-complement bytes as payload;
// values 0..gbIntMax are packed directly into the tag byte (gbInt0+n);
// larger positive values use gbIntP1..gbIntP8 with big-endian payload.
func encInt(n int64, b *[]byte) {
	switch {
	case n < -0x100000000000000:
		*b = append(*b, byte(gbIntM8), byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x1000000000000:
		*b = append(*b, byte(gbIntM7), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x10000000000:
		*b = append(*b, byte(gbIntM6), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x100000000:
		*b = append(*b, byte(gbIntM5), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x1000000:
		*b = append(*b, byte(gbIntM4), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x10000:
		*b = append(*b, byte(gbIntM3), byte(n>>16), byte(n>>8), byte(n))
	case n < -0x100:
		*b = append(*b, byte(gbIntM2), byte(n>>8), byte(n))
	case n < 0:
		*b = append(*b, byte(gbIntM1), byte(n))
	case n <= gbIntMax:
		// Small non-negative integers fit entirely in the tag byte.
		*b = append(*b, byte(gbInt0+n))
	case n <= 0xff:
		*b = append(*b, gbIntP1, byte(n))
	case n <= 0xffff:
		*b = append(*b, gbIntP2, byte(n>>8), byte(n))
	case n <= 0xffffff:
		*b = append(*b, gbIntP3, byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffff:
		*b = append(*b, gbIntP4, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffff:
		*b = append(*b, gbIntP5, byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffffff:
		*b = append(*b, gbIntP6, byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0xffffffffffffff:
		*b = append(*b, gbIntP7, byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	case n <= 0x7fffffffffffffff:
		*b = append(*b, gbIntP8, byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
	}
}
// decodeFloat rebuilds a float64 from the 0..8 payload bytes produced by
// encFloatPrefix: byte i of a len-L buffer lands at bit position
// (i+8-L)*8 of the un-reversed IEEE-754 bit pattern.
func decodeFloat(b []byte) float64 {
	var u uint64
	shift := uint(8-len(b)) * 8
	for _, v := range b {
		u |= uint64(v) << shift
		shift += 8
	}
	return math.Float64frombits(u)
}
// DecodeScalars decodes a []byte produced by EncodeScalars. Malformed
// input yields an ErrDecodeScalars recording the offset of the failure.
//
// BUG FIX: the gbFloat*/gbComplex* length checks were off by one
// (`len(b) < n-1` instead of `len(b) < n`), so a buffer truncated by one
// byte slipped past the check and the b[1:n] slice below panicked instead
// of returning the documented corruption error.
func DecodeScalars(b []byte) (scalars []interface{}, err error) {
	b0 := b
	for len(b) != 0 {
		switch tag := b[0]; tag {
		//default:
		//return nil, fmt.Errorf("tag %d(%#x) not supported", b[0], b[0])
		case gbNull:
			scalars = append(scalars, nil)
			b = b[1:]
		case gbFalse:
			scalars = append(scalars, false)
			b = b[1:]
		case gbTrue:
			scalars = append(scalars, true)
			b = b[1:]
		case gbFloat0:
			scalars = append(scalars, 0.0)
			b = b[1:]
		case gbFloat1, gbFloat2, gbFloat3, gbFloat4, gbFloat5, gbFloat6, gbFloat7, gbFloat8:
			// n = total bytes consumed: tag byte + (n-1) payload bytes.
			n := 1 + int(tag) - gbFloat0
			if len(b) < n {
				goto corrupted
			}

			scalars = append(scalars, decodeFloat(b[1:n]))
			b = b[n:]
		case gbComplex0, gbComplex1, gbComplex2, gbComplex3, gbComplex4, gbComplex5, gbComplex6, gbComplex7, gbComplex8:
			// Real part.
			n := 1 + int(tag) - gbComplex0
			if len(b) < n {
				goto corrupted
			}

			re := decodeFloat(b[1:n])
			b = b[n:]
			// Imaginary part must follow with its own gbComplex* tag.
			if len(b) == 0 {
				goto corrupted
			}

			tag = b[0]
			if tag < gbComplex0 || tag > gbComplex8 {
				goto corrupted
			}

			n = 1 + int(tag) - gbComplex0
			if len(b) < n {
				goto corrupted
			}

			scalars = append(scalars, complex(re, decodeFloat(b[1:n])))
			b = b[n:]
		case gbBytes00, gbBytes01, gbBytes02, gbBytes03, gbBytes04,
			gbBytes05, gbBytes06, gbBytes07, gbBytes08, gbBytes09,
			gbBytes10, gbBytes11, gbBytes12, gbBytes13, gbBytes14,
			gbBytes15, gbBytes16, gbBytes17:
			// Short []byte: length is encoded in the tag itself.
			n := int(tag - gbBytes00)
			if len(b) < n+1 {
				goto corrupted
			}

			scalars = append(scalars, append([]byte(nil), b[1:n+1]...))
			b = b[n+1:]
		case gbBytes1:
			if len(b) < 2 {
				goto corrupted
			}

			n := int(b[1])
			b = b[2:]
			if len(b) < n {
				goto corrupted
			}

			scalars = append(scalars, append([]byte(nil), b[:n]...))
			b = b[n:]
		case gbBytes2:
			if len(b) < 3 {
				goto corrupted
			}

			// Stored length is offset by one so 64kB fits in two bytes.
			// (`|` and `+` share precedence in Go; parenthesized for clarity,
			// the parse is unchanged.)
			n := (int(b[1])<<8 | int(b[2])) + 1
			b = b[3:]
			if len(b) < n {
				goto corrupted
			}

			scalars = append(scalars, append([]byte(nil), b[:n]...))
			b = b[n:]
		case gbString00, gbString01, gbString02, gbString03, gbString04,
			gbString05, gbString06, gbString07, gbString08, gbString09,
			gbString10, gbString11, gbString12, gbString13, gbString14,
			gbString15, gbString16, gbString17:
			// Short string: length is encoded in the tag itself.
			n := int(tag - gbString00)
			if len(b) < n+1 {
				goto corrupted
			}

			scalars = append(scalars, string(b[1:n+1]))
			b = b[n+1:]
		case gbString1:
			if len(b) < 2 {
				goto corrupted
			}

			n := int(b[1])
			b = b[2:]
			if len(b) < n {
				goto corrupted
			}

			scalars = append(scalars, string(b[:n]))
			b = b[n:]
		case gbString2:
			if len(b) < 3 {
				goto corrupted
			}

			n := int(b[1])<<8 | int(b[2])
			b = b[3:]
			if len(b) < n {
				goto corrupted
			}

			scalars = append(scalars, string(b[:n]))
			b = b[n:]
		case gbUintP1, gbUintP2, gbUintP3, gbUintP4, gbUintP5, gbUintP6, gbUintP7, gbUintP8:
			b = b[1:]
			n := 1 + int(tag) - gbUintP1
			if len(b) < n {
				goto corrupted
			}

			var u uint64
			for _, v := range b[:n] {
				u = u<<8 | uint64(v)
			}
			scalars = append(scalars, u)
			b = b[n:]
		case gbIntM8, gbIntM7, gbIntM6, gbIntM5, gbIntM4, gbIntM3, gbIntM2, gbIntM1:
			b = b[1:]
			n := 8 - (int(tag) - gbIntM8)
			if len(b) < n {
				goto corrupted
			}

			// Start from all-ones to sign-extend the truncated negative.
			u := uint64(math.MaxUint64)
			for _, v := range b[:n] {
				u = u<<8 | uint64(v)
			}
			scalars = append(scalars, int64(u))
			b = b[n:]
		case gbIntP1, gbIntP2, gbIntP3, gbIntP4, gbIntP5, gbIntP6, gbIntP7, gbIntP8:
			b = b[1:]
			n := 1 + int(tag) - gbIntP1
			if len(b) < n {
				goto corrupted
			}

			i := int64(0)
			for _, v := range b[:n] {
				i = i<<8 | int64(v)
			}
			scalars = append(scalars, i)
			b = b[n:]
		default:
			// Tags >= gbInt0 encode the small integer (tag - gbInt0) directly.
			scalars = append(scalars, int64(b[0])-gbInt0)
			b = b[1:]
		}
	}
	return append([]interface{}(nil), scalars...), nil

corrupted:
	return nil, &ErrDecodeScalars{append([]byte(nil), b0...), len(b0) - len(b)}
}
// collateComplex orders two complex numbers by real part first, then by
// imaginary part, returning -1, 0 or +1. When any comparison involves NaN
// all branches fail and the function falls through to +1, exactly as the
// cascaded comparisons dictate.
func collateComplex(x, y complex128) int {
	rx, ry := real(x), real(y)
	if rx < ry {
		return -1
	}

	if rx == ry {
		ix, iy := imag(x), imag(y)
		if ix < iy {
			return -1
		}
		if ix == iy {
			return 0
		}
	}

	return 1
}
// collateFloat orders two float64 values, returning -1, 0 or +1. If either
// operand is NaN both comparisons fail and the result is +1.
func collateFloat(x, y float64) int {
	if x < y {
		return -1
	}
	if x == y {
		return 0
	}
	return 1
}
// collateInt orders two int64 values, returning -1, 0 or +1.
func collateInt(x, y int64) int {
	if x == y {
		return 0
	}
	if x < y {
		return -1
	}
	return 1
}
// collateUint orders two uint64 values, returning -1, 0 or +1.
func collateUint(x, y uint64) int {
	if x == y {
		return 0
	}
	if x < y {
		return -1
	}
	return 1
}
// collateIntUint compares an int64 with a uint64. Any uint64 above
// math.MaxInt64 is strictly greater than every int64; otherwise y fits in
// an int64 and the signed comparison applies.
func collateIntUint(x int64, y uint64) int {
	if y > math.MaxInt64 {
		return -1
	}

	return collateInt(x, int64(y))
}
// collateUintInt compares a uint64 with an int64 by delegating to
// collateIntUint with swapped operands and a negated result.
func collateUintInt(x uint64, y int64) int {
	return -collateIntUint(y, x)
}
// collateType widens a scalar to its canonical collation type: signed
// integers become int64, unsigned integers become uint64, float32 becomes
// float64 and complex64 becomes complex128. The canonical types (plus nil,
// bool, []byte and string) pass through unchanged; anything else is
// rejected with an error.
func collateType(i interface{}) (r interface{}, err error) {
	switch x := i.(type) {
	case nil, bool, int64, uint64, float64, complex128, []byte, string:
		// Already canonical.
		return i, nil
	case int8:
		return int64(x), nil
	case int16:
		return int64(x), nil
	case int32:
		return int64(x), nil
	case int:
		return int64(x), nil
	case uint8:
		return uint64(x), nil
	case uint16:
		return uint64(x), nil
	case uint32:
		return uint64(x), nil
	case uint:
		return uint64(x), nil
	case float32:
		return float64(x), nil
	case complex64:
		return complex128(x), nil
	default:
		return nil, fmt.Errorf("invalid collate type %T", x)
	}
}
// Collate collates two arrays of Go predeclared scalar types (and the typeless
// nil or []byte). If any other type appears in x or y, Collate will return a
// non nil error. String items are collated using strCollate or lexically
// byte-wise (as when using Go comparison operators) when strCollate is nil.
// []byte items are collated using bytes.Compare.
//
// Collate returns:
//
//	-1 if x < y
//	0 if x == y
//	+1 if x > y
//
// The same value as defined above must be returned from strCollate.
//
// The "outer" ordering is: nil, bool, number, []byte, string. IOW, nil is
// "smaller" than anything else except other nil, numbers collate before
// []byte, []byte collate before strings, etc.
//
// Integers and real numbers collate as expected in math. However, complex
// numbers are not ordered in Go. Here the ordering is defined: Complex numbers
// are in comparison considered first only by their real part. Iff the result
// is equality then the imaginary part is used to determine the ordering. In
// this "second order" comparing, integers and real numbers are considered as
// complex numbers with a zero imaginary part.
func Collate(x, y []interface{}, strCollate func(string, string) int) (r int, err error) {
	nx, ny := len(x), len(y)

	// An empty operand collates before any non-empty one.
	switch {
	case nx == 0 && ny != 0:
		return -1, nil
	case nx == 0 && ny == 0:
		return 0, nil
	case nx != 0 && ny == 0:
		return 1, nil
	}

	// Walk the shorter operand; r flips the sign of every result to
	// compensate when the operands are swapped to make x the shorter one.
	r = 1
	if nx > ny {
		x, y, r = y, x, -r
	}

	var c int
	for i, xi0 := range x {
		yi0 := y[i]
		// Widen both items to their canonical collation types first.
		xi, err := collateType(xi0)
		if err != nil {
			return 0, err
		}

		yi, err := collateType(yi0)
		if err != nil {
			return 0, err
		}

		switch x := xi.(type) {
		default:
			// collateType guarantees only the canonical types below.
			panic(fmt.Errorf("internal error: %T", x))

		case nil:
			switch yi.(type) {
			case nil:
				// nop
			default:
				return -r, nil
			}
		case bool:
			switch y := yi.(type) {
			case nil:
				return r, nil
			case bool:
				switch {
				case !x && y:
					return -r, nil
				case x == y:
					// nop
				case x && !y:
					return r, nil
				}
			default:
				return -r, nil
			}
		case int64:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateInt(x, y)
			case uint64:
				c = collateIntUint(x, y)
			case float64:
				c = collateFloat(float64(x), y)
			case complex128:
				c = collateComplex(complex(float64(x), 0), y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}

			if c != 0 {
				return c * r, nil
			}
		case uint64:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateUintInt(x, y)
			case uint64:
				c = collateUint(x, y)
			case float64:
				c = collateFloat(float64(x), y)
			case complex128:
				c = collateComplex(complex(float64(x), 0), y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}

			if c != 0 {
				return c * r, nil
			}
		case float64:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateFloat(x, float64(y))
			case uint64:
				c = collateFloat(x, float64(y))
			case float64:
				c = collateFloat(x, y)
			case complex128:
				c = collateComplex(complex(x, 0), y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}

			if c != 0 {
				return c * r, nil
			}
		case complex128:
			switch y := yi.(type) {
			case nil, bool:
				return r, nil
			case int64:
				c = collateComplex(x, complex(float64(y), 0))
			case uint64:
				c = collateComplex(x, complex(float64(y), 0))
			case float64:
				c = collateComplex(x, complex(y, 0))
			case complex128:
				c = collateComplex(x, y)
			case []byte:
				return -r, nil
			case string:
				return -r, nil
			}

			if c != 0 {
				return c * r, nil
			}
		case []byte:
			switch y := yi.(type) {
			case nil, bool, int64, uint64, float64, complex128:
				return r, nil
			case []byte:
				c = bytes.Compare(x, y)
			case string:
				return -r, nil
			}

			if c != 0 {
				return c * r, nil
			}
		case string:
			switch y := yi.(type) {
			case nil, bool, int64, uint64, float64, complex128:
				return r, nil
			case []byte:
				return r, nil
			case string:
				switch {
				case strCollate != nil:
					c = strCollate(x, y)
				case x < y:
					return -r, nil
				case x == y:
					// c persists across loop iterations; reset explicitly.
					c = 0
				case x > y:
					return r, nil
				}
			}

			if c != 0 {
				return c * r, nil
			}
		}
	}

	// The common prefix is equal; the shorter operand collates first.
	if nx == ny {
		return 0, nil
	}

	return -r, nil
}

View File

@@ -1,364 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Utilities to encode/decode and collate Go predeclared scalar types. The
// encoding format reuses the one used by the "encoding/gob" package.
package lldb
import (
"bytes"
"math"
"testing"
)
// s256 is a 256-byte string (sixteen repetitions of the 16 hex digits)
// used to exercise the 255/256-byte length boundaries of the string and
// []byte encodings.
const s256 = "" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef" +
	"0123456789abcdef"
// TestEncodeDecodeScalars round-trips every scalar kind through
// EncodeScalars/DecodeScalars and pins the exact wire bytes produced for
// representative values, including the width and length boundary cases.
func TestEncodeDecodeScalars(t *testing.T) {
	table := []struct{ v, exp interface{} }{
		{nil, "00"},
		{false, "01"},
		{true, "02"},
		{math.Float64frombits(0), []byte{gbFloat0}},
		{17., []byte{gbFloat2, 0x31, 0x40}},
		{math.Float64frombits(0x4031320000000000), []byte{gbFloat3, 0x32, 0x31, 0x40}},
		{math.Float64frombits(0x4031323300000000), []byte{gbFloat4, 0x33, 0x32, 0x31, 0x40}},
		{math.Float64frombits(0x4031323334000000), []byte{gbFloat5, 0x34, 0x33, 0x32, 0x31, 0x40}},
		{math.Float64frombits(0x4031323334350000), []byte{gbFloat6, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
		{math.Float64frombits(0x4031323334353600), []byte{gbFloat7, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
		{math.Float64frombits(0x4031323334353637), []byte{gbFloat8, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
		{0 + 0i, []byte{gbComplex0, gbComplex0}},
		{17 + 17i, []byte{gbComplex2, 0x31, 0x40, gbComplex2, 0x31, 0x40}},
		{complex(math.Float64frombits(0x4041420000000000), math.Float64frombits(0x4031320000000000)), []byte{gbComplex3, 0x42, 0x41, 0x40, gbComplex3, 0x32, 0x31, 0x40}},
		{complex(math.Float64frombits(0x4041424300000000), math.Float64frombits(0x4031323300000000)), []byte{gbComplex4, 0x43, 0x42, 0x41, 0x40, gbComplex4, 0x33, 0x32, 0x31, 0x40}},
		{complex(math.Float64frombits(0x4041424344000000), math.Float64frombits(0x4031323334000000)), []byte{gbComplex5, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex5, 0x34, 0x33, 0x32, 0x31, 0x40}},
		{complex(math.Float64frombits(0x4041424344450000), math.Float64frombits(0x4031323334350000)), []byte{gbComplex6, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex6, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
		{complex(math.Float64frombits(0x4041424344454600), math.Float64frombits(0x4031323334353600)), []byte{gbComplex7, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex7, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
		{complex(math.Float64frombits(0x4041424344454647), math.Float64frombits(0x4031323334353637)), []byte{gbComplex8, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, gbComplex8, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x40}},
		{[]byte(""), []byte{gbBytes00}},
		{[]byte("f"), []byte{gbBytes01, 'f'}},
		{[]byte("fo"), []byte{gbBytes02, 'f', 'o'}},
		{[]byte("0123456789abcdefx"), []byte{gbBytes17, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x'}},
		{[]byte("0123456789abcdefxy"), []byte{gbBytes1, 18, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x', 'y'}},
		{[]byte(s256[:255]), append([]byte{gbBytes1, 0xff}, []byte(s256[:255])...)},
		{[]byte(s256), append([]byte{gbBytes2, 0x00, 0xff}, []byte(s256)...)},
		{"", []byte{gbString00}},
		{"f", []byte{gbString01, 'f'}},
		{"fo", []byte{gbString02, 'f', 'o'}},
		{"0123456789abcdefx", []byte{gbString17, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x'}},
		{"0123456789abcdefxy", []byte{gbString1, 18, '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'x', 'y'}},
		{s256[:255], append([]byte{gbString1, 0xff}, []byte(s256[:255])...)},
		{s256, append([]byte{gbString2, 0x01, 0x00}, []byte(s256)...)},
		{uint64(0xff), []byte{gbUintP1, 255}},
		{uint64(0xffff), []byte{gbUintP2, 255, 255}},
		{uint64(0xffffff), []byte{gbUintP3, 255, 255, 255}},
		{uint64(0xffffffff), []byte{gbUintP4, 255, 255, 255, 255}},
		{uint64(0xffffffffff), []byte{gbUintP5, 255, 255, 255, 255, 255}},
		{uint64(0xffffffffffff), []byte{gbUintP6, 255, 255, 255, 255, 255, 255}},
		{uint64(0xffffffffffffff), []byte{gbUintP7, 255, 255, 255, 255, 255, 255, 255}},
		{uint64(0xffffffffffffffff), []byte{gbUintP8, 255, 255, 255, 255, 255, 255, 255, 255}},
		{int64(math.MinInt64), []byte{gbIntM8, 128, 0, 0, 0, 0, 0, 0, 0}},
		{-int64(0x100000000000000), []byte{gbIntM7, 0, 0, 0, 0, 0, 0, 0}},
		{-int64(0x1000000000000), []byte{gbIntM6, 0, 0, 0, 0, 0, 0}},
		{-int64(0x10000000000), []byte{gbIntM5, 0, 0, 0, 0, 0}},
		{-int64(0x100000000), []byte{gbIntM4, 0, 0, 0, 0}},
		{-int64(0x1000000), []byte{gbIntM3, 0, 0, 0}},
		{-int64(0x10000), []byte{gbIntM2, 0, 0}},
		{-int64(0x100), []byte{gbIntM1, 0}},
		{-int64(0xff), []byte{gbIntM1, 1}},
		{-int64(1), []byte{gbIntM1, 255}},
		{int64(gbIntMax + 1), []byte{gbIntP1, gbIntMax + 1}},
		{int64(0xff), []byte{gbIntP1, 255}},
		{int64(0xffff), []byte{gbIntP2, 255, 255}},
		{int64(0xffffff), []byte{gbIntP3, 255, 255, 255}},
		{int64(0xffffffff), []byte{gbIntP4, 255, 255, 255, 255}},
		{int64(0xffffffffff), []byte{gbIntP5, 255, 255, 255, 255, 255}},
		{int64(0xffffffffffff), []byte{gbIntP6, 255, 255, 255, 255, 255, 255}},
		{int64(0xffffffffffffff), []byte{gbIntP7, 255, 255, 255, 255, 255, 255, 255}},
		{int64(0x7fffffffffffffff), []byte{gbIntP8, 127, 255, 255, 255, 255, 255, 255, 255}},
		{int64(0), []byte{0 + gbInt0}},
		{int64(1), []byte{1 + gbInt0}},
		{int64(2), []byte{2 + gbInt0}},
		{int64(gbIntMax - 2), "fd"},
		{int64(gbIntMax - 1), "fe"},
		{int64(gbIntMax), "ff"},
	}

	for i, v := range table {
		g, err := EncodeScalars(v.v)
		if err != nil {
			t.Fatal(i, err)
		}

		// exp is either a hex string (converted via s2b) or literal bytes.
		var e []byte
		switch x := v.exp.(type) {
		case string:
			e = s2b(x)
		case []byte:
			e = x
		}

		if !bytes.Equal(g, e) {
			t.Fatalf("%d %v\n|% 02x|\n|% 02x|", i, v.v, g, e)
		}

		t.Logf("%#v |% 02x|", v.v, g)
		dec, err := DecodeScalars(g)
		if err != nil {
			t.Fatal(err)
		}

		if g, e := len(dec), 1; g != e {
			t.Fatalf("%d %d %#v", g, e, dec)
		}

		// []byte round-trips need bytes.Equal; other types compare with ==.
		if g, ok := dec[0].([]byte); ok {
			if e := v.v.([]byte); !bytes.Equal(g, e) {
				t.Fatal(g, e)
			}

			continue
		}

		if g, e := dec[0], v.v; g != e {
			t.Fatal(g, e)
		}
	}
}
// strcmp is a three-way string comparison: -1 if a < b, 0 if a == b, and
// +1 otherwise.
func strcmp(a, b string) (r int) {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	default:
		return 0
	}
}
// TestCollateScalars drives Collate over an ordered table: every (x, y)
// pair is expected to collate as x < y. Each pair is also checked in
// reverse order, with extra leading items prepended, and against its own
// proper prefixes, for both the nil and the strcmp string collators.
func TestCollateScalars(t *testing.T) {
	// all cases must return -1
	table := []struct{ x, y []interface{} }{
		{[]interface{}{}, []interface{}{1}},
		{[]interface{}{1}, []interface{}{2}},
		{[]interface{}{1, 2}, []interface{}{2, 3}},
		{[]interface{}{nil}, []interface{}{nil, true}},
		{[]interface{}{nil}, []interface{}{false}},
		{[]interface{}{nil}, []interface{}{nil, 1}},
		{[]interface{}{nil}, []interface{}{1}},
		{[]interface{}{nil}, []interface{}{nil, uint(1)}},
		{[]interface{}{nil}, []interface{}{uint(1)}},
		{[]interface{}{nil}, []interface{}{nil, 3.14}},
		{[]interface{}{nil}, []interface{}{3.14}},
		{[]interface{}{nil}, []interface{}{nil, 3.14 + 1i}},
		{[]interface{}{nil}, []interface{}{3.14 + 1i}},
		{[]interface{}{nil}, []interface{}{nil, []byte("foo")}},
		{[]interface{}{nil}, []interface{}{[]byte("foo")}},
		{[]interface{}{nil}, []interface{}{nil, "foo"}},
		{[]interface{}{nil}, []interface{}{"foo"}},
		{[]interface{}{false}, []interface{}{false, false}},
		{[]interface{}{false}, []interface{}{false, true}},
		{[]interface{}{false}, []interface{}{true}},
		{[]interface{}{false}, []interface{}{false, 1}},
		{[]interface{}{false}, []interface{}{1}},
		{[]interface{}{false}, []interface{}{false, uint(1)}},
		{[]interface{}{false}, []interface{}{uint(1)}},
		{[]interface{}{false}, []interface{}{false, 1.5}},
		{[]interface{}{false}, []interface{}{1.5}},
		{[]interface{}{false}, []interface{}{false, 1.5 + 3i}},
		{[]interface{}{false}, []interface{}{1.5 + 3i}},
		{[]interface{}{false}, []interface{}{false, []byte("foo")}},
		{[]interface{}{false}, []interface{}{[]byte("foo")}},
		{[]interface{}{false}, []interface{}{false, "foo"}},
		{[]interface{}{false}, []interface{}{"foo"}},
		{[]interface{}{1}, []interface{}{1, 2}},
		{[]interface{}{1}, []interface{}{1, 1}},
		{[]interface{}{1}, []interface{}{1, uint(2)}},
		{[]interface{}{1}, []interface{}{uint(2)}},
		{[]interface{}{1}, []interface{}{1, 1.1}},
		{[]interface{}{1}, []interface{}{1.1}},
		{[]interface{}{1}, []interface{}{1, 1.1 + 2i}},
		{[]interface{}{1}, []interface{}{1.1 + 2i}},
		{[]interface{}{1}, []interface{}{1, []byte("foo")}},
		{[]interface{}{1}, []interface{}{[]byte("foo")}},
		{[]interface{}{1}, []interface{}{1, "foo"}},
		{[]interface{}{1}, []interface{}{"foo"}},
		{[]interface{}{uint(1)}, []interface{}{uint(1), uint(1)}},
		{[]interface{}{uint(1)}, []interface{}{uint(2)}},
		{[]interface{}{uint(1)}, []interface{}{uint(1), 2.}},
		{[]interface{}{uint(1)}, []interface{}{2.}},
		{[]interface{}{uint(1)}, []interface{}{uint(1), 2. + 0i}},
		{[]interface{}{uint(1)}, []interface{}{2. + 0i}},
		{[]interface{}{uint(1)}, []interface{}{uint(1), []byte("foo")}},
		{[]interface{}{uint(1)}, []interface{}{[]byte("foo")}},
		{[]interface{}{uint(1)}, []interface{}{uint(1), "foo"}},
		{[]interface{}{uint(1)}, []interface{}{"foo"}},
		{[]interface{}{1.}, []interface{}{1., 1}},
		{[]interface{}{1.}, []interface{}{2}},
		{[]interface{}{1.}, []interface{}{1., uint(1)}},
		{[]interface{}{1.}, []interface{}{uint(2)}},
		{[]interface{}{1.}, []interface{}{1., 1.}},
		{[]interface{}{1.}, []interface{}{1.1}},
		{[]interface{}{1.}, []interface{}{1., []byte("foo")}},
		{[]interface{}{1.}, []interface{}{[]byte("foo")}},
		{[]interface{}{1.}, []interface{}{1., "foo"}},
		{[]interface{}{1.}, []interface{}{"foo"}},
		{[]interface{}{1 + 2i}, []interface{}{1 + 2i, 1}},
		{[]interface{}{1 + 2i}, []interface{}{2}},
		{[]interface{}{1 + 2i}, []interface{}{1 + 2i, uint(1)}},
		{[]interface{}{1 + 2i}, []interface{}{uint(2)}},
		{[]interface{}{1 + 2i}, []interface{}{1 + 2i, 1.1}},
		{[]interface{}{1 + 2i}, []interface{}{1.1}},
		{[]interface{}{1 + 2i}, []interface{}{1 + 2i, []byte("foo")}},
		{[]interface{}{1 + 2i}, []interface{}{[]byte("foo")}},
		{[]interface{}{1 + 2i}, []interface{}{1 + 2i, "foo"}},
		{[]interface{}{1 + 2i}, []interface{}{"foo"}},
		{[]interface{}{[]byte("bar")}, []interface{}{[]byte("bar"), []byte("bar")}},
		{[]interface{}{[]byte("bar")}, []interface{}{[]byte("foo")}},
		{[]interface{}{[]byte("bar")}, []interface{}{[]byte("c")}},
		{[]interface{}{[]byte("bar")}, []interface{}{[]byte("bas")}},
		{[]interface{}{[]byte("bar")}, []interface{}{[]byte("bara")}},
		{[]interface{}{[]byte("bar")}, []interface{}{"bap"}},
		{[]interface{}{[]byte("bar")}, []interface{}{"bar"}},
		{[]interface{}{[]byte("bar")}, []interface{}{"bas"}},
		{[]interface{}{"bar"}, []interface{}{"bar", "bar"}},
		{[]interface{}{"bar"}, []interface{}{"foo"}},
		{[]interface{}{"bar"}, []interface{}{"c"}},
		{[]interface{}{"bar"}, []interface{}{"bas"}},
		{[]interface{}{"bar"}, []interface{}{"bara"}},
		{[]interface{}{1 + 2i}, []interface{}{1 + 3i}},
		{[]interface{}{int64(math.MaxInt64)}, []interface{}{uint64(math.MaxInt64 + 1)}},
		{[]interface{}{int8(1)}, []interface{}{int16(2)}},
		{[]interface{}{int32(1)}, []interface{}{uint8(2)}},
		{[]interface{}{uint16(1)}, []interface{}{uint32(2)}},
		{[]interface{}{float32(1)}, []interface{}{complex(float32(2), 0)}},
		// resolved bugs
		{[]interface{}{"Customer"}, []interface{}{"Date"}},
		{[]interface{}{"Customer"}, []interface{}{"Items", 1, "Quantity"}},
	}

	more := []interface{}{42, nil, 1, uint(2), 3.0, 4 + 5i, "..."}

	collate := func(x, y []interface{}, strCollate func(string, string) int) (r int) {
		var err error
		r, err = Collate(x, y, strCollate)
		if err != nil {
			t.Fatal(err)
		}

		return
	}

	for _, scf := range []func(string, string) int{nil, strcmp} {
		for _, prefix := range more {
			for i, test := range table {
				var x, y []interface{}
				// 42 is the sentinel meaning "no prefix".
				if prefix != 42 {
					x = append(x, prefix)
					y = append(y, prefix)
				}
				x = append(x, test.x...)
				y = append(y, test.y...)

				// cmp(x, y) == -1
				if g, e := collate(x, y, scf), -1; g != e {
					t.Fatal(i, g, e, x, y)
				}

				// cmp(y, x) == 1
				if g, e := collate(y, x, scf), 1; g != e {
					t.Fatal(i, g, e, y, x)
				}

				// Every operand equals itself and exceeds its proper prefixes.
				src := x
				for ix := len(src) - 1; ix > 0; ix-- {
					if g, e := collate(src[:ix], src[:ix], scf), 0; g != e {
						t.Fatal(ix, g, e)
					}

					if g, e := collate(src[:ix], src, scf), -1; g != e {
						t.Fatal(ix, g, e)
					}
				}

				src = y
				for ix := len(src) - 1; ix > 0; ix-- {
					if g, e := collate(src[:ix], src[:ix], scf), 0; g != e {
						t.Fatal(ix, g, e)
					}

					if g, e := collate(src[:ix], src, scf), -1; g != e {
						t.Fatal(ix, g, e)
					}
				}
			}
		}
	}
}
// TestEncodingBug round-trips float64 values whose bit patterns walk a
// growing block of 1-bits down from the top bit (0, 1<<63, 0xC000...,
// ... all ones), verifying EncodeScalars/DecodeScalars preserve the bits.
func TestEncodingBug(t *testing.T) {
	bits := uint64(0)
	for i := 0; i <= 64; i++ {
		encoded, err := EncodeScalars(math.Float64frombits(bits))
		if err != nil {
			t.Fatal(err)
		}

		t.Logf("bits %016x, enc |% x|", bits, encoded)
		decoded, err := DecodeScalars(encoded)
		if err != nil {
			t.Fatal(err)
		}

		if g, e := len(decoded), 1; g != e {
			t.Fatal(g, e)
		}

		f, ok := decoded[0].(float64)
		if !ok {
			// Was t.Fatal(err): err is always nil here, so the failure
			// carried no information. Report the unexpected type instead.
			t.Fatalf("expected float64, got %T", decoded[0])
		}

		if g, e := math.Float64bits(f), bits; g != e {
			// Was t.Fatal(err) with a guaranteed-nil err; report the bits.
			t.Fatalf("bits mismatch: got %016x, want %016x", g, e)
		}

		t.Log(f)
		// Shift the window down and refill the top bit.
		bits >>= 1
		bits |= 1 << 63
	}
}

View File

@@ -1,155 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package lldb (WIP) implements a low level database engine. The database
// model used could be considered a specific implementation of some small(est)
// intersection of models listed in [1]. As a settled term is lacking, it'll be
// called here a 'Virtual memory model' (VMM).
//
// Experimental release notes
//
// This is an experimental release. Don't open a DB from two applications or
// two instances of an application - it will get corrupted (no file locking is
// implemented and this task is delegated to lldb's clients).
//
// WARNING: THE LLDB API IS SUBJECT TO CHANGE.
//
// Filers
//
// A Filer is an abstraction of storage. A Filer may be a part of some process'
// virtual address space, an OS file, a networked, remote file etc. Persistence
// of the storage is optional, opaque to VMM and it is specific to a concrete
// Filer implementation.
//
// Space management
//
// Mechanism to allocate, reallocate (resize), deallocate (and later reclaim
// the unused) contiguous parts of a Filer, called blocks. Blocks are
// identified and referred to by a handle, an int64.
//
// BTrees
//
// In addition to the VMM like services, lldb provides volatile and
// non-volatile BTrees. Keys and values of a BTree are limited in size to 64kB
// each (a bit more actually). Support for larger keys/values, if desired, can
// be built atop a BTree to certain limits.
//
// Handles vs pointers
//
// A handle is the abstracted storage counterpart of a memory address. There
// is one fundamental difference, though. Resizing a block never results in a
// change to the handle which refers to the resized block, so a handle is more
// akin to a unique numeric id/key. Yet it shares one property of pointers -
// handles can be associated again with blocks after the original handle block
// was deallocated. In other words, a handle uniqueness domain is the state of
// the database and is not something comparable to e.g. an ever growing
// numbering sequence.
//
// Also, as with memory pointers, dangling handles can be created and blocks
// overwritten when such handles are used. Using a zero handle to refer to a
// block will not panic; however, the resulting error is effectively the same
// exceptional situation as dereferencing a nil pointer.
//
// Blocks
//
// Allocated/used blocks, are limited in size to only a little bit more than
// 64kB. Bigger semantic entities/structures must be built in lldb's client
// code. The content of a block has no semantics attached, it's only a fully
// opaque `[]byte`.
//
// Scalars
//
// Use of "scalars" applies to EncodeScalars, DecodeScalars and Collate. Those
// first two "to bytes" and "from bytes" functions are suggested for handling
// multi-valued Allocator content items and/or keys/values of BTrees (using
// Collate for keys). Types called "scalar" are:
//
// nil (the typeless one)
// bool
// all integral types: [u]int8, [u]int16, [u]int32, [u]int, [u]int64
// all floating point types: float32, float64
// all complex types: complex64, complex128
// []byte (64kB max)
//	string (64kB max)
//
// Specific implementations
//
// Included are concrete implementations of some of the VMM interfaces included
// to ease serving simple client code or for testing and possibly as an
// example. More details in the documentation of such implementations.
//
// [1]: http://en.wikipedia.org/wiki/Database_model
package lldb
// On-disk format parameters. One atom is 16 bytes (atomLen); block sizes are
// expressed in atoms (see n2atoms). Content longer than maxShort needs an
// extra 2-byte length field ("long" form; see n2atoms/n2padding and n2m/m2n).
const (
	fltSz = 0x70 // size of the FLT
	maxShort = 251 // max content length representable by a short block
	maxRq = 65787 // max content length of a single block (see n2m's comment)
	maxFLTRq = 4112 // n2atoms(maxRq): max request size in atoms — TODO confirm FLT role
	maxHandle = 1<<56 - 1 // handles are stored in 7 bytes (see b2h/h2b)
	atomLen = 16 // bytes per atom
	tagUsedLong = 0xfc // block tag: used, long form — presumably first byte of block
	tagUsedRelocated = 0xfd // block tag: used, relocated
	tagFreeShort = 0xfe // block tag: free, short form
	tagFreeLong = 0xff // block tag: free, long form
	tagNotCompressed = 0 // content tag: stored verbatim
	tagCompressed = 1 // content tag: stored compressed
)
// n2atoms converts a content size n (bytes) to the size of its block in
// atoms (16-byte units), including the 1-byte block header and, for long
// blocks (n > maxShort), the 2-byte length field.
func n2atoms(n int) int {
	size := n
	if size > maxShort {
		size += 2 // long form carries a 2-byte length field
	}
	return (size+1)/16 + 1
}
// n2padding reports how many zero bytes pad out the block holding content of
// size n to a whole number of atoms (long blocks include their 2-byte length
// field in the computation).
func n2padding(n int) int {
	size := n
	if size > maxShort {
		size += 2 // long form carries a 2-byte length field
	}
	return 15 - (size+1)&15
}
// h2off maps a block handle to its byte offset in the filer; the six atoms
// preceding handle 0 are occupied by the fixed header area.
func h2off(h int64) int64 { return (h + 6) << 4 }

// off2h is the inverse of h2off for atom-aligned offsets.
func off2h(off int64) int64 {
	return off/16 - 6
}
// b2h decodes a big-endian 7-byte handle from the start of b; bytes past the
// seventh are ignored.
func b2h(b []byte) int64 {
	var h int64
	for i := 0; i < 7; i++ {
		h = h<<8 | int64(b[i])
	}
	return h
}
// h2b encodes the low 56 bits of h big-endian into the first 7 bytes of b
// and returns b for convenience.
func h2b(b []byte, h int64) []byte {
	for i := 6; i >= 0; i-- {
		b[i] = byte(h)
		h >>= 8
	}
	return b
}
// n2m maps a long used block's content length N (must be in [252, 65787]) to
// its on-disk M field, which wraps modulo 64Ki; m2n undoes the wrap.
func n2m(n int) int {
	return n % (1 << 16)
}
// m2n maps a long used block's on-disk M field (must be in [0, 65535]) back
// to the content length N, undoing the modulo-64Ki wrap applied by n2m:
// values at or below maxShort can only have come from a wrapped length.
func m2n(m int) int {
	if m > maxShort {
		return m
	}
	return m + 0x10000
}
// bpack returns a trimmed to its exact length: if a carries spare capacity
// it is cloned into a right-sized slice, otherwise a is returned unchanged.
func bpack(a []byte) []byte {
	if len(a) == cap(a) {
		return a
	}
	return append([]byte(nil), a...)
}

View File

@@ -1,217 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lldb
import (
"bytes"
"encoding/hex"
"fmt"
"path"
"runtime"
"strings"
"testing"
)
// dbg prints a formatted debug message prefixed with the immediate caller's
// file name and line number.
var dbg = func(s string, va ...interface{}) {
	_, fn, fl, _ := runtime.Caller(1)
	fmt.Printf("%s:%d: ", path.Base(fn), fl)
	fmt.Printf(s+"\n", va...)
}

// use silences "declared but not used" compile errors for values kept around
// only while debugging.
func use(...interface{}) {}
// TestN2Atoms checks the content-size -> atom-count mapping against
// hand-computed values clustered around the short/long block boundary
// (n = 251/252) and the maximum request size.
func TestN2Atoms(t *testing.T) {
	cases := []struct{ n, a int }{
		{0, 1}, {1, 1}, {2, 1}, {3, 1}, {4, 1}, {5, 1}, {6, 1}, {7, 1},
		{8, 1}, {9, 1}, {10, 1}, {11, 1}, {12, 1}, {13, 1}, {14, 1},
		{15, 2}, {16, 2}, {17, 2}, {18, 2}, {19, 2}, {20, 2}, {21, 2},
		{22, 2}, {23, 2}, {24, 2}, {25, 2}, {26, 2}, {27, 2}, {28, 2},
		{29, 2}, {30, 2}, {31, 3},
		{252, 16}, {253, 17}, {254, 17}, {255, 17}, {256, 17}, {257, 17},
		{258, 17}, {259, 17}, {260, 17}, {261, 17}, {262, 17}, {263, 17},
		{264, 17}, {265, 17}, {266, 17}, {267, 17}, {268, 17}, {269, 18},
		{65532, 4096}, {65533, 4097}, {65787, 4112},
	}
	for i, tc := range cases {
		if g, e := n2atoms(tc.n), tc.a; g != e {
			t.Errorf("(%d) %d %d %d", i, tc.n, g, e)
		}
	}
}
// TestN2Padding checks the content-size -> padding-bytes mapping against
// hand-computed values around the atom boundaries and the short/long block
// threshold (n = 251/252).
func TestN2Padding(t *testing.T) {
	cases := []struct{ n, p int }{
		{0, 14}, {1, 13}, {2, 12}, {3, 11}, {4, 10}, {5, 9}, {6, 8}, {7, 7},
		{8, 6}, {9, 5}, {10, 4}, {11, 3}, {12, 2}, {13, 1}, {14, 0},
		{15, 15}, {16, 14}, {17, 13}, {18, 12}, {19, 11}, {20, 10}, {21, 9},
		{22, 8}, {23, 7}, {24, 6}, {25, 5}, {26, 4}, {27, 3}, {28, 2},
		{29, 1}, {30, 0}, {31, 15},
		{252, 0}, {253, 15}, {254, 14}, {255, 13}, {256, 12}, {257, 11},
		{258, 10}, {259, 9}, {260, 8}, {261, 7}, {262, 6}, {263, 5},
		{264, 4}, {265, 3}, {266, 2}, {267, 1}, {268, 0}, {269, 15},
	}
	for i, tc := range cases {
		if g, e := n2padding(tc.n), tc.p; g != e {
			t.Errorf("(%d) %d %d %d", i, tc.n, g, e)
		}
	}
}
// TestH2Off exercises the handle <-> offset mapping in both directions
// around the start of the handle space (fltSz is the fixed header size).
func TestH2Off(t *testing.T) {
	cases := []struct{ h, off int64 }{
		{-1, fltSz - 32},
		{0, fltSz - 16},
		{1, fltSz + 0},
		{2, fltSz + 16},
		{3, fltSz + 32},
	}
	for i, tc := range cases {
		if got, want := h2off(tc.h), tc.off; got != want {
			t.Error("h2off", i, got, want)
		}
		if got, want := off2h(tc.off), tc.h; got != want {
			t.Error("off2h", i, got, want)
		}
	}
}
// TestB2H round-trips 7-byte big-endian handle encodings through b2h and
// h2b, including one input longer than 7 bytes (the extra byte is ignored).
func TestB2H(t *testing.T) {
	cases := []struct {
		b []byte
		h int64
	}{
		{[]byte{0, 0, 0, 0, 0, 0, 0}, 0},
		{[]byte{0, 0, 0, 0, 0, 0, 1}, 1},
		{[]byte{0, 0, 0, 0, 0, 0, 1, 2}, 1},
		{[]byte{0, 0, 0, 0, 0, 0x32, 0x10}, 0x3210},
		{[]byte{0, 0, 0, 0, 0x54, 0x32, 0x10}, 0x543210},
		{[]byte{0, 0, 0, 0x76, 0x54, 0x32, 0x10}, 0x76543210},
		{[]byte{0, 0, 0x98, 0x76, 0x54, 0x32, 0x10}, 0x9876543210},
		{[]byte{0, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}, 0xba9876543210},
		{[]byte{0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10}, 0xdcba9876543210},
	}
	for i, tc := range cases {
		if got, want := b2h(tc.b), tc.h; got != want {
			t.Errorf("b2h: %d %#8x %#8x", i, got, want)
		}
		var buf [7]byte
		h2b(buf[:], tc.h)
		if want := tc.b; !bytes.Equal(buf[:], want[:7]) {
			t.Errorf("b2h: %d g: % 0x e: % 0x", i, buf, want)
		}
	}
}
// s2b decodes a hex string into bytes; spaces are allowed (and stripped) for
// readability. The empty string yields nil. Malformed input panics, which is
// acceptable in this test helper.
func s2b(s string) []byte {
	if len(s) == 0 {
		return nil
	}
	clean := strings.Replace(s, " ", "", -1)
	if n := len(clean) & 1; n != 0 {
		panic(n)
	}
	decoded, err := hex.DecodeString(clean)
	if err != nil {
		panic(err)
	}
	return decoded
}

View File

@@ -1,344 +0,0 @@
// Copyright 2014 The lldb Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// A memory-only implementation of Filer.
/*
pgBits: 8
BenchmarkMemFilerWrSeq 100000 19430 ns/op 1646.93 MB/s
BenchmarkMemFilerRdSeq 100000 17390 ns/op 1840.13 MB/s
BenchmarkMemFilerWrRand 1000000 1903 ns/op 133.94 MB/s
BenchmarkMemFilerRdRand 1000000 1153 ns/op 221.16 MB/s
pgBits: 9
BenchmarkMemFilerWrSeq 100000 16195 ns/op 1975.80 MB/s
BenchmarkMemFilerRdSeq 200000 13011 ns/op 2459.39 MB/s
BenchmarkMemFilerWrRand 1000000 2248 ns/op 227.28 MB/s
BenchmarkMemFilerRdRand 1000000 1177 ns/op 433.94 MB/s
pgBits: 10
BenchmarkMemFilerWrSeq 100000 16169 ns/op 1979.04 MB/s
BenchmarkMemFilerRdSeq 200000 12673 ns/op 2524.91 MB/s
BenchmarkMemFilerWrRand 1000000 5550 ns/op 184.30 MB/s
BenchmarkMemFilerRdRand 1000000 1699 ns/op 601.79 MB/s
pgBits: 11
BenchmarkMemFilerWrSeq 100000 13449 ns/op 2379.31 MB/s
BenchmarkMemFilerRdSeq 200000 12058 ns/op 2653.80 MB/s
BenchmarkMemFilerWrRand 500000 4335 ns/op 471.47 MB/s
BenchmarkMemFilerRdRand 1000000 2843 ns/op 719.47 MB/s
pgBits: 12
BenchmarkMemFilerWrSeq 200000 11976 ns/op 2672.00 MB/s
BenchmarkMemFilerRdSeq 200000 12255 ns/op 2611.06 MB/s
BenchmarkMemFilerWrRand 200000 8058 ns/op 507.14 MB/s
BenchmarkMemFilerRdRand 500000 4365 ns/op 936.15 MB/s
pgBits: 13
BenchmarkMemFilerWrSeq 200000 10852 ns/op 2948.69 MB/s
BenchmarkMemFilerRdSeq 200000 11561 ns/op 2767.77 MB/s
BenchmarkMemFilerWrRand 200000 9748 ns/op 840.15 MB/s
BenchmarkMemFilerRdRand 500000 7236 ns/op 1131.59 MB/s
pgBits: 14
BenchmarkMemFilerWrSeq 200000 10328 ns/op 3098.12 MB/s
BenchmarkMemFilerRdSeq 200000 11292 ns/op 2833.66 MB/s
BenchmarkMemFilerWrRand 100000 16768 ns/op 978.75 MB/s
BenchmarkMemFilerRdRand 200000 13033 ns/op 1258.43 MB/s
pgBits: 15
BenchmarkMemFilerWrSeq 200000 10309 ns/op 3103.93 MB/s
BenchmarkMemFilerRdSeq 200000 11126 ns/op 2876.12 MB/s
BenchmarkMemFilerWrRand 50000 31985 ns/op 1021.74 MB/s
BenchmarkMemFilerRdRand 100000 25217 ns/op 1297.65 MB/s
pgBits: 16
BenchmarkMemFilerWrSeq 200000 10324 ns/op 3099.45 MB/s
BenchmarkMemFilerRdSeq 200000 11201 ns/op 2856.80 MB/s
BenchmarkMemFilerWrRand 20000 55226 ns/op 1184.76 MB/s
BenchmarkMemFilerRdRand 50000 48316 ns/op 1355.16 MB/s
pgBits: 17
BenchmarkMemFilerWrSeq 200000 10377 ns/op 3083.53 MB/s
BenchmarkMemFilerRdSeq 200000 11018 ns/op 2904.18 MB/s
BenchmarkMemFilerWrRand 10000 143425 ns/op 913.12 MB/s
BenchmarkMemFilerRdRand 20000 95267 ns/op 1376.99 MB/s
pgBits: 18
BenchmarkMemFilerWrSeq 200000 10312 ns/op 3102.96 MB/s
BenchmarkMemFilerRdSeq 200000 11069 ns/op 2890.84 MB/s
BenchmarkMemFilerWrRand 5000 280910 ns/op 934.14 MB/s
BenchmarkMemFilerRdRand 10000 188500 ns/op 1388.17 MB/s
*/
package lldb
import (
"bytes"
"fmt"
"io"
"github.com/cznic/fileutil"
"github.com/cznic/mathutil"
)
// Paging parameters: MemFiler stores content in fixed-size pages keyed by
// page index.
const (
	pgBits = 16 // log2 of the page size
	pgSize = 1 << pgBits // page size in bytes (64 KiB)
	pgMask = pgSize - 1 // mask extracting the in-page byte offset
)
var _ Filer = &MemFiler{} // Ensure MemFiler is a Filer.

// memFilerMap is a sparse page table: page index -> page. Pages absent from
// the map read as all zeros (see ReadAt), so holes cost no memory.
type memFilerMap map[int64]*[pgSize]byte

// MemFiler is a memory backed Filer. It implements BeginUpdate, EndUpdate and
// Rollback as no-ops. MemFiler is not automatically persistent, but it has
// ReadFrom and WriteTo methods.
type MemFiler struct {
	m memFilerMap // sparse pages; missing entries are implicitly zero
	nest int // BeginUpdate/EndUpdate nesting depth
	size int64 // logical size in bytes; may exceed allocated pages
}
// NewMemFiler returns a new, empty MemFiler ready for use.
func NewMemFiler() *MemFiler {
	return &MemFiler{m: make(memFilerMap)}
}
// BeginUpdate implements Filer. For MemFiler it only tracks the update
// nesting depth; no undo state is recorded (see Rollback, a no-op).
func (f *MemFiler) BeginUpdate() error {
	f.nest++
	return nil
}
// Close implements Filer. Closing while a structural update is still open
// (unbalanced BeginUpdate) is refused with ErrPERM.
func (f *MemFiler) Close() error {
	if f.nest == 0 {
		return nil
	}
	return &ErrPERM{(f.Name() + ":Close")}
}
// EndUpdate implements Filer. It must pair with an earlier BeginUpdate; an
// unbalanced call is refused with ErrPERM.
func (f *MemFiler) EndUpdate() error {
	if f.nest == 0 {
		return &ErrPERM{(f.Name() + ": EndUpdate")}
	}
	f.nest--
	return nil
}
// Name implements Filer. The name is synthesized from the receiver's address,
// so it is unique per MemFiler instance but not stable across runs.
func (f *MemFiler) Name() string {
	return fmt.Sprintf("%p.memfiler", f)
}
// PunchHole implements Filer: it releases storage for the byte range
// [off, off+size) by deleting every page lying entirely inside the range
// (deleted pages read back as zeros). Pages only partially covered at either
// end are kept untouched, since deleting them would zero bytes outside the
// requested range.
func (f *MemFiler) PunchHole(off, size int64) (err error) {
	if off < 0 {
		return &ErrINVAL{f.Name() + ": PunchHole off", off}
	}
	if size < 0 || off+size > f.size {
		return &ErrINVAL{f.Name() + ": PunchHole size", size}
	}
	first := off >> pgBits
	if off&pgMask != 0 {
		first++ // range starts mid-page: keep the partial first page
	}
	off += size - 1 // off is now the index of the last byte in the range
	last := off >> pgBits
	if off&pgMask != pgMask {
		// BUG FIX: the last page is fully covered only when the range ends
		// exactly at a page boundary (off&pgMask == pgMask). The previous
		// test (off&pgMask != 0) deleted a partially covered page whenever
		// the range ended at a page's first byte — zeroing live data beyond
		// the hole — and conversely kept pages that were fully covered.
		last--
	}
	if limit := f.size >> pgBits; last > limit {
		last = limit
	}
	for pg := first; pg <= last; pg++ {
		delete(f.m, pg)
	}
	return
}
// zeroPage is the shared read-only backing for pages never written to.
var zeroPage [pgSize]byte

// ReadAt implements Filer. Reads are served page by page; pages missing from
// f.m read from zeroPage. io.EOF is returned (with the bytes that were
// available) when the request reaches or starts at/past the logical end.
func (f *MemFiler) ReadAt(b []byte, off int64) (n int, err error) {
	avail := f.size - off // bytes between off and logical EOF
	pgI := off >> pgBits // page containing off
	pgO := int(off & pgMask) // byte offset within that first page
	rem := len(b)
	if int64(rem) >= avail {
		// Request extends to or past EOF: clamp and flag EOF.
		rem = int(avail)
		err = io.EOF
	}
	for rem != 0 && avail > 0 {
		pg := f.m[pgI]
		if pg == nil {
			pg = &zeroPage // hole: reads as zeros
		}
		nc := copy(b[:mathutil.Min(rem, pgSize)], pg[pgO:])
		pgI++
		pgO = 0 // only the first page is read from a non-zero in-page offset
		rem -= nc
		n += nc
		b = b[nc:]
	}
	return
}
// ReadFrom is a helper to populate MemFiler's content from r. 'n' reports the
// number of bytes read from 'r'. Any previous content is discarded first.
func (f *MemFiler) ReadFrom(r io.Reader) (n int64, err error) {
	if err = f.Truncate(0); err != nil {
		return
	}
	var (
		b [pgSize]byte // page-sized copy buffer
		rn int
		off int64
	)
	var rerr error
	for rerr == nil {
		if rn, rerr = r.Read(b[:]); rn != 0 {
			// MemFiler.WriteAt never returns an error, so it is not checked.
			f.WriteAt(b[:rn], off)
			off += int64(rn)
			n += int64(rn)
		}
	}
	// fileutil.IsEOF distinguishes normal end-of-data from real read errors —
	// NOTE(review): assumes it covers io.EOF; confirm against fileutil docs.
	if !fileutil.IsEOF(rerr) {
		err = rerr
	}
	return
}
// Rollback implements Filer. It is a no-op: MemFiler records no undo state,
// so updates cannot actually be rolled back.
func (f *MemFiler) Rollback() (err error) { return }
// Size implements Filer. It reports the logical size in bytes, which can
// exceed the memory held in pages (holes are not stored).
func (f *MemFiler) Size() (int64, error) {
	return f.size, nil
}
// Sync implements Filer. It is a no-op: the backing store is process memory,
// so there is nothing to flush.
func (f *MemFiler) Sync() error {
	return nil
}
// Truncate implements Filer. Shrinking discards the pages lying entirely
// beyond the new size; growing only extends the logical size (new bytes read
// as zeros via the sparse page map).
func (f *MemFiler) Truncate(size int64) (err error) {
	switch {
	case size < 0:
		return &ErrINVAL{"Truncate size", size}
	case size == 0:
		// Fast path: drop all pages at once.
		f.m = memFilerMap{}
		f.size = 0
		return
	}
	first := size >> pgBits
	if size&pgMask != 0 {
		first++ // keep the page straddling the new end-of-file
	}
	last := f.size >> pgBits
	if f.size&pgMask != 0 {
		last++
	}
	for ; first < last; first++ {
		delete(f.m, first)
	}
	// NOTE(review): when shrinking to a non page-aligned size, bytes beyond
	// `size` within the boundary page are retained; a later growing Truncate
	// will expose them instead of zeros — confirm this is intended.
	f.size = size
	return
}
// WriteAt implements Filer. Writes are applied page by page; a page-aligned,
// full-page run of zeros is represented as a hole (its page entry is deleted)
// instead of allocating storage. The filer grows as needed; err is never set.
func (f *MemFiler) WriteAt(b []byte, off int64) (n int, err error) {
	pgI := off >> pgBits // page containing off
	pgO := int(off & pgMask) // byte offset within that first page
	n = len(b)
	rem := n
	var nc int
	for rem != 0 {
		if pgO == 0 && rem >= pgSize && bytes.Equal(b[:pgSize], zeroPage[:]) {
			// A whole page of zeros: store it as a hole.
			delete(f.m, pgI)
			nc = pgSize
		} else {
			pg := f.m[pgI]
			if pg == nil {
				pg = new([pgSize]byte)
				f.m[pgI] = pg
			}
			nc = copy((*pg)[pgO:], b)
		}
		pgI++
		pgO = 0 // only the first page starts at a non-zero in-page offset
		rem -= nc
		b = b[nc:]
	}
	f.size = mathutil.MaxInt64(f.size, off+int64(n))
	return
}
// WriteTo is a helper to copy/persist MemFiler's content to w. If w is also
// an io.WriterAt then WriteTo may attempt to _not_ write any big, for some
// value of big, runs of zeros, i.e. it will attempt to punch holes, where
// possible, in `w` if that happens to be a freshly created or to zero length
// truncated OS file. 'n' reports the number of bytes written to 'w'.
func (f *MemFiler) WriteTo(w io.Writer) (n int64, err error) {
	var (
		b [pgSize]byte
		wn, rn int
		off int64
		rerr error
	)
	if wa, ok := w.(io.WriterAt); ok {
		// Fast path: seek-capable destination. Only allocated pages are
		// written; missing (all-zero) pages are skipped, leaving holes.
		lastPgI := f.size >> pgBits
		for pgI := int64(0); pgI <= lastPgI; pgI++ {
			sz := pgSize
			if pgI == lastPgI {
				sz = int(f.size & pgMask) // final, possibly partial page
			}
			if pg := f.m[pgI]; pg != nil {
				wn, err = wa.WriteAt(pg[:sz], off)
				if err != nil {
					return
				}
				n += int64(wn)
				if wn != sz {
					return n, io.ErrShortWrite
				}
			}
			// BUG FIX: advance the destination offset unconditionally.
			// Previously off was advanced only for allocated pages, so any
			// page following a hole was written at the wrong offset,
			// silently corrupting the output of sparse filers.
			off += int64(sz)
		}
		return
	}
	// Slow path: sequential writer. Stream the whole content, zeros included.
	var werr error
	for rerr == nil {
		if rn, rerr = f.ReadAt(b[:], off); rn != 0 {
			off += int64(rn)
			if wn, werr = w.Write(b[:rn]); werr != nil {
				return n, werr
			}
			n += int64(wn)
		}
	}
	if !fileutil.IsEOF(rerr) {
		err = rerr
	}
	return
}

Some files were not shown because too many files have changed in this diff Show More