Mirror of https://github.com/containers/skopeo.git, synced 2025-06-24 13:52:27 +00:00
Vendor after merging mtrmac/image:docker-archive-auto-compression
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
This commit is contained in: parent ae64ff7084, commit 1a259b76da
vendor.conf

@@ -1,4 +1,6 @@
github.com/urfave/cli v1.17.0
github.com/kr/pretty v0.1.0
github.com/kr/text v0.1.0
github.com/containers/image master
github.com/opencontainers/go-digest master
gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb

@@ -10,6 +12,7 @@ github.com/davecgh/go-spew master
github.com/pmezard/go-difflib master
github.com/pkg/errors master
golang.org/x/crypto master
github.com/ulikunitz/xz v0.5.4
# docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6

vendor/github.com/containers/image/README.md (4 changes, generated, vendored)

@@ -65,7 +65,9 @@ the primary downside is that creating new signatures with the Golang-only implem
- `containers_image_ostree_stub`: Instead of importing `ostree:` transport in `github.com/containers/image/transports/alltransports`, use a stub which reports that the transport is not supported. This allows building the library without requiring the `libostree` development libraries. The `github.com/containers/image/ostree` package is completely disabled and impossible to import when this build tag is in use.

## Contributing
## [Contributing](CONTRIBUTING.md)**

Information about contributing to this project.

When developing this library, please use `make` (or `make … BUILDTAGS=…`) to take advantage of the tests and validation.

vendor/github.com/containers/image/copy/copy.go (8 changes, generated, vendored)

@@ -347,6 +347,9 @@ func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.Sy

// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
func (ic *imageCopier) updateEmbeddedDockerReference() error {
    if ic.c.dest.IgnoresEmbeddedDockerReference() {
        return nil // Destination would prefer us not to update the embedded reference.
    }
    destRef := ic.c.dest.Reference().DockerReference()
    if destRef == nil {
        return nil // Destination does not care about Docker references

@@ -601,6 +604,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
        if err != nil {
            return "", err
        }
        defer s.Close()
        stream = s
    }

@@ -670,10 +674,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
        inputInfo.Size = -1
    } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
        logrus.Debugf("Blob will be decompressed")
        destStream, err = decompressor(destStream)
        s, err := decompressor(destStream)
        if err != nil {
            return types.BlobInfo{}, err
        }
        defer s.Close()
        destStream = s
        inputInfo.Digest = ""
        inputInfo.Size = -1
    } else {

vendor/github.com/containers/image/directory/directory_dest.go (7 changes, generated, vendored)

@@ -117,6 +117,13 @@ func (d *dirImageDestination) MustMatchRuntimeOS() bool {
    return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
    return false // N/A, DockerReference() returns nil.
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.

vendor/github.com/containers/image/docker/docker_client.go (11 changes, generated, vendored)

@@ -280,13 +280,19 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
    v2Res := &V2Results{}
    v1Res := &V1Results{}

    // Get credentials from authfile for the underlying hostname
    username, password, err := config.GetAuthentication(sys, registry)
    if err != nil {
        return nil, errors.Wrapf(err, "error getting username and password")
    }

    // The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
    // So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
    if registry == dockerHostname {
        registry = dockerV1Hostname
    }

    client, err := newDockerClientWithDetails(sys, registry, "", "", "", nil, "")
    client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
    if err != nil {
        return nil, errors.Wrapf(err, "error creating new docker client")
    }

@@ -443,6 +449,9 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
    }
    authReq = authReq.WithContext(ctx)
    getParams := authReq.URL.Query()
    if c.username != "" {
        getParams.Add("account", c.username)
    }
    if service != "" {
        getParams.Add("service", service)
    }

vendor/github.com/containers/image/docker/docker_image_dest.go (7 changes, generated, vendored)

@@ -95,6 +95,13 @@ func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
    return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *dockerImageDestination) IgnoresEmbeddedDockerReference() bool {
    return false // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match.
}

// sizeCounter is an io.Writer which only counts the total size of its input.
type sizeCounter struct{ size int64 }

vendor/github.com/containers/image/docker/tarfile/dest.go (7 changes, generated, vendored)

@@ -75,6 +75,13 @@ func (d *Destination) MustMatchRuntimeOS() bool {
    return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *Destination) IgnoresEmbeddedDockerReference() bool {
    return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.

vendor/github.com/containers/image/docker/tarfile/src.go (102 changes, generated, vendored)

@@ -3,7 +3,6 @@ package tarfile
import (
    "archive/tar"
    "bytes"
    "compress/gzip"
    "context"
    "encoding/json"
    "io"

@@ -43,8 +42,7 @@ type layerInfo struct {
// the source of an image.
// To do for both the NewSourceFromFile and NewSourceFromStream functions

// NewSourceFromFile returns a tarfile.Source for the specified path
// NewSourceFromFile supports both conpressed and uncompressed input
// NewSourceFromFile returns a tarfile.Source for the specified path.
func NewSourceFromFile(path string) (*Source, error) {
    file, err := os.Open(path)
    if err != nil {

@@ -52,19 +50,24 @@ func NewSourceFromFile(path string) (*Source, error) {
    }
    defer file.Close()

    reader, err := gzip.NewReader(file)
    // If the file is already not compressed we can just return the file itself
    // as a source. Otherwise we pass the stream to NewSourceFromStream.
    stream, isCompressed, err := compression.AutoDecompress(file)
    if err != nil {
        return nil, errors.Wrapf(err, "Error detecting compression for file %q", path)
    }
    defer stream.Close()
    if !isCompressed {
        return &Source{
            tarPath: path,
        }, nil
    }
    defer reader.Close()

    return NewSourceFromStream(reader)
    return NewSourceFromStream(stream)
}

// NewSourceFromStream returns a tarfile.Source for the specified inputStream, which must be uncompressed.
// The caller can close the inputStream immediately after NewSourceFromFile returns.
// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromFile returns.
func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
    // FIXME: use SystemContext here.
    // Save inputStream to a temporary file

@@ -81,8 +84,20 @@ func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
        }
    }()

    // TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
    if _, err := io.Copy(tarCopyFile, inputStream); err != nil {
    // In order to be compatible with docker-load, we need to support
    // auto-decompression (it's also a nice quality-of-life thing to avoid
    // giving users really confusing "invalid tar header" errors).
    uncompressedStream, _, err := compression.AutoDecompress(inputStream)
    if err != nil {
        return nil, errors.Wrap(err, "Error auto-decompressing input")
    }
    defer uncompressedStream.Close()

    // Copy the plain archive to the temporary file.
    //
    // TODO: This can take quite some time, and should ideally be cancellable
    // using a context.Context.
    if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
        return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
    }
    succeeded = true

@@ -292,7 +307,25 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
        return nil, err
    }
    if li, ok := unknownLayerSizes[h.Name]; ok {
        li.size = h.Size
        // Since GetBlob will decompress layers that are compressed we need
        // to do the decompression here as well, otherwise we will
        // incorrectly report the size. Pretty critical, since tools like
        // umoci always compress layer blobs. Obviously we only bother with
        // the slower method of checking if it's compressed.
        uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
        if err != nil {
            return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", h.Name)
        }
        defer uncompressedStream.Close()

        uncompressedSize := h.Size
        if isCompressed {
            uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
            if err != nil {
                return nil, errors.Wrapf(err, "Error reading %s to find its size", h.Name)
            }
        }
        li.size = uncompressedSize
        delete(unknownLayerSizes, h.Name)
    }
}

@@ -346,16 +379,22 @@ func (s *Source) GetManifest(ctx context.Context, instanceDigest *digest.Digest)
    return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
}

type readCloseWrapper struct {
// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed stream and the underlying input.
type uncompressedReadCloser struct {
    io.Reader
    closeFunc func() error
    underlyingCloser   func() error
    uncompressedCloser func() error
}

func (r readCloseWrapper) Close() error {
    if r.closeFunc != nil {
        return r.closeFunc()
func (r uncompressedReadCloser) Close() error {
    var res error
    if err := r.uncompressedCloser(); err != nil {
        res = err
    }
    return nil
    if err := r.underlyingCloser(); err != nil && res == nil {
        res = err
    }
    return res
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).

@@ -369,10 +408,16 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadClose
    }

    if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
        stream, err := s.openTarComponent(li.path)
        underlyingStream, err := s.openTarComponent(li.path)
        if err != nil {
            return nil, 0, err
        }
        closeUnderlyingStream := true
        defer func() {
            if closeUnderlyingStream {
                underlyingStream.Close()
            }
        }()

        // In order to handle the fact that digests != diffIDs (and thus that a
        // caller which is trying to verify the blob will run into problems),

@@ -386,22 +431,17 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadClose
        // be verifing a "digest" which is not the actual layer's digest (but
        // is instead the DiffID).

        decompressFunc, reader, err := compression.DetectCompression(stream)
        uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
        if err != nil {
            return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
            return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
        }

        if decompressFunc != nil {
            reader, err = decompressFunc(reader)
            if err != nil {
                return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
            }
        }

        newStream := readCloseWrapper{
            Reader:    reader,
            closeFunc: stream.Close,
        newStream := uncompressedReadCloser{
            Reader:             uncompressedStream,
            underlyingCloser:   underlyingStream.Close,
            uncompressedCloser: uncompressedStream.Close,
        }
        closeUnderlyingStream = false

        return newStream, li.size, nil
    }

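Taken together, the src.go changes mean a `tarfile.Source` now transparently accepts compressed archives and always hands callers uncompressed layer data. A minimal sketch of a consumer, with a hypothetical path and digest (the `NewSourceFromFile` and `GetBlob` signatures are exactly those shown in the diff above):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/containers/image/docker/tarfile"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

func main() {
	// The archive may now be plain, gzip-, bzip2- or xz-compressed.
	src, err := tarfile.NewSourceFromFile("/tmp/image.tar.gz") // hypothetical path
	if err != nil {
		panic(err)
	}

	// A layer diffID taken from the image config; hypothetical value.
	info := types.BlobInfo{Digest: digest.Digest("sha256:...")}
	blob, size, err := src.GetBlob(context.Background(), info)
	if err != nil {
		panic(err)
	}
	// Close() tears down both the decompressor and the underlying tar entry,
	// mirroring uncompressedReadCloser above.
	defer blob.Close()

	n, _ := io.Copy(ioutil.Discard, blob)
	fmt.Println(size, n) // size already reflects the uncompressed length
}
```
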
vendor/github.com/containers/image/docs/registries.conf.5.md (31 changes, generated, vendored)

@@ -7,35 +7,44 @@ registries.conf - Syntax of System Registry Configuration File

# DESCRIPTION
The REGISTRIES configuration file is a system-wide configuration file for container image
registries. The file format is TOML.
registries. The file format is TOML. The valid categories are: 'registries.search',
'registries.insecure', and 'registries.block'.

# FORMAT
The TOML_format is used to build simple list format for registries under two
categories: `search` and `insecure`. You can list multiple registries using
as a comma separated list.
The TOML_format is used to build a simple list format for registries under three
categories: `registries.search`, `registries.insecure`, and `registries.block`.
You can list multiple registries using a comma separated list.

Search registries are used when the caller of a container runtime does not fully specify the
container image that they want to execute. These registries are prepended onto the front
of the specified container image until the named image is found at a registry.
of the specified container image until the named image is found at a registry.

Insecure Registries. By default container runtimes use TLS when retrieving images
from a registry. If the registry is not setup with TLS, then the container runtime
will fail to pull images from the registry. If you add the registry to the list of
insecure registries then the container runtime will attempt use standard web protocols to
pull the image. It also allows you to pull from a registry with self-signed certificates.
Note insecure registries can be used for any registry, not just the
registries listed under search.
Note insecure registries can be used for any registry, not just the registries listed
under search.

The following example configuration defines two searchable registries and one
insecure registry.
Block Registries. The registries in this category are are not pulled from when
retrieving images.

# EXAMPLE
The following example configuration defines two searchable registries, one
insecure registry, and two blocked registries.

```
[registries.search]
registries = ["registry1.com", "registry2.com"]
registries = ['registry1.com', 'registry2.com']

[registries.insecure]
registries = ["registry3.com"]
registries = ['registry3.com']

[registries.block]
registries = ['registry.untrusted.com', 'registry.unsafe.com']
```

# HISTORY
Aug 2017, Originally compiled by Brent Baude <bbaude@redhat.com>
Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com>

vendor/github.com/containers/image/image/docker_schema1.go (29 changes, generated, vendored)

@@ -2,7 +2,6 @@ package image

import (
    "context"
    "encoding/json"

    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"

@@ -25,8 +24,12 @@ func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
}

// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest {
    return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)}
func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) {
    m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture)
    if err != nil {
        return nil, err
    }
    return &manifestSchema1{m: m}, nil
}

func (m *manifestSchema1) serialize() ([]byte, error) {

@@ -64,7 +67,7 @@ func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, erro
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
    return m.m.LayerInfos()
    return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.

@@ -148,12 +151,12 @@ func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.Manife

// Based on github.com/docker/docker/distribution/pull_v2.go
func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) {
    if len(m.m.History) == 0 {
        // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
    if len(m.m.ExtractedV1Compatibility) == 0 {
        // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
        return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
    }
    if len(m.m.History) != len(m.m.FSLayers) {
        return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers))
    if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) {
        return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers))
    }
    if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
        return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))

@@ -165,14 +168,10 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
    // Build a list of the diffIDs for the non-empty layers.
    diffIDs := []digest.Digest{}
    var layers []manifest.Schema2Descriptor
    for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- {
        v2Index := (len(m.m.History) - 1) - v1Index
    for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- {
        v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index

        var v1compat manifest.Schema1V1Compatibility
        if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil {
            return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
        }
        if !v1compat.ThrowAway {
        if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway {
            var size int64
            if uploadedLayerInfos != nil {
                size = uploadedLayerInfos[v2Index].Size

vendor/github.com/containers/image/image/docker_schema2.go (29 changes, generated, vendored)

@@ -18,17 +18,17 @@ import (
    "github.com/sirupsen/logrus"
)

// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
// in registries, so let’s use the same values.
var gzippedEmptyLayer = []byte{
var GzippedEmptyLayer = []byte{
    31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
    0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}

// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer
const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")

type manifestSchema2 struct {
    src types.ImageSource // May be nil if configBlob is not nil

@@ -95,11 +95,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
    if m.src == nil {
        return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
    }
    stream, _, err := m.src.GetBlob(ctx, types.BlobInfo{
        Digest: m.m.ConfigDescriptor.Digest,
        Size:   m.m.ConfigDescriptor.Size,
        URLs:   m.m.ConfigDescriptor.URLs,
    })
    stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor))
    if err != nil {
        return nil, err
    }

@@ -121,7 +117,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
    return m.m.LayerInfos()
    return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.

@@ -253,16 +249,16 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
    if historyEntry.EmptyLayer {
        if !haveGzippedEmptyLayer {
            logrus.Debugf("Uploading empty layer during conversion to schema 1")
            info, err := dest.PutBlob(ctx, bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))}, false)
            info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false)
            if err != nil {
                return nil, errors.Wrap(err, "Error uploading empty layer")
            }
            if info.Digest != gzippedEmptyLayerDigest {
                return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
            if info.Digest != GzippedEmptyLayerDigest {
                return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, GzippedEmptyLayerDigest)
            }
            haveGzippedEmptyLayer = true
        }
        blobDigest = gzippedEmptyLayerDigest
        blobDigest = GzippedEmptyLayerDigest
    } else {
        if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
            return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))

@@ -308,7 +304,10 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
    }
    history[0].V1Compatibility = string(v1Config)

    m1 := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
    m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
    if err != nil {
        return nil, err // This should never happen, we should have created all the components correctly.
    }
    return memoryImageFromManifest(m1), nil
}

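As a quick check on the newly exported pair, `GzippedEmptyLayerDigest` really is the SHA-256 of `GzippedEmptyLayer`; a minimal sketch using the vendored packages:

```go
package main

import (
	"fmt"

	"github.com/containers/image/image"
	"github.com/opencontainers/go-digest"
)

func main() {
	// Recompute the digest of the canonical gzipped empty ("throwaway") layer.
	d := digest.FromBytes(image.GzippedEmptyLayer)
	fmt.Println(d == image.GzippedEmptyLayerDigest) // prints: true
}
```
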
vendor/github.com/containers/image/image/manifest.go (9 changes, generated, vendored)

@@ -62,3 +62,12 @@ func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src
        return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
    }
}

// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo.
func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo {
    blobs := make([]types.BlobInfo, len(layers))
    for i, layer := range layers {
        blobs[i] = layer.BlobInfo
    }
    return blobs
}

vendor/github.com/containers/image/image/oci.go (8 changes, generated, vendored)

@@ -60,11 +60,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
    if m.src == nil {
        return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
    }
    stream, _, err := m.src.GetBlob(ctx, types.BlobInfo{
        Digest: m.m.Config.Digest,
        Size:   m.m.Config.Size,
        URLs:   m.m.Config.URLs,
    })
    stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config))
    if err != nil {
        return nil, err
    }

@@ -101,7 +97,7 @@ func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error)
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
    return m.m.LayerInfos()
    return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
}

// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.

vendor/github.com/containers/image/manifest/docker_schema1.go (119 changes, generated, vendored)

@@ -25,25 +25,28 @@ type Schema1History struct {

// Schema1 is a manifest in docker/distribution schema 1.
type Schema1 struct {
    Name          string            `json:"name"`
    Tag           string            `json:"tag"`
    Architecture  string            `json:"architecture"`
    FSLayers      []Schema1FSLayers `json:"fsLayers"`
    History       []Schema1History  `json:"history"`
    SchemaVersion int               `json:"schemaVersion"`
    Name                     string                   `json:"name"`
    Tag                      string                   `json:"tag"`
    Architecture             string                   `json:"architecture"`
    FSLayers                 []Schema1FSLayers        `json:"fsLayers"`
    History                  []Schema1History         `json:"history"` // Keep this in sync with ExtractedV1Compatibility!
    ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"`       // Keep this in sync with History! Does not contain the full config (Schema2V1Image)
    SchemaVersion            int                      `json:"schemaVersion"`
}

type schema1V1CompatibilityContainerConfig struct {
    Cmd []string
}

// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
type Schema1V1Compatibility struct {
    ID              string    `json:"id"`
    Parent          string    `json:"parent,omitempty"`
    Comment         string    `json:"comment,omitempty"`
    Created         time.Time `json:"created"`
    ContainerConfig struct {
        Cmd []string
    } `json:"container_config,omitempty"`
    Author    string `json:"author,omitempty"`
    ThrowAway bool   `json:"throwaway,omitempty"`
    ID              string                                `json:"id"`
    Parent          string                                `json:"parent,omitempty"`
    Comment         string                                `json:"comment,omitempty"`
    Created         time.Time                             `json:"created"`
    ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"`
    Author          string                                `json:"author,omitempty"`
    ThrowAway       bool                                  `json:"throwaway,omitempty"`
}

// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.

@@ -57,11 +60,8 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) {
    if s1.SchemaVersion != 1 {
        return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
    }
    if len(s1.FSLayers) != len(s1.History) {
        return nil, errors.New("length of history not equal to number of layers")
    }
    if len(s1.FSLayers) == 0 {
        return nil, errors.New("no FSLayers in manifest")
    if err := s1.initialize(); err != nil {
        return nil, err
    }
    if err := s1.fixManifestLayers(); err != nil {
        return nil, err

@@ -70,7 +70,7 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) {
}

// Schema1FromComponents creates an Schema1 manifest instance from the supplied data.
func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 {
func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
    var name, tag string
    if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
        name = reference.Path(ref)

@@ -78,7 +78,7 @@ func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, hist
            tag = tagged.Tag()
        }
    }
    return &Schema1{
    s1 := Schema1{
        Name:         name,
        Tag:          tag,
        Architecture: architecture,

@@ -86,6 +86,10 @@ func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, hist
        History:       history,
        SchemaVersion: 1,
    }
    if err := s1.initialize(); err != nil {
        return nil, err
    }
    return &s1, nil
}

// Schema1Clone creates a copy of the supplied Schema1 manifest.

@@ -94,31 +98,51 @@ func Schema1Clone(src *Schema1) *Schema1 {
    return &copy
}

// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
func (m *Schema1) initialize() error {
    if len(m.FSLayers) != len(m.History) {
        return errors.New("length of history not equal to number of layers")
    }
    if len(m.FSLayers) == 0 {
        return errors.New("no FSLayers in manifest")
    }
    m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
    for i, h := range m.History {
        if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
            return errors.Wrapf(err, "Error parsing v2s1 history entry %d", i)
        }
    }
    return nil
}

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *Schema1) ConfigInfo() types.BlobInfo {
    return types.BlobInfo{}
}

// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *Schema1) LayerInfos() []types.BlobInfo {
    layers := make([]types.BlobInfo, len(m.FSLayers))
func (m *Schema1) LayerInfos() []LayerInfo {
    layers := make([]LayerInfo, len(m.FSLayers))
    for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
        layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
        layers[(len(m.FSLayers)-1)-i] = LayerInfo{
            BlobInfo:   types.BlobInfo{Digest: layer.BlobSum, Size: -1},
            EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
        }
    }
    return layers
}

// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
    // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
    // Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
    if len(m.FSLayers) != len(layerInfos) {
        return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
    }
    m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
    for i, info := range layerInfos {
        // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
        // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest,
        // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
        // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
        m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest

@@ -144,31 +168,19 @@ func (m *Schema1) Serialize() ([]byte, error) {
// Note that even after this succeeds, m.FSLayers may contain duplicate entries
// (for Dockerfile operations which change the configuration but not the filesystem).
func (m *Schema1) fixManifestLayers() error {
    type imageV1 struct {
        ID     string
        Parent string
    }
    // Per the specification, we can assume that len(m.FSLayers) == len(m.History)
    imgs := make([]*imageV1, len(m.FSLayers))
    for i := range m.FSLayers {
        img := &imageV1{}

        if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
            return err
        }

        imgs[i] = img
        if err := validateV1ID(img.ID); err != nil {
    // m.initialize() has verified that len(m.FSLayers) == len(m.History)
    for _, compat := range m.ExtractedV1Compatibility {
        if err := validateV1ID(compat.ID); err != nil {
            return err
        }
    }
    if imgs[len(imgs)-1].Parent != "" {
    if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" {
        return errors.New("Invalid parent ID in the base layer of the image")
    }
    // check general duplicates to error instead of a deadlock
    idmap := make(map[string]struct{})
    var lastID string
    for _, img := range imgs {
    for _, img := range m.ExtractedV1Compatibility {
        // skip IDs that appear after each other, we handle those later
        if _, exists := idmap[img.ID]; img.ID != lastID && exists {
            return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)

@@ -177,12 +189,13 @@ func (m *Schema1) fixManifestLayers() error {
        idmap[lastID] = struct{}{}
    }
    // backwards loop so that we keep the remaining indexes after removing items
    for i := len(imgs) - 2; i >= 0; i-- {
        if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
    for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- {
        if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue
            m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
            m.History = append(m.History[:i], m.History[i+1:]...)
        } else if imgs[i].Parent != imgs[i+1].ID {
            return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
            m.ExtractedV1Compatibility = append(m.ExtractedV1Compatibility[:i], m.ExtractedV1Compatibility[i+1:]...)
        } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID {
            return errors.Errorf("Invalid parent ID. Expected %v, got %v", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent)
        }
    }
    return nil

@@ -209,7 +222,7 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI
        DockerVersion: s1.DockerVersion,
        Architecture:  s1.Architecture,
        Os:            s1.OS,
        Layers:        LayerInfosToStrings(m.LayerInfos()),
        Layers:        layerInfosToStrings(m.LayerInfos()),
    }
    if s1.Config != nil {
        i.Labels = s1.Config.Labels

@@ -240,11 +253,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
    }
    // Build the history.
    convertedHistory := []Schema2History{}
    for _, h := range m.History {
        compat := Schema1V1Compatibility{}
        if err := json.Unmarshal([]byte(h.V1Compatibility), &compat); err != nil {
            return nil, errors.Wrapf(err, "error decoding history information")
        }
    for _, compat := range m.ExtractedV1Compatibility {
        hitem := Schema2History{
            Created:   compat.Created,
            CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "),

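Because `Schema1FromComponents` is now fallible (its `initialize()` call parses every v1Compatibility entry), callers must handle an error. A sketch of the new calling convention, with hypothetical inputs prepared elsewhere:

```go
package example

import (
	"fmt"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/manifest"
)

// buildSchema1 is a hypothetical helper illustrating the changed signature.
func buildSchema1(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History) (*manifest.Schema1, error) {
	// Malformed v1Compatibility JSON in history now surfaces here,
	// instead of being discovered later during conversion.
	s1, err := manifest.Schema1FromComponents(ref, fsLayers, history, "amd64")
	if err != nil {
		return nil, fmt.Errorf("building schema1 manifest: %v", err)
	}
	return s1, nil
}
```
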
vendor/github.com/containers/image/manifest/docker_schema2.go (27 changes, generated, vendored)

@@ -18,6 +18,16 @@ type Schema2Descriptor struct {
    URLs []string `json:"urls,omitempty"`
}

// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor.
func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo {
    return types.BlobInfo{
        Digest:    desc.Digest,
        Size:      desc.Size,
        URLs:      desc.URLs,
        MediaType: desc.MediaType,
    }
}

// Schema2 is a manifest in docker/distribution schema 2.
type Schema2 struct {
    SchemaVersion int `json:"schemaVersion"`

@@ -171,19 +181,18 @@ func Schema2Clone(src *Schema2) *Schema2 {

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *Schema2) ConfigInfo() types.BlobInfo {
    return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
    return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
}

// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *Schema2) LayerInfos() []types.BlobInfo {
    blobs := []types.BlobInfo{}
func (m *Schema2) LayerInfos() []LayerInfo {
    blobs := []LayerInfo{}
    for _, layer := range m.LayersDescriptors {
        blobs = append(blobs, types.BlobInfo{
            Digest: layer.Digest,
            Size:   layer.Size,
            URLs:   layer.URLs,
        blobs = append(blobs, LayerInfo{
            BlobInfo:   BlobInfoFromSchema2Descriptor(layer),
            EmptyLayer: false,
        })
    }
    return blobs

@@ -227,7 +236,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
        DockerVersion: s2.DockerVersion,
        Architecture:  s2.Architecture,
        Os:            s2.OS,
        Layers:        LayerInfosToStrings(m.LayerInfos()),
        Layers:        layerInfosToStrings(m.LayerInfos()),
    }
    if s2.Config != nil {
        i.Labels = s2.Config.Labels

vendor/github.com/containers/image/manifest/manifest.go (14 changes, generated, vendored)

@@ -50,10 +50,10 @@ var DefaultRequestedManifestMIMETypes = []string{
type Manifest interface {
    // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
    ConfigInfo() types.BlobInfo
    // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
    // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
    // The Digest field is guaranteed to be provided; Size may be -1.
    // WARNING: The list may contain duplicates, and they are semantically relevant.
    LayerInfos() []types.BlobInfo
    LayerInfos() []LayerInfo
    // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
    UpdateLayerInfos(layerInfos []types.BlobInfo) error

@@ -73,6 +73,12 @@ type Manifest interface {
    Serialize() ([]byte, error)
}

// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos.
type LayerInfo struct {
    types.BlobInfo
    EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
}

// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).

@@ -227,9 +233,9 @@ func FromBlob(manblob []byte, mt string) (Manifest, error) {
    }
}

// LayerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
func LayerInfosToStrings(infos []types.BlobInfo) []string {
func layerInfosToStrings(infos []LayerInfo) []string {
    layers := make([]string, len(infos))
    for i, info := range infos {
        layers[i] = info.Digest.String()

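The new `LayerInfo` type lets low-level consumers of the `Manifest` interface skip schema1 throwaway layers without re-parsing v1Compatibility themselves. A minimal sketch of a hypothetical helper:

```go
package example

import (
	"github.com/containers/image/manifest"
)

// nonEmptyLayerDigests walks Manifest.LayerInfos() and keeps only layers
// that are physically present (EmptyLayer == false); hypothetical helper.
func nonEmptyLayerDigests(m manifest.Manifest) []string {
	var digests []string
	for _, li := range m.LayerInfos() {
		if li.EmptyLayer {
			continue // schema1 "throwaway" entry; there may be no blob to fetch
		}
		digests = append(digests, li.Digest.String())
	}
	return digests
}
```
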
vendor/github.com/containers/image/manifest/oci.go (26 changes, generated, vendored)

@@ -10,6 +10,17 @@ import (
    "github.com/pkg/errors"
)

// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor.
func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo {
    return types.BlobInfo{
        Digest:      desc.Digest,
        Size:        desc.Size,
        URLs:        desc.URLs,
        Annotations: desc.Annotations,
        MediaType:   desc.MediaType,
    }
}

// OCI1 is a manifest.Manifest implementation for OCI images.
// The underlying data from imgspecv1.Manifest is also available.
type OCI1 struct {

@@ -45,16 +56,19 @@ func OCI1Clone(src *OCI1) *OCI1 {

// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *OCI1) ConfigInfo() types.BlobInfo {
    return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations}
    return BlobInfoFromOCI1Descriptor(m.Config)
}

// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *OCI1) LayerInfos() []types.BlobInfo {
    blobs := []types.BlobInfo{}
func (m *OCI1) LayerInfos() []LayerInfo {
    blobs := []LayerInfo{}
    for _, layer := range m.Layers {
        blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
        blobs = append(blobs, LayerInfo{
            BlobInfo:   BlobInfoFromOCI1Descriptor(layer),
            EmptyLayer: false,
        })
    }
    return blobs
}

@@ -101,7 +115,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
        Labels:       v1.Config.Labels,
        Architecture: v1.Architecture,
        Os:           v1.OS,
        Layers:       LayerInfosToStrings(m.LayerInfos()),
        Layers:       layerInfosToStrings(m.LayerInfos()),
    }
    return i, nil
}

vendor/github.com/containers/image/oci/archive/oci_dest.go (7 changes, generated, vendored)

@@ -70,6 +70,13 @@ func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
    return d.unpackedDest.MustMatchRuntimeOS()
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
    return d.unpackedDest.IgnoresEmbeddedDockerReference()
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.

vendor/github.com/containers/image/oci/layout/oci_dest.go (18 changes, generated, vendored)

@@ -18,9 +18,10 @@ import (
)

type ociImageDestination struct {
    ref           ociReference
    index         imgspecv1.Index
    sharedBlobDir string
    ref                      ociReference
    index                    imgspecv1.Index
    sharedBlobDir            string
    acceptUncompressedLayers bool
}

// newImageDestination returns an ImageDestination for writing to an existing directory.

@@ -43,6 +44,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (types.Imag
    d := &ociImageDestination{ref: ref, index: *index}
    if sys != nil {
        d.sharedBlobDir = sys.OCISharedBlobDirPath
        d.acceptUncompressedLayers = sys.OCIAcceptUncompressedLayers
    }

    if err := ensureDirectoryExists(d.ref.dir); err != nil {

@@ -81,6 +83,9 @@ func (d *ociImageDestination) SupportsSignatures(ctx context.Context) error {
}

func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression {
    if d.acceptUncompressedLayers {
        return types.PreserveOriginal
    }
    return types.Compress
}

@@ -95,6 +100,13 @@ func (d *ociImageDestination) MustMatchRuntimeOS() bool {
    return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
    return false // N/A, DockerReference() returns nil.
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.

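The `acceptUncompressedLayers` behavior above is opt-in through `types.SystemContext`. A minimal sketch of how a caller would enable it for an `oci:` destination (passing the context as, e.g., `copy.Options.DestinationCtx` is an assumption about the caller's setup):

```go
package example

import (
	"github.com/containers/image/types"
)

// uncompressedOCIContext returns a SystemContext under which the oci: layout
// destination reports types.PreserveOriginal instead of types.Compress.
func uncompressedOCIContext() *types.SystemContext {
	return &types.SystemContext{
		OCIAcceptUncompressedLayers: true,
	}
}
```
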
vendor/github.com/containers/image/oci/layout/oci_transport.go (12 changes, generated, vendored)

@@ -23,8 +23,14 @@ func init() {
    transports.Register(Transport)
}

// Transport is an ImageTransport for OCI directories.
var Transport = ociTransport{}
var (
    // Transport is an ImageTransport for OCI directories.
    Transport = ociTransport{}

    // ErrMoreThanOneImage is an error returned when the manifest includes
    // more than one image and the user should choose which one to use.
    ErrMoreThanOneImage = errors.New("more than one image in oci, choose an image")
)

type ociTransport struct{}

@@ -184,7 +190,7 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
        d = &index.Manifests[0]
    } else {
        // ask user to choose image when more than one image in the oci directory
        return imgspecv1.Descriptor{}, errors.Wrapf(err, "more than one image in oci, choose an image")
        return imgspecv1.Descriptor{}, ErrMoreThanOneImage
    }
} else {
    // if image specified, look through all manifests for a match

vendor/github.com/containers/image/openshift/openshift.go (7 changes, generated, vendored)

@@ -369,6 +369,13 @@ func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
    return false
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
    return d.docker.IgnoresEmbeddedDockerReference()
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.

vendor/github.com/containers/image/ostree/ostree_dest.go (7 changes, generated, vendored)

@@ -125,6 +125,13 @@ func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
    return true
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
    return false // N/A, DockerReference() returns nil.
}

func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
    tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
    if err != nil {

41
vendor/github.com/containers/image/pkg/compression/compression.go
generated
vendored
41
vendor/github.com/containers/image/pkg/compression/compression.go
generated
vendored
@ -5,28 +5,34 @@ import (
"compress/bzip2"
"compress/gzip"
"io"
"io/ioutil"

"github.com/pkg/errors"

"github.com/sirupsen/logrus"
"github.com/ulikunitz/xz"
)

// DecompressorFunc returns the decompressed stream, given a compressed stream.
type DecompressorFunc func(io.Reader) (io.Reader, error)
// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
type DecompressorFunc func(io.Reader) (io.ReadCloser, error)

// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
func GzipDecompressor(r io.Reader) (io.Reader, error) {
func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
return gzip.NewReader(r)
}

// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
func Bzip2Decompressor(r io.Reader) (io.Reader, error) {
return bzip2.NewReader(r), nil
func Bzip2Decompressor(r io.Reader) (io.ReadCloser, error) {
return ioutil.NopCloser(bzip2.NewReader(r)), nil
}

// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
func XzDecompressor(r io.Reader) (io.Reader, error) {
return nil, errors.New("Decompressing xz streams is not supported")
func XzDecompressor(r io.Reader) (io.ReadCloser, error) {
r, err := xz.NewReader(r)
if err != nil {
return nil, err
}
return ioutil.NopCloser(r), nil
}

// compressionAlgos is an internal implementation detail of DetectCompression
@ -65,3 +71,24 @@ func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {

return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
}

// AutoDecompress takes a stream and returns an uncompressed version of the
// same stream.
// The caller must call Close() on the returned stream (even if the input does not need,
// or does not even support, closing!).
func AutoDecompress(stream io.Reader) (io.ReadCloser, bool, error) {
decompressor, stream, err := DetectCompression(stream)
if err != nil {
return nil, false, errors.Wrapf(err, "Error detecting compression")
}
var res io.ReadCloser
if decompressor != nil {
res, err = decompressor(stream)
if err != nil {
return nil, false, errors.Wrapf(err, "Error initializing decompression")
}
} else {
res = ioutil.NopCloser(stream)
}
return res, decompressor != nil, nil
}

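For illustration, a minimal sketch of how a caller might drive the new AutoDecompress API; the input file name is hypothetical, and the key point of the new contract is that the returned stream must be closed even when the input was not compressed:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/containers/image/pkg/compression"
)

func main() {
	f, err := os.Open("layer.blob") // hypothetical input: gzip, bzip2, xz, or plain
	if err != nil {
		panic(err)
	}
	defer f.Close()

	rc, wasCompressed, err := compression.AutoDecompress(f)
	if err != nil {
		panic(err)
	}
	defer rc.Close() // required even if the input stream was not compressed

	n, err := io.Copy(ioutil.Discard, rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decompressed %d bytes (input compressed: %v)\n", n, wasCompressed)
}
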
35
vendor/github.com/containers/image/pkg/docker/config/config.go
generated
vendored
35
vendor/github.com/containers/image/pkg/docker/config/config.go
generated
vendored
@ -7,7 +7,6 @@ import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"

"github.com/containers/image/types"
@ -27,16 +26,12 @@ type dockerConfigFile struct {
CredHelpers map[string]string `json:"credHelpers,omitempty"`
}

const (
defaultPath = "/run"
authCfg = "containers"
authCfgFileName = "auth.json"
dockerCfg = ".docker"
dockerCfgFileName = "config.json"
dockerLegacyCfg = ".dockercfg"
)

var (
defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json")
dockerHomePath = filepath.FromSlash(".docker/config.json")
dockerLegacyHomePath = ".dockercfg"

// ErrNotLoggedIn is returned for users not logged into a registry
// that they are trying to logout of
ErrNotLoggedIn = errors.New("not logged in")
@ -64,7 +59,7 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
}

dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyCfg)
dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyHomePath)
var paths []string
pathToAuth, err := getPathToAuth(sys)
if err == nil {
@ -75,7 +70,7 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
// Logging the error as a warning instead and moving on to pulling the image
logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
}
paths = append(paths, filepath.Join(homedir.Get(), dockerCfg, dockerCfgFileName), dockerLegacyPath)
paths = append(paths, filepath.Join(homedir.Get(), dockerHomePath), dockerLegacyPath)

for _, path := range paths {
legacyFormat := path == dockerLegacyPath
@ -135,15 +130,15 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {

// getPath gets the path of the auth.json file
// The path can be overriden by the user if the overwrite-path flag is set
// If the flag is not set and XDG_RUNTIME_DIR is ser, the auth.json file is saved in XDG_RUNTIME_DIR/containers
// Otherwise, the auth.json file is stored in /run/user/UID/containers
// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers
// Otherwise, the auth.json file is stored in /run/containers/UID
func getPathToAuth(sys *types.SystemContext) (string, error) {
if sys != nil {
if sys.AuthFilePath != "" {
return sys.AuthFilePath, nil
}
if sys.RootForImplicitAbsolutePaths != "" {
return filepath.Join(sys.RootForImplicitAbsolutePaths, defaultPath, strconv.Itoa(os.Getuid()), authCfg, authCfgFileName), nil
return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), nil
}
}

@ -155,14 +150,12 @@ func getPathToAuth(sys *types.SystemContext) (string, error) {
if os.IsNotExist(err) {
// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
// or made a typo while setting the environment variable,
// so return an error referring to $XDG_RUNTIME_DIR instead of …/authCfgFileName inside.
// so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside.
return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
} // else ignore err and let the caller fail accessing …/authCfgFileName.
runtimeDir = filepath.Join(runtimeDir, authCfg)
} else {
runtimeDir = filepath.Join(defaultPath, authCfg, strconv.Itoa(os.Getuid()))
} // else ignore err and let the caller fail accessing xdgRuntimeDirPath.
return filepath.Join(runtimeDir, xdgRuntimeDirPath), nil
}
return filepath.Join(runtimeDir, authCfgFileName), nil
return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), nil
}

// readJSONFile unmarshals the authentications stored in the auth.json file and returns it

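As a sketch of the lookup order the rewritten getPathToAuth encodes (the helper name resolveAuthPath is invented for illustration): an explicit path wins, then $XDG_RUNTIME_DIR/containers/auth.json, then the new per-UID default /run/containers/UID/auth.json:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveAuthPath is a hypothetical stand-in for getPathToAuth.
func resolveAuthPath(explicit string) string {
	if explicit != "" {
		return explicit // SystemContext.AuthFilePath overrides everything
	}
	if runtimeDir := os.Getenv("XDG_RUNTIME_DIR"); runtimeDir != "" {
		return filepath.Join(runtimeDir, "containers/auth.json")
	}
	return fmt.Sprintf("/run/containers/%d/auth.json", os.Getuid())
}

func main() {
	fmt.Println(resolveAuthPath(""))
}
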
91
vendor/github.com/containers/image/storage/storage_image.go
generated
vendored
91
vendor/github.com/containers/image/storage/storage_image.go
generated
vendored
@ -50,8 +50,7 @@ type storageImageSource struct {
}

type storageImageDestination struct {
imageRef storageReference // The reference we'll use to name the image
publicRef storageReference // The reference we return when asked about the name we'll give to the image
imageRef storageReference
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
@ -102,6 +101,9 @@ func (s storageImageSource) Close() error {

// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
if info.Digest == image.GzippedEmptyLayerDigest {
return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
}
rc, n, _, err = s.getBlobAndLayerID(info)
return rc, n, err
}
@ -175,11 +177,15 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
// the image, after they've been decompressed.
func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) {
updatedBlobInfos := []types.BlobInfo{}
_, manifestType, err := s.GetManifest(ctx, nil)
manifestBlob, manifestType, err := s.GetManifest(ctx, nil)
if err != nil {
return nil, errors.Wrapf(err, "error reading image manifest for %q", s.image.ID)
}
man, err := manifest.FromBlob(manifestBlob, manifestType)
if err != nil {
return nil, errors.Wrapf(err, "error parsing image manifest for %q", s.image.ID)
}

uncompressedLayerType := ""
switch manifestType {
case imgspecv1.MediaTypeImageManifest:
@ -188,6 +194,8 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.Blo
// This is actually a compressed type, but there's no uncompressed type defined
uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType
}

physicalBlobInfos := []types.BlobInfo{}
layerID := s.image.TopLayer
for layerID != "" {
layer, err := s.imageRef.transport.store.Layer(layerID)
@ -205,10 +213,43 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context) ([]types.Blo
Size: layer.UncompressedSize,
MediaType: uncompressedLayerType,
}
updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...)
physicalBlobInfos = append([]types.BlobInfo{blobInfo}, physicalBlobInfos...)
layerID = layer.Parent
}
return updatedBlobInfos, nil

res, err := buildLayerInfosForCopy(man.LayerInfos(), physicalBlobInfos)
if err != nil {
return nil, errors.Wrapf(err, "error creating LayerInfosForCopy of image %q", s.image.ID)
}
return res, nil
}

// buildLayerInfosForCopy builds a LayerInfosForCopy return value based on manifestInfos from the original manifest,
// but using layer data which we can actually produce — physicalInfos for non-empty layers,
// and image.GzippedEmptyLayer for empty ones.
// (This is split basically only to allow easily unit-testing the part that has no dependencies on the external environment.)
func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
nextPhysical := 0
res := make([]types.BlobInfo, len(manifestInfos))
for i, mi := range manifestInfos {
if mi.EmptyLayer {
res[i] = types.BlobInfo{
Digest: image.GzippedEmptyLayerDigest,
Size: int64(len(image.GzippedEmptyLayer)),
MediaType: mi.MediaType,
}
} else {
if nextPhysical >= len(physicalInfos) {
return nil, fmt.Errorf("expected more than %d physical layers to exist", len(physicalInfos))
}
res[i] = physicalInfos[nextPhysical]
nextPhysical++
}
}
if nextPhysical != len(physicalInfos) {
return nil, fmt.Errorf("used only %d out of %d physical layers", nextPhysical, len(physicalInfos))
}
return res, nil
}

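To make the pairing rule concrete, a hypothetical in-package test fragment (this exact test is not part of the commit; the digest value is a placeholder): empty manifest entries map to the fixed gzipped empty layer, everything else consumes the physical layers in order, and leftovers are an error.

func TestBuildLayerInfosForCopySketch(t *testing.T) {
	manifestInfos := []manifest.LayerInfo{
		{EmptyLayer: true},
		{BlobInfo: types.BlobInfo{MediaType: manifest.DockerV2Schema2LayerMediaType}},
	}
	physicalInfos := []types.BlobInfo{
		{Digest: "sha256:0123", Size: 1234}, // placeholder digest
	}
	res, err := buildLayerInfosForCopy(manifestInfos, physicalInfos)
	if err != nil {
		t.Fatal(err)
	}
	if res[0].Digest != image.GzippedEmptyLayerDigest || res[1].Digest != physicalInfos[0].Digest {
		t.Fatalf("unexpected pairing: %+v", res)
	}
}
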
// GetSignatures() parses the image's signatures blob into a slice of byte slices.
@ -243,15 +284,8 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e
if err != nil {
return nil, errors.Wrapf(err, "error creating a temporary directory")
}
// Break reading of the reference we're writing, so that copy.Image() won't try to rewrite
// schema1 image manifests to remove embedded references, since that changes the manifest's
// digest, and that makes the image unusable if we subsequently try to access it using a
// reference that mentions the no-longer-correct digest.
publicRef := imageRef
publicRef.name = nil
image := &storageImageDestination{
imageRef: imageRef,
publicRef: publicRef,
directory: directory,
blobDiffIDs: make(map[digest.Digest]digest.Digest),
fileSizes: make(map[digest.Digest]int64),
@ -261,11 +295,10 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e
return image, nil
}

// Reference returns a mostly-usable image reference that can't return a DockerReference, to
// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to
// remove image names that they contain which don't match the value we're using.
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (s storageImageDestination) Reference() types.ImageReference {
return s.publicRef
return s.imageRef
}

// Close cleans up the temporary directory.
@ -408,12 +441,7 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
case *manifest.Schema1:
// Build a list of the diffIDs we've generated for the non-throwaway FS layers,
// in reverse of the order in which they were originally listed.
for i, history := range m.History {
compat := manifest.Schema1V1Compatibility{}
if err := json.Unmarshal([]byte(history.V1Compatibility), &compat); err != nil {
logrus.Debugf("internal error reading schema 1 history: %v", err)
return ""
}
for i, compat := range m.ExtractedV1Compatibility {
if compat.ThrowAway {
continue
}
@ -471,8 +499,11 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
layerBlobs := man.LayerInfos()
// Extract or find the layers.
lastLayer := ""
addedLayers := []string{}
for _, blob := range layerBlobs {
if blob.EmptyLayer {
continue
}

var diff io.ReadCloser
// Check if there's already a layer with the ID that we'd give to the result of applying
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
@ -481,7 +512,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
has, _, err := s.HasBlob(ctx, blob)
has, _, err := s.HasBlob(ctx, blob.BlobInfo)
if err != nil {
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
}
@ -550,7 +581,6 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
}
lastLayer = layer.ID
addedLayers = append([]string{lastLayer}, addedLayers...)
}
// If one of those blobs was a configuration blob, then we can try to dig out the date when the image
// was originally created, in case we're just copying it. If not, no harm done.
@ -613,7 +643,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil {
names := []string{}
if name != nil {
names = append(names, verboseName(name))
names = append(names, name.String())
}
if len(oldNames) > 0 {
names = append(names, oldNames...)
@ -703,6 +733,13 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool {
return true
}

// IgnoresEmbeddedDockerReference returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
func (s *storageImageDestination) IgnoresEmbeddedDockerReference() bool {
return true // Yes, we want the unmodified manifest
}

// PutSignatures records the image's signatures for committing as a single data blob.
func (s *storageImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
sizes := []int{}

139
vendor/github.com/containers/image/storage/storage_reference.go
generated
vendored
139
vendor/github.com/containers/image/storage/storage_reference.go
generated
vendored
@ -9,7 +9,6 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@ -17,55 +16,65 @@ import (
// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
// value hex-encoded into a 64-character string, and a reference to a Store
// where an image is, or would be, kept.
// Either "named" or "id" must be set.
type storageReference struct {
transport storageTransport
reference string
named reference.Named // may include a tag and/or a digest
id string
name reference.Named
tag string
digest digest.Digest
}

func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference {
func newReference(transport storageTransport, named reference.Named, id string) (*storageReference, error) {
if named == nil && id == "" {
return nil, ErrInvalidReference
}
// We take a copy of the transport, which contains a pointer to the
// store that it used for resolving this reference, so that the
// transport that we'll return from Transport() won't be affected by
// further calls to the original transport's SetStore() method.
return &storageReference{
transport: transport,
reference: reference,
named: named,
id: id,
name: name,
tag: tag,
digest: digest,
}, nil
}

// imageMatchesRepo returns true iff image.Names contains an element with the same repo as ref
func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
repo := ref.Name()
for _, name := range image.Names {
if named, err := reference.ParseNormalizedNamed(name); err == nil {
if named.Name() == repo {
return true
}
}
}
return false
}

// Resolve the reference's name to an image ID in the store, if there's already
// one present with the same name or ID, and return the image.
func (s *storageReference) resolveImage() (*storage.Image, error) {
var loadedImage *storage.Image
if s.id == "" {
// Look for an image that has the expanded reference name as an explicit Name value.
image, err := s.transport.store.Image(s.reference)
image, err := s.transport.store.Image(s.named.String())
if image != nil && err == nil {
loadedImage = image
s.id = image.ID
}
}
if s.id == "" && s.name != nil && s.digest != "" {
// Look for an image with the specified digest that has the same name,
// though possibly with a different tag or digest, as a Name value, so
// that the canonical reference can be implicitly resolved to the image.
images, err := s.transport.store.ImagesByDigest(s.digest)
if images != nil && err == nil {
repo := reference.FamiliarName(reference.TrimNamed(s.name))
search:
for _, image := range images {
for _, name := range image.Names {
if named, err := reference.ParseNormalizedNamed(name); err == nil {
if reference.FamiliarName(reference.TrimNamed(named)) == repo {
s.id = image.ID
break search
}
if s.id == "" && s.named != nil {
if digested, ok := s.named.(reference.Digested); ok {
// Look for an image with the specified digest that has the same name,
// though possibly with a different tag or digest, as a Name value, so
// that the canonical reference can be implicitly resolved to the image.
images, err := s.transport.store.ImagesByDigest(digested.Digest())
if images != nil && err == nil {
for _, image := range images {
if imageMatchesRepo(image, s.named) {
loadedImage = image
s.id = image.ID
break
}
}
}
@ -75,27 +84,20 @@ func (s *storageReference) resolveImage() (*storage.Image, error) {
logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport())
}
img, err := s.transport.store.Image(s.id)
if err != nil {
return nil, errors.Wrapf(err, "error reading image %q", s.id)
}
if s.name != nil {
repo := reference.FamiliarName(reference.TrimNamed(s.name))
nameMatch := false
for _, name := range img.Names {
if named, err := reference.ParseNormalizedNamed(name); err == nil {
if reference.FamiliarName(reference.TrimNamed(named)) == repo {
nameMatch = true
break
}
}
if loadedImage == nil {
img, err := s.transport.store.Image(s.id)
if err != nil {
return nil, errors.Wrapf(err, "error reading image %q", s.id)
}
if !nameMatch {
loadedImage = img
}
if s.named != nil {
if !imageMatchesRepo(loadedImage, s.named) {
logrus.Errorf("no image matching reference %q found", s.StringWithinTransport())
return nil, ErrNoSuchImage
}
}
return img, nil
return loadedImage, nil
}

// Return a Transport object that defaults to using the same store that we used
@ -110,20 +112,7 @@ func (s storageReference) Transport() types.ImageTransport {

// Return a name with a tag or digest, if we have either, else return it bare.
func (s storageReference) DockerReference() reference.Named {
if s.name == nil {
return nil
}
if s.tag != "" {
if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil {
return namedTagged
}
}
if s.digest != "" {
if canonical, err := reference.WithDigest(s.name, s.digest); err == nil {
return canonical
}
}
return s.name
return s.named
}

// Return a name with a tag, prefixed with the graph root and driver name, to
@ -135,25 +124,25 @@ func (s storageReference) StringWithinTransport() string {
if len(options) > 0 {
optionsList = ":" + strings.Join(options, ",")
}
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
if s.reference == "" {
return storeSpec + "@" + s.id
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
if s.named != nil {
res = res + s.named.String()
}
if s.id == "" {
return storeSpec + s.reference
if s.id != "" {
res = res + "@" + s.id
}
return storeSpec + s.reference + "@" + s.id
return res
}

func (s storageReference) PolicyConfigurationIdentity() string {
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
if s.name == nil {
return storeSpec + "@" + s.id
res := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
if s.named != nil {
res = res + s.named.String()
}
if s.id == "" {
return storeSpec + s.reference
if s.id != "" {
res = res + "@" + s.id
}
return storeSpec + s.reference + "@" + s.id
return res
}

// Also accept policy that's tied to the combination of the graph root and
@ -164,9 +153,17 @@ func (s storageReference) PolicyConfigurationNamespaces() []string {
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]"
driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
namespaces := []string{}
if s.name != nil {
name := reference.TrimNamed(s.name)
components := strings.Split(name.String(), "/")
if s.named != nil {
if s.id != "" {
// The reference without the ID is also a valid namespace.
namespaces = append(namespaces, storeSpec+s.named.String())
}
tagged, isTagged := s.named.(reference.Tagged)
_, isDigested := s.named.(reference.Digested)
if isTagged && isDigested { // s.named is "name:tag@digest"; add a "name:tag" parent namespace.
namespaces = append(namespaces, storeSpec+s.named.Name()+":"+tagged.Tag())
}
components := strings.Split(s.named.Name(), "/")
for len(components) > 0 {
namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
components = components[:len(components)-1]

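As a worked example of the new namespace logic, assume a hypothetical store spec and the reference docker.io/library/busybox:latest@sha256:<digest> with an image ID also set; the namespaces then shrink step by step:

// [overlay@/var/lib/containers/storage]docker.io/library/busybox:latest@sha256:<digest>  (the reference minus the ID)
// [overlay@/var/lib/containers/storage]docker.io/library/busybox:latest                  (name:tag parent of name:tag@digest)
// [overlay@/var/lib/containers/storage]docker.io/library/busybox                         (repo, then its path prefixes)
// [overlay@/var/lib/containers/storage]docker.io/library
// [overlay@/var/lib/containers/storage]docker.io
// followed by the store-level namespaces built from storeSpec and driverlessStoreSpec (not shown in this hunk).
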
192
vendor/github.com/containers/image/storage/storage_transport.go
generated
vendored
192
vendor/github.com/containers/image/storage/storage_transport.go
generated
vendored
@ -3,6 +3,7 @@
package storage

import (
"fmt"
"path/filepath"
"strings"

@ -105,7 +106,6 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
// ParseStoreReference takes a name or an ID, tries to figure out which it is
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
var name reference.Named
if ref == "" {
return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref)
}
@ -118,66 +118,36 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
ref = ref[closeIndex+1:]
}

// The last segment, if there's more than one, is either a digest from a reference, or an image ID.
// The reference may end with an image ID. Image IDs and digests use the same "@" separator;
// here we only peel away an image ID, and leave digests alone.
split := strings.LastIndex(ref, "@")
idOrDigest := ""
if split != -1 {
// Peel off that last bit so that we can work on the rest.
idOrDigest = ref[split+1:]
if idOrDigest == "" {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
}
ref = ref[:split]
}

// The middle segment (now the last segment), if there is one, is a digest.
split = strings.LastIndex(ref, "@")
sum := digest.Digest("")
if split != -1 {
sum = digest.Digest(ref[split+1:])
if sum == "" {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
}
ref = ref[:split]
}

// If we have something that unambiguously should be a digest, validate it, and then the third part,
// if we have one, as an ID.
id := ""
if sum != "" {
if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest)
if split != -1 {
possibleID := ref[split+1:]
if possibleID == "" {
return nil, errors.Wrapf(ErrInvalidReference, "empty trailing digest or ID in %q", ref)
}
if err := sum.Validate(); err != nil {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
}
id = idOrDigest
if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) {
// The ID is a truncated version of the ID of an image that's present in local storage,
// so we might as well use the expanded value.
id = img.ID
}
} else if idOrDigest != "" {
// There was no middle portion, so the final portion could be either a digest or an ID.
if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil {
// It's an ID.
id = idOrDigest
} else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil {
// It's a digest.
sum = idSum
} else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) {
// It's a truncated version of the ID of an image that's present in local storage,
// and we may need the expanded value.
id = img.ID
} else {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
// If it looks like a digest, leave it alone for now.
if _, err := digest.Parse(possibleID); err != nil {
// Otherwise…
if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil {
id = possibleID // … it is a full ID
} else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) {
// … it is a truncated version of the ID of an image that's present in local storage,
// so we might as well use the expanded value.
id = img.ID
} else {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID or digest", possibleID)
}
// We have recognized an image ID; peel it off.
ref = ref[:split]
}
}

// If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's
// If we only have one @-delimited portion, then _maybe_ it's a truncated image ID. Only check on that if it's
// at least of what we guess is a reasonable minimum length, because we don't want a really short value
// like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" {
if id == "" && len(ref) >= minimumTruncatedIDLength && !strings.ContainsAny(ref, "@:") {
if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) {
// It's a truncated version of the ID of an image that's present in local storage;
// we need to expand it.
@ -186,53 +156,24 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
}
}

// The initial portion is probably a name, possibly with a tag.
var named reference.Named
// Unless we have an un-named "ID" or "@ID" reference (where ID might only have been a prefix), which has been
// completely parsed above, the initial portion should be a name, possibly with a tag and/or a digest..
if ref != "" {
var err error
if name, err = reference.ParseNormalizedNamed(ref); err != nil {
named, err = reference.ParseNormalizedNamed(ref)
if err != nil {
return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
}
}
if name == nil && sum == "" && id == "" {
return nil, errors.Errorf("error parsing reference")
named = reference.TagNameOnly(named)
}

// Construct a copy of the store spec.
optionsList := ""
options := store.GraphOptions()
if len(options) > 0 {
optionsList = ":" + strings.Join(options, ",")
result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id)
if err != nil {
return nil, err
}
storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]"

// Convert the name back into a reference string, if we got a name.
refname := ""
tag := ""
if name != nil {
if sum.Validate() == nil {
canonical, err := reference.WithDigest(name, sum)
if err != nil {
return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum)
}
refname = verboseName(canonical)
} else {
name = reference.TagNameOnly(name)
tagged, ok := name.(reference.Tagged)
if !ok {
return nil, errors.Errorf("error parsing possibly-tagless name %q", ref)
}
refname = verboseName(name)
tag = tagged.Tag()
}
}
if refname == "" {
logrus.Debugf("parsed reference to id into %q", storeSpec+"@"+id)
} else if id == "" {
logrus.Debugf("parsed reference to refname into %q", storeSpec+refname)
} else {
logrus.Debugf("parsed reference to refname@id into %q", storeSpec+refname+"@"+id)
}
return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil
logrus.Debugf("parsed reference into %q", result.StringWithinTransport())
return result, nil
}

func (s *storageTransport) GetStore() (storage.Store, error) {
@ -252,7 +193,7 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
}

// ParseReference takes a name and a tag or digest and/or ID
// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"),
// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"/"_name_:_tag_@_digest_"/"_name_:_tag_@_digest_@_id_"),
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
@ -338,7 +279,7 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
dref := ref.DockerReference()
if dref != nil {
if img, err := store.Image(verboseName(dref)); err == nil {
if img, err := store.Image(dref.String()); err == nil {
return img, nil
}
}
@ -399,52 +340,29 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
if scope == "" {
return nil
}
// But if there is anything left, it has to be a name, with or without
// a tag, with or without an ID, since we don't return namespace values
// that are just bare IDs.
scopeInfo := strings.SplitN(scope, "@", 2)
if len(scopeInfo) == 1 && scopeInfo[0] != "" {
_, err := reference.ParseNormalizedNamed(scopeInfo[0])
if err != nil {

fields := strings.SplitN(scope, "@", 3)
switch len(fields) {
case 1: // name only
case 2: // name:tag@ID or name[:tag]@digest
if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil {
if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
}
}
case 3: // name[:tag]@digest@ID
if _, err := digest.Parse(fields[1]); err != nil {
return err
}
} else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" {
_, err := reference.ParseNormalizedNamed(scopeInfo[0])
if err != nil {
if _, err := digest.Parse("sha256:" + fields[2]); err != nil {
return err
}
_, err = digest.Parse("sha256:" + scopeInfo[1])
if err != nil {
return err
}
} else {
return ErrInvalidReference
default: // Coverage: This should never happen
return errors.New("Internal error: unexpected number of fields from strings.SplitN")
}
// As for field[0], if it is non-empty at all:
// FIXME? We could be verifying the various character set and length restrictions
// from docker/distribution/reference.regexp.go, but other than that there
// are few semantically invalid strings.
return nil
}

func verboseName(r reference.Reference) string {
if r == nil {
return ""
}
named, isNamed := r.(reference.Named)
digested, isDigested := r.(reference.Digested)
tagged, isTagged := r.(reference.Tagged)
name := ""
tag := ""
sum := ""
if isNamed {
name = (reference.TrimNamed(named)).String()
}
if isTagged {
if tagged.Tag() != "" {
tag = ":" + tagged.Tag()
}
}
if isDigested {
if digested.Digest().Validate() == nil {
sum = "@" + digested.Digest().String()
}
}
return name + tag + sum
}

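For reference, some hypothetical inputs the simplified parser now handles (IDs and digests are placeholders):

// busybox                                   → name only; TagNameOnly supplies ":latest"
// @<64-hex-ID>                              → bare image ID (a unique prefix of at least
//                                             minimumTruncatedIDLength characters also works)
// busybox:1.30@<64-hex-ID>                  → name:tag plus an image ID, which is peeled off
// busybox@sha256:<digest>                   → the digest stays part of the named reference
// busybox:1.30@sha256:<digest>@<64-hex-ID>  → all three components at once
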
7
vendor/github.com/containers/image/types/types.go
generated
vendored
7
vendor/github.com/containers/image/types/types.go
generated
vendored
@ -174,6 +174,11 @@ type ImageDestination interface {
AcceptsForeignLayerURLs() bool
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
MustMatchRuntimeOS() bool
// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
// and would prefer to receive an unmodified manifest instead of one modified for the destination.
// Does not make a difference if Reference().DockerReference() is nil.
IgnoresEmbeddedDockerReference() bool

// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
@ -359,6 +364,8 @@ type SystemContext struct {
OCIInsecureSkipTLSVerify bool
// If not "", use a shared directory for storing blobs rather than within OCI layouts
OCISharedBlobDirPath string
// Allow UnCompress image layer for OCI image layer
OCIAcceptUncompressedLayers bool

// === docker.Transport overrides ===
// If not "", a directory containing a CA certificate (ending with ".crt"),

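A minimal sketch of what satisfying the new interface method looks like for a hypothetical destination type (not vendored code):

type myImageDestination struct{ /* hypothetical destination state */ }

// IgnoresEmbeddedDockerReference implements the new types.ImageDestination method.
func (d *myImageDestination) IgnoresEmbeddedDockerReference() bool {
	// Return true when this destination keeps manifests verbatim and never
	// looks up images via the Docker reference embedded in schema1 manifests;
	// copy.Image() then skips rewriting (and re-digesting) the manifest.
	return false
}
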
3
vendor/github.com/containers/image/vendor.conf
generated
vendored
3
vendor/github.com/containers/image/vendor.conf
generated
vendored
@ -40,3 +40,6 @@ github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
github.com/pquerna/ffjson master
github.com/syndtr/gocapability master
github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4

3
vendor/github.com/containers/storage/README.md
generated
vendored
3
vendor/github.com/containers/storage/README.md
generated
vendored
@ -41,3 +41,6 @@ memory and stored along with the library's own bookkeeping information.
Additionally, the library can store one or more of what it calls *big data* for
images and containers. This is a named chunk of larger data, which is only in
memory when it is being read from or being written to its own disk file.

**[Contributing](CONTRIBUTING.md)**
Information about contributing to this project.

4
vendor/github.com/containers/storage/containers.go
generated
vendored
4
vendor/github.com/containers/storage/containers.go
generated
vendored
@ -2,6 +2,7 @@ package storage

import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
@ -278,7 +279,8 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName
return nil, errors.Wrapf(ErrDuplicateName,
fmt.Sprintf("the container name \"%s\" is already in use by \"%s\". You have to remove that container to be able to reuse that name.", name, r.byname[name].ID))
}
}
if err == nil {

3
vendor/github.com/containers/storage/containers_ffjson.go
generated
vendored
3
vendor/github.com/containers/storage/containers_ffjson.go
generated
vendored
@ -1,5 +1,6 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: ./containers.go
// source: containers.go
//

package storage

43
vendor/github.com/containers/storage/drivers/chown.go
generated
vendored
43
vendor/github.com/containers/storage/drivers/chown.go
generated
vendored
@ -6,7 +6,6 @@ import (
"fmt"
"os"
"path/filepath"
"syscall"

"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/reexec"
@ -56,47 +55,7 @@ func chownByMapsMain() {
if err != nil {
return fmt.Errorf("error walking to %q: %v", path, err)
}
sysinfo := info.Sys()
if st, ok := sysinfo.(*syscall.Stat_t); ok {
// Map an on-disk UID/GID pair from host to container
// using the first map, then back to the host using the
// second map. Skip that first step if they're 0, to
// compensate for cases where a parent layer should
// have had a mapped value, but didn't.
uid, gid := int(st.Uid), int(st.Gid)
if toContainer != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedUid, mappedGid, err := toContainer.ToContainer(pair)
if err != nil {
if (uid != 0) || (gid != 0) {
return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
}
mappedUid, mappedGid = uid, gid
}
uid, gid = mappedUid, mappedGid
}
if toHost != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedPair, err := toHost.ToHost(pair)
if err != nil {
return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
}
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
// Make the change.
if err := syscall.Lchown(path, uid, gid); err != nil {
return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err)
}
}
}
return nil
return platformLChown(path, info, toHost, toContainer)
}
if err := filepath.Walk(".", chown); err != nil {
fmt.Fprintf(os.Stderr, "error during chown: %v", err)

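The net effect is that the walk callback becomes a thin shim around a per-platform helper; roughly (names match the hunks above and below, and the fragment assumes the surrounding chownByMapsMain scope):

chown := func(path string, info os.FileInfo, err error) error {
	if err != nil {
		return fmt.Errorf("error walking to %q: %v", path, err)
	}
	// platformLChown is selected at compile time via the build tags in
	// chown_unix.go (// +build !windows) and chown_windows.go (// +build windows).
	return platformLChown(path, info, toHost, toContainer)
}
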
55
vendor/github.com/containers/storage/drivers/chown_unix.go
generated
vendored
Normal file
55
vendor/github.com/containers/storage/drivers/chown_unix.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
// +build !windows

package graphdriver

import (
"fmt"
"os"
"syscall"

"github.com/containers/storage/pkg/idtools"
)

func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
sysinfo := info.Sys()
if st, ok := sysinfo.(*syscall.Stat_t); ok {
// Map an on-disk UID/GID pair from host to container
// using the first map, then back to the host using the
// second map. Skip that first step if they're 0, to
// compensate for cases where a parent layer should
// have had a mapped value, but didn't.
uid, gid := int(st.Uid), int(st.Gid)
if toContainer != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedUid, mappedGid, err := toContainer.ToContainer(pair)
if err != nil {
if (uid != 0) || (gid != 0) {
return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
}
mappedUid, mappedGid = uid, gid
}
uid, gid = mappedUid, mappedGid
}
if toHost != nil {
pair := idtools.IDPair{
UID: uid,
GID: gid,
}
mappedPair, err := toHost.ToHost(pair)
if err != nil {
return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err)
}
uid, gid = mappedPair.UID, mappedPair.GID
}
if uid != int(st.Uid) || gid != int(st.Gid) {
// Make the change.
if err := syscall.Lchown(path, uid, gid); err != nil {
return fmt.Errorf("%s: chown(%q): %v", os.Args[0], path, err)
}
}
}
return nil
}
14
vendor/github.com/containers/storage/drivers/chown_windows.go
generated
vendored
Normal file
14
vendor/github.com/containers/storage/drivers/chown_windows.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
// +build windows

package graphdriver

import (
"os"
"syscall"

"github.com/containers/storage/pkg/idtools"
)

func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {
return &os.PathError{"lchown", path, syscall.EWINDOWS}
}
2
vendor/github.com/containers/storage/drivers/chroot_windows.go
generated
vendored
2
vendor/github.com/containers/storage/drivers/chroot_windows.go
generated
vendored
@ -1,7 +1,7 @@
package graphdriver

import (
"os"
"fmt"
"syscall"
)

1
vendor/github.com/containers/storage/drivers/fsdiff.go
generated
vendored
1
vendor/github.com/containers/storage/drivers/fsdiff.go
generated
vendored
@ -163,6 +163,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappin
start := time.Now().UTC()
logrus.Debug("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
logrus.Errorf("Error while applying layer: %s", err)
return
}
logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds())

85
vendor/github.com/containers/storage/drivers/overlay/overlay.go
generated
vendored
85
vendor/github.com/containers/storage/drivers/overlay/overlay.go
generated
vendored
@ -24,6 +24,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/ostree"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
@ -84,6 +85,9 @@ type overlayOptions struct {
overrideKernelCheck bool
imageStores []string
quota quota.Quota
mountProgram string
ostreeRepo string
skipMountHome bool
}

// Driver contains information about the home directory and the list of active mounts that are created using this driver.
@ -98,6 +102,7 @@ type Driver struct {
naiveDiff graphdriver.DiffDriver
supportsDType bool
locker *locker.Locker
convert map[string]bool
}

var (
@ -147,15 +152,28 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, err
}

supportsDType, err := supportsOverlay(home, fsMagic, rootUID, rootGID)
if err != nil {
os.Remove(filepath.Join(home, linkDir))
os.Remove(home)
return nil, errors.Wrap(err, "kernel does not support overlay fs")
var supportsDType bool
if opts.mountProgram != "" {
supportsDType = true
} else {
supportsDType, err = supportsOverlay(home, fsMagic, rootUID, rootGID)
if err != nil {
os.Remove(filepath.Join(home, linkDir))
os.Remove(home)
return nil, errors.Wrap(err, "kernel does not support overlay fs")
}
}

if err := mount.MakePrivate(home); err != nil {
return nil, err
if !opts.skipMountHome {
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
}

if opts.ostreeRepo != "" {
if err := ostree.CreateOSTreeRepository(opts.ostreeRepo, rootUID, rootGID); err != nil {
return nil, err
}
}

d := &Driver{
@ -167,6 +185,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
supportsDType: supportsDType,
locker: locker.New(),
options: *opts,
convert: make(map[string]bool),
}

d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d)
@ -227,6 +246,25 @@ func parseOptions(options []string) (*overlayOptions, error) {
}
o.imageStores = append(o.imageStores, store)
}
case ".mount_program", "overlay.mount_program", "overlay2.mount_program":
logrus.Debugf("overlay: mount_program=%s", val)
_, err := os.Stat(val)
if err != nil {
return nil, fmt.Errorf("overlay: can't stat program %s: %v", val, err)
}
o.mountProgram = val
case "overlay2.ostree_repo", "overlay.ostree_repo", ".ostree_repo":
logrus.Debugf("overlay: ostree_repo=%s", val)
if !ostree.OstreeSupport() {
return nil, fmt.Errorf("overlay: ostree_repo specified but support for ostree is missing")
}
o.ostreeRepo = val
case "overlay2.skip_mount_home", "overlay.skip_mount_home", ".skip_mount_home":
logrus.Debugf("overlay: skip_mount_home=%s", val)
o.skipMountHome, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("overlay: Unknown option %s", key)
}
@ -236,6 +274,7 @@ func parseOptions(options []string) (*overlayOptions, error) {

func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) {
// We can try to modprobe overlay first

exec.Command("modprobe", "overlay").Run()

layerDir, err := ioutil.TempDir(home, "compat")
@ -380,6 +419,11 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers")
}
}

if d.options.ostreeRepo != "" {
d.convert[id] = true
}

return d.create(id, parent, opts)
}

@ -547,6 +591,12 @@ func (d *Driver) getLowerDirs(id string) ([]string, error) {
func (d *Driver) Remove(id string) error {
d.locker.Lock(id)
defer d.locker.Unlock(id)

// Ignore errors, we don't want to fail if the ostree branch doesn't exist,
if d.options.ostreeRepo != "" {
ostree.DeleteOSTree(d.options.ostreeRepo, id)
}

dir := d.dir(id)
lid, err := ioutil.ReadFile(path.Join(dir, "link"))
if err == nil {
@ -663,7 +713,7 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
// the page size. The mount syscall fails if the mount data cannot
// fit within a page and relative links make the mount data much
// smaller at the expense of requiring a fork exec to chroot.
if len(mountData) > pageSize {
if len(mountData) > pageSize || d.options.mountProgram != "" {
//FIXME: We need to figure out to get this to work with additional stores
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), path.Join(id, "diff"), path.Join(id, "work"))
mountData = label.FormatMountLabel(opts, mountLabel)
@ -671,8 +721,16 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
}

mount = func(source string, target string, mType string, flags uintptr, label string) error {
return mountFrom(d.home, source, target, mType, flags, label)
if d.options.mountProgram != "" {
mount = func(source string, target string, mType string, flags uintptr, label string) error {
mountProgram := exec.Command(d.options.mountProgram, "-o", label, target)
mountProgram.Dir = d.home
return mountProgram.Run()
}
} else {
mount = func(source string, target string, mType string, flags uintptr, label string) error {
return mountFrom(d.home, source, target, mType, flags, label)
}
}
mountTarget = path.Join(id, "merged")
}
@ -764,6 +822,13 @@ func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent str
return 0, err
}

_, convert := d.convert[id]
if convert {
if err := ostree.ConvertToOSTree(d.options.ostreeRepo, applyDir, id); err != nil {
return 0, err
}
}

return directory.Size(applyDir)
}

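The three new knobs are plain graph options; hypothetical values that would exercise each branch of parseOptions above (paths are illustrative):

options := []string{
	"overlay.mount_program=/usr/bin/fuse-overlayfs",  // must exist: parseOptions calls os.Stat on it
	"overlay.ostree_repo=/var/lib/containers/ostree", // requires ostree support to be compiled in
	"overlay.skip_mount_home=true",                   // parsed with strconv.ParseBool
}
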
54
vendor/github.com/containers/storage/drivers/vfs/driver.go
generated
vendored
54
vendor/github.com/containers/storage/drivers/vfs/driver.go
generated
vendored
@ -9,6 +9,7 @@ import (
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ostree"
"github.com/containers/storage/pkg/system"
"github.com/opencontainers/selinux/go-selinux/label"
)
@ -42,6 +43,27 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
d.homes = append(d.homes, strings.Split(option[12:], ",")...)
continue
}
if strings.HasPrefix(option, "vfs.ostree_repo=") {
if !ostree.OstreeSupport() {
return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing")
}
d.ostreeRepo = option[16:]
}
if strings.HasPrefix(option, ".ostree_repo=") {
if !ostree.OstreeSupport() {
return nil, fmt.Errorf("vfs: ostree_repo specified but support for ostree is missing")
}
d.ostreeRepo = option[13:]
}
}
if d.ostreeRepo != "" {
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
if err := ostree.CreateOSTreeRepository(d.ostreeRepo, rootUID, rootGID); err != nil {
return nil, err
}
}
return graphdriver.NewNaiveDiffDriver(d, graphdriver.NewNaiveLayerIDMapUpdater(d)), nil
}
@ -53,6 +75,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
type Driver struct {
homes []string
idMappings *idtools.IDMappings
ostreeRepo string
}

func (d *Driver) String() string {
@ -77,11 +100,15 @@ func (d *Driver) Cleanup() error {
// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
return d.Create(id, parent, opts)
return d.create(id, parent, opts, false)
}

// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent.
func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
return d.create(id, parent, opts, true)
}

func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool) error {
if opts != nil && len(opts.StorageOpt) != 0 {
return fmt.Errorf("--storage-opt is not supported for vfs")
}
@ -106,14 +133,23 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if _, mountLabel, err := label.InitLabels(labelOpts); err == nil {
label.SetFileLabel(dir, mountLabel)
}
if parent == "" {
return nil
if parent != "" {
parentDir, err := d.Get(parent, "")
if err != nil {
return fmt.Errorf("%s: %s", parent, err)
}
if err := CopyWithTar(parentDir, dir); err != nil {
return err
}
}
parentDir, err := d.Get(parent, "")
if err != nil {
return fmt.Errorf("%s: %s", parent, err)

if ro && d.ostreeRepo != "" {
if err := ostree.ConvertToOSTree(d.ostreeRepo, dir, id); err != nil {
return err
}
}
return CopyWithTar(parentDir, dir)
return nil

}

func (d *Driver) dir(id string) string {
@ -132,6 +168,10 @@ func (d *Driver) dir(id string) string {

// Remove deletes the content from the directory for a given id.
func (d *Driver) Remove(id string) error {
if d.ostreeRepo != "" {
// Ignore errors, we don't want to fail if the ostree branch doesn't exist,
ostree.DeleteOSTree(d.ostreeRepo, id)
}
return system.EnsureRemoveAll(d.dir(id))
}

5
vendor/github.com/containers/storage/drivers/windows/windows.go
generated
vendored
@ -940,11 +940,6 @@ func (d *Driver) AdditionalImageStores() []string {
return nil
}

// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
return nil
}

// UpdateLayerIDMap changes ownerships in the layer's filesystem tree from
// matching those in toContainer to matching those in toHost.
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
34
vendor/github.com/containers/storage/images.go
generated
vendored
@ -40,6 +40,11 @@ type Image struct {
// same top layer.
TopLayer string `json:"layer,omitempty"`

// MappedTopLayers are the IDs of alternate versions of the top layer
// which have the same contents and parent, and which differ from
// TopLayer only in which ID mappings they use.
MappedTopLayers []string `json:"mapped-layers,omitempty"`

// Metadata is data we keep for the convenience of the caller. It is not
// expected to be large, since it is kept in memory.
Metadata string `json:"metadata,omitempty"`
@ -126,16 +131,17 @@ type imageStore struct {

func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
Metadata: i.Metadata,
BigDataNames: copyStringSlice(i.BigDataNames),
BigDataSizes: copyStringInt64Map(i.BigDataSizes),
BigDataDigests: copyStringDigestMap(i.BigDataDigests),
Created: i.Created,
Flags: copyStringInterfaceMap(i.Flags),
ID: i.ID,
Digest: i.Digest,
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
Metadata: i.Metadata,
BigDataNames: copyStringSlice(i.BigDataNames),
BigDataSizes: copyStringInt64Map(i.BigDataSizes),
BigDataDigests: copyStringDigestMap(i.BigDataDigests),
Created: i.Created,
Flags: copyStringInterfaceMap(i.Flags),
}
}

@ -362,6 +368,14 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
return image, err
}

func (r *imageStore) addMappedTopLayer(id, layer string) error {
if image, ok := r.lookup(id); ok {
image.MappedTopLayers = append(image.MappedTopLayers, layer)
return r.Save()
}
return ErrImageUnknown
}

func (r *imageStore) Metadata(id string) (string, error) {
if image, ok := r.lookup(id); ok {
return image.Metadata, nil
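The copyImage rewrite above also shows why every slice and map field goes through a copy helper: callers get a record that shares no mutable state with the store. A reduced, runnable illustration (the types and helper here are simplified stand-ins for the vendored ones):

```go
package main

import "fmt"

// copyStringSlice returns an independent copy of s, so writes through the
// copy cannot alias the original backing array.
func copyStringSlice(s []string) []string {
	if s == nil {
		return nil
	}
	out := make([]string, len(s))
	copy(out, s)
	return out
}

type image struct {
	TopLayer        string
	MappedTopLayers []string
}

// copyImage hands out a value that shares nothing mutable with i.
func copyImage(i *image) *image {
	return &image{
		TopLayer:        i.TopLayer,
		MappedTopLayers: copyStringSlice(i.MappedTopLayers),
	}
}

func main() {
	orig := &image{TopLayer: "a", MappedTopLayers: []string{"b"}}
	dup := copyImage(orig)
	dup.MappedTopLayers[0] = "changed"
	fmt.Println(orig.MappedTopLayers[0]) // still "b": no shared backing array
}
```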
112
vendor/github.com/containers/storage/images_ffjson.go
generated
vendored
@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: ./images.go
// source: images.go

package storage

@ -64,6 +64,22 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
fflib.WriteJsonString(buf, string(j.TopLayer))
buf.WriteByte(',')
}
if len(j.MappedTopLayers) != 0 {
buf.WriteString(`"mapped-layers":`)
if j.MappedTopLayers != nil {
buf.WriteString(`[`)
for i, v := range j.MappedTopLayers {
if i != 0 {
buf.WriteString(`,`)
}
fflib.WriteJsonString(buf, string(v))
}
buf.WriteString(`]`)
} else {
buf.WriteString(`null`)
}
buf.WriteByte(',')
}
if len(j.Metadata) != 0 {
buf.WriteString(`"metadata":`)
fflib.WriteJsonString(buf, string(j.Metadata))
@ -157,6 +173,8 @@ const (

ffjtImageTopLayer

ffjtImageMappedTopLayers

ffjtImageMetadata

ffjtImageBigDataNames
@ -178,6 +196,8 @@ var ffjKeyImageNames = []byte("names")

var ffjKeyImageTopLayer = []byte("layer")

var ffjKeyImageMappedTopLayers = []byte("mapped-layers")

var ffjKeyImageMetadata = []byte("metadata")

var ffjKeyImageBigDataNames = []byte("big-data-names")
@ -311,7 +331,12 @@ mainparse:

case 'm':

if bytes.Equal(ffjKeyImageMetadata, kn) {
if bytes.Equal(ffjKeyImageMappedTopLayers, kn) {
currentKey = ffjtImageMappedTopLayers
state = fflib.FFParse_want_colon
goto mainparse

} else if bytes.Equal(ffjKeyImageMetadata, kn) {
currentKey = ffjtImageMetadata
state = fflib.FFParse_want_colon
goto mainparse
@ -363,6 +388,12 @@ mainparse:
goto mainparse
}

if fflib.EqualFoldRight(ffjKeyImageMappedTopLayers, kn) {
currentKey = ffjtImageMappedTopLayers
state = fflib.FFParse_want_colon
goto mainparse
}

if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) {
currentKey = ffjtImageTopLayer
state = fflib.FFParse_want_colon
@ -416,6 +447,9 @@ mainparse:
case ffjtImageTopLayer:
goto handle_TopLayer

case ffjtImageMappedTopLayers:
goto handle_MappedTopLayers

case ffjtImageMetadata:
goto handle_Metadata

@ -600,6 +634,80 @@ handle_TopLayer:
state = fflib.FFParse_after_value
goto mainparse

handle_MappedTopLayers:

/* handler: j.MappedTopLayers type=[]string kind=slice quoted=false*/

{

{
if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null {
return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok))
}
}

if tok == fflib.FFTok_null {
j.MappedTopLayers = nil
} else {

j.MappedTopLayers = []string{}

wantVal := true

for {

var tmpJMappedTopLayers string

tok = fs.Scan()
if tok == fflib.FFTok_error {
goto tokerror
}
if tok == fflib.FFTok_right_brace {
break
}

if tok == fflib.FFTok_comma {
if wantVal == true {
// TODO(pquerna): this isn't an ideal error message, this handles
// things like [,,,] as an array value.
return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
}
continue
} else {
wantVal = true
}

/* handler: tmpJMappedTopLayers type=string kind=string quoted=false*/

{

{
if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok))
}
}

if tok == fflib.FFTok_null {

} else {

outBuf := fs.Output.Bytes()

tmpJMappedTopLayers = string(string(outBuf))

}
}

j.MappedTopLayers = append(j.MappedTopLayers, tmpJMappedTopLayers)

wantVal = false
}
}
}

state = fflib.FFParse_after_value
goto mainparse

handle_Metadata:

/* handler: j.Metadata type=string kind=string quoted=false*/
37
vendor/github.com/containers/storage/layers.go
generated
vendored
@ -211,7 +211,10 @@ type LayerStore interface {
Mount(id, mountLabel string) (string, error)

// Unmount unmounts a layer when it is no longer in use.
Unmount(id string) error
Unmount(id string, force bool) (bool, error)

// Mounted returns whether or not the layer is mounted.
Mounted(id string) (bool, error)

// ParentOwners returns the UIDs and GIDs of parents of the layer's mountpoint
// for which the layer's UID and GID maps don't contain corresponding entries.
@ -624,6 +627,14 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel
return r.CreateWithFlags(id, parent, names, mountLabel, options, moreOptions, writeable, nil)
}

func (r *layerStore) Mounted(id string) (bool, error) {
layer, ok := r.lookup(id)
if !ok {
return false, ErrLayerUnknown
}
return layer.MountCount > 0, nil
}

func (r *layerStore) Mount(id, mountLabel string) (string, error) {
if !r.IsReadWrite() {
return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
@ -652,21 +663,24 @@ func (r *layerStore) Mount(id, mountLabel string) (string, error) {
return mountpoint, err
}

func (r *layerStore) Unmount(id string) error {
func (r *layerStore) Unmount(id string, force bool) (bool, error) {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
layer, ok := r.lookup(id)
if !ok {
layerByMount, ok := r.bymount[filepath.Clean(id)]
if !ok {
return ErrLayerUnknown
return false, ErrLayerUnknown
}
layer = layerByMount
}
if force {
layer.MountCount = 1
}
if layer.MountCount > 1 {
layer.MountCount--
return r.Save()
return true, r.Save()
}
err := r.driver.Put(id)
if err == nil || os.IsNotExist(err) {
@ -675,9 +689,9 @@ func (r *layerStore) Unmount(id string) error {
}
layer.MountCount--
layer.MountPoint = ""
err = r.Save()
return false, r.Save()
}
return err
return true, err
}

func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
@ -797,10 +811,8 @@ func (r *layerStore) Delete(id string) error {
return ErrLayerUnknown
}
id = layer.ID
for layer.MountCount > 0 {
if err := r.Unmount(id); err != nil {
return err
}
if _, err := r.Unmount(id, true); err != nil {
return err
}
err := r.driver.Remove(id)
if err == nil {
@ -917,7 +929,8 @@ func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) {
}

func (s *simpleGetCloser) Close() error {
return s.r.Unmount(s.id)
_, err := s.r.Unmount(s.id, false)
return err
}

func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
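Unmount now reports whether the layer is still mounted after the call, and force collapses the reference count so a single call suffices (which is what Delete above now relies on). A hedged sketch of the caller-side loop the boolean return enables, against an abbreviated stand-in interface:

```go
package main

// layerUnmounter is a stand-in for the LayerStore interface above.
type layerUnmounter interface {
	Unmount(id string, force bool) (bool, error)
}

// drainMounts unmounts until the reference count reaches zero, the
// incremental equivalent of a single Unmount(id, true).
func drainMounts(s layerUnmounter, id string) error {
	for {
		stillMounted, err := s.Unmount(id, false)
		if err != nil {
			return err
		}
		if !stillMounted {
			return nil
		}
	}
}

func main() {}
```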
16
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
@ -56,6 +56,11 @@ type (
// replaced with the matching name from this map.
RebaseNames map[string]string
InUserNS bool
// CopyPass indicates that the contents of any archive we're creating
// will instantly be extracted and written to disk, so we can deviate
// from the traditional behavior/format to get features like subsecond
// precision in timestamps.
CopyPass bool
}
)

@ -396,6 +401,11 @@ type tarAppender struct {
// by the AUFS standard are used as the tar whiteout
// standard.
WhiteoutConverter tarWhiteoutConverter
// CopyPass indicates that the contents of any archive we're creating
// will instantly be extracted and written to disk, so we can deviate
// from the traditional behavior/format to get features like subsecond
// precision in timestamps.
CopyPass bool
}

func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender {
@ -446,6 +456,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
return err
}
if ta.CopyPass {
copyPassHeader(hdr)
}

// if it's not a directory and has more than 1 link,
// it's hard linked, so set the type flag accordingly
@ -710,6 +723,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
options.ChownOpts,
)
ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
ta.CopyPass = options.CopyPass

defer func() {
// Make sure to check the error on Close.
@ -1039,6 +1053,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
UIDMaps: tarMappings.UIDs(),
GIDMaps: tarMappings.GIDs(),
Compression: Uncompressed,
CopyPass: true,
}
archive, err := TarWithOptions(src, options)
if err != nil {
@ -1145,6 +1160,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
}
hdr.Name = filepath.Base(dst)
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
copyPassHeader(hdr)

if err := remapIDs(archiver.TarIDMappings, nil, archiver.ChownOpts, hdr); err != nil {
return err
11
vendor/github.com/containers/storage/pkg/archive/archive_110.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
// +build go1.10

package archive

import (
"archive/tar"
)

func copyPassHeader(hdr *tar.Header) {
hdr.Format = tar.FormatPAX
}
10
vendor/github.com/containers/storage/pkg/archive/archive_19.go
generated
vendored
Normal file
@ -0,0 +1,10 @@
// +build !go1.10

package archive

import (
"archive/tar"
)

func copyPassHeader(hdr *tar.Header) {
}
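The split above exists because tar.Header gained a Format field in Go 1.10, and the PAX format records timestamps with sub-second precision while the older formats truncate to whole seconds. A standard-library-only demonstration of the effect (errors elided for brevity):

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"time"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	hdr := &tar.Header{
		Name:    "file.txt",
		Size:    5,
		Mode:    0600,
		ModTime: time.Now(),    // carries nanoseconds
		Format:  tar.FormatPAX, // what copyPassHeader selects on go1.10+
	}
	tw.WriteHeader(hdr)
	io.WriteString(tw, "hello")
	tw.Close()

	tr := tar.NewReader(&buf)
	got, _ := tr.Next()
	// With FormatPAX the nanosecond component survives the round trip;
	// without it, ModTime would come back truncated to the second.
	fmt.Println(got.ModTime.Nanosecond())
}
```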
118
vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go
generated
vendored
@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: ./pkg/archive/archive.go
// source: pkg/archive/archive.go

package archive

@ -491,6 +491,11 @@ func (j *TarOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
} else {
buf.WriteString(`,"InUserNS":false`)
}
if j.CopyPass {
buf.WriteString(`,"CopyPass":true`)
} else {
buf.WriteString(`,"CopyPass":false`)
}
buf.WriteByte('}')
return nil
}
@ -524,6 +529,8 @@ const (
ffjtTarOptionsRebaseNames

ffjtTarOptionsInUserNS

ffjtTarOptionsCopyPass
)

var ffjKeyTarOptionsIncludeFiles = []byte("IncludeFiles")
@ -552,6 +559,8 @@ var ffjKeyTarOptionsRebaseNames = []byte("RebaseNames")

var ffjKeyTarOptionsInUserNS = []byte("InUserNS")

var ffjKeyTarOptionsCopyPass = []byte("CopyPass")

// UnmarshalJSON umarshall json - template of ffjson
func (j *TarOptions) UnmarshalJSON(input []byte) error {
fs := fflib.NewFFLexer(input)
@ -624,6 +633,11 @@ mainparse:
currentKey = ffjtTarOptionsChownOpts
state = fflib.FFParse_want_colon
goto mainparse

} else if bytes.Equal(ffjKeyTarOptionsCopyPass, kn) {
currentKey = ffjtTarOptionsCopyPass
state = fflib.FFParse_want_colon
goto mainparse
}

case 'E':
@ -704,6 +718,12 @@ mainparse:

}

if fflib.EqualFoldRight(ffjKeyTarOptionsCopyPass, kn) {
currentKey = ffjtTarOptionsCopyPass
state = fflib.FFParse_want_colon
goto mainparse
}

if fflib.EqualFoldRight(ffjKeyTarOptionsInUserNS, kn) {
currentKey = ffjtTarOptionsInUserNS
state = fflib.FFParse_want_colon
@ -838,6 +858,9 @@ mainparse:
case ffjtTarOptionsInUserNS:
goto handle_InUserNS

case ffjtTarOptionsCopyPass:
goto handle_CopyPass

case ffjtTarOptionsnosuchkey:
err = fs.SkipField(tok)
if err != nil {
@ -1481,6 +1504,41 @@ handle_InUserNS:
state = fflib.FFParse_after_value
goto mainparse

handle_CopyPass:

/* handler: j.CopyPass type=bool kind=bool quoted=false*/

{
if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
}
}

{
if tok == fflib.FFTok_null {

} else {
tmpb := fs.Output.Bytes()

if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {

j.CopyPass = true

} else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {

j.CopyPass = false

} else {
err = errors.New("unexpected bytes for true/false value")
return fs.WrapErr(err)
}

}
}

state = fflib.FFParse_after_value
goto mainparse

wantedvalue:
return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
wrongtokenerror:
@ -1773,6 +1831,11 @@ func (j *tarAppender) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
if err != nil {
return err
}
if j.CopyPass {
buf.WriteString(`,"CopyPass":true`)
} else {
buf.WriteString(`,"CopyPass":false`)
}
buf.WriteByte('}')
return nil
}
@ -1792,6 +1855,8 @@ const (
ffjttarAppenderChownOpts

ffjttarAppenderWhiteoutConverter

ffjttarAppenderCopyPass
)

var ffjKeytarAppenderTarWriter = []byte("TarWriter")
@ -1806,6 +1871,8 @@ var ffjKeytarAppenderChownOpts = []byte("ChownOpts")

var ffjKeytarAppenderWhiteoutConverter = []byte("WhiteoutConverter")

var ffjKeytarAppenderCopyPass = []byte("CopyPass")

// UnmarshalJSON umarshall json - template of ffjson
func (j *tarAppender) UnmarshalJSON(input []byte) error {
fs := fflib.NewFFLexer(input)
@ -1881,6 +1948,11 @@ mainparse:
currentKey = ffjttarAppenderChownOpts
state = fflib.FFParse_want_colon
goto mainparse

} else if bytes.Equal(ffjKeytarAppenderCopyPass, kn) {
currentKey = ffjttarAppenderCopyPass
state = fflib.FFParse_want_colon
goto mainparse
}

case 'I':
@ -1917,6 +1989,12 @@ mainparse:

}

if fflib.EqualFoldRight(ffjKeytarAppenderCopyPass, kn) {
currentKey = ffjttarAppenderCopyPass
state = fflib.FFParse_want_colon
goto mainparse
}

if fflib.SimpleLetterEqualFold(ffjKeytarAppenderWhiteoutConverter, kn) {
currentKey = ffjttarAppenderWhiteoutConverter
state = fflib.FFParse_want_colon
@ -1988,6 +2066,9 @@ mainparse:
case ffjttarAppenderWhiteoutConverter:
goto handle_WhiteoutConverter

case ffjttarAppenderCopyPass:
goto handle_CopyPass

case ffjttarAppendernosuchkey:
err = fs.SkipField(tok)
if err != nil {
@ -2211,6 +2292,41 @@ handle_WhiteoutConverter:
state = fflib.FFParse_after_value
goto mainparse

handle_CopyPass:

/* handler: j.CopyPass type=bool kind=bool quoted=false*/

{
if tok != fflib.FFTok_bool && tok != fflib.FFTok_null {
return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for bool", tok))
}
}

{
if tok == fflib.FFTok_null {

} else {
tmpb := fs.Output.Bytes()

if bytes.Compare([]byte{'t', 'r', 'u', 'e'}, tmpb) == 0 {

j.CopyPass = true

} else if bytes.Compare([]byte{'f', 'a', 'l', 's', 'e'}, tmpb) == 0 {

j.CopyPass = false

} else {
err = errors.New("unexpected bytes for true/false value")
return fs.WrapErr(err)
}

}
}

state = fflib.FFParse_after_value
goto mainparse

wantedvalue:
return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok))
wrongtokenerror:
2
vendor/github.com/containers/storage/pkg/directory/directory_unix.go
generated
vendored
@ -1,4 +1,4 @@
// +build linux freebsd solaris
// +build linux darwin freebsd solaris

package directory

19
vendor/github.com/containers/storage/pkg/ostree/no_ostree.go
generated
vendored
Normal file
@ -0,0 +1,19 @@
// +build !ostree

package ostree

func OstreeSupport() bool {
return false
}

func DeleteOSTree(repoLocation, id string) error {
return nil
}

func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error {
return nil
}

func ConvertToOSTree(repoLocation, root, id string) error {
return nil
}
198
vendor/github.com/containers/storage/pkg/ostree/ostree.go
generated
vendored
Normal file
@ -0,0 +1,198 @@
// +build ostree

package ostree

import (
"fmt"
"golang.org/x/sys/unix"
"os"
"path/filepath"
"runtime"
"syscall"
"time"
"unsafe"

"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
"github.com/pkg/errors"
)

// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include <stdlib.h>
// #include <ostree.h>
// #include <gio/ginputstream.h>
import "C"

func OstreeSupport() bool {
return true
}

func fixFiles(dir string, usermode bool) (bool, []string, error) {
var SkipOstree = errors.New("skip ostree deduplication")

var whiteouts []string

err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
if !usermode {
stat, ok := info.Sys().(*syscall.Stat_t)
if !ok {
return errors.New("not syscall.Stat_t")
}

if stat.Rdev == 0 && (stat.Mode&unix.S_IFCHR) != 0 {
whiteouts = append(whiteouts, path)
return nil
}
}
// Skip the ostree deduplication if we encounter a file type that
// ostree does not manage.
return SkipOstree
}
if info.IsDir() {
if usermode {
if err := os.Chmod(path, info.Mode()|0700); err != nil {
return err
}
}
} else if usermode && (info.Mode().IsRegular()) {
if err := os.Chmod(path, info.Mode()|0600); err != nil {
return err
}
}
return nil
})
if err == SkipOstree {
return true, nil, nil
}
if err != nil {
return false, nil, err
}
return false, whiteouts, nil
}

// Create prepares the filesystem for the OSTREE driver and copies the directory for the given id under the parent.
func ConvertToOSTree(repoLocation, root, id string) error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()
repo, err := otbuiltin.OpenRepo(repoLocation)
if err != nil {
return errors.Wrap(err, "could not open the OSTree repository")
}

skip, whiteouts, err := fixFiles(root, os.Getuid() != 0)
if err != nil {
return errors.Wrap(err, "could not prepare the OSTree directory")
}
if skip {
return nil
}

if _, err := repo.PrepareTransaction(); err != nil {
return errors.Wrap(err, "could not prepare the OSTree transaction")
}

if skip {
return nil
}

commitOpts := otbuiltin.NewCommitOptions()
commitOpts.Timestamp = time.Now()
commitOpts.LinkCheckoutSpeedup = true
commitOpts.Parent = "0000000000000000000000000000000000000000000000000000000000000000"
branch := fmt.Sprintf("containers-storage/%s", id)

for _, w := range whiteouts {
if err := os.Remove(w); err != nil {
return errors.Wrap(err, "could not delete whiteout file")
}
}

if _, err := repo.Commit(root, branch, commitOpts); err != nil {
return errors.Wrap(err, "could not commit the layer")
}

if _, err := repo.CommitTransaction(); err != nil {
return errors.Wrap(err, "could not complete the OSTree transaction")
}

if err := system.EnsureRemoveAll(root); err != nil {
return errors.Wrap(err, "could not delete layer")
}

checkoutOpts := otbuiltin.NewCheckoutOptions()
checkoutOpts.RequireHardlinks = true
checkoutOpts.Whiteouts = false
if err := otbuiltin.Checkout(repoLocation, root, branch, checkoutOpts); err != nil {
return errors.Wrap(err, "could not checkout from OSTree")
}

for _, w := range whiteouts {
if err := unix.Mknod(w, unix.S_IFCHR, 0); err != nil {
return errors.Wrap(err, "could not recreate whiteout file")
}
}
return nil
}

func CreateOSTreeRepository(repoLocation string, rootUID int, rootGID int) error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()

_, err := os.Stat(repoLocation)
if err != nil && !os.IsNotExist(err) {
return err
} else if err != nil {
if err := idtools.MkdirAllAs(repoLocation, 0700, rootUID, rootGID); err != nil {
return errors.Wrap(err, "could not create OSTree repository directory: %v")
}

if _, err := otbuiltin.Init(repoLocation, otbuiltin.NewInitOptions()); err != nil {
return errors.Wrap(err, "could not create OSTree repository")
}
}
return nil
}

func openRepo(path string) (*C.struct_OstreeRepo, error) {
var cerr *C.GError
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
pathc := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(pathc))
repo := C.ostree_repo_new(pathc)
r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
if !r {
C.g_object_unref(C.gpointer(repo))
return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
return repo, nil
}

func DeleteOSTree(repoLocation, id string) error {
runtime.LockOSThread()
defer runtime.UnlockOSThread()

repo, err := openRepo(repoLocation)
if err != nil {
return err
}
defer C.g_object_unref(C.gpointer(repo))

branch := fmt.Sprintf("containers-storage/%s", id)

cbranch := C.CString(branch)
defer C.free(unsafe.Pointer(cbranch))

var cerr *C.GError
r := glib.GoBool(glib.GBoolean(C.ostree_repo_set_ref_immediate(repo, nil, cbranch, nil, nil, &cerr)))
if !r {
return glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
return nil
}
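Taken together, the stub and the cgo implementation give callers one build-tag-independent API. A hedged sketch of the intended call sequence, using only the functions the two files above declare (the paths and ownership are placeholders):

```go
package main

import "github.com/containers/storage/pkg/ostree"

// dedupLayer is a hypothetical caller; it compiles against either the stub
// (!ostree) or the cgo implementation (ostree build tag) shown above.
func dedupLayer(repo, layerDir, layerID string) error {
	if !ostree.OstreeSupport() {
		return nil // built without ostree support; nothing to do
	}
	// Ensure the repository exists; 0, 0 requests root ownership here.
	if err := ostree.CreateOSTreeRepository(repo, 0, 0); err != nil {
		return err
	}
	// Commit the layer to the repo, then check it back out hardlinked.
	return ostree.ConvertToOSTree(repo, layerDir, layerID)
}

func main() {}
```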
1
vendor/github.com/containers/storage/pkg/stringutils/README.md
generated
vendored
Normal file
@ -0,0 +1 @@
This package provides helper functions for dealing with strings
99
vendor/github.com/containers/storage/pkg/stringutils/stringutils.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
// Package stringutils provides helper functions for dealing with strings.
package stringutils

import (
"bytes"
"math/rand"
"strings"
)

// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n.
func GenerateRandomAlphaOnlyString(n int) string {
// make a really long string
letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
b := make([]byte, n)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}

// GenerateRandomASCIIString generates an ASCII random string with length n.
func GenerateRandomASCIIString(n int) string {
chars := "abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
"~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` "
res := make([]byte, n)
for i := 0; i < n; i++ {
res[i] = chars[rand.Intn(len(chars))]
}
return string(res)
}

// Ellipsis truncates a string to fit within maxlen, and appends ellipsis (...).
// For maxlen of 3 and lower, no ellipsis is appended.
func Ellipsis(s string, maxlen int) string {
r := []rune(s)
if len(r) <= maxlen {
return s
}
if maxlen <= 3 {
return string(r[:maxlen])
}
return string(r[:maxlen-3]) + "..."
}

// Truncate truncates a string to maxlen.
func Truncate(s string, maxlen int) string {
r := []rune(s)
if len(r) <= maxlen {
return s
}
return string(r[:maxlen])
}

// InSlice tests whether a string is contained in a slice of strings or not.
// Comparison is case insensitive
func InSlice(slice []string, s string) bool {
for _, ss := range slice {
if strings.ToLower(s) == strings.ToLower(ss) {
return true
}
}
return false
}

func quote(word string, buf *bytes.Buffer) {
// Bail out early for "simple" strings
if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
buf.WriteString(word)
return
}

buf.WriteString("'")

for i := 0; i < len(word); i++ {
b := word[i]
if b == '\'' {
// Replace literal ' with a close ', a \', and an open '
buf.WriteString("'\\''")
} else {
buf.WriteByte(b)
}
}

buf.WriteString("'")
}

// ShellQuoteArguments takes a list of strings and escapes them so they will be
// handled right when passed as arguments to a program via a shell
func ShellQuoteArguments(args []string) string {
var buf bytes.Buffer
for i, arg := range args {
if i != 0 {
buf.WriteByte(' ')
}
quote(arg, &buf)
}
return buf.String()
}
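A few quick usage examples for the helpers this new package exports, with expected outputs in comments:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/stringutils"
)

func main() {
	fmt.Println(stringutils.Ellipsis("containers-storage", 10))           // "contain..."
	fmt.Println(stringutils.Truncate("containers-storage", 10))           // "containers"
	fmt.Println(stringutils.InSlice([]string{"Layer", "Image"}, "image")) // true
	// Single quotes inside arguments survive a round trip through a shell.
	fmt.Println(stringutils.ShellQuoteArguments([]string{"echo", "it's a test"}))
	// echo 'it'\''s a test'
}
```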
14
vendor/github.com/containers/storage/pkg/system/stat_windows.go
generated
vendored
@ -28,6 +28,20 @@ func (s StatT) Mtim() time.Time {
return time.Time(s.mtim)
}

// UID returns file's user id of owner.
//
// on windows this is always 0 because there is no concept of UID
func (s StatT) UID() uint32 {
return 0
}

// GID returns file's group id of owner.
//
// on windows this is always 0 because there is no concept of GID
func (s StatT) GID() uint32 {
return 0
}

// Stat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
488
vendor/github.com/containers/storage/store.go
generated
vendored
@ -7,6 +7,7 @@ import (
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
@ -18,9 +19,11 @@ import (
"github.com/BurntSushi/toml"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@ -259,8 +262,11 @@ type Store interface {
Mount(id, mountLabel string) (string, error)

// Unmount attempts to unmount a layer, image, or container, given an ID, a
// name, or a mount path.
Unmount(id string) error
// name, or a mount path. Returns whether or not the layer is still mounted.
Unmount(id string, force bool) (bool, error)

// Mounted attempts to discover whether the specified id is mounted.
Mounted(id string) (bool, error)

// Changes returns a summary of the changes which would need to be made
// to one layer to make its contents the same as a second layer. If
@ -346,6 +352,9 @@ type Store interface {
// with an image.
SetImageBigData(id, key string, data []byte) error

// ImageSize computes the size of the image's layers and ancillary data.
ImageSize(id string) (int64, error)

// ListContainerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a container.
ListContainerBigData(id string) ([]string, error)
@ -366,6 +375,10 @@ type Store interface {
// associated with a container.
SetContainerBigData(id, key string, data []byte) error

// ContainerSize computes the size of the container's layer and ancillary
// data. Warning: this is a potentially expensive operation.
ContainerSize(id string) (int64, error)

// Layer returns a specific layer.
Layer(id string) (*Layer, error)

@ -949,6 +962,106 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
return ristore.Create(id, names, layer, metadata, creationDate, options.Digest)
}

func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, readWrite bool, rlstore LayerStore, lstores []ROLayerStore, options IDMappingOptions) (*Layer, error) {
layerMatchesMappingOptions := func(layer *Layer, options IDMappingOptions) bool {
// If we want host mapping, and the layer uses mappings, it's not the best match.
if options.HostUIDMapping && len(layer.UIDMap) != 0 {
return false
}
if options.HostGIDMapping && len(layer.GIDMap) != 0 {
return false
}
// If we don't care about the mapping, it's fine.
if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 {
return true
}
// Compare the maps.
return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
}
var layer, parentLayer *Layer
var layerHomeStore ROLayerStore
// Locate the image's top layer and its parent, if it has one.
for _, store := range append([]ROLayerStore{rlstore}, lstores...) {
if store != rlstore {
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
store.Load()
}
}
// Walk the top layer list.
for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
if cLayer, err := store.Get(candidate); err == nil {
// We want the layer's parent, too, if it has one.
var cParentLayer *Layer
if cLayer.Parent != "" {
// Its parent should be around here, somewhere.
if cParentLayer, err = store.Get(cLayer.Parent); err != nil {
// Nope, couldn't find it. We're not going to be able
// to diff this one properly.
continue
}
}
// If the layer matches the desired mappings, it's a perfect match,
// so we're actually done here.
if layerMatchesMappingOptions(cLayer, options) {
return cLayer, nil
}
// Record the first one that we found, even if it's not ideal, so that
// we have a starting point.
if layer == nil {
layer = cLayer
parentLayer = cParentLayer
layerHomeStore = store
}
}
}
}
if layer == nil {
return nil, ErrLayerUnknown
}
// The top layer's mappings don't match the ones we want, but it's in a read-only
// image store, so we can't create and add a mapped copy of the layer to the image.
if !readWrite {
return layer, nil
}
// The top layer's mappings don't match the ones we want, and it's in an image store
// that lets us edit image metadata...
if istore, ok := ristore.(*imageStore); ok {
// ... so extract the layer's contents, create a new copy of it with the
// desired mappings, and register it as an alternate top layer in the image.
noCompression := archive.Uncompressed
diffOptions := DiffOptions{
Compression: &noCompression,
}
rc, err := layerHomeStore.Diff("", layer.ID, &diffOptions)
if err != nil {
return nil, errors.Wrapf(err, "error reading layer %q to create an ID-mapped version of it")
}
defer rc.Close()
layerOptions := LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: options.HostUIDMapping,
HostGIDMapping: options.HostGIDMapping,
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
},
}
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, rc)
if err != nil {
return nil, errors.Wrapf(err, "error creating ID-mapped copy of layer %q")
}
if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2))
}
return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID)
}
layer = mappedLayer
}
return layer, nil
}

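Restating the matching rule the closure above encodes, since the surrounding search loop obscures it: a host-mapping request rejects any remapped layer, an empty request accepts anything, and otherwise the ID maps must compare equal. A standalone sketch with simplified stand-in types:

```go
package main

import "reflect"

type idMap struct{ ContainerID, HostID, Size int }

type mappingOptions struct {
	HostUIDMapping, HostGIDMapping bool
	UIDMap, GIDMap                 []idMap
}

type layerMaps struct{ UIDMap, GIDMap []idMap }

// matches mirrors layerMatchesMappingOptions from the hunk above.
func matches(l layerMaps, o mappingOptions) bool {
	if o.HostUIDMapping && len(l.UIDMap) != 0 {
		return false // caller wants host IDs, layer is remapped
	}
	if o.HostGIDMapping && len(l.GIDMap) != 0 {
		return false
	}
	if len(o.UIDMap) == 0 && len(o.GIDMap) == 0 {
		return true // caller does not care which mapping is used
	}
	return reflect.DeepEqual(l.UIDMap, o.UIDMap) && reflect.DeepEqual(l.GIDMap, o.GIDMap)
}

func main() {
	remapped := layerMaps{UIDMap: []idMap{{0, 100000, 65536}}}
	println(matches(remapped, mappingOptions{HostUIDMapping: true})) // false
	println(matches(remapped, mappingOptions{}))                     // true
}
```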
func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
if options == nil {
options = &ContainerOptions{}
@ -977,6 +1090,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
uidMap := options.UIDMap
gidMap := options.GIDMap
if image != "" {
var imageHomeStore ROImageStore
istore, err := s.ImageStore()
if err != nil {
return nil, err
@ -994,6 +1108,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
}
cimage, err = store.Get(image)
if err == nil {
imageHomeStore = store
break
}
}
@ -1006,22 +1121,9 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
if err != nil {
return nil, err
}
var ilayer *Layer
for _, store := range append([]ROLayerStore{rlstore}, lstores...) {
if store != rlstore {
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
store.Load()
}
}
ilayer, err = store.Get(cimage.TopLayer)
if err == nil {
break
}
}
if ilayer == nil {
return nil, ErrLayerUnknown
ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, imageHomeStore == istore, rlstore, lstores, options.IDMappingOptions)
if err != nil {
return nil, err
}
imageTopLayer = ilayer
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
@ -1279,6 +1381,200 @@ func (s *store) SetImageBigData(id, key string, data []byte) error {
return ristore.SetBigData(id, key, data)
}

func (s *store) ImageSize(id string) (int64, error) {
var image *Image

lstore, err := s.LayerStore()
if err != nil {
return -1, errors.Wrapf(err, "error loading primary layer store data")
}
lstores, err := s.ROLayerStores()
if err != nil {
return -1, errors.Wrapf(err, "error loading additional layer stores")
}
for _, store := range append([]ROLayerStore{lstore}, lstores...) {
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
store.Load()
}
}

var imageStore ROBigDataStore
istore, err := s.ImageStore()
if err != nil {
return -1, errors.Wrapf(err, "error loading primary image store data")
}
istores, err := s.ROImageStores()
if err != nil {
return -1, errors.Wrapf(err, "error loading additional image stores")
}

// Look for the image's record.
for _, store := range append([]ROImageStore{istore}, istores...) {
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
store.Load()
}
if image, err = store.Get(id); err == nil {
imageStore = store
break
}
}
if image == nil {
return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}

// Start with a list of the image's top layers.
queue := make(map[string]struct{})
for _, layerID := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
queue[layerID] = struct{}{}
}
visited := make(map[string]struct{})
// Walk all of the layers.
var size int64
for len(visited) < len(queue) {
for layerID := range queue {
// Visit each layer only once.
if _, ok := visited[layerID]; ok {
continue
}
visited[layerID] = struct{}{}
// Look for the layer and the store that knows about it.
var layerStore ROLayerStore
var layer *Layer
for _, store := range append([]ROLayerStore{lstore}, lstores...) {
if layer, err = store.Get(layerID); err == nil {
layerStore = store
break
}
}
if layer == nil {
return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID)
}
// The UncompressedSize is only valid if there's a digest to go with it.
n := layer.UncompressedSize
if layer.UncompressedDigest == "" {
// Compute the size.
n, err = layerStore.DiffSize("", layer.ID)
if err != nil {
return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID)
}
}
// Count this layer.
size += n
// Make a note to visit the layer's parent if we haven't already.
if layer.Parent != "" {
queue[layer.Parent] = struct{}{}
}
}
}

// Count big data items.
names, err := imageStore.BigDataNames(id)
if err != nil {
return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id)
}
for _, name := range names {
n, err := imageStore.BigDataSize(id, name)
if err != nil {
return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id)
}
size += n
}

return size, nil
}

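A hedged usage sketch for the two new size queries the interface above declares; it assumes s was obtained via storage.GetStore, and the IDs stand in for a real image and container:

```go
package main

import (
	"fmt"

	"github.com/containers/storage"
)

// reportSizes is a hypothetical caller; the IDs are placeholders.
func reportSizes(s storage.Store, imageID, containerID string) error {
	imgSize, err := s.ImageSize(imageID)
	if err != nil {
		return err // unknown image, or an unreadable layer diff
	}
	ctrSize, err := s.ContainerSize(containerID)
	if err != nil {
		return err // per the interface warning: potentially expensive
	}
	fmt.Printf("image: %d bytes, container: %d bytes\n", imgSize, ctrSize)
	return nil
}

func main() {}
```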
func (s *store) ContainerSize(id string) (int64, error) {
lstore, err := s.LayerStore()
if err != nil {
return -1, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return -1, err
}
for _, store := range append([]ROLayerStore{lstore}, lstores...) {
store.Lock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
store.Load()
}
}

// Get the location of the container directory and container run directory.
// Do it before we lock the container store because they do, too.
cdir, err := s.ContainerDirectory(id)
if err != nil {
return -1, err
}
rdir, err := s.ContainerRunDirectory(id)
if err != nil {
return -1, err
}

rcstore, err := s.ContainerStore()
if err != nil {
return -1, err
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
rcstore.Load()
}

// Read the container record.
container, err := rcstore.Get(id)
if err != nil {
return -1, err
}

// Read the container's layer's size.
var layer *Layer
var size int64
for _, store := range append([]ROLayerStore{lstore}, lstores...) {
if layer, err = store.Get(container.LayerID); err == nil {
size, err = store.DiffSize("", layer.ID)
if err != nil {
return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID)
}
break
}
}
if layer == nil {
return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID)
}

// Count big data items.
names, err := rcstore.BigDataNames(id)
if err != nil {
return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID)
}
for _, name := range names {
n, err := rcstore.BigDataSize(id, name)
if err != nil {
return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id)
}
size += n
}

// Count the size of our container directory and container run directory.
n, err := directory.Size(cdir)
if err != nil {
return -1, err
}
size += n
n, err = directory.Size(rdir)
if err != nil {
return -1, err
}
size += n

return size, nil
}

func (s *store) ListContainerBigData(id string) ([]string, error) {
rcstore, err := s.ContainerStore()
if err != nil {
@ -1614,7 +1910,7 @@ func (s *store) DeleteLayer(id string) error {
return err
}
for _, image := range images {
if image.TopLayer == id {
if image.TopLayer == id || stringutils.InSlice(image.MappedTopLayers, id) {
return errors.Wrapf(ErrLayerUsedByImage, "Layer %v used by image %v", id, image.ID)
}
}
@ -1697,10 +1993,13 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
childrenByParent[parent] = &([]string{layer.ID})
}
}
anyImageByTopLayer := make(map[string]string)
otherImagesByTopLayer := make(map[string]string)
for _, img := range images {
if img.ID != id {
anyImageByTopLayer[img.TopLayer] = img.ID
otherImagesByTopLayer[img.TopLayer] = img.ID
for _, layerID := range img.MappedTopLayers {
otherImagesByTopLayer[layerID] = img.ID
}
}
}
if commit {
@ -1714,27 +2013,38 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
if rcstore.Exists(layer) {
break
}
if _, ok := anyImageByTopLayer[layer]; ok {
if _, ok := otherImagesByTopLayer[layer]; ok {
break
}
parent := ""
if l, err := rlstore.Get(layer); err == nil {
parent = l.Parent
}
otherRefs := 0
if childList, ok := childrenByParent[layer]; ok && childList != nil {
children := *childList
for _, child := range children {
if child != lastRemoved {
otherRefs++
hasOtherRefs := func() bool {
layersToCheck := []string{layer}
if layer == image.TopLayer {
layersToCheck = append(layersToCheck, image.MappedTopLayers...)
}
for _, layer := range layersToCheck {
if childList, ok := childrenByParent[layer]; ok && childList != nil {
children := *childList
for _, child := range children {
if child != lastRemoved {
return true
}
}
}
}
return false
}
if otherRefs != 0 {
if hasOtherRefs() {
break
}
lastRemoved = layer
layersToRemove = append(layersToRemove, lastRemoved)
if layer == image.TopLayer {
layersToRemove = append(layersToRemove, image.MappedTopLayers...)
}
layer = parent
}
} else {
@ -1938,13 +2248,30 @@ func (s *store) Mount(id, mountLabel string) (string, error) {
return "", ErrLayerUnknown
}

func (s *store) Unmount(id string) error {
func (s *store) Mounted(id string) (bool, error) {
if layerID, err := s.ContainerLayerID(id); err == nil {
id = layerID
}
rlstore, err := s.LayerStore()
if err != nil {
return err
return false, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
rlstore.Load()
}

return rlstore.Mounted(id)
}

func (s *store) Unmount(id string, force bool) (bool, error) {
if layerID, err := s.ContainerLayerID(id); err == nil {
id = layerID
}
rlstore, err := s.LayerStore()
if err != nil {
return false, err
}
rlstore.Lock()
defer rlstore.Unlock()
@ -1952,9 +2279,9 @@ func (s *store) Unmount(id string) error {
rlstore.Load()
}
if rlstore.Exists(id) {
return rlstore.Unmount(id)
return rlstore.Unmount(id, force)
}
return ErrLayerUnknown
return false, ErrLayerUnknown
}

func (s *store) Changes(from, to string) ([]archive.Change, error) {
@ -2293,7 +2620,7 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
return nil, err
}
for _, image := range imageList {
if image.TopLayer == layer.ID {
if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) {
images = append(images, &image)
}
}
@ -2504,7 +2831,7 @@ func (s *store) Shutdown(force bool) ([]string, error) {
mounted = append(mounted, layer.ID)
if force {
for layer.MountCount > 0 {
err2 := rlstore.Unmount(layer.ID)
_, err2 := rlstore.Unmount(layer.ID, force)
if err2 != nil {
if err == nil {
err = err2
@ -2593,7 +2920,7 @@ func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
return ret
}

const configFile = "/etc/containers/storage.conf"
const defaultConfigFile = "/etc/containers/storage.conf"

// ThinpoolOptionsConfig represents the "storage.options.thinpool"
// TOML config table.
@ -2680,6 +3007,14 @@ type OptionsConfig struct {
RemapGroup string `toml:"remap-group"`
// Thinpool container options to be handed to thinpool drivers
Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"`
// OSTree repository
OstreeRepo string `toml:"ostree_repo"`

// Do not create a bind mount on the storage home
SkipMountHome string `toml:"skip_mount_home"`

// Alternative program to use for the mount of the file system
MountProgram string `toml:"mount_program"`
}

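The three new OptionsConfig fields map TOML keys straight onto strings. A hedged, runnable sketch of parsing such a config fragment with the BurntSushi/toml package this file already imports; the values, including the fuse-overlayfs path, are made up for illustration:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// optionsConfig mirrors the storage.options table from the hunk above;
// only the three new fields are reproduced here.
type optionsConfig struct {
	OstreeRepo    string `toml:"ostree_repo"`
	SkipMountHome string `toml:"skip_mount_home"`
	MountProgram  string `toml:"mount_program"`
}

type config struct {
	Storage struct {
		Options optionsConfig `toml:"options"`
	} `toml:"storage"`
}

func main() {
	// Illustrative storage.conf fragment; the values are made up.
	blob := `
[storage.options]
ostree_repo = "/var/lib/containers/ostree"
skip_mount_home = "true"
mount_program = "/usr/bin/fuse-overlayfs"
`
	var c config
	if _, err := toml.Decode(blob, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Storage.Options.MountProgram)
}
```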
// TOML-friendly explicit tables used for conversions.
@ -2692,11 +3027,9 @@ type tomlConfig struct {
} `toml:"storage"`
}

func init() {
DefaultStoreOptions.RunRoot = "/var/run/containers/storage"
DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
DefaultStoreOptions.GraphDriverName = ""

// ReloadConfigurationFile parses the specified configuration file and overrides
// the configuration in storeOptions.
func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
data, err := ioutil.ReadFile(configFile)
if err != nil {
if !os.IsNotExist(err) {
@ -2712,66 +3045,75 @@ func init() {
return
}
if config.Storage.Driver != "" {
DefaultStoreOptions.GraphDriverName = config.Storage.Driver
storeOptions.GraphDriverName = config.Storage.Driver
}
if config.Storage.RunRoot != "" {
DefaultStoreOptions.RunRoot = config.Storage.RunRoot
storeOptions.RunRoot = config.Storage.RunRoot
}
if config.Storage.GraphRoot != "" {
DefaultStoreOptions.GraphRoot = config.Storage.GraphRoot
storeOptions.GraphRoot = config.Storage.GraphRoot
}
if config.Storage.Options.Thinpool.AutoExtendPercent != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", config.Storage.Options.Thinpool.AutoExtendPercent))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", config.Storage.Options.Thinpool.AutoExtendPercent))
}

if config.Storage.Options.Thinpool.AutoExtendThreshold != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", config.Storage.Options.Thinpool.AutoExtendThreshold))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", config.Storage.Options.Thinpool.AutoExtendThreshold))
}

if config.Storage.Options.Thinpool.BaseSize != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.basesize=%s", config.Storage.Options.Thinpool.BaseSize))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.basesize=%s", config.Storage.Options.Thinpool.BaseSize))
}
if config.Storage.Options.Thinpool.BlockSize != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.blocksize=%s", config.Storage.Options.Thinpool.BlockSize))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.blocksize=%s", config.Storage.Options.Thinpool.BlockSize))
}
if config.Storage.Options.Thinpool.DirectLvmDevice != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device=%s", config.Storage.Options.Thinpool.DirectLvmDevice))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device=%s", config.Storage.Options.Thinpool.DirectLvmDevice))
}
if config.Storage.Options.Thinpool.DirectLvmDeviceForce != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device_force=%s", config.Storage.Options.Thinpool.DirectLvmDeviceForce))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device_force=%s", config.Storage.Options.Thinpool.DirectLvmDeviceForce))
}
if config.Storage.Options.Thinpool.Fs != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.fs=%s", config.Storage.Options.Thinpool.Fs))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.fs=%s", config.Storage.Options.Thinpool.Fs))
}
if config.Storage.Options.Thinpool.LogLevel != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.libdm_log_level=%s", config.Storage.Options.Thinpool.LogLevel))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.libdm_log_level=%s", config.Storage.Options.Thinpool.LogLevel))
}
if config.Storage.Options.Thinpool.MinFreeSpace != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.min_free_space=%s", config.Storage.Options.Thinpool.MinFreeSpace))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.min_free_space=%s", config.Storage.Options.Thinpool.MinFreeSpace))
}
if config.Storage.Options.Thinpool.MkfsArg != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.mkfsarg=%s", config.Storage.Options.Thinpool.MkfsArg))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.mkfsarg=%s", config.Storage.Options.Thinpool.MkfsArg))
}
if config.Storage.Options.Thinpool.MountOpt != "" {
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.mountopt=%s", config.Storage.Options.Thinpool.MountOpt))
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.mountopt=%s", config.Storage.Options.Thinpool.MountOpt))
|
||||
}
|
||||
if config.Storage.Options.Thinpool.UseDeferredDeletion != "" {
|
||||
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_deletion=%s", config.Storage.Options.Thinpool.UseDeferredDeletion))
|
||||
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_deletion=%s", config.Storage.Options.Thinpool.UseDeferredDeletion))
|
||||
}
|
||||
if config.Storage.Options.Thinpool.UseDeferredRemoval != "" {
|
||||
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_removal=%s", config.Storage.Options.Thinpool.UseDeferredRemoval))
|
||||
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_removal=%s", config.Storage.Options.Thinpool.UseDeferredRemoval))
|
||||
}
|
||||
if config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries != "" {
|
||||
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries))
|
||||
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries))
|
||||
}
|
||||
for _, s := range config.Storage.Options.AdditionalImageStores {
|
||||
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
|
||||
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
|
||||
}
|
||||
if config.Storage.Options.Size != "" {
|
||||
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size))
|
||||
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size))
|
||||
}
|
||||
if config.Storage.Options.OstreeRepo != "" {
|
||||
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ostree_repo=%s", config.Storage.Driver, config.Storage.Options.OstreeRepo))
|
||||
}
|
||||
if config.Storage.Options.SkipMountHome != "" {
|
||||
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.skip_mount_home=%s", config.Storage.Driver, config.Storage.Options.SkipMountHome))
|
||||
}
|
||||
if config.Storage.Options.MountProgram != "" {
|
||||
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram))
|
||||
}
|
||||
if config.Storage.Options.OverrideKernelCheck != "" {
|
||||
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck))
|
||||
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck))
|
||||
}
|
||||
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" {
|
||||
config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser
|
||||
@ -2785,8 +3127,8 @@ func init() {
|
||||
fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
|
||||
return
|
||||
}
|
||||
DefaultStoreOptions.UIDMap = mappings.UIDs()
|
||||
DefaultStoreOptions.GIDMap = mappings.GIDs()
|
||||
storeOptions.UIDMap = mappings.UIDs()
|
||||
storeOptions.GIDMap = mappings.GIDs()
|
||||
}
|
||||
nonDigitsToWhitespace := func(r rune) rune {
|
||||
if strings.IndexRune("0123456789", r) == -1 {
|
||||
@ -2836,15 +3178,23 @@ func init() {
|
||||
}
|
||||
return idmap
|
||||
}
|
||||
DefaultStoreOptions.UIDMap = append(DefaultStoreOptions.UIDMap, parseIDMap(config.Storage.Options.RemapUIDs, "remap-uids")...)
|
||||
DefaultStoreOptions.GIDMap = append(DefaultStoreOptions.GIDMap, parseIDMap(config.Storage.Options.RemapGIDs, "remap-gids")...)
|
||||
storeOptions.UIDMap = append(storeOptions.UIDMap, parseIDMap(config.Storage.Options.RemapUIDs, "remap-uids")...)
|
||||
storeOptions.GIDMap = append(storeOptions.GIDMap, parseIDMap(config.Storage.Options.RemapGIDs, "remap-gids")...)
|
||||
if os.Getenv("STORAGE_DRIVER") != "" {
|
||||
DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
|
||||
storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
|
||||
}
|
||||
if os.Getenv("STORAGE_OPTS") != "" {
|
||||
DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...)
|
||||
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...)
|
||||
}
|
||||
if len(DefaultStoreOptions.GraphDriverOptions) == 1 && DefaultStoreOptions.GraphDriverOptions[0] == "" {
|
||||
DefaultStoreOptions.GraphDriverOptions = nil
|
||||
if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" {
|
||||
storeOptions.GraphDriverOptions = nil
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
DefaultStoreOptions.RunRoot = "/var/run/containers/storage"
|
||||
DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
|
||||
DefaultStoreOptions.GraphDriverName = ""
|
||||
|
||||
ReloadConfigurationFile(defaultConfigFile, &DefaultStoreOptions)
|
||||
}
|
||||
|
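A minimal sketch of how a caller might use the ReloadConfigurationFile API introduced above: start from the package defaults and overlay a specific config file. The config path is hypothetical; the import path assumes the vendored github.com/containers/storage package.

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	// Copy the defaults, then let a site-specific file override them.
	opts := storage.DefaultStoreOptions
	storage.ReloadConfigurationFile("/tmp/storage.conf", &opts) // hypothetical path
	fmt.Println(opts.GraphDriverName, opts.GraphRoot, opts.GraphDriverOptions)
}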
1
vendor/github.com/containers/storage/vendor.conf
generated
vendored
@ -20,3 +20,4 @@ github.com/tchap/go-patricia v2.2.6
github.com/vbatts/tar-split v0.10.2
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
6
vendor/github.com/docker/distribution/vendor.conf
generated
vendored
@ -1,5 +1,5 @@
github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e
github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356
github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b
github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052
github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd
github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
@ -19,6 +19,8 @@ github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f
github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
17
vendor/github.com/go-check/check/check.go
generated
vendored
@ -239,7 +239,7 @@ func (c *C) logValue(label string, value interface{}) {
}
}

func (c *C) logMultiLine(s string) {
func formatMultiLine(s string, quote bool) []byte {
b := make([]byte, 0, len(s)*2)
i := 0
n := len(s)
@ -249,14 +249,23 @@ func (c *C) logMultiLine(s string) {
j++
}
b = append(b, "... "...)
b = strconv.AppendQuote(b, s[i:j])
if j < n {
if quote {
b = strconv.AppendQuote(b, s[i:j])
} else {
b = append(b, s[i:j]...)
b = bytes.TrimSpace(b)
}
if quote && j < n {
b = append(b, " +"...)
}
b = append(b, '\n')
i = j
}
c.writeLog(b)
return b
}

func (c *C) logMultiLine(s string) {
c.writeLog(formatMultiLine(s, true))
}

func isMultiLine(s string) bool {
70
vendor/github.com/go-check/check/checkers.go
generated
vendored
@ -4,6 +4,9 @@ import (
"fmt"
"reflect"
"regexp"
"strings"

"github.com/kr/pretty"
)

// -----------------------------------------------------------------------
@ -90,6 +93,10 @@ func (checker *notChecker) Info() *CheckerInfo {
func (checker *notChecker) Check(params []interface{}, names []string) (result bool, error string) {
result, error = checker.sub.Check(params, names)
result = !result
if result {
// clear error message if the new result is true
error = ""
}
return
}

@ -153,6 +160,56 @@ func (checker *notNilChecker) Check(params []interface{}, names []string) (resul
// -----------------------------------------------------------------------
// Equals checker.

func diffworthy(a interface{}) bool {
t := reflect.TypeOf(a)
switch t.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct, reflect.String, reflect.Ptr:
return true
}
return false
}

// formatUnequal will dump the actual and expected values into a textual
// representation and return an error message containing a diff.
func formatUnequal(obtained interface{}, expected interface{}) string {
// We do not do diffs for basic types because go-check already
// shows them very cleanly.
if !diffworthy(obtained) || !diffworthy(expected) {
return ""
}

// Handle strings, short strings are ignored (go-check formats
// them very nicely already). We do multi-line strings by
// generating two string slices and using kr.Diff to compare
// those (kr.Diff does not do string diffs by itself).
aStr, aOK := obtained.(string)
bStr, bOK := expected.(string)
if aOK && bOK {
l1 := strings.Split(aStr, "\n")
l2 := strings.Split(bStr, "\n")
// the "2" here is a bit arbitrary
if len(l1) > 2 && len(l2) > 2 {
diff := pretty.Diff(l1, l2)
return fmt.Sprintf(`String difference:
%s`, formatMultiLine(strings.Join(diff, "\n"), false))
}
// string too short
return ""
}

// generic diff
diff := pretty.Diff(obtained, expected)
if len(diff) == 0 {
// No diff, this happens when e.g. just struct
// pointers are different but the structs have
// identical values.
return ""
}

return fmt.Sprintf(`Difference:
%s`, formatMultiLine(strings.Join(diff, "\n"), false))
}

type equalsChecker struct {
*CheckerInfo
}
@ -175,7 +232,12 @@ func (checker *equalsChecker) Check(params []interface{}, names []string) (resul
error = fmt.Sprint(v)
}
}()
return params[0] == params[1], ""

result = params[0] == params[1]
if !result {
error = formatUnequal(params[0], params[1])
}
return
}

// -----------------------------------------------------------------------
@ -200,7 +262,11 @@ var DeepEquals Checker = &deepEqualsChecker{
}

func (checker *deepEqualsChecker) Check(params []interface{}, names []string) (result bool, error string) {
return reflect.DeepEqual(params[0], params[1]), ""
result = reflect.DeepEqual(params[0], params[1])
if !result {
error = formatUnequal(params[0], params[1])
}
return
}

// -----------------------------------------------------------------------
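For context, a sketch of the effect of this change on a go-check test suite. The suite/harness boilerplate and the gopkg.in/check.v1 import path are assumptions (the vendored copy lives at github.com/go-check/check); the point is only that a failing DeepEquals assertion now appends a pretty.Diff-based "Difference:" section instead of printing just the two values.

package example_test

import (
	"testing"

	check "gopkg.in/check.v1"
)

type point struct{ X, Y int }

type S struct{}

var _ = check.Suite(&S{})

func Test(t *testing.T) { check.TestingT(t) }

func (s *S) TestDiff(c *check.C) {
	// On failure, the patched checker reports per-field diffs such as "X: 1 != 2".
	c.Check(point{1, 2}, check.DeepEquals, point{2, 2})
}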
21
vendor/github.com/kr/pretty/License
generated
vendored
Normal file
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright 2012 Keith Rarick

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
9
vendor/github.com/kr/pretty/Readme
generated
vendored
Normal file
@ -0,0 +1,9 @@
package pretty

    import "github.com/kr/pretty"

Package pretty provides pretty-printing for Go values.

Documentation

http://godoc.org/github.com/kr/pretty
265
vendor/github.com/kr/pretty/diff.go
generated
vendored
Normal file
@ -0,0 +1,265 @@
package pretty

import (
"fmt"
"io"
"reflect"
)

type sbuf []string

func (p *sbuf) Printf(format string, a ...interface{}) {
s := fmt.Sprintf(format, a...)
*p = append(*p, s)
}

// Diff returns a slice where each element describes
// a difference between a and b.
func Diff(a, b interface{}) (desc []string) {
Pdiff((*sbuf)(&desc), a, b)
return desc
}

// wprintfer calls Fprintf on w for each Printf call
// with a trailing newline.
type wprintfer struct{ w io.Writer }

func (p *wprintfer) Printf(format string, a ...interface{}) {
fmt.Fprintf(p.w, format+"\n", a...)
}

// Fdiff writes to w a description of the differences between a and b.
func Fdiff(w io.Writer, a, b interface{}) {
Pdiff(&wprintfer{w}, a, b)
}

type Printfer interface {
Printf(format string, a ...interface{})
}

// Pdiff prints to p a description of the differences between a and b.
// It calls Printf once for each difference, with no trailing newline.
// The standard library log.Logger is a Printfer.
func Pdiff(p Printfer, a, b interface{}) {
diffPrinter{w: p}.diff(reflect.ValueOf(a), reflect.ValueOf(b))
}

type Logfer interface {
Logf(format string, a ...interface{})
}

// logprintfer calls Fprintf on w for each Printf call
// with a trailing newline.
type logprintfer struct{ l Logfer }

func (p *logprintfer) Printf(format string, a ...interface{}) {
p.l.Logf(format, a...)
}

// Ldiff prints to l a description of the differences between a and b.
// It calls Logf once for each difference, with no trailing newline.
// The standard library testing.T and testing.B are Logfers.
func Ldiff(l Logfer, a, b interface{}) {
Pdiff(&logprintfer{l}, a, b)
}

type diffPrinter struct {
w Printfer
l string // label
}

func (w diffPrinter) printf(f string, a ...interface{}) {
var l string
if w.l != "" {
l = w.l + ": "
}
w.w.Printf(l+f, a...)
}

func (w diffPrinter) diff(av, bv reflect.Value) {
if !av.IsValid() && bv.IsValid() {
w.printf("nil != %# v", formatter{v: bv, quote: true})
return
}
if av.IsValid() && !bv.IsValid() {
w.printf("%# v != nil", formatter{v: av, quote: true})
return
}
if !av.IsValid() && !bv.IsValid() {
return
}

at := av.Type()
bt := bv.Type()
if at != bt {
w.printf("%v != %v", at, bt)
return
}

switch kind := at.Kind(); kind {
case reflect.Bool:
if a, b := av.Bool(), bv.Bool(); a != b {
w.printf("%v != %v", a, b)
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if a, b := av.Int(), bv.Int(); a != b {
w.printf("%d != %d", a, b)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
if a, b := av.Uint(), bv.Uint(); a != b {
w.printf("%d != %d", a, b)
}
case reflect.Float32, reflect.Float64:
if a, b := av.Float(), bv.Float(); a != b {
w.printf("%v != %v", a, b)
}
case reflect.Complex64, reflect.Complex128:
if a, b := av.Complex(), bv.Complex(); a != b {
w.printf("%v != %v", a, b)
}
case reflect.Array:
n := av.Len()
for i := 0; i < n; i++ {
w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
}
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
if a, b := av.Pointer(), bv.Pointer(); a != b {
w.printf("%#x != %#x", a, b)
}
case reflect.Interface:
w.diff(av.Elem(), bv.Elem())
case reflect.Map:
ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys())
for _, k := range ak {
w := w.relabel(fmt.Sprintf("[%#v]", k))
w.printf("%q != (missing)", av.MapIndex(k))
}
for _, k := range both {
w := w.relabel(fmt.Sprintf("[%#v]", k))
w.diff(av.MapIndex(k), bv.MapIndex(k))
}
for _, k := range bk {
w := w.relabel(fmt.Sprintf("[%#v]", k))
w.printf("(missing) != %q", bv.MapIndex(k))
}
case reflect.Ptr:
switch {
case av.IsNil() && !bv.IsNil():
w.printf("nil != %# v", formatter{v: bv, quote: true})
case !av.IsNil() && bv.IsNil():
w.printf("%# v != nil", formatter{v: av, quote: true})
case !av.IsNil() && !bv.IsNil():
w.diff(av.Elem(), bv.Elem())
}
case reflect.Slice:
lenA := av.Len()
lenB := bv.Len()
if lenA != lenB {
w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB)
break
}
for i := 0; i < lenA; i++ {
w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i))
}
case reflect.String:
if a, b := av.String(), bv.String(); a != b {
w.printf("%q != %q", a, b)
}
case reflect.Struct:
for i := 0; i < av.NumField(); i++ {
w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i))
}
default:
panic("unknown reflect Kind: " + kind.String())
}
}

func (d diffPrinter) relabel(name string) (d1 diffPrinter) {
d1 = d
if d.l != "" && name[0] != '[' {
d1.l += "."
}
d1.l += name
return d1
}

// keyEqual compares a and b for equality.
// Both a and b must be valid map keys.
func keyEqual(av, bv reflect.Value) bool {
if !av.IsValid() && !bv.IsValid() {
return true
}
if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() {
return false
}
switch kind := av.Kind(); kind {
case reflect.Bool:
a, b := av.Bool(), bv.Bool()
return a == b
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
a, b := av.Int(), bv.Int()
return a == b
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
a, b := av.Uint(), bv.Uint()
return a == b
case reflect.Float32, reflect.Float64:
a, b := av.Float(), bv.Float()
return a == b
case reflect.Complex64, reflect.Complex128:
a, b := av.Complex(), bv.Complex()
return a == b
case reflect.Array:
for i := 0; i < av.Len(); i++ {
if !keyEqual(av.Index(i), bv.Index(i)) {
return false
}
}
return true
case reflect.Chan, reflect.UnsafePointer, reflect.Ptr:
a, b := av.Pointer(), bv.Pointer()
return a == b
case reflect.Interface:
return keyEqual(av.Elem(), bv.Elem())
case reflect.String:
a, b := av.String(), bv.String()
return a == b
case reflect.Struct:
for i := 0; i < av.NumField(); i++ {
if !keyEqual(av.Field(i), bv.Field(i)) {
return false
}
}
return true
default:
panic("invalid map key type " + av.Type().String())
}
}

func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) {
for _, av := range a {
inBoth := false
for _, bv := range b {
if keyEqual(av, bv) {
inBoth = true
both = append(both, av)
break
}
}
if !inBoth {
ak = append(ak, av)
}
}
for _, bv := range b {
inBoth := false
for _, av := range a {
if keyEqual(av, bv) {
inBoth = true
break
}
}
if !inBoth {
bk = append(bk, bv)
}
}
return
}
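A small usage sketch for the Diff API added above. The config struct is invented for illustration; Diff returns one string per differing field or element, labeled via relabel (for example a slice length mismatch reports something like "Ports: []int[1] != []int[2]").

package main

import (
	"fmt"

	"github.com/kr/pretty"
)

type config struct {
	Name  string
	Ports []int
}

func main() {
	a := config{Name: "web", Ports: []int{80}}
	b := config{Name: "web", Ports: []int{80, 443}}
	// Each element of the returned slice describes one difference.
	for _, d := range pretty.Diff(a, b) {
		fmt.Println(d)
	}
}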
328
vendor/github.com/kr/pretty/formatter.go
generated
vendored
Normal file
@ -0,0 +1,328 @@
package pretty

import (
"fmt"
"io"
"reflect"
"strconv"
"text/tabwriter"

"github.com/kr/text"
)

type formatter struct {
v reflect.Value
force bool
quote bool
}

// Formatter makes a wrapper, f, that will format x as go source with line
// breaks and tabs. Object f responds to the "%v" formatting verb when both the
// "#" and " " (space) flags are set, for example:
//
// fmt.Sprintf("%# v", Formatter(x))
//
// If one of these two flags is not set, or any other verb is used, f will
// format x according to the usual rules of package fmt.
// In particular, if x satisfies fmt.Formatter, then x.Format will be called.
func Formatter(x interface{}) (f fmt.Formatter) {
return formatter{v: reflect.ValueOf(x), quote: true}
}

func (fo formatter) String() string {
return fmt.Sprint(fo.v.Interface()) // unwrap it
}

func (fo formatter) passThrough(f fmt.State, c rune) {
s := "%"
for i := 0; i < 128; i++ {
if f.Flag(i) {
s += string(i)
}
}
if w, ok := f.Width(); ok {
s += fmt.Sprintf("%d", w)
}
if p, ok := f.Precision(); ok {
s += fmt.Sprintf(".%d", p)
}
s += string(c)
fmt.Fprintf(f, s, fo.v.Interface())
}

func (fo formatter) Format(f fmt.State, c rune) {
if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') {
w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0)
p := &printer{tw: w, Writer: w, visited: make(map[visit]int)}
p.printValue(fo.v, true, fo.quote)
w.Flush()
return
}
fo.passThrough(f, c)
}

type printer struct {
io.Writer
tw *tabwriter.Writer
visited map[visit]int
depth int
}

func (p *printer) indent() *printer {
q := *p
q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0)
q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'})
return &q
}

func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) {
if showType {
io.WriteString(p, v.Type().String())
fmt.Fprintf(p, "(%#v)", x)
} else {
fmt.Fprintf(p, "%#v", x)
}
}

// printValue must keep track of already-printed pointer values to avoid
// infinite recursion.
type visit struct {
v uintptr
typ reflect.Type
}

func (p *printer) printValue(v reflect.Value, showType, quote bool) {
if p.depth > 10 {
io.WriteString(p, "!%v(DEPTH EXCEEDED)")
return
}

switch v.Kind() {
case reflect.Bool:
p.printInline(v, v.Bool(), showType)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p.printInline(v, v.Int(), showType)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
p.printInline(v, v.Uint(), showType)
case reflect.Float32, reflect.Float64:
p.printInline(v, v.Float(), showType)
case reflect.Complex64, reflect.Complex128:
fmt.Fprintf(p, "%#v", v.Complex())
case reflect.String:
p.fmtString(v.String(), quote)
case reflect.Map:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
writeByte(p, '{')
if nonzero(v) {
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
keys := v.MapKeys()
for i := 0; i < v.Len(); i++ {
showTypeInStruct := true
k := keys[i]
mv := v.MapIndex(k)
pp.printValue(k, false, true)
writeByte(pp, ':')
if expand {
writeByte(pp, '\t')
}
showTypeInStruct = t.Elem().Kind() == reflect.Interface
pp.printValue(mv, showTypeInStruct, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.Len()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
}
writeByte(p, '}')
case reflect.Struct:
t := v.Type()
if v.CanAddr() {
addr := v.UnsafeAddr()
vis := visit{addr, t}
if vd, ok := p.visited[vis]; ok && vd < p.depth {
p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false)
break // don't print v again
}
p.visited[vis] = p.depth
}

if showType {
io.WriteString(p, t.String())
}
writeByte(p, '{')
if nonzero(v) {
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
for i := 0; i < v.NumField(); i++ {
showTypeInStruct := true
if f := t.Field(i); f.Name != "" {
io.WriteString(pp, f.Name)
writeByte(pp, ':')
if expand {
writeByte(pp, '\t')
}
showTypeInStruct = labelType(f.Type)
}
pp.printValue(getField(v, i), showTypeInStruct, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.NumField()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
}
writeByte(p, '}')
case reflect.Interface:
switch e := v.Elem(); {
case e.Kind() == reflect.Invalid:
io.WriteString(p, "nil")
case e.IsValid():
pp := *p
pp.depth++
pp.printValue(e, showType, true)
default:
io.WriteString(p, v.Type().String())
io.WriteString(p, "(nil)")
}
case reflect.Array, reflect.Slice:
t := v.Type()
if showType {
io.WriteString(p, t.String())
}
if v.Kind() == reflect.Slice && v.IsNil() && showType {
io.WriteString(p, "(nil)")
break
}
if v.Kind() == reflect.Slice && v.IsNil() {
io.WriteString(p, "nil")
break
}
writeByte(p, '{')
expand := !canInline(v.Type())
pp := p
if expand {
writeByte(p, '\n')
pp = p.indent()
}
for i := 0; i < v.Len(); i++ {
showTypeInSlice := t.Elem().Kind() == reflect.Interface
pp.printValue(v.Index(i), showTypeInSlice, true)
if expand {
io.WriteString(pp, ",\n")
} else if i < v.Len()-1 {
io.WriteString(pp, ", ")
}
}
if expand {
pp.tw.Flush()
}
writeByte(p, '}')
case reflect.Ptr:
e := v.Elem()
if !e.IsValid() {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
io.WriteString(p, ")(nil)")
} else {
pp := *p
pp.depth++
writeByte(pp, '&')
pp.printValue(e, true, true)
}
case reflect.Chan:
x := v.Pointer()
if showType {
writeByte(p, '(')
io.WriteString(p, v.Type().String())
fmt.Fprintf(p, ")(%#v)", x)
} else {
fmt.Fprintf(p, "%#v", x)
}
case reflect.Func:
io.WriteString(p, v.Type().String())
io.WriteString(p, " {...}")
case reflect.UnsafePointer:
p.printInline(v, v.Pointer(), showType)
case reflect.Invalid:
io.WriteString(p, "nil")
}
}

func canInline(t reflect.Type) bool {
switch t.Kind() {
case reflect.Map:
return !canExpand(t.Elem())
case reflect.Struct:
for i := 0; i < t.NumField(); i++ {
if canExpand(t.Field(i).Type) {
return false
}
}
return true
case reflect.Interface:
return false
case reflect.Array, reflect.Slice:
return !canExpand(t.Elem())
case reflect.Ptr:
return false
case reflect.Chan, reflect.Func, reflect.UnsafePointer:
return false
}
return true
}

func canExpand(t reflect.Type) bool {
switch t.Kind() {
case reflect.Map, reflect.Struct,
reflect.Interface, reflect.Array, reflect.Slice,
reflect.Ptr:
return true
}
return false
}

func labelType(t reflect.Type) bool {
switch t.Kind() {
case reflect.Interface, reflect.Struct:
return true
}
return false
}

func (p *printer) fmtString(s string, quote bool) {
if quote {
s = strconv.Quote(s)
}
io.WriteString(p, s)
}

func writeByte(w io.Writer, b byte) {
w.Write([]byte{b})
}

func getField(v reflect.Value, i int) reflect.Value {
val := v.Field(i)
if val.Kind() == reflect.Interface && !val.IsNil() {
val = val.Elem()
}
return val
}
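A sketch of the "%# v" verb that Formatter documents above; the map value is invented for illustration. Both the '#' and ' ' flags must be set for the pretty-printing path, otherwise passThrough falls back to fmt's usual formatting.

package main

import (
	"fmt"

	"github.com/kr/pretty"
)

func main() {
	x := map[string][]int{"evens": {2, 4}, "odds": {1, 3}}
	// Pretty-printed as Go source with line breaks and tab alignment.
	fmt.Printf("%# v\n", pretty.Formatter(x))
	// Plain %v takes the passThrough path instead.
	fmt.Printf("%v\n", pretty.Formatter(x))
}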
108
vendor/github.com/kr/pretty/pretty.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
// Package pretty provides pretty-printing for Go values. This is
// useful during debugging, to avoid wrapping long output lines in
// the terminal.
//
// It provides a function, Formatter, that can be used with any
// function that accepts a format string. It also provides
// convenience wrappers for functions in packages fmt and log.
package pretty

import (
"fmt"
"io"
"log"
"reflect"
)

// Errorf is a convenience wrapper for fmt.Errorf.
//
// Calling Errorf(f, x, y) is equivalent to
// fmt.Errorf(f, Formatter(x), Formatter(y)).
func Errorf(format string, a ...interface{}) error {
return fmt.Errorf(format, wrap(a, false)...)
}

// Fprintf is a convenience wrapper for fmt.Fprintf.
//
// Calling Fprintf(w, f, x, y) is equivalent to
// fmt.Fprintf(w, f, Formatter(x), Formatter(y)).
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) {
return fmt.Fprintf(w, format, wrap(a, false)...)
}

// Log is a convenience wrapper for log.Printf.
//
// Calling Log(x, y) is equivalent to
// log.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Log(a ...interface{}) {
log.Print(wrap(a, true)...)
}

// Logf is a convenience wrapper for log.Printf.
//
// Calling Logf(f, x, y) is equivalent to
// log.Printf(f, Formatter(x), Formatter(y)).
func Logf(format string, a ...interface{}) {
log.Printf(format, wrap(a, false)...)
}

// Logln is a convenience wrapper for log.Printf.
//
// Calling Logln(x, y) is equivalent to
// log.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Logln(a ...interface{}) {
log.Println(wrap(a, true)...)
}

// Print pretty-prints its operands and writes to standard output.
//
// Calling Print(x, y) is equivalent to
// fmt.Print(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Print(a ...interface{}) (n int, errno error) {
return fmt.Print(wrap(a, true)...)
}

// Printf is a convenience wrapper for fmt.Printf.
//
// Calling Printf(f, x, y) is equivalent to
// fmt.Printf(f, Formatter(x), Formatter(y)).
func Printf(format string, a ...interface{}) (n int, errno error) {
return fmt.Printf(format, wrap(a, false)...)
}

// Println pretty-prints its operands and writes to standard output.
//
// Calling Print(x, y) is equivalent to
// fmt.Println(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Println(a ...interface{}) (n int, errno error) {
return fmt.Println(wrap(a, true)...)
}

// Sprint is a convenience wrapper for fmt.Sprintf.
//
// Calling Sprint(x, y) is equivalent to
// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is
// formatted with "%# v".
func Sprint(a ...interface{}) string {
return fmt.Sprint(wrap(a, true)...)
}

// Sprintf is a convenience wrapper for fmt.Sprintf.
//
// Calling Sprintf(f, x, y) is equivalent to
// fmt.Sprintf(f, Formatter(x), Formatter(y)).
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, wrap(a, false)...)
}

func wrap(a []interface{}, force bool) []interface{} {
w := make([]interface{}, len(a))
for i, x := range a {
w[i] = formatter{v: reflect.ValueOf(x), force: force}
}
return w
}
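For reference, a one-line sketch of the convenience wrappers above, assuming cfg is any Go value in scope:

s := pretty.Sprintf("%# v", cfg) // same effect as fmt.Sprintf("%# v", pretty.Formatter(cfg))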
41
vendor/github.com/kr/pretty/zero.go
generated
vendored
Normal file
@ -0,0 +1,41 @@
package pretty

import (
"reflect"
)

func nonzero(v reflect.Value) bool {
switch v.Kind() {
case reflect.Bool:
return v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() != 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() != 0
case reflect.Float32, reflect.Float64:
return v.Float() != 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() != complex(0, 0)
case reflect.String:
return v.String() != ""
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
if nonzero(getField(v, i)) {
return true
}
}
return false
case reflect.Array:
for i := 0; i < v.Len(); i++ {
if nonzero(v.Index(i)) {
return true
}
}
return false
case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func:
return !v.IsNil()
case reflect.UnsafePointer:
return v.Pointer() != 0
}
return true
}
19
vendor/github.com/kr/text/License
generated
vendored
Normal file
@ -0,0 +1,19 @@
Copyright 2012 Keith Rarick

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
3
vendor/github.com/kr/text/Readme
generated
vendored
Normal file
@ -0,0 +1,3 @@
This is a Go package for manipulating paragraphs of text.

See http://go.pkgdoc.org/github.com/kr/text for full documentation.
3
vendor/github.com/kr/text/doc.go
generated
vendored
Normal file
@ -0,0 +1,3 @@
// Package text provides rudimentary functions for manipulating text in
// paragraphs.
package text
74
vendor/github.com/kr/text/indent.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
package text

import (
"io"
)

// Indent inserts prefix at the beginning of each non-empty line of s. The
// end-of-line marker is NL.
func Indent(s, prefix string) string {
return string(IndentBytes([]byte(s), []byte(prefix)))
}

// IndentBytes inserts prefix at the beginning of each non-empty line of b.
// The end-of-line marker is NL.
func IndentBytes(b, prefix []byte) []byte {
var res []byte
bol := true
for _, c := range b {
if bol && c != '\n' {
res = append(res, prefix...)
}
res = append(res, c)
bol = c == '\n'
}
return res
}

// Writer indents each line of its input.
type indentWriter struct {
w io.Writer
bol bool
pre [][]byte
sel int
off int
}

// NewIndentWriter makes a new write filter that indents the input
// lines. Each line is prefixed in order with the corresponding
// element of pre. If there are more lines than elements, the last
// element of pre is repeated for each subsequent line.
func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer {
return &indentWriter{
w: w,
pre: pre,
bol: true,
}
}

// The only errors returned are from the underlying indentWriter.
func (w *indentWriter) Write(p []byte) (n int, err error) {
for _, c := range p {
if w.bol {
var i int
i, err = w.w.Write(w.pre[w.sel][w.off:])
w.off += i
if err != nil {
return n, err
}
}
_, err = w.w.Write([]byte{c})
if err != nil {
return n, err
}
n++
w.bol = c == '\n'
if w.bol {
w.off = 0
if w.sel < len(w.pre)-1 {
w.sel++
}
}
}
return n, nil
}
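A sketch of the two indentation entry points above; the strings are invented for illustration. Indent skips empty lines, while NewIndentWriter cycles through its prefixes and repeats the last one for any remaining lines.

package main

import (
	"fmt"
	"os"

	"github.com/kr/text"
)

func main() {
	// Empty lines stay unprefixed: "> a\n\n> b\n".
	fmt.Print(text.Indent("a\n\nb\n", "> "))

	// First line gets "* ", every later line gets "  ".
	w := text.NewIndentWriter(os.Stdout, []byte("* "), []byte("  "))
	fmt.Fprint(w, "first line\nsecond line\nthird line\n")
}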
86
vendor/github.com/kr/text/wrap.go
generated
vendored
Normal file
@ -0,0 +1,86 @@
package text

import (
"bytes"
"math"
)

var (
nl = []byte{'\n'}
sp = []byte{' '}
)

const defaultPenalty = 1e5

// Wrap wraps s into a paragraph of lines of length lim, with minimal
// raggedness.
func Wrap(s string, lim int) string {
return string(WrapBytes([]byte(s), lim))
}

// WrapBytes wraps b into a paragraph of lines of length lim, with minimal
// raggedness.
func WrapBytes(b []byte, lim int) []byte {
words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp)
var lines [][]byte
for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
lines = append(lines, bytes.Join(line, sp))
}
return bytes.Join(lines, nl)
}

// WrapWords is the low-level line-breaking algorithm, useful if you need more
// control over the details of the text wrapping process. For most uses, either
// Wrap or WrapBytes will be sufficient and more convenient.
//
// WrapWords splits a list of words into lines with minimal "raggedness",
// treating each byte as one unit, accounting for spc units between adjacent
// words on each line, and attempting to limit lines to lim units. Raggedness
// is the total error over all lines, where error is the square of the
// difference of the length of the line and lim. Too-long lines (which only
// happen when a single word is longer than lim units) have pen penalty units
// added to the error.
func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte {
n := len(words)

length := make([][]int, n)
for i := 0; i < n; i++ {
length[i] = make([]int, n)
length[i][i] = len(words[i])
for j := i + 1; j < n; j++ {
length[i][j] = length[i][j-1] + spc + len(words[j])
}
}

nbrk := make([]int, n)
cost := make([]int, n)
for i := range cost {
cost[i] = math.MaxInt32
}
for i := n - 1; i >= 0; i-- {
if length[i][n-1] <= lim || i == n-1 {
cost[i] = 0
nbrk[i] = n
} else {
for j := i + 1; j < n; j++ {
d := lim - length[i][j-1]
c := d*d + cost[j]
if length[i][j-1] > lim {
c += pen // too-long lines get a worse penalty
}
if c < cost[i] {
cost[i] = c
nbrk[i] = j
}
}
}
}

var lines [][][]byte
i := 0
for i < n {
lines = append(lines, words[i:nbrk[i]])
i = nbrk[i]
}
return lines
}
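A usage sketch for the wrapper above; the sample sentence and limit are invented. Because WrapWords minimizes the total squared slack per line rather than greedily filling each line, the break points can differ from a simple greedy wrapper.

package main

import (
	"fmt"

	"github.com/kr/text"
)

func main() {
	s := "The quick brown fox jumps over the lazy dog."
	// Wrap to roughly 16-unit lines with minimal raggedness.
	fmt.Println(text.Wrap(s, 16))
}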
38
vendor/github.com/opencontainers/runc/libcontainer/system/linux.go
generated
vendored
@ -3,13 +3,12 @@
package system

import (
"bufio"
"fmt"
"os"
"os/exec"
"syscall" // only for exec
"unsafe"

"github.com/opencontainers/runc/libcontainer/user"
"golang.org/x/sys/unix"
)

@ -102,34 +101,43 @@ func Setctty() error {
}

// RunningInUserNS detects whether we are currently running in a user namespace.
// Copied from github.com/lxc/lxd/shared/util.go
// Originally copied from github.com/lxc/lxd/shared/util.go
func RunningInUserNS() bool {
file, err := os.Open("/proc/self/uid_map")
uidmap, err := user.CurrentProcessUIDMap()
if err != nil {
// This kernel-provided file only exists if user namespaces are supported
return false
}
defer file.Close()
return UIDMapInUserNS(uidmap)
}

buf := bufio.NewReader(file)
l, _, err := buf.ReadLine()
if err != nil {
return false
}

line := string(l)
var a, b, c int64
fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
func UIDMapInUserNS(uidmap []user.IDMap) bool {
/*
* We assume we are in the initial user namespace if we have a full
* range - 4294967295 uids starting at uid 0.
*/
if a == 0 && b == 0 && c == 4294967295 {
if len(uidmap) == 1 && uidmap[0].ID == 0 && uidmap[0].ParentID == 0 && uidmap[0].Count == 4294967295 {
return false
}
return true
}

// GetParentNSeuid returns the euid within the parent user namespace
func GetParentNSeuid() int64 {
euid := int64(os.Geteuid())
uidmap, err := user.CurrentProcessUIDMap()
if err != nil {
// This kernel-provided file only exists if user namespaces are supported
return euid
}
for _, um := range uidmap {
if um.ID <= euid && euid <= um.ID+um.Count-1 {
return um.ParentID + euid - um.ID
}
}
return euid
}

// SetSubreaper sets the value i as the subreaper setting for the calling process
func SetSubreaper(i int) error {
return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
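A sketch of the refactored user-namespace check above (Linux-only, given the package's build tags). A single "0 0 4294967295" entry means the initial user namespace; anything else means we are inside one.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/system"
	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// The full-range identity map is treated as "not in a user namespace".
	full := []user.IDMap{{ID: 0, ParentID: 0, Count: 4294967295}}
	fmt.Println(system.UIDMapInUserNS(full)) // false

	// Reads /proc/self/uid_map via user.CurrentProcessUIDMap.
	fmt.Println(system.RunningInUserNS())
}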
18
vendor/github.com/opencontainers/runc/libcontainer/system/unsupported.go
generated
vendored
@ -2,8 +2,26 @@

package system

import (
"os"

"github.com/opencontainers/runc/libcontainer/user"
)

// RunningInUserNS is a stub for non-Linux systems
// Always returns false
func RunningInUserNS() bool {
return false
}

// UIDMapInUserNS is a stub for non-Linux systems
// Always returns false
func UIDMapInUserNS(uidmap []user.IDMap) bool {
return false
}

// GetParentNSeuid returns the euid within the parent user namespace
// Always returns os.Geteuid on non-linux
func GetParentNSeuid() int {
return os.Geteuid()
}
26
vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
generated
vendored
@ -114,3 +114,29 @@ func CurrentUser() (User, error) {
func CurrentGroup() (Group, error) {
return LookupGid(unix.Getgid())
}

func CurrentUserSubUIDs() ([]SubID, error) {
u, err := CurrentUser()
if err != nil {
return nil, err
}
return ParseSubIDFileFilter("/etc/subuid",
func(entry SubID) bool { return entry.Name == u.Name })
}

func CurrentGroupSubGIDs() ([]SubID, error) {
g, err := CurrentGroup()
if err != nil {
return nil, err
}
return ParseSubIDFileFilter("/etc/subgid",
func(entry SubID) bool { return entry.Name == g.Name })
}

func CurrentProcessUIDMap() ([]IDMap, error) {
return ParseIDMapFile("/proc/self/uid_map")
}

func CurrentProcessGIDMap() ([]IDMap, error) {
return ParseIDMapFile("/proc/self/gid_map")
}
133
vendor/github.com/opencontainers/runc/libcontainer/user/user.go
generated
vendored
@ -75,12 +75,29 @@ func groupFromOS(g *user.Group) (Group, error) {
return newGroup, nil
}

// SubID represents an entry in /etc/sub{u,g}id
type SubID struct {
Name string
SubID int64
Count int64
}

// IDMap represents an entry in /proc/PID/{u,g}id_map
type IDMap struct {
ID int64
ParentID int64
Count int64
}

func parseLine(line string, v ...interface{}) {
if line == "" {
parseParts(strings.Split(line, ":"), v...)
}

func parseParts(parts []string, v ...interface{}) {
if len(parts) == 0 {
return
}

parts := strings.Split(line, ":")
for i, p := range parts {
// Ignore cases where we don't have enough fields to populate the arguments.
// Some configuration files like to misbehave.
@ -96,6 +113,8 @@ func parseLine(line string, v ...interface{}) {
case *int:
// "numbers", with conversion errors ignored because of some misbehaving configuration files.
*e, _ = strconv.Atoi(p)
case *int64:
*e, _ = strconv.ParseInt(p, 10, 64)
case *[]string:
// Comma-separated lists.
if p != "" {
@ -105,7 +124,7 @@ func parseLine(line string, v ...interface{}) {
}
default:
// Someone goof'd when writing code using this function. Scream so they can hear us.
panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! %#v is not a pointer!", e))
panic(fmt.Sprintf("parseLine only accepts {*string, *int, *int64, *[]string} as arguments! %#v is not a pointer!", e))
}
}
}
@ -479,3 +498,111 @@ func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int
}
return GetAdditionalGroups(additionalGroups, group)
}

func ParseSubIDFile(path string) ([]SubID, error) {
subid, err := os.Open(path)
if err != nil {
return nil, err
}
defer subid.Close()
return ParseSubID(subid)
}

func ParseSubID(subid io.Reader) ([]SubID, error) {
return ParseSubIDFilter(subid, nil)
}

func ParseSubIDFileFilter(path string, filter func(SubID) bool) ([]SubID, error) {
subid, err := os.Open(path)
if err != nil {
return nil, err
}
defer subid.Close()
return ParseSubIDFilter(subid, filter)
}

func ParseSubIDFilter(r io.Reader, filter func(SubID) bool) ([]SubID, error) {
if r == nil {
return nil, fmt.Errorf("nil source for subid-formatted data")
}

var (
s = bufio.NewScanner(r)
out = []SubID{}
)

for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}

line := strings.TrimSpace(s.Text())
if line == "" {
continue
}

// see: man 5 subuid
p := SubID{}
parseLine(line, &p.Name, &p.SubID, &p.Count)

if filter == nil || filter(p) {
out = append(out, p)
}
}

return out, nil
}

func ParseIDMapFile(path string) ([]IDMap, error) {
r, err := os.Open(path)
if err != nil {
return nil, err
}
defer r.Close()
return ParseIDMap(r)
}

func ParseIDMap(r io.Reader) ([]IDMap, error) {
return ParseIDMapFilter(r, nil)
}

func ParseIDMapFileFilter(path string, filter func(IDMap) bool) ([]IDMap, error) {
r, err := os.Open(path)
if err != nil {
return nil, err
}
defer r.Close()
return ParseIDMapFilter(r, filter)
}

func ParseIDMapFilter(r io.Reader, filter func(IDMap) bool) ([]IDMap, error) {
if r == nil {
return nil, fmt.Errorf("nil source for idmap-formatted data")
}

var (
s = bufio.NewScanner(r)
out = []IDMap{}
)

for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}

line := strings.TrimSpace(s.Text())
if line == "" {
continue
}

// see: man 7 user_namespaces
p := IDMap{}
parseParts(strings.Fields(line), &p.ID, &p.ParentID, &p.Count)

if filter == nil || filter(p) {
out = append(out, p)
}
}

return out, nil
}
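A sketch of the new parsing helpers above. The user name "alice" is a placeholder; /etc/subuid holds colon-separated "name:start:count" lines (parsed via parseLine), while /proc/self/uid_map rows are whitespace-separated (parsed via parseParts).

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	// Subordinate UID ranges for one user.
	subs, err := user.ParseSubIDFileFilter("/etc/subuid",
		func(e user.SubID) bool { return e.Name == "alice" }) // placeholder name
	if err == nil {
		for _, s := range subs {
			fmt.Println(s.Name, s.SubID, s.Count)
		}
	}

	// The current process's UID map, as used by system.RunningInUserNS.
	maps, err := user.ParseIDMapFile("/proc/self/uid_map")
	if err == nil {
		fmt.Println(maps)
	}
}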
5
vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
generated
vendored
@ -87,9 +87,6 @@ func FormatMountLabel(src, mountLabel string) string {
// SetProcessLabel takes a process label and tells the kernel to assign the
// label to the next program executed by the current process.
func SetProcessLabel(processLabel string) error {
	if processLabel == "" {
		return nil
	}
	return selinux.SetExecLabel(processLabel)
}

@ -133,7 +130,7 @@ func Relabel(path string, fileLabel string, shared bool) error {
		return nil
	}

	exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true}
	exclude_paths := map[string]bool{"/": true, "/usr": true, "/etc": true, "/tmp": true, "/home": true, "/run": true, "/var": true, "/root": true}
	if exclude_paths[path] {
		return fmt.Errorf("SELinux relabeling of %s is not allowed", path)
	}

@ -1,4 +1,4 @@
// +build linux
// +build selinux,linux

package selinux

@ -7,6 +7,7 @@ import (
	"bytes"
	"crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
@ -25,7 +26,8 @@ const (
	// Permissive constant to indicate SELinux is in permissive mode
	Permissive = 0
	// Disabled constant to indicate SELinux is disabled
	Disabled = -1
	Disabled = -1

	selinuxDir     = "/etc/selinux/"
	selinuxConfig  = selinuxDir + "config"
	selinuxfsMount = "/sys/fs/selinux"
@ -46,7 +48,13 @@ type selinuxState struct {
}

var (
	// ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS.
	ErrMCSAlreadyExists = errors.New("MCS label already exists")
	// ErrEmptyPath is returned when an empty path has been specified.
	ErrEmptyPath = errors.New("empty path")

	assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`)
	roFileLabel string
	state       = selinuxState{
		mcsList: make(map[string]bool),
	}
@ -197,7 +205,7 @@ func GetEnabled() bool {
	return state.getEnabled()
}

func readConfig(target string) (value string) {
func readConfig(target string) string {
	var (
		val, key string
		bufin    *bufio.Reader
@ -239,30 +247,42 @@ func readConfig(target string) (value string) {
}

func getSELinuxPolicyRoot() string {
	return selinuxDir + readConfig(selinuxTypeTag)
	return filepath.Join(selinuxDir, readConfig(selinuxTypeTag))
}

func readCon(name string) (string, error) {
	var val string
func readCon(fpath string) (string, error) {
	if fpath == "" {
		return "", ErrEmptyPath
	}

	in, err := os.Open(name)
	in, err := os.Open(fpath)
	if err != nil {
		return "", err
	}
	defer in.Close()

	_, err = fmt.Fscanf(in, "%s", &val)
	return strings.Trim(val, "\x00"), err
	var retval string
	if _, err := fmt.Fscanf(in, "%s", &retval); err != nil {
		return "", err
	}
	return strings.Trim(retval, "\x00"), nil
}

// SetFileLabel sets the SELinux label for this path or returns an error.
func SetFileLabel(path string, label string) error {
	return lsetxattr(path, xattrNameSelinux, []byte(label), 0)
func SetFileLabel(fpath string, label string) error {
	if fpath == "" {
		return ErrEmptyPath
	}
	return lsetxattr(fpath, xattrNameSelinux, []byte(label), 0)
}

// FileLabel returns the SELinux label for this path or returns an error.
func FileLabel(path string) (string, error) {
	label, err := lgetxattr(path, xattrNameSelinux)
func FileLabel(fpath string) (string, error) {
	if fpath == "" {
		return "", ErrEmptyPath
	}

	label, err := lgetxattr(fpath, xattrNameSelinux)
	if err != nil {
		return "", err
	}
@ -307,8 +327,12 @@ func ExecLabel() (string, error) {
	return readCon(fmt.Sprintf("/proc/self/task/%d/attr/exec", syscall.Gettid()))
}

func writeCon(name string, val string) error {
	out, err := os.OpenFile(name, os.O_WRONLY, 0)
func writeCon(fpath string, val string) error {
	if fpath == "" {
		return ErrEmptyPath
	}

	out, err := os.OpenFile(fpath, os.O_WRONLY, 0)
	if err != nil {
		return err
	}
@ -331,9 +355,11 @@ func CanonicalizeContext(val string) (string, error) {
	return readWriteCon(filepath.Join(getSelinuxMountPoint(), "context"), val)
}

func readWriteCon(name string, val string) (string, error) {
	var retval string
	f, err := os.OpenFile(name, os.O_RDWR, 0)
func readWriteCon(fpath string, val string) (string, error) {
	if fpath == "" {
		return "", ErrEmptyPath
	}
	f, err := os.OpenFile(fpath, os.O_RDWR, 0)
	if err != nil {
		return "", err
	}
@ -344,8 +370,11 @@ func readWriteCon(name string, val string) (string, error) {
		return "", err
	}

	_, err = fmt.Fscanf(f, "%s", &retval)
	return strings.Trim(retval, "\x00"), err
	var retval string
	if _, err := fmt.Fscanf(f, "%s", &retval); err != nil {
		return "", err
	}
	return strings.Trim(retval, "\x00"), nil
}

/*
@ -440,7 +469,7 @@ func mcsAdd(mcs string) error {
	state.Lock()
	defer state.Unlock()
	if state.mcsList[mcs] {
		return fmt.Errorf("MCS Label already exists")
		return ErrMCSAlreadyExists
	}
	state.mcsList[mcs] = true
	return nil
@ -516,10 +545,8 @@ func ReleaseLabel(label string) {
	}
}

var roFileLabel string

// ROFileLabel returns the specified SELinux readonly file label
func ROFileLabel() (fileLabel string) {
func ROFileLabel() string {
	return roFileLabel
}

@ -603,7 +630,7 @@ func SecurityCheckContext(val string) error {
}

/*
CopyLevel returns a label with the MLS/MCS level from src label replaces on
CopyLevel returns a label with the MLS/MCS level from src label replaced on
the dest label.
*/
func CopyLevel(src, dest string) (string, error) {
@ -626,20 +653,26 @@ func CopyLevel(src, dest string) (string, error) {

// Prevent users from relabeling system files
func badPrefix(fpath string) error {
	var badprefixes = []string{"/usr"}
	if fpath == "" {
		return ErrEmptyPath
	}

	for _, prefix := range badprefixes {
		if fpath == prefix || strings.HasPrefix(fpath, fmt.Sprintf("%s/", prefix)) {
	badPrefixes := []string{"/usr"}
	for _, prefix := range badPrefixes {
		if strings.HasPrefix(fpath, prefix) {
			return fmt.Errorf("relabeling content in %s is not allowed", prefix)
		}
	}
	return nil
}

// Chcon changes the fpath file object to the SELinux label label.
// If the fpath is a directory and recurse is true Chcon will walk the
// directory tree setting the label
// Chcon changes the `fpath` file object to the SELinux label `label`.
// If `fpath` is a directory and `recurse` is true, Chcon will walk the
// directory tree setting the label.
func Chcon(fpath string, label string, recurse bool) error {
	if fpath == "" {
		return ErrEmptyPath
	}
	if label == "" {
		return nil
	}
@ -658,7 +691,7 @@ func Chcon(fpath string, label string, recurse bool) error {
}

// DupSecOpt takes an SELinux process label and returns security options that
// can will set the SELinux Type and Level for future container processes
// can be used to set the SELinux Type and Level for future container processes.
func DupSecOpt(src string) []string {
	if src == "" {
		return nil
@ -681,8 +714,8 @@ func DupSecOpt(src string) []string {
	return dup
}

// DisableSecOpt returns a security opt that can be used to disabling SELinux
// labeling support for future container processes
// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes.
func DisableSecOpt() []string {
	return []string{"disable"}
}
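With the stricter empty-path checks and the package-level errors introduced above, callers can test for ErrEmptyPath directly. A quick sketch of driving the refreshed API; the path and label are invented for illustration, and with the `!selinux` stub below every call degrades to a no-op:

    package main

    import (
        "fmt"
        "log"

        "github.com/opencontainers/selinux/go-selinux"
    )

    func main() {
        if !selinux.GetEnabled() {
            fmt.Println("SELinux disabled; labels are no-ops")
            return
        }
        // Recursively relabel a hypothetical data directory; badPrefix
        // rejects anything under /usr, and an empty path is ErrEmptyPath.
        err := selinux.Chcon("/srv/containers/data",
            "system_u:object_r:svirt_sandbox_file_t:s0", true)
        if err != nil {
            log.Fatal(err)
        }
    }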
188
vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
generated
vendored
Normal file
@ -0,0 +1,188 @@
// +build !selinux

package selinux

import (
	"errors"
)

const (
	// Enforcing constant to indicate SELinux is in enforcing mode
	Enforcing = 1
	// Permissive constant to indicate SELinux is in permissive mode
	Permissive = 0
	// Disabled constant to indicate SELinux is disabled
	Disabled = -1
)

var (
	// ErrMCSAlreadyExists is returned when trying to allocate a duplicate MCS.
	ErrMCSAlreadyExists = errors.New("MCS label already exists")
	// ErrEmptyPath is returned when an empty path has been specified.
	ErrEmptyPath = errors.New("empty path")
)

// Context is a representation of the SELinux label broken into 4 parts
type Context map[string]string

// SetDisabled disables SELinux support for the package
func SetDisabled() {
	return
}

// GetEnabled returns whether SELinux is currently enabled.
func GetEnabled() bool {
	return false
}

// SetFileLabel sets the SELinux label for this path or returns an error.
func SetFileLabel(fpath string, label string) error {
	return nil
}

// FileLabel returns the SELinux label for this path or returns an error.
func FileLabel(fpath string) (string, error) {
	return "", nil
}

/*
SetFSCreateLabel tells the kernel the label to use for all file system objects
created by this task. Setting label to "" returns to the default.
*/
func SetFSCreateLabel(label string) error {
	return nil
}

/*
FSCreateLabel returns the default label which the kernel is using
for file system objects created by this task. "" indicates the default.
*/
func FSCreateLabel() (string, error) {
	return "", nil
}

// CurrentLabel returns the SELinux label of the current process thread, or an error.
func CurrentLabel() (string, error) {
	return "", nil
}

// PidLabel returns the SELinux label of the given pid, or an error.
func PidLabel(pid int) (string, error) {
	return "", nil
}

/*
ExecLabel returns the SELinux label that the kernel will use for any programs
that are executed by the current process thread, or an error.
*/
func ExecLabel() (string, error) {
	return "", nil
}

/*
CanonicalizeContext takes a context string and writes it to the kernel;
the function then returns the context that the kernel will use. This function
can be used to see if two contexts are equivalent.
*/
func CanonicalizeContext(val string) (string, error) {
	return "", nil
}

/*
SetExecLabel sets the SELinux label that the kernel will use for any programs
that are executed by the current process thread.
*/
func SetExecLabel(label string) error {
	return nil
}

// Get returns the Context as a string
func (c Context) Get() string {
	return ""
}

// NewContext creates a new Context struct from the specified label
func NewContext(label string) Context {
	c := make(Context)
	return c
}

// ReserveLabel reserves the MLS/MCS level component of the specified label
func ReserveLabel(label string) {
	return
}

// EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled
func EnforceMode() int {
	return Disabled
}

/*
SetEnforceMode sets the current SELinux mode Enforcing, Permissive.
Disabled is not valid, since this needs to be set at boot time.
*/
func SetEnforceMode(mode int) error {
	return nil
}

/*
DefaultEnforceMode returns the system's default SELinux mode Enforcing,
Permissive or Disabled. Note this is just the default at boot time.
EnforceMode tells you the system's current mode.
*/
func DefaultEnforceMode() int {
	return Disabled
}

/*
ReleaseLabel will unreserve the MLS/MCS Level field of the specified label,
allowing it to be used by another process.
*/
func ReleaseLabel(label string) {
	return
}

// ROFileLabel returns the specified SELinux readonly file label
func ROFileLabel() string {
	return ""
}

/*
ContainerLabels returns an allocated processLabel and fileLabel to be used for
container labeling by the calling process.
*/
func ContainerLabels() (processLabel string, fileLabel string) {
	return "", ""
}

// SecurityCheckContext validates that the SELinux label is understood by the kernel
func SecurityCheckContext(val string) error {
	return nil
}

/*
CopyLevel returns a label with the MLS/MCS level from src label replaced on
the dest label.
*/
func CopyLevel(src, dest string) (string, error) {
	return "", nil
}

// Chcon changes the `fpath` file object to the SELinux label `label`.
// If `fpath` is a directory and `recurse` is true, Chcon will walk the
// directory tree setting the label.
func Chcon(fpath string, label string, recurse bool) error {
	return nil
}

// DupSecOpt takes an SELinux process label and returns security options that
// can be used to set the SELinux Type and Level for future container processes.
func DupSecOpt(src string) []string {
	return nil
}

// DisableSecOpt returns a security opt that can be used to disable SELinux
// labeling support for future container processes.
func DisableSecOpt() []string {
	return []string{"disable"}
}
2
vendor/github.com/opencontainers/selinux/go-selinux/xattrs.go
generated
vendored
@ -1,4 +1,4 @@
// +build linux
// +build selinux,linux

package selinux

26
vendor/github.com/ulikunitz/xz/LICENSE
generated
vendored
Normal file
@ -0,0 +1,26 @@
Copyright (c) 2014-2016  Ulrich Kunitz
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* My name, Ulrich Kunitz, may not be used to endorse or promote products
  derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
71
vendor/github.com/ulikunitz/xz/README.md
generated
vendored
Normal file
@ -0,0 +1,71 @@
# Package xz

This Go language package supports the reading and writing of xz
compressed streams. It also includes a gxz command for compressing and
decompressing data. The package is completely written in Go and doesn't
have any dependency on any C code.

The package is currently under development. There might be bugs and APIs
are not considered stable. At this time the package cannot compete with
the xz tool regarding compression speed and size. The algorithms there
have been developed over a long time and are highly optimized. However
there are a number of improvements planned and I'm very optimistic about
parallel compression and decompression. Stay tuned!

# Using the API

The following example program shows how to use the API.

    package main

    import (
        "bytes"
        "io"
        "log"
        "os"

        "github.com/ulikunitz/xz"
    )

    func main() {
        const text = "The quick brown fox jumps over the lazy dog.\n"
        var buf bytes.Buffer
        // compress text
        w, err := xz.NewWriter(&buf)
        if err != nil {
            log.Fatalf("xz.NewWriter error %s", err)
        }
        if _, err := io.WriteString(w, text); err != nil {
            log.Fatalf("WriteString error %s", err)
        }
        if err := w.Close(); err != nil {
            log.Fatalf("w.Close error %s", err)
        }
        // decompress buffer and write output to stdout
        r, err := xz.NewReader(&buf)
        if err != nil {
            log.Fatalf("NewReader error %s", err)
        }
        if _, err = io.Copy(os.Stdout, r); err != nil {
            log.Fatalf("io.Copy error %s", err)
        }
    }

# Using the gxz compression tool

The package includes a gxz command line utility for compression and
decompression.

Use the following command for installation:

    $ go get github.com/ulikunitz/xz/cmd/gxz

To test it call the following command.

    $ gxz bigfile

After some time a much smaller file bigfile.xz will replace bigfile.
To decompress it use the following command.

    $ gxz -d bigfile.xz
74
vendor/github.com/ulikunitz/xz/bits.go
generated
vendored
Normal file
@ -0,0 +1,74 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
	"errors"
	"io"
)

// putUint32LE puts the little-endian representation of x into the first
// four bytes of p.
func putUint32LE(p []byte, x uint32) {
	p[0] = byte(x)
	p[1] = byte(x >> 8)
	p[2] = byte(x >> 16)
	p[3] = byte(x >> 24)
}

// putUint64LE puts the little-endian representation of x into the first
// eight bytes of p.
func putUint64LE(p []byte, x uint64) {
	p[0] = byte(x)
	p[1] = byte(x >> 8)
	p[2] = byte(x >> 16)
	p[3] = byte(x >> 24)
	p[4] = byte(x >> 32)
	p[5] = byte(x >> 40)
	p[6] = byte(x >> 48)
	p[7] = byte(x >> 56)
}

// uint32LE converts a little endian representation to a uint32 value.
func uint32LE(p []byte) uint32 {
	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 |
		uint32(p[3])<<24
}

// putUvarint puts a uvarint representation of x into the byte slice.
func putUvarint(p []byte, x uint64) int {
	i := 0
	for x >= 0x80 {
		p[i] = byte(x) | 0x80
		x >>= 7
		i++
	}
	p[i] = byte(x)
	return i + 1
}

// errOverflowU64 indicates an overflow of the 64-bit unsigned integer.
var errOverflowU64 = errors.New("xz: uvarint overflows 64-bit unsigned integer")

// readUvarint reads a uvarint from the given byte reader.
func readUvarint(r io.ByteReader) (x uint64, n int, err error) {
	var s uint
	i := 0
	for {
		b, err := r.ReadByte()
		if err != nil {
			return x, i, err
		}
		i++
		if b < 0x80 {
			if i > 10 || i == 10 && b > 1 {
				return x, i, errOverflowU64
			}
			return x | uint64(b)<<s, i, nil
		}
		x |= uint64(b&0x7f) << s
		s += 7
	}
}
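putUvarint and readUvarint implement the same base-128 varint layout as encoding/binary: 7 payload bits per byte, the high bit set on continuation bytes, and at most 10 bytes for a 64-bit value. A hypothetical in-package test sketch of the round trip (not part of the vendored file):

    package xz

    import (
        "bytes"
        "testing"
    )

    func TestUvarintRoundTrip(t *testing.T) {
        p := make([]byte, 10) // a 64-bit uvarint needs at most 10 bytes
        for _, x := range []uint64{0, 1, 0x7f, 0x80, 1 << 40, 1<<64 - 1} {
            n := putUvarint(p, x)
            y, k, err := readUvarint(bytes.NewReader(p[:n]))
            if err != nil {
                t.Fatalf("readUvarint error %s", err)
            }
            if y != x || k != n {
                t.Fatalf("got (%d, %d); want (%d, %d)", y, k, x, n)
            }
        }
    }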
54
vendor/github.com/ulikunitz/xz/crc.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
	"hash"
	"hash/crc32"
	"hash/crc64"
)

// crc32Hash implements the hash.Hash32 interface with Sum returning the
// crc32 value in little-endian encoding.
type crc32Hash struct {
	hash.Hash32
}

// Sum returns the crc32 value as little endian.
func (h crc32Hash) Sum(b []byte) []byte {
	p := make([]byte, 4)
	putUint32LE(p, h.Hash32.Sum32())
	b = append(b, p...)
	return b
}

// newCRC32 returns a CRC-32 hash that returns the 32-bit value in
// little-endian encoding using the IEEE polynomial.
func newCRC32() hash.Hash {
	return crc32Hash{Hash32: crc32.NewIEEE()}
}

// crc64Hash implements the Hash64 interface with Sum returning the
// CRC-64 value in little-endian encoding.
type crc64Hash struct {
	hash.Hash64
}

// Sum returns the CRC-64 value in little-endian encoding.
func (h crc64Hash) Sum(b []byte) []byte {
	p := make([]byte, 8)
	putUint64LE(p, h.Hash64.Sum64())
	b = append(b, p...)
	return b
}

// crc64Table is used to create a CRC-64 hash.
var crc64Table = crc64.MakeTable(crc64.ECMA)

// newCRC64 returns a CRC-64 hash that returns the 64-bit value in
// little-endian encoding using the ECMA polynomial.
func newCRC64() hash.Hash {
	return crc64Hash{Hash64: crc64.New(crc64Table)}
}
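The wrappers change nothing about the checksum itself; only the byte order of Sum differs from the standard library, because xz stores checksums little-endian. A hypothetical in-package check (an assumption, not part of the vendored file):

    package xz

    import (
        "hash/crc32"
        "testing"
    )

    func TestCRC32SumLittleEndian(t *testing.T) {
        data := []byte("xz stream")
        h := newCRC32()
        h.Write(data)
        sum := h.Sum(nil) // 4 bytes, little-endian
        if got, want := uint32LE(sum), crc32.ChecksumIEEE(data); got != want {
            t.Fatalf("got %#08x; want %#08x", got, want)
        }
    }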
728
vendor/github.com/ulikunitz/xz/format.go
generated
vendored
Normal file
@ -0,0 +1,728 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package xz

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"hash"
	"hash/crc32"
	"io"

	"github.com/ulikunitz/xz/lzma"
)

// allZeros checks whether a given byte slice has only zeros.
func allZeros(p []byte) bool {
	for _, c := range p {
		if c != 0 {
			return false
		}
	}
	return true
}

// padLen returns the length of the padding required for the given
// argument.
func padLen(n int64) int {
	k := int(n % 4)
	if k > 0 {
		k = 4 - k
	}
	return k
}

/*** Header ***/

// headerMagic stores the magic bytes for the header
var headerMagic = []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}

// HeaderLen provides the length of the xz file header.
const HeaderLen = 12

// Constants for the checksum methods supported by xz.
const (
	CRC32  byte = 0x1
	CRC64       = 0x4
	SHA256      = 0xa
)

// errInvalidFlags indicates that flags are invalid.
var errInvalidFlags = errors.New("xz: invalid flags")

// verifyFlags returns the error errInvalidFlags if the value is
// invalid.
func verifyFlags(flags byte) error {
	switch flags {
	case CRC32, CRC64, SHA256:
		return nil
	default:
		return errInvalidFlags
	}
}

// flagstrings maps flag values to strings.
var flagstrings = map[byte]string{
	CRC32:  "CRC-32",
	CRC64:  "CRC-64",
	SHA256: "SHA-256",
}

// flagString returns the string representation for the given flags.
func flagString(flags byte) string {
	s, ok := flagstrings[flags]
	if !ok {
		return "invalid"
	}
	return s
}

// newHashFunc returns a function that creates hash instances for the
// hash method encoded in flags.
func newHashFunc(flags byte) (newHash func() hash.Hash, err error) {
	switch flags {
	case CRC32:
		newHash = newCRC32
	case CRC64:
		newHash = newCRC64
	case SHA256:
		newHash = sha256.New
	default:
		err = errInvalidFlags
	}
	return
}

// header provides the actual content of the xz file header: the flags.
type header struct {
	flags byte
}

// Errors returned by readHeader.
var errHeaderMagic = errors.New("xz: invalid header magic bytes")

// ValidHeader checks whether data is a correct xz file header. The
// length of data must be HeaderLen.
func ValidHeader(data []byte) bool {
	var h header
	err := h.UnmarshalBinary(data)
	return err == nil
}

// String returns a string representation of the flags.
func (h header) String() string {
	return flagString(h.flags)
}

// UnmarshalBinary reads header from the provided data slice.
func (h *header) UnmarshalBinary(data []byte) error {
	// header length
	if len(data) != HeaderLen {
		return errors.New("xz: wrong file header length")
	}

	// magic header
	if !bytes.Equal(headerMagic, data[:6]) {
		return errHeaderMagic
	}

	// checksum
	crc := crc32.NewIEEE()
	crc.Write(data[6:8])
	if uint32LE(data[8:]) != crc.Sum32() {
		return errors.New("xz: invalid checksum for file header")
	}

	// stream flags
	if data[6] != 0 {
		return errInvalidFlags
	}
	flags := data[7]
	if err := verifyFlags(flags); err != nil {
		return err
	}

	h.flags = flags
	return nil
}

// MarshalBinary generates the xz file header.
func (h *header) MarshalBinary() (data []byte, err error) {
	if err = verifyFlags(h.flags); err != nil {
		return nil, err
	}

	data = make([]byte, 12)
	copy(data, headerMagic)
	data[7] = h.flags

	crc := crc32.NewIEEE()
	crc.Write(data[6:8])
	putUint32LE(data[8:], crc.Sum32())

	return data, nil
}

/*** Footer ***/

// footerLen defines the length of the footer.
const footerLen = 12

// footerMagic contains the footer magic bytes.
var footerMagic = []byte{'Y', 'Z'}

// footer represents the content of the xz file footer.
type footer struct {
	indexSize int64
	flags     byte
}

// String prints a string representation of the footer structure.
func (f footer) String() string {
	return fmt.Sprintf("%s index size %d", flagString(f.flags), f.indexSize)
}

// Minimum and maximum for the size of the index (backward size).
const (
	minIndexSize = 4
	maxIndexSize = (1 << 32) * 4
)

// MarshalBinary converts footer values into an xz file footer. Note
// that the footer value is checked for correctness.
func (f *footer) MarshalBinary() (data []byte, err error) {
	if err = verifyFlags(f.flags); err != nil {
		return nil, err
	}
	if !(minIndexSize <= f.indexSize && f.indexSize <= maxIndexSize) {
		return nil, errors.New("xz: index size out of range")
	}
	if f.indexSize%4 != 0 {
		return nil, errors.New(
			"xz: index size not aligned to four bytes")
	}

	data = make([]byte, footerLen)

	// backward size (index size)
	s := (f.indexSize / 4) - 1
	putUint32LE(data[4:], uint32(s))
	// flags
	data[9] = f.flags
	// footer magic
	copy(data[10:], footerMagic)

	// CRC-32
	crc := crc32.NewIEEE()
	crc.Write(data[4:10])
	putUint32LE(data, crc.Sum32())

	return data, nil
}

// UnmarshalBinary sets the footer value by unmarshalling an xz file
// footer.
func (f *footer) UnmarshalBinary(data []byte) error {
	if len(data) != footerLen {
		return errors.New("xz: wrong footer length")
	}

	// magic bytes
	if !bytes.Equal(data[10:], footerMagic) {
		return errors.New("xz: footer magic invalid")
	}

	// CRC-32
	crc := crc32.NewIEEE()
	crc.Write(data[4:10])
	if uint32LE(data) != crc.Sum32() {
		return errors.New("xz: footer checksum error")
	}

	var g footer
	// backward size (index size)
	g.indexSize = (int64(uint32LE(data[4:])) + 1) * 4

	// flags
	if data[8] != 0 {
		return errInvalidFlags
	}
	g.flags = data[9]
	if err := verifyFlags(g.flags); err != nil {
		return err
	}

	*f = g
	return nil
}

/*** Block Header ***/

// blockHeader represents the content of an xz block header.
type blockHeader struct {
	compressedSize   int64
	uncompressedSize int64
	filters          []filter
}

// String converts the block header into a string.
func (h blockHeader) String() string {
	var buf bytes.Buffer
	first := true
	if h.compressedSize >= 0 {
		fmt.Fprintf(&buf, "compressed size %d", h.compressedSize)
		first = false
	}
	if h.uncompressedSize >= 0 {
		if !first {
			buf.WriteString(" ")
		}
		fmt.Fprintf(&buf, "uncompressed size %d", h.uncompressedSize)
		first = false
	}
	for _, f := range h.filters {
		if !first {
			buf.WriteString(" ")
		}
		fmt.Fprintf(&buf, "filter %s", f)
		first = false
	}
	return buf.String()
}

// Masks for the block flags.
const (
	filterCountMask         = 0x03
	compressedSizePresent   = 0x40
	uncompressedSizePresent = 0x80
	reservedBlockFlags      = 0x3C
)

// errIndexIndicator signals that an index indicator (0x00) has been found
// instead of an expected block header indicator.
var errIndexIndicator = errors.New("xz: found index indicator")

// readBlockHeader reads the block header.
func readBlockHeader(r io.Reader) (h *blockHeader, n int, err error) {
	var buf bytes.Buffer
	buf.Grow(20)

	// block header size
	z, err := io.CopyN(&buf, r, 1)
	n = int(z)
	if err != nil {
		return nil, n, err
	}
	s := buf.Bytes()[0]
	if s == 0 {
		return nil, n, errIndexIndicator
	}

	// read complete header
	headerLen := (int(s) + 1) * 4
	buf.Grow(headerLen - 1)
	z, err = io.CopyN(&buf, r, int64(headerLen-1))
	n += int(z)
	if err != nil {
		return nil, n, err
	}

	// unmarshal block header
	h = new(blockHeader)
	if err = h.UnmarshalBinary(buf.Bytes()); err != nil {
		return nil, n, err
	}

	return h, n, nil
}

// readSizeInBlockHeader reads the uncompressed or compressed size
// fields in the block header. The present value informs the function
// whether the respective field is actually present in the header.
func readSizeInBlockHeader(r io.ByteReader, present bool) (n int64, err error) {
	if !present {
		return -1, nil
	}
	x, _, err := readUvarint(r)
	if err != nil {
		return 0, err
	}
	if x >= 1<<63 {
		return 0, errors.New("xz: size overflow in block header")
	}
	return int64(x), nil
}

// UnmarshalBinary unmarshals the block header.
func (h *blockHeader) UnmarshalBinary(data []byte) error {
	// Check header length
	s := data[0]
	if data[0] == 0 {
		return errIndexIndicator
	}
	headerLen := (int(s) + 1) * 4
	if len(data) != headerLen {
		return fmt.Errorf("xz: data length %d; want %d", len(data),
			headerLen)
	}
	n := headerLen - 4

	// Check CRC-32
	crc := crc32.NewIEEE()
	crc.Write(data[:n])
	if crc.Sum32() != uint32LE(data[n:]) {
		return errors.New("xz: checksum error for block header")
	}

	// Block header flags
	flags := data[1]
	if flags&reservedBlockFlags != 0 {
		return errors.New("xz: reserved block header flags set")
	}

	r := bytes.NewReader(data[2:n])

	// Compressed size
	var err error
	h.compressedSize, err = readSizeInBlockHeader(
		r, flags&compressedSizePresent != 0)
	if err != nil {
		return err
	}

	// Uncompressed size
	h.uncompressedSize, err = readSizeInBlockHeader(
		r, flags&uncompressedSizePresent != 0)
	if err != nil {
		return err
	}

	h.filters, err = readFilters(r, int(flags&filterCountMask)+1)
	if err != nil {
		return err
	}

	// Check padding
	// Since headerLen is a multiple of 4 we don't need to check
	// alignment.
	k := r.Len()
	// The standard spec says that the padding should be no more
	// than 3 bytes. However, we found paddings of 4 or 5 in the
	// wild. See https://github.com/ulikunitz/xz/pull/11 and
	// https://github.com/ulikunitz/xz/issues/15
	//
	// The only reasonable approach seems to be to ignore the
	// padding size. We still check that all padding bytes are zero.
	if !allZeros(data[n-k : n]) {
		return errPadding
	}
	return nil
}

// MarshalBinary marshals the binary header.
func (h *blockHeader) MarshalBinary() (data []byte, err error) {
	if !(minFilters <= len(h.filters) && len(h.filters) <= maxFilters) {
		return nil, errors.New("xz: filter count wrong")
	}
	for i, f := range h.filters {
		if i < len(h.filters)-1 {
			if f.id() == lzmaFilterID {
				return nil, errors.New(
					"xz: LZMA2 filter is not the last")
			}
		} else {
			// last filter
			if f.id() != lzmaFilterID {
				return nil, errors.New("xz: " +
					"last filter must be the LZMA2 filter")
			}
		}
	}

	var buf bytes.Buffer
	// header size must be set at the end
	buf.WriteByte(0)

	// flags
	flags := byte(len(h.filters) - 1)
	if h.compressedSize >= 0 {
		flags |= compressedSizePresent
	}
	if h.uncompressedSize >= 0 {
		flags |= uncompressedSizePresent
	}
	buf.WriteByte(flags)

	p := make([]byte, 10)
	if h.compressedSize >= 0 {
		k := putUvarint(p, uint64(h.compressedSize))
		buf.Write(p[:k])
	}
	if h.uncompressedSize >= 0 {
		k := putUvarint(p, uint64(h.uncompressedSize))
		buf.Write(p[:k])
	}

	for _, f := range h.filters {
		fp, err := f.MarshalBinary()
		if err != nil {
			return nil, err
		}
		buf.Write(fp)
	}

	// padding
	for i := padLen(int64(buf.Len())); i > 0; i-- {
		buf.WriteByte(0)
	}

	// crc place holder
	buf.Write(p[:4])

	data = buf.Bytes()
	if len(data)%4 != 0 {
		panic("data length not aligned")
	}
	s := len(data)/4 - 1
	if !(1 < s && s <= 255) {
		panic("wrong block header size")
	}
	data[0] = byte(s)

	crc := crc32.NewIEEE()
	crc.Write(data[:len(data)-4])
	putUint32LE(data[len(data)-4:], crc.Sum32())

	return data, nil
}

// Constants used for marshalling and unmarshalling filters in the xz
// block header.
const (
	minFilters    = 1
	maxFilters    = 4
	minReservedID = 1 << 62
)

// filter represents a filter in the block header.
type filter interface {
	id() uint64
	UnmarshalBinary(data []byte) error
	MarshalBinary() (data []byte, err error)
	reader(r io.Reader, c *ReaderConfig) (fr io.Reader, err error)
	writeCloser(w io.WriteCloser, c *WriterConfig) (fw io.WriteCloser, err error)
	// filter must be last filter
	last() bool
}

// readFilter reads a block filter from the block header. At this point
// in time only the LZMA2 filter is supported.
func readFilter(r io.Reader) (f filter, err error) {
	br := lzma.ByteReader(r)

	// index
	id, _, err := readUvarint(br)
	if err != nil {
		return nil, err
	}

	var data []byte
	switch id {
	case lzmaFilterID:
		data = make([]byte, lzmaFilterLen)
		data[0] = lzmaFilterID
		if _, err = io.ReadFull(r, data[1:]); err != nil {
			return nil, err
		}
		f = new(lzmaFilter)
	default:
		if id >= minReservedID {
			return nil, errors.New(
				"xz: reserved filter id in block stream header")
		}
		return nil, errors.New("xz: invalid filter id")
	}
	if err = f.UnmarshalBinary(data); err != nil {
		return nil, err
	}
	return f, err
}

// readFilters reads count filters. At this point in time only the count
// 1 is supported.
func readFilters(r io.Reader, count int) (filters []filter, err error) {
	if count != 1 {
		return nil, errors.New("xz: unsupported filter count")
	}
	f, err := readFilter(r)
	if err != nil {
		return nil, err
	}
	return []filter{f}, err
}

// writeFilters writes the filters.
func writeFilters(w io.Writer, filters []filter) (n int, err error) {
	for _, f := range filters {
		p, err := f.MarshalBinary()
		if err != nil {
			return n, err
		}
		k, err := w.Write(p)
		n += k
		if err != nil {
			return n, err
		}
	}
	return n, nil
}

/*** Index ***/

// record describes a block in the xz file index.
type record struct {
	unpaddedSize     int64
	uncompressedSize int64
}

// readRecord reads an index record.
func readRecord(r io.ByteReader) (rec record, n int, err error) {
	u, k, err := readUvarint(r)
	n += k
	if err != nil {
		return rec, n, err
	}
	rec.unpaddedSize = int64(u)
	if rec.unpaddedSize < 0 {
		return rec, n, errors.New("xz: unpadded size negative")
	}

	u, k, err = readUvarint(r)
	n += k
	if err != nil {
		return rec, n, err
	}
	rec.uncompressedSize = int64(u)
	if rec.uncompressedSize < 0 {
		return rec, n, errors.New("xz: uncompressed size negative")
	}

	return rec, n, nil
}

// MarshalBinary converts an index record into its binary encoding.
func (rec *record) MarshalBinary() (data []byte, err error) {
	// maximum length of a uvarint is 10
	p := make([]byte, 20)
	n := putUvarint(p, uint64(rec.unpaddedSize))
	n += putUvarint(p[n:], uint64(rec.uncompressedSize))
	return p[:n], nil
}

// writeIndex writes the index, a sequence of records.
func writeIndex(w io.Writer, index []record) (n int64, err error) {
	crc := crc32.NewIEEE()
	mw := io.MultiWriter(w, crc)

	// index indicator
	k, err := mw.Write([]byte{0})
	n += int64(k)
	if err != nil {
		return n, err
	}

	// number of records
	p := make([]byte, 10)
	k = putUvarint(p, uint64(len(index)))
	k, err = mw.Write(p[:k])
	n += int64(k)
	if err != nil {
		return n, err
	}

	// list of records
	for _, rec := range index {
		p, err := rec.MarshalBinary()
		if err != nil {
			return n, err
		}
		k, err = mw.Write(p)
		n += int64(k)
		if err != nil {
			return n, err
		}
	}

	// index padding
	k, err = mw.Write(make([]byte, padLen(int64(n))))
	n += int64(k)
	if err != nil {
		return n, err
	}

	// crc32 checksum
	putUint32LE(p, crc.Sum32())
	k, err = w.Write(p[:4])
	n += int64(k)

	return n, err
}

// readIndexBody reads the index from the reader. It assumes that the
// index indicator has already been read.
func readIndexBody(r io.Reader) (records []record, n int64, err error) {
	crc := crc32.NewIEEE()
	// index indicator
	crc.Write([]byte{0})

	br := lzma.ByteReader(io.TeeReader(r, crc))

	// number of records
	u, k, err := readUvarint(br)
	n += int64(k)
	if err != nil {
		return nil, n, err
	}
	recLen := int(u)
	if recLen < 0 || uint64(recLen) != u {
		return nil, n, errors.New("xz: record number overflow")
	}

	// list of records
	records = make([]record, recLen)
	for i := range records {
		records[i], k, err = readRecord(br)
		n += int64(k)
		if err != nil {
			return nil, n, err
		}
	}

	p := make([]byte, padLen(int64(n+1)), 4)
	k, err = io.ReadFull(br.(io.Reader), p)
	n += int64(k)
	if err != nil {
		return nil, n, err
	}
	if !allZeros(p) {
		return nil, n, errors.New("xz: non-zero byte in index padding")
	}

	// crc32
	s := crc.Sum32()
	p = p[:4]
	k, err = io.ReadFull(br.(io.Reader), p)
	n += int64(k)
	if err != nil {
		return records, n, err
	}
	if uint32LE(p) != s {
		return nil, n, errors.New("xz: wrong checksum for index")
	}

	return records, n, nil
}
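MarshalBinary and UnmarshalBinary are designed as inverses, with a CRC-32 over bytes 6-7 protecting the stream flags. A hypothetical in-package sketch of the file-header round trip (not part of the vendored file):

    package xz

    import "testing"

    func TestHeaderRoundTrip(t *testing.T) {
        h := header{flags: CRC64}
        data, err := h.MarshalBinary()
        if err != nil {
            t.Fatalf("MarshalBinary error %s", err)
        }
        if len(data) != HeaderLen || !ValidHeader(data) {
            t.Fatalf("marshalled header invalid: % x", data)
        }
        var g header
        if err := g.UnmarshalBinary(data); err != nil {
            t.Fatalf("UnmarshalBinary error %s", err)
        }
        if g.flags != h.flags {
            t.Fatalf("flags %#02x; want %#02x", g.flags, h.flags)
        }
    }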
181
vendor/github.com/ulikunitz/xz/internal/hash/cyclic_poly.go
generated
vendored
Normal file
@ -0,0 +1,181 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hash

// CyclicPoly provides a cyclic polynomial rolling hash.
type CyclicPoly struct {
	h uint64
	p []uint64
	i int
}

// ror rotates the unsigned 64-bit integer to the right. The argument s must be
// less than 64.
func ror(x uint64, s uint) uint64 {
	return (x >> s) | (x << (64 - s))
}

// NewCyclicPoly creates a new instance of the CyclicPoly structure. The
// argument n gives the number of bytes for which a hash will be executed.
// This number must be positive; the method panics if this isn't the case.
func NewCyclicPoly(n int) *CyclicPoly {
	if n < 1 {
		panic("argument n must be positive")
	}
	return &CyclicPoly{p: make([]uint64, 0, n)}
}

// Len returns the length of the byte sequence for which a hash is generated.
func (r *CyclicPoly) Len() int {
	return cap(r.p)
}

// RollByte hashes the next byte and returns a hash value. The complete hash
// becomes available after at least Len() bytes have been hashed.
func (r *CyclicPoly) RollByte(x byte) uint64 {
	y := hash[x]
	if len(r.p) < cap(r.p) {
		r.h = ror(r.h, 1) ^ y
		r.p = append(r.p, y)
	} else {
		r.h ^= ror(r.p[r.i], uint(cap(r.p)-1))
		r.h = ror(r.h, 1) ^ y
		r.p[r.i] = y
		r.i = (r.i + 1) % cap(r.p)
	}
	return r.h
}

// Stores the hash for the individual bytes.
var hash = [256]uint64{
	0x2e4fc3f904065142, 0xc790984cfbc99527,
	0x879f95eb8c62f187, 0x3b61be86b5021ef2,
	0x65a896a04196f0a5, 0xc5b307b80470b59e,
	0xd3bff376a70df14b, 0xc332f04f0b3f1701,
	0x753b5f0e9abf3e0d, 0xb41538fdfe66ef53,
	0x1906a10c2c1c0208, 0xfb0c712a03421c0d,
	0x38be311a65c9552b, 0xfee7ee4ca6445c7e,
	0x71aadeded184f21e, 0xd73426fccda23b2d,
	0x29773fb5fb9600b5, 0xce410261cd32981a,
	0xfe2848b3c62dbc2d, 0x459eaaff6e43e11c,
	0xc13e35fc9c73a887, 0xf30ed5c201e76dbc,
	0xa5f10b3910482cea, 0x2945d59be02dfaad,
	0x06ee334ff70571b5, 0xbabf9d8070f44380,
	0xee3e2e9912ffd27c, 0x2a7118d1ea6b8ea7,
	0x26183cb9f7b1664c, 0xea71dac7da068f21,
	0xea92eca5bd1d0bb7, 0x415595862defcd75,
	0x248a386023c60648, 0x9cf021ab284b3c8a,
	0xfc9372df02870f6c, 0x2b92d693eeb3b3fc,
	0x73e799d139dc6975, 0x7b15ae312486363c,
	0xb70e5454a2239c80, 0x208e3fb31d3b2263,
	0x01f563cabb930f44, 0x2ac4533d2a3240d8,
	0x84231ed1064f6f7c, 0xa9f020977c2a6d19,
	0x213c227271c20122, 0x09fe8a9a0a03d07a,
	0x4236dc75bcaf910c, 0x460a8b2bead8f17e,
	0xd9b27be1aa07055f, 0xd202d5dc4b11c33e,
	0x70adb010543bea12, 0xcdae938f7ea6f579,
	0x3f3d870208672f4d, 0x8e6ccbce9d349536,
	0xe4c0871a389095ae, 0xf5f2a49152bca080,
	0x9a43f9b97269934e, 0xc17b3753cb6f475c,
	0xd56d941e8e206bd4, 0xac0a4f3e525eda00,
	0xa06d5a011912a550, 0x5537ed19537ad1df,
	0xa32fe713d611449d, 0x2a1d05b47c3b579f,
	0x991d02dbd30a2a52, 0x39e91e7e28f93eb0,
	0x40d06adb3e92c9ac, 0x9b9d3afde1c77c97,
	0x9a3f3f41c02c616f, 0x22ecd4ba00f60c44,
	0x0b63d5d801708420, 0x8f227ca8f37ffaec,
	0x0256278670887c24, 0x107e14877dbf540b,
	0x32c19f2786ac1c05, 0x1df5b12bb4bc9c61,
	0xc0cac129d0d4c4e2, 0x9fdb52ee9800b001,
	0x31f601d5d31c48c4, 0x72ff3c0928bcaec7,
	0xd99264421147eb03, 0x535a2d6d38aefcfe,
	0x6ba8b4454a916237, 0xfa39366eaae4719c,
	0x10f00fd7bbb24b6f, 0x5bd23185c76c84d4,
	0xb22c3d7e1b00d33f, 0x3efc20aa6bc830a8,
	0xd61c2503fe639144, 0x30ce625441eb92d3,
	0xe5d34cf359e93100, 0xa8e5aa13f2b9f7a5,
	0x5c2b8d851ca254a6, 0x68fb6c5e8b0d5fdf,
	0xc7ea4872c96b83ae, 0x6dd5d376f4392382,
	0x1be88681aaa9792f, 0xfef465ee1b6c10d9,
	0x1f98b65ed43fcb2e, 0x4d1ca11eb6e9a9c9,
	0x7808e902b3857d0b, 0x171c9c4ea4607972,
	0x58d66274850146df, 0x42b311c10d3981d1,
	0x647fa8c621c41a4c, 0xf472771c66ddfedc,
	0x338d27e3f847b46b, 0x6402ce3da97545ce,
	0x5162db616fc38638, 0x9c83be97bc22a50e,
	0x2d3d7478a78d5e72, 0xe621a9b938fd5397,
	0x9454614eb0f81c45, 0x395fb6e742ed39b6,
	0x77dd9179d06037bf, 0xc478d0fee4d2656d,
	0x35d9d6cb772007af, 0x83a56e92c883f0f6,
	0x27937453250c00a1, 0x27bd6ebc3a46a97d,
	0x9f543bf784342d51, 0xd158f38c48b0ed52,
	0x8dd8537c045f66b4, 0x846a57230226f6d5,
	0x6b13939e0c4e7cdf, 0xfca25425d8176758,
	0x92e5fc6cd52788e6, 0x9992e13d7a739170,
	0x518246f7a199e8ea, 0xf104c2a71b9979c7,
	0x86b3ffaabea4768f, 0x6388061cf3e351ad,
	0x09d9b5295de5bbb5, 0x38bf1638c2599e92,
	0x1d759846499e148d, 0x4c0ff015e5f96ef4,
	0xa41a94cfa270f565, 0x42d76f9cb2326c0b,
	0x0cf385dd3c9c23ba, 0x0508a6c7508d6e7a,
	0x337523aabbe6cf8d, 0x646bb14001d42b12,
	0xc178729d138adc74, 0xf900ef4491f24086,
	0xee1a90d334bb5ac4, 0x9755c92247301a50,
	0xb999bf7c4ff1b610, 0x6aeeb2f3b21e8fc9,
	0x0fa8084cf91ac6ff, 0x10d226cf136e6189,
	0xd302057a07d4fb21, 0x5f03800e20a0fcc3,
	0x80118d4ae46bd210, 0x58ab61a522843733,
	0x51edd575c5432a4b, 0x94ee6ff67f9197f7,
	0x765669e0e5e8157b, 0xa5347830737132f0,
	0x3ba485a69f01510c, 0x0b247d7b957a01c3,
	0x1b3d63449fd807dc, 0x0fdc4721c30ad743,
	0x8b535ed3829b2b14, 0xee41d0cad65d232c,
	0xe6a99ed97a6a982f, 0x65ac6194c202003d,
	0x692accf3a70573eb, 0xcc3c02c3e200d5af,
	0x0d419e8b325914a3, 0x320f160f42c25e40,
	0x00710d647a51fe7a, 0x3c947692330aed60,
	0x9288aa280d355a7a, 0xa1806a9b791d1696,
	0x5d60e38496763da1, 0x6c69e22e613fd0f4,
	0x977fc2a5aadffb17, 0xfb7bd063fc5a94ba,
	0x460c17992cbaece1, 0xf7822c5444d3297f,
	0x344a9790c69b74aa, 0xb80a42e6cae09dce,
	0x1b1361eaf2b1e757, 0xd84c1e758e236f01,
	0x88e0b7be347627cc, 0x45246009b7a99490,
	0x8011c6dd3fe50472, 0xc341d682bffb99d7,
	0x2511be93808e2d15, 0xd5bc13d7fd739840,
	0x2a3cd030679ae1ec, 0x8ad9898a4b9ee157,
	0x3245fef0a8eaf521, 0x3d6d8dbbb427d2b0,
	0x1ed146d8968b3981, 0x0c6a28bf7d45f3fc,
	0x4a1fd3dbcee3c561, 0x4210ff6a476bf67e,
	0xa559cce0d9199aac, 0xde39d47ef3723380,
	0xe5b69d848ce42e35, 0xefa24296f8e79f52,
	0x70190b59db9a5afc, 0x26f166cdb211e7bf,
	0x4deaf2df3c6b8ef5, 0xf171dbdd670f1017,
	0xb9059b05e9420d90, 0x2f0da855c9388754,
	0x611d5e9ab77949cc, 0x2912038ac01163f4,
	0x0231df50402b2fba, 0x45660fc4f3245f58,
	0xb91cc97c7c8dac50, 0xb72d2aafe4953427,
	0xfa6463f87e813d6b, 0x4515f7ee95d5c6a2,
	0x1310e1c1a48d21c3, 0xad48a7810cdd8544,
	0x4d5bdfefd5c9e631, 0xa43ed43f1fdcb7de,
	0xe70cfc8fe1ee9626, 0xef4711b0d8dda442,
	0xb80dd9bd4dab6c93, 0xa23be08d31ba4d93,
	0x9b37db9d0335a39c, 0x494b6f870f5cfebc,
	0x6d1b3c1149dda943, 0x372c943a518c1093,
	0xad27af45e77c09c4, 0x3b6f92b646044604,
	0xac2917909f5fcf4f, 0x2069a60e977e5557,
	0x353a469e71014de5, 0x24be356281f55c15,
	0x2b6d710ba8e9adea, 0x404ad1751c749c29,
	0xed7311bf23d7f185, 0xba4f6976b4acc43e,
	0x32d7198d2bc39000, 0xee667019014d6e01,
	0x494ef3e128d14c83, 0x1f95a152baecd6be,
	0x201648dff1f483a5, 0x68c28550c8384af6,
	0x5fc834a6824a7f48, 0x7cd06cb7365eaf28,
	0xd82bbd95e9b30909, 0x234f0d1694c53f6d,
	0xd2fb7f4a96d83f4a, 0xff0d5da83acac05e,
	0xf8f6b97f5585080a, 0x74236084be57b95b,
	0xa25e40c03bbc36ad, 0x6b6e5c14ce88465b,
	0x4378ffe93e1528c5, 0x94ca92a17118e2d2,
}
14
vendor/github.com/ulikunitz/xz/internal/hash/doc.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package hash provides rolling hashes.

Rolling hashes have to be used for maintaining the positions of n-byte
sequences in the dictionary buffer.

The package currently provides the Rabin-Karp rolling hash and a Cyclic
Polynomial hash. Both satisfy the Roller interface used by the Hashes function.
*/
package hash
66
vendor/github.com/ulikunitz/xz/internal/hash/rabin_karp.go
generated
vendored
Normal file
@ -0,0 +1,66 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hash

// A is the default constant for the Rabin-Karp rolling hash. This is a random
// prime.
const A = 0x97b548add41d5da1

// RabinKarp supports the computation of a rolling hash.
type RabinKarp struct {
	A uint64
	// a^n
	aOldest uint64
	h       uint64
	p       []byte
	i       int
}

// NewRabinKarp creates a new RabinKarp value. The argument n defines the
// length of the byte sequence to be hashed. The default constant will be
// used.
func NewRabinKarp(n int) *RabinKarp {
	return NewRabinKarpConst(n, A)
}

// NewRabinKarpConst creates a new RabinKarp value. The argument n defines the
// length of the byte sequence to be hashed. The argument a provides the
// constant used to compute the hash.
func NewRabinKarpConst(n int, a uint64) *RabinKarp {
	if n <= 0 {
		panic("number of bytes n must be positive")
	}
	aOldest := uint64(1)
	// There are faster methods. For the small n required by the LZMA
	// compressor O(n) is sufficient.
	for i := 0; i < n; i++ {
		aOldest *= a
	}
	return &RabinKarp{
		A: a, aOldest: aOldest,
		p: make([]byte, 0, n),
	}
}

// Len returns the length of the byte sequence.
func (r *RabinKarp) Len() int {
	return cap(r.p)
}

// RollByte computes the hash after x has been added.
func (r *RabinKarp) RollByte(x byte) uint64 {
	if len(r.p) < cap(r.p) {
		r.h += uint64(x)
		r.h *= r.A
		r.p = append(r.p, x)
	} else {
		r.h -= uint64(r.p[r.i]) * r.aOldest
		r.h += uint64(x)
		r.h *= r.A
		r.p[r.i] = x
		r.i = (r.i + 1) % cap(r.p)
	}
	return r.h
}
29
vendor/github.com/ulikunitz/xz/internal/hash/roller.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package hash

// Roller provides an interface for rolling hashes. The hash value will become
// valid after RollByte has been called Len times.
type Roller interface {
	Len() int
	RollByte(x byte) uint64
}

// Hashes computes all hash values for the array p. Note that the state of the
// roller is changed.
func Hashes(r Roller, p []byte) []uint64 {
	n := r.Len()
	if len(p) < n {
		return nil
	}
	h := make([]uint64, len(p)-n+1)
	for i := 0; i < n-1; i++ {
		r.RollByte(p[i])
	}
	for i := range h {
		h[i] = r.RollByte(p[i+n-1])
	}
	return h
}
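Both rollers compute a window's hash from its bytes alone, so equal windows yield equal values regardless of position; Hashes exercises exactly that sliding behaviour. A hypothetical in-package sketch (not part of the vendored file):

    package hash

    import "testing"

    func TestEqualWindowsEqualHashes(t *testing.T) {
        data := []byte("abcdabcd") // the 4-byte window repeats at offset 4
        for _, r := range []Roller{NewCyclicPoly(4), NewRabinKarp(4)} {
            h := Hashes(r, data) // one value per 4-byte window
            if h[0] != h[4] {
                t.Errorf("h[0]=%#x != h[4]=%#x", h[0], h[4])
            }
        }
    }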
457
vendor/github.com/ulikunitz/xz/internal/xlog/xlog.go
generated
vendored
Normal file
@ -0,0 +1,457 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package xlog provides a simple logging package that allows disabling
// certain message categories. It defines a type, Logger, with multiple
// methods for formatting output. The package also has a predefined
// 'standard' Logger accessible through the helper functions Print[f|ln],
// Fatal[f|ln], Panic[f|ln], Warn[f|ln] and Debug[f|ln] that are easier
// to use than creating a Logger manually. That logger writes to standard
// error and prints the date and time of each logged message, which can
// be configured using the function SetFlags.
//
// The Fatal functions call os.Exit(1) after the message is output
// unless suppressed by the flags. The Panic functions call panic
// after writing the log message unless suppressed.
package xlog

import (
    "fmt"
    "io"
    "os"
    "runtime"
    "sync"
    "time"
)

// The flags define what information is prefixed to each log entry
// generated by the Logger. The Lno* versions allow the suppression of
// specific output. The bits are or'ed together to control what will be
// printed. There is no control over the order of the items printed and
// the format. The full format is:
//
//     2009-01-23 01:23:23.123123 /a/b/c/d.go:23: message
//
const (
    Ldate         = 1 << iota // the date: 2009-01-23
    Ltime                     // the time: 01:23:23
    Lmicroseconds             // microsecond resolution: 01:23:23.123123
    Llongfile                 // full file name and line number: /a/b/c/d.go:23
    Lshortfile                // final file name element and line number: d.go:23
    Lnopanic                  // suppresses output from Panic[f|ln] but not the panic call
    Lnofatal                  // suppresses output from Fatal[f|ln] but not the exit
    Lnowarn                   // suppresses output from Warn[f|ln]
    Lnoprint                  // suppresses output from Print[f|ln]
    Lnodebug                  // suppresses output from Debug[f|ln]
    // initial values for the standard logger
    Lstdflags = Ldate | Ltime | Lnodebug
)

// A Logger represents an active logging object that generates lines of
// output to an io.Writer. Each logging operation, if not suppressed,
// makes a single call to the Writer's Write method. A Logger can be
// used simultaneously from multiple goroutines; it guarantees to
// serialize access to the Writer.
type Logger struct {
    mu     sync.Mutex // ensures atomic writes; protects the following fields
    prefix string     // prefix to write at beginning of each line
    flag   int        // properties
    out    io.Writer  // destination for output
    buf    []byte     // for accumulating text to write
}

// New creates a new Logger. The out argument sets the destination to
// which the log output will be written. The prefix appears at the
// beginning of each log line. The flag argument defines the logging
// properties.
func New(out io.Writer, prefix string, flag int) *Logger {
    return &Logger{out: out, prefix: prefix, flag: flag}
}

// std is the standard logger used by the package scope functions.
var std = New(os.Stderr, "", Lstdflags)

// itoa converts the integer to ASCII. A negative width avoids
// zero-padding. The function supports only non-negative integers.
func itoa(buf *[]byte, i int, wid int) {
    var u = uint(i)
    if u == 0 && wid <= 1 {
        *buf = append(*buf, '0')
        return
    }
    var b [32]byte
    bp := len(b)
    for ; u > 0 || wid > 0; u /= 10 {
        bp--
        wid--
        b[bp] = byte(u%10) + '0'
    }
    *buf = append(*buf, b[bp:]...)
}

// formatHeader puts the header into the buf field of the buffer.
func (l *Logger) formatHeader(t time.Time, file string, line int) {
    l.buf = append(l.buf, l.prefix...)
    if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {
        if l.flag&Ldate != 0 {
            year, month, day := t.Date()
            itoa(&l.buf, year, 4)
            l.buf = append(l.buf, '-')
            itoa(&l.buf, int(month), 2)
            l.buf = append(l.buf, '-')
            itoa(&l.buf, day, 2)
            l.buf = append(l.buf, ' ')
        }
        if l.flag&(Ltime|Lmicroseconds) != 0 {
            hour, min, sec := t.Clock()
            itoa(&l.buf, hour, 2)
            l.buf = append(l.buf, ':')
            itoa(&l.buf, min, 2)
            l.buf = append(l.buf, ':')
            itoa(&l.buf, sec, 2)
            if l.flag&Lmicroseconds != 0 {
                l.buf = append(l.buf, '.')
                itoa(&l.buf, t.Nanosecond()/1e3, 6)
            }
            l.buf = append(l.buf, ' ')
        }
    }
    if l.flag&(Lshortfile|Llongfile) != 0 {
        if l.flag&Lshortfile != 0 {
            short := file
            for i := len(file) - 1; i > 0; i-- {
                if file[i] == '/' {
                    short = file[i+1:]
                    break
                }
            }
            file = short
        }
        l.buf = append(l.buf, file...)
        l.buf = append(l.buf, ':')
        itoa(&l.buf, line, -1)
        l.buf = append(l.buf, ": "...)
    }
}

func (l *Logger) output(calldepth int, now time.Time, s string) error {
    var file string
    var line int
    if l.flag&(Lshortfile|Llongfile) != 0 {
        l.mu.Unlock()
        var ok bool
        _, file, line, ok = runtime.Caller(calldepth)
        if !ok {
            file = "???"
            line = 0
        }
        l.mu.Lock()
    }
    l.buf = l.buf[:0]
    l.formatHeader(now, file, line)
    l.buf = append(l.buf, s...)
    if len(s) == 0 || s[len(s)-1] != '\n' {
        l.buf = append(l.buf, '\n')
    }
    _, err := l.out.Write(l.buf)
    return err
}

// Output writes the string s with the header controlled by the flags to
// the l.out writer. A newline will be appended if s doesn't end in a
// newline. Calldepth is used to recover the PC, although all current
// calls of Output use the call depth 2. Access to the function is serialized.
func (l *Logger) Output(calldepth, noflag int, v ...interface{}) error {
    now := time.Now()
    l.mu.Lock()
    defer l.mu.Unlock()
    if l.flag&noflag != 0 {
        return nil
    }
    s := fmt.Sprint(v...)
    return l.output(calldepth+1, now, s)
}

// Outputf works like Output but formats the message like Printf.
func (l *Logger) Outputf(calldepth int, noflag int, format string, v ...interface{}) error {
    now := time.Now()
    l.mu.Lock()
    defer l.mu.Unlock()
    if l.flag&noflag != 0 {
        return nil
    }
    s := fmt.Sprintf(format, v...)
    return l.output(calldepth+1, now, s)
}

// Outputln works like Output but formats the message like Println.
func (l *Logger) Outputln(calldepth int, noflag int, v ...interface{}) error {
    now := time.Now()
    l.mu.Lock()
    defer l.mu.Unlock()
    if l.flag&noflag != 0 {
        return nil
    }
    s := fmt.Sprintln(v...)
    return l.output(calldepth+1, now, s)
}

// Panic prints the message like Print and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func (l *Logger) Panic(v ...interface{}) {
    l.Output(2, Lnopanic, v...)
    s := fmt.Sprint(v...)
    panic(s)
}

// Panic prints the message like Print and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func Panic(v ...interface{}) {
    std.Output(2, Lnopanic, v...)
    s := fmt.Sprint(v...)
    panic(s)
}

// Panicf prints the message like Printf and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func (l *Logger) Panicf(format string, v ...interface{}) {
    l.Outputf(2, Lnopanic, format, v...)
    s := fmt.Sprintf(format, v...)
    panic(s)
}

// Panicf prints the message like Printf and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func Panicf(format string, v ...interface{}) {
    std.Outputf(2, Lnopanic, format, v...)
    s := fmt.Sprintf(format, v...)
    panic(s)
}

// Panicln prints the message like Println and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func (l *Logger) Panicln(v ...interface{}) {
    l.Outputln(2, Lnopanic, v...)
    s := fmt.Sprintln(v...)
    panic(s)
}

// Panicln prints the message like Println and calls panic. The printing
// might be suppressed by the flag Lnopanic.
func Panicln(v ...interface{}) {
    std.Outputln(2, Lnopanic, v...)
    s := fmt.Sprintln(v...)
    panic(s)
}

// Fatal prints the message like Print and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func (l *Logger) Fatal(v ...interface{}) {
    l.Output(2, Lnofatal, v...)
    os.Exit(1)
}

// Fatal prints the message like Print and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func Fatal(v ...interface{}) {
    std.Output(2, Lnofatal, v...)
    os.Exit(1)
}

// Fatalf prints the message like Printf and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func (l *Logger) Fatalf(format string, v ...interface{}) {
    l.Outputf(2, Lnofatal, format, v...)
    os.Exit(1)
}

// Fatalf prints the message like Printf and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func Fatalf(format string, v ...interface{}) {
    std.Outputf(2, Lnofatal, format, v...)
    os.Exit(1)
}

// Fatalln prints the message like Println and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func (l *Logger) Fatalln(v ...interface{}) {
    l.Outputln(2, Lnofatal, v...)
    os.Exit(1)
}

// Fatalln prints the message like Println and calls os.Exit(1). The
// printing might be suppressed by the flag Lnofatal.
func Fatalln(v ...interface{}) {
    std.Outputln(2, Lnofatal, v...)
    os.Exit(1)
}

// Warn prints the message like Print. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warn(v ...interface{}) {
    l.Output(2, Lnowarn, v...)
}

// Warn prints the message like Print. The printing might be suppressed
// by the flag Lnowarn.
func Warn(v ...interface{}) {
    std.Output(2, Lnowarn, v...)
}

// Warnf prints the message like Printf. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warnf(format string, v ...interface{}) {
    l.Outputf(2, Lnowarn, format, v...)
}

// Warnf prints the message like Printf. The printing might be suppressed
// by the flag Lnowarn.
func Warnf(format string, v ...interface{}) {
    std.Outputf(2, Lnowarn, format, v...)
}

// Warnln prints the message like Println. The printing might be suppressed
// by the flag Lnowarn.
func (l *Logger) Warnln(v ...interface{}) {
    l.Outputln(2, Lnowarn, v...)
}

// Warnln prints the message like Println. The printing might be suppressed
// by the flag Lnowarn.
func Warnln(v ...interface{}) {
    std.Outputln(2, Lnowarn, v...)
}

// Print prints the message like fmt.Print. The printing might be suppressed
// by the flag Lnoprint.
func (l *Logger) Print(v ...interface{}) {
    l.Output(2, Lnoprint, v...)
}

// Print prints the message like fmt.Print. The printing might be suppressed
// by the flag Lnoprint.
func Print(v ...interface{}) {
    std.Output(2, Lnoprint, v...)
}

// Printf prints the message like fmt.Printf. The printing might be suppressed
// by the flag Lnoprint.
func (l *Logger) Printf(format string, v ...interface{}) {
    l.Outputf(2, Lnoprint, format, v...)
}

// Printf prints the message like fmt.Printf. The printing might be suppressed
// by the flag Lnoprint.
func Printf(format string, v ...interface{}) {
    std.Outputf(2, Lnoprint, format, v...)
}

// Println prints the message like fmt.Println. The printing might be
// suppressed by the flag Lnoprint.
func (l *Logger) Println(v ...interface{}) {
    l.Outputln(2, Lnoprint, v...)
}

// Println prints the message like fmt.Println. The printing might be
// suppressed by the flag Lnoprint.
func Println(v ...interface{}) {
    std.Outputln(2, Lnoprint, v...)
}

// Debug prints the message like Print. The printing might be suppressed
// by the flag Lnodebug.
func (l *Logger) Debug(v ...interface{}) {
    l.Output(2, Lnodebug, v...)
}

// Debug prints the message like Print. The printing might be suppressed
// by the flag Lnodebug.
func Debug(v ...interface{}) {
    std.Output(2, Lnodebug, v...)
}

// Debugf prints the message like Printf. The printing might be suppressed
// by the flag Lnodebug.
func (l *Logger) Debugf(format string, v ...interface{}) {
    l.Outputf(2, Lnodebug, format, v...)
}

// Debugf prints the message like Printf. The printing might be suppressed
// by the flag Lnodebug.
func Debugf(format string, v ...interface{}) {
    std.Outputf(2, Lnodebug, format, v...)
}

// Debugln prints the message like Println. The printing might be suppressed
// by the flag Lnodebug.
func (l *Logger) Debugln(v ...interface{}) {
    l.Outputln(2, Lnodebug, v...)
}

// Debugln prints the message like Println. The printing might be suppressed
// by the flag Lnodebug.
func Debugln(v ...interface{}) {
    std.Outputln(2, Lnodebug, v...)
}

// Flags returns the current flags used by the logger.
func (l *Logger) Flags() int {
    l.mu.Lock()
    defer l.mu.Unlock()
    return l.flag
}

// Flags returns the current flags used by the standard logger.
func Flags() int {
    return std.Flags()
}

// SetFlags sets the flags of the logger.
func (l *Logger) SetFlags(flag int) {
    l.mu.Lock()
    defer l.mu.Unlock()
    l.flag = flag
}

// SetFlags sets the flags for the standard logger.
func SetFlags(flag int) {
    std.SetFlags(flag)
}

// Prefix returns the prefix used by the logger.
func (l *Logger) Prefix() string {
    l.mu.Lock()
    defer l.mu.Unlock()
    return l.prefix
}

// Prefix returns the prefix used by the standard logger of the package.
func Prefix() string {
    return std.Prefix()
}

// SetPrefix sets the prefix for the logger.
func (l *Logger) SetPrefix(prefix string) {
    l.mu.Lock()
    defer l.mu.Unlock()
    l.prefix = prefix
}

// SetPrefix sets the prefix of the standard logger of the package.
func SetPrefix(prefix string) {
    std.SetPrefix(prefix)
}

// SetOutput sets the output of the logger.
func (l *Logger) SetOutput(w io.Writer) {
    l.mu.Lock()
    defer l.mu.Unlock()
    l.out = w
}

// SetOutput sets the output for the standard logger of the package.
func SetOutput(w io.Writer) {
    std.SetOutput(w)
}
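A hypothetical usage sketch for the logger; since the package is under internal/, it is only importable from within the xz module:

package main // hypothetical in-module example

import (
    "os"

    "github.com/ulikunitz/xz/internal/xlog"
)

func main() {
    // Lstdflags includes Lnodebug, so Debug output is off by default;
    // clearing that bit enables it.
    xlog.SetFlags(xlog.Ldate | xlog.Ltime)
    xlog.Debugf("dictionary capacity %d", 1<<20)

    // A custom logger: warnings enabled, no timestamps, custom prefix.
    l := xlog.New(os.Stderr, "xz: ", 0)
    l.Warn("header checksum ignored")
}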
523
vendor/github.com/ulikunitz/xz/lzma/bintree.go
generated
vendored
Normal file
@ -0,0 +1,523 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
    "bufio"
    "errors"
    "fmt"
    "io"
    "unicode"
)

// node represents a node in the binary tree.
type node struct {
    // x is the search value
    x uint32
    // p parent node
    p uint32
    // l left child
    l uint32
    // r right child
    r uint32
}

// wordLen is the number of bytes represented by the x field of a node.
const wordLen = 4

// binTree supports the identification of the next operation based on a
// binary tree.
//
// Nodes will be identified by their index into the ring buffer.
type binTree struct {
    dict *encoderDict
    // ring buffer of nodes
    node []node
    // absolute offset of the entry for the next node; the stream
    // position is 4 bytes larger.
    hoff int64
    // front position in the node ring buffer
    front uint32
    // index of the root node
    root uint32
    // current x value
    x uint32
    // preallocated array
    data []byte
}

// null represents the nonexistent index. We can't use zero because it
// would always exist, or we would need to decrement the index for each
// reference.
const null uint32 = 1<<32 - 1

// newBinTree initializes the binTree structure. The capacity defines
// the size of the buffer and defines the maximum distance for which
// matches will be found.
func newBinTree(capacity int) (t *binTree, err error) {
    if capacity < 1 {
        return nil, errors.New(
            "newBinTree: capacity must be larger than zero")
    }
    if int64(capacity) >= int64(null) {
        return nil, errors.New(
            "newBinTree: capacity must be less than 2^32-1")
    }
    t = &binTree{
        node: make([]node, capacity),
        hoff: -int64(wordLen),
        root: null,
        data: make([]byte, maxMatchLen),
    }
    return t, nil
}

func (t *binTree) SetDict(d *encoderDict) { t.dict = d }

// WriteByte writes a single byte into the binary tree.
func (t *binTree) WriteByte(c byte) error {
    t.x = (t.x << 8) | uint32(c)
    t.hoff++
    if t.hoff < 0 {
        return nil
    }
    v := t.front
    if int64(v) < t.hoff {
        // We are overwriting old nodes stored in the tree.
        t.remove(v)
    }
    t.node[v].x = t.x
    t.add(v)
    t.front++
    if int64(t.front) >= int64(len(t.node)) {
        t.front = 0
    }
    return nil
}

// Write writes a sequence of bytes into the binTree structure.
func (t *binTree) Write(p []byte) (n int, err error) {
    for _, c := range p {
        t.WriteByte(c)
    }
    return len(p), nil
}

// add puts the node v into the tree. The node must not be part of the
// tree before.
func (t *binTree) add(v uint32) {
    vn := &t.node[v]
    // Set left and right to null indices.
    vn.l, vn.r = null, null
    // If the binary tree is empty make v the root.
    if t.root == null {
        t.root = v
        vn.p = null
        return
    }
    x := vn.x
    p := t.root
    // Search for the right leaf link and add the new node.
    for {
        pn := &t.node[p]
        if x <= pn.x {
            if pn.l == null {
                pn.l = v
                vn.p = p
                return
            }
            p = pn.l
        } else {
            if pn.r == null {
                pn.r = v
                vn.p = p
                return
            }
            p = pn.r
        }
    }
}

// parent returns the parent node index of v and the pointer to v value
// in the parent.
func (t *binTree) parent(v uint32) (p uint32, ptr *uint32) {
    if t.root == v {
        return null, &t.root
    }
    p = t.node[v].p
    if t.node[p].l == v {
        ptr = &t.node[p].l
    } else {
        ptr = &t.node[p].r
    }
    return
}

// Remove node v.
func (t *binTree) remove(v uint32) {
    vn := &t.node[v]
    p, ptr := t.parent(v)
    l, r := vn.l, vn.r
    if l == null {
        // Move the right child up.
        *ptr = r
        if r != null {
            t.node[r].p = p
        }
        return
    }
    if r == null {
        // Move the left child up.
        *ptr = l
        t.node[l].p = p
        return
    }

    // Search the in-order predecessor u.
    un := &t.node[l]
    ur := un.r
    if ur == null {
        // The in-order predecessor is l. Move it up.
        un.r = r
        t.node[r].p = l
        un.p = p
        *ptr = l
        return
    }
    var u uint32
    for {
        // Look for the max value in the tree where l is root.
        u = ur
        ur = t.node[u].r
        if ur == null {
            break
        }
    }
    // replace u with ul
    un = &t.node[u]
    ul := un.l
    up := un.p
    t.node[up].r = ul
    if ul != null {
        t.node[ul].p = up
    }

    // replace v by u
    un.l, un.r = l, r
    t.node[l].p = u
    t.node[r].p = u
    *ptr = u
    un.p = p
}

// search looks for the node that has the value x or for the nodes that
// bracket it. The node highest in the tree with the value x will be
// returned. All other nodes with the same value live in the left subtree
// of the returned node.
func (t *binTree) search(v uint32, x uint32) (a, b uint32) {
    a, b = null, null
    if v == null {
        return
    }
    for {
        vn := &t.node[v]
        if x <= vn.x {
            if x == vn.x {
                return v, v
            }
            b = v
            if vn.l == null {
                return
            }
            v = vn.l
        } else {
            a = v
            if vn.r == null {
                return
            }
            v = vn.r
        }
    }
}

// max returns the node with the maximum value in the subtree with v as
// root.
func (t *binTree) max(v uint32) uint32 {
    if v == null {
        return null
    }
    for {
        r := t.node[v].r
        if r == null {
            return v
        }
        v = r
    }
}

// min returns the node with the minimum value in the subtree with v as
// root.
func (t *binTree) min(v uint32) uint32 {
    if v == null {
        return null
    }
    for {
        l := t.node[v].l
        if l == null {
            return v
        }
        v = l
    }
}

// pred returns the in-order predecessor of node v.
func (t *binTree) pred(v uint32) uint32 {
    if v == null {
        return null
    }
    u := t.max(t.node[v].l)
    if u != null {
        return u
    }
    for {
        p := t.node[v].p
        if p == null {
            return null
        }
        if t.node[p].r == v {
            return p
        }
        v = p
    }
}

// succ returns the in-order successor of node v.
func (t *binTree) succ(v uint32) uint32 {
    if v == null {
        return null
    }
    u := t.min(t.node[v].r)
    if u != null {
        return u
    }
    for {
        p := t.node[v].p
        if p == null {
            return null
        }
        if t.node[p].l == v {
            return p
        }
        v = p
    }
}

// xval converts the first four bytes of a into a 32-bit unsigned
// integer in big-endian order.
func xval(a []byte) uint32 {
    var x uint32
    switch len(a) {
    default:
        x |= uint32(a[3])
        fallthrough
    case 3:
        x |= uint32(a[2]) << 8
        fallthrough
    case 2:
        x |= uint32(a[1]) << 16
        fallthrough
    case 1:
        x |= uint32(a[0]) << 24
    case 0:
    }
    return x
}

// dumpX converts value x into a four-letter string.
func dumpX(x uint32) string {
    a := make([]byte, 4)
    for i := 0; i < 4; i++ {
        c := byte(x >> uint((3-i)*8))
        if unicode.IsGraphic(rune(c)) {
            a[i] = c
        } else {
            a[i] = '.'
        }
    }
    return string(a)
}

// dumpNode writes a representation of the node v into the io.Writer.
func (t *binTree) dumpNode(w io.Writer, v uint32, indent int) {
    if v == null {
        return
    }

    vn := &t.node[v]

    t.dumpNode(w, vn.r, indent+2)

    for i := 0; i < indent; i++ {
        fmt.Fprint(w, " ")
    }
    if vn.p == null {
        fmt.Fprintf(w, "node %d %q parent null\n", v, dumpX(vn.x))
    } else {
        fmt.Fprintf(w, "node %d %q parent %d\n", v, dumpX(vn.x), vn.p)
    }

    t.dumpNode(w, vn.l, indent+2)
}

// dump prints a representation of the binary tree into the writer.
func (t *binTree) dump(w io.Writer) error {
    bw := bufio.NewWriter(w)
    t.dumpNode(bw, t.root, 0)
    return bw.Flush()
}

func (t *binTree) distance(v uint32) int {
    dist := int(t.front) - int(v)
    if dist <= 0 {
        dist += len(t.node)
    }
    return dist
}

type matchParams struct {
    rep [4]uint32
    // length when match will be accepted
    nAccept int
    // nodes to check
    check int
    // finish if length gets shorter
    stopShorter bool
}

func (t *binTree) match(m match, distIter func() (int, bool), p matchParams,
) (r match, checked int, accepted bool) {
    buf := &t.dict.buf
    for {
        if checked >= p.check {
            return m, checked, true
        }
        dist, ok := distIter()
        if !ok {
            return m, checked, false
        }
        checked++
        if m.n > 0 {
            i := buf.rear - dist + m.n - 1
            if i < 0 {
                i += len(buf.data)
            } else if i >= len(buf.data) {
                i -= len(buf.data)
            }
            if buf.data[i] != t.data[m.n-1] {
                if p.stopShorter {
                    return m, checked, false
                }
                continue
            }
        }
        n := buf.matchLen(dist, t.data)
        switch n {
        case 0:
            if p.stopShorter {
                return m, checked, false
            }
            continue
        case 1:
            if uint32(dist-minDistance) != p.rep[0] {
                continue
            }
        }
        if n < m.n || (n == m.n && int64(dist) >= m.distance) {
            continue
        }
        m = match{int64(dist), n}
        if n >= p.nAccept {
            return m, checked, true
        }
    }
}

func (t *binTree) NextOp(rep [4]uint32) operation {
    // retrieve maxMatchLen data
    n, _ := t.dict.buf.Peek(t.data[:maxMatchLen])
    if n == 0 {
        panic("no data in buffer")
    }
    t.data = t.data[:n]

    var (
        m                  match
        x, u, v            uint32
        iterPred, iterSucc func() (int, bool)
    )
    p := matchParams{
        rep:     rep,
        nAccept: maxMatchLen,
        check:   32,
    }
    i := 4
    iterSmall := func() (dist int, ok bool) {
        i--
        if i <= 0 {
            return 0, false
        }
        return i, true
    }
    m, checked, accepted := t.match(m, iterSmall, p)
    if accepted {
        goto end
    }
    p.check -= checked
    x = xval(t.data)
    u, v = t.search(t.root, x)
    if u == v && len(t.data) == 4 {
        iter := func() (dist int, ok bool) {
            if u == null {
                return 0, false
            }
            dist = t.distance(u)
            u, v = t.search(t.node[u].l, x)
            if u != v {
                u = null
            }
            return dist, true
        }
        m, _, _ = t.match(m, iter, p)
        goto end
    }
    p.stopShorter = true
    iterSucc = func() (dist int, ok bool) {
        if v == null {
            return 0, false
        }
        dist = t.distance(v)
        v = t.succ(v)
        return dist, true
    }
    m, checked, accepted = t.match(m, iterSucc, p)
    if accepted {
        goto end
    }
    p.check -= checked
    iterPred = func() (dist int, ok bool) {
        if u == null {
            return 0, false
        }
        dist = t.distance(u)
        u = t.pred(u)
        return dist, true
    }
    m, _, _ = t.match(m, iterPred, p)
end:
    if m.n == 0 {
        return lit{t.data[0]}
    }
    return m
}
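xval packs up to four bytes big-endian into a uint32; for the full four-byte case this is exactly a big-endian read, which a standalone check against encoding/binary confirms:

package main

import (
    "encoding/binary"
    "fmt"
)

func main() {
    p := []byte{0x12, 0x34, 0x56, 0x78}
    // The default case of xval for len(p) >= 4.
    x := uint32(p[0])<<24 | uint32(p[1])<<16 | uint32(p[2])<<8 | uint32(p[3])
    fmt.Println(x == binary.BigEndian.Uint32(p)) // true
}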
45
vendor/github.com/ulikunitz/xz/lzma/bitops.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

/* Naming conventions follow the CodeReviewComments in the Go Wiki. */

// ntz32Const is used by the functions ntz32 and nlz32.
const ntz32Const = 0x04d7651f

// ntz32Table is a helper table for the de Bruijn algorithm by Danny Dubé.
// See Henry S. Warren, Jr. "Hacker's Delight" section 5-1 figure 5-26.
var ntz32Table = [32]int8{
    0, 1, 2, 24, 3, 19, 6, 25,
    22, 4, 20, 10, 16, 7, 12, 26,
    31, 23, 18, 5, 21, 9, 15, 11,
    30, 17, 8, 14, 29, 13, 28, 27,
}

// ntz32 computes the number of trailing zeros for an unsigned 32-bit integer.
func ntz32(x uint32) int {
    if x == 0 {
        return 32
    }
    x = (x & -x) * ntz32Const
    return int(ntz32Table[x>>27])
}

// nlz32 computes the number of leading zeros for an unsigned 32-bit integer.
func nlz32(x uint32) int {
    // Smear left most bit to the right
    x |= x >> 1
    x |= x >> 2
    x |= x >> 4
    x |= x >> 8
    x |= x >> 16
    // Use ntz mechanism to calculate nlz.
    x++
    if x == 0 {
        return 0
    }
    x *= ntz32Const
    return 32 - int(ntz32Table[x>>27])
}
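A standalone property check of the de Bruijn trick against math/bits; the constant and table are copied from above so the snippet compiles on its own:

package main

import (
    "fmt"
    "math/bits"
)

const ntz32Const = 0x04d7651f

var ntz32Table = [32]int8{
    0, 1, 2, 24, 3, 19, 6, 25,
    22, 4, 20, 10, 16, 7, 12, 26,
    31, 23, 18, 5, 21, 9, 15, 11,
    30, 17, 8, 14, 29, 13, 28, 27,
}

// ntz32 isolates the lowest set bit, multiplies by the de Bruijn constant,
// and uses the top five bits as a table index.
func ntz32(x uint32) int {
    if x == 0 {
        return 32
    }
    x = (x & -x) * ntz32Const
    return int(ntz32Table[x>>27])
}

func main() {
    for _, x := range []uint32{0, 1, 2, 0x80000000, 0xdeadbeef} {
        fmt.Println(ntz32(x) == bits.TrailingZeros32(x)) // all true
    }
}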
39
vendor/github.com/ulikunitz/xz/lzma/breader.go
generated
vendored
Normal file
@ -0,0 +1,39 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
    "errors"
    "io"
)

// breader provides the ReadByte function for a Reader. It doesn't read
// more data from the reader than absolutely necessary.
type breader struct {
    io.Reader
    // helper slice to save allocations
    p []byte
}

// ByteReader converts an io.Reader into an io.ByteReader.
func ByteReader(r io.Reader) io.ByteReader {
    br, ok := r.(io.ByteReader)
    if !ok {
        return &breader{r, make([]byte, 1)}
    }
    return br
}

// ReadByte reads a single byte from the underlying reader.
func (r *breader) ReadByte() (c byte, err error) {
    n, err := r.Reader.Read(r.p)
    if n < 1 {
        if err == nil {
            err = errors.New("breader.ReadByte: no data")
        }
        return 0, err
    }
    return r.p[0], nil
}
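ByteReader is exported by the lzma package, so assuming the import path of this vendored copy, usage looks like this; a reader that already implements io.ByteReader is returned unchanged:

package main

import (
    "fmt"
    "strings"

    "github.com/ulikunitz/xz/lzma"
)

func main() {
    // strings.Reader already implements io.ByteReader and is returned as-is;
    // a plain io.Reader would be wrapped in the one-byte breader instead.
    br := lzma.ByteReader(strings.NewReader("xz"))
    c, err := br.ReadByte()
    fmt.Println(string(c), err) // x <nil>
}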
171
vendor/github.com/ulikunitz/xz/lzma/buffer.go
generated
vendored
Normal file
@ -0,0 +1,171 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
    "errors"
)

// buffer provides a circular buffer of bytes. If the front index equals
// the rear index the buffer is empty. As a consequence front cannot be
// equal to rear for a full buffer. So a full buffer has a length that is
// one byte less than the length of the data slice.
type buffer struct {
    data  []byte
    front int
    rear  int
}

// newBuffer creates a buffer with the given size.
func newBuffer(size int) *buffer {
    return &buffer{data: make([]byte, size+1)}
}

// Cap returns the capacity of the buffer.
func (b *buffer) Cap() int {
    return len(b.data) - 1
}

// Reset resets the buffer. The front and rear indices are set to zero.
func (b *buffer) Reset() {
    b.front = 0
    b.rear = 0
}

// Buffered returns the number of bytes buffered.
func (b *buffer) Buffered() int {
    delta := b.front - b.rear
    if delta < 0 {
        delta += len(b.data)
    }
    return delta
}

// Available returns the number of bytes available for writing.
func (b *buffer) Available() int {
    delta := b.rear - 1 - b.front
    if delta < 0 {
        delta += len(b.data)
    }
    return delta
}

// addIndex adds a non-negative integer to the index i and returns the
// resulting index. The function takes care of wrapping the index as
// well as potential overflow situations.
func (b *buffer) addIndex(i int, n int) int {
    // subtraction of len(b.data) prevents overflow
    i += n - len(b.data)
    if i < 0 {
        i += len(b.data)
    }
    return i
}

// Read reads bytes from the buffer into p and returns the number of
// bytes read. The function never returns an error but might return less
// data than requested.
func (b *buffer) Read(p []byte) (n int, err error) {
    n, err = b.Peek(p)
    b.rear = b.addIndex(b.rear, n)
    return n, err
}

// Peek reads bytes from the buffer into p without changing the buffer.
// Peek will never return an error but might return less data than
// requested.
func (b *buffer) Peek(p []byte) (n int, err error) {
    m := b.Buffered()
    n = len(p)
    if m < n {
        n = m
        p = p[:n]
    }
    k := copy(p, b.data[b.rear:])
    if k < n {
        copy(p[k:], b.data)
    }
    return n, nil
}

// Discard skips the next n bytes to read from the buffer, returning the
// number of bytes discarded.
//
// If Discard skips fewer than n bytes, it returns an error.
func (b *buffer) Discard(n int) (discarded int, err error) {
    if n < 0 {
        return 0, errors.New("buffer.Discard: negative argument")
    }
    m := b.Buffered()
    if m < n {
        n = m
        err = errors.New(
            "buffer.Discard: discarded fewer bytes than requested")
    }
    b.rear = b.addIndex(b.rear, n)
    return n, err
}

// ErrNoSpace indicates that there is insufficient space for the Write
// operation.
var ErrNoSpace = errors.New("insufficient space")

// Write puts data into the buffer. If fewer bytes are written than
// requested ErrNoSpace is returned.
func (b *buffer) Write(p []byte) (n int, err error) {
    m := b.Available()
    n = len(p)
    if m < n {
        n = m
        p = p[:m]
        err = ErrNoSpace
    }
    k := copy(b.data[b.front:], p)
    if k < n {
        copy(b.data, p[k:])
    }
    b.front = b.addIndex(b.front, n)
    return n, err
}

// WriteByte writes a single byte into the buffer. The error ErrNoSpace
// is returned if no space is available in the buffer for writing.
func (b *buffer) WriteByte(c byte) error {
    if b.Available() < 1 {
        return ErrNoSpace
    }
    b.data[b.front] = c
    b.front = b.addIndex(b.front, 1)
    return nil
}

// prefixLen returns the length of the common prefix of a and b.
func prefixLen(a, b []byte) int {
    if len(a) > len(b) {
        a, b = b, a
    }
    for i, c := range a {
        if b[i] != c {
            return i
        }
    }
    return len(a)
}

// matchLen returns the length of the common prefix for the given
// distance from the rear and the byte slice p.
func (b *buffer) matchLen(distance int, p []byte) int {
    var n int
    i := b.rear - distance
    if i < 0 {
        if n = prefixLen(p, b.data[len(b.data)+i:]); n < -i {
            return n
        }
        p = p[n:]
        i = 0
    }
    n += prefixLen(p, b.data[i:])
    return n
}
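Sacrificing one slot of the backing array means Buffered() + Available() == Cap() always holds. A minimal standalone sketch of the same front/rear arithmetic:

package main

import "fmt"

// ring mirrors the buffer above: capacity is len(data)-1 and the buffer is
// empty exactly when front == rear.
type ring struct {
    data        []byte
    front, rear int
}

func (b *ring) buffered() int {
    d := b.front - b.rear
    if d < 0 {
        d += len(b.data)
    }
    return d
}

func (b *ring) available() int {
    d := b.rear - 1 - b.front
    if d < 0 {
        d += len(b.data)
    }
    return d
}

func main() {
    b := &ring{data: make([]byte, 8+1)} // capacity 8
    b.front = 5                         // pretend 5 bytes were written
    fmt.Println(b.buffered(), b.available()) // 5 3
}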
37
vendor/github.com/ulikunitz/xz/lzma/bytewriter.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
    "errors"
    "io"
)

// ErrLimit indicates that the limit of the LimitedByteWriter has been
// reached.
var ErrLimit = errors.New("limit reached")

// LimitedByteWriter provides a byte writer that can be written to until
// a limit is reached. The field N provides the number of remaining
// bytes.
type LimitedByteWriter struct {
    BW io.ByteWriter
    N  int64
}

// WriteByte writes a single byte to the limited byte writer. It returns
// ErrLimit if the limit has been reached. If the byte is successfully
// written the field N of the LimitedByteWriter will be decremented by
// one.
func (l *LimitedByteWriter) WriteByte(c byte) error {
    if l.N <= 0 {
        return ErrLimit
    }
    if err := l.BW.WriteByte(c); err != nil {
        return err
    }
    l.N--
    return nil
}
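Assuming the exported lzma import path, a small usage sketch; the third write fails with ErrLimit and the underlying buffer keeps only the first two bytes:

package main

import (
    "bytes"
    "fmt"

    "github.com/ulikunitz/xz/lzma"
)

func main() {
    var buf bytes.Buffer // bytes.Buffer implements io.ByteWriter
    w := &lzma.LimitedByteWriter{BW: &buf, N: 2}
    fmt.Println(w.WriteByte('a')) // <nil>
    fmt.Println(w.WriteByte('b')) // <nil>
    fmt.Println(w.WriteByte('c')) // limit reached
    fmt.Println(buf.String())     // ab
}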
277
vendor/github.com/ulikunitz/xz/lzma/decoder.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
    "errors"
    "fmt"
    "io"
)

// decoder decodes a raw LZMA stream without any header.
type decoder struct {
    // dictionary; the rear pointer of the buffer will be used for
    // reading the data.
    Dict *decoderDict
    // decoder state
    State *state
    // range decoder
    rd *rangeDecoder
    // start stores the head value of the dictionary for the LZMA stream
    start int64
    // size of uncompressed data
    size int64
    // end-of-stream encountered
    eos bool
    // EOS marker found
    eosMarker bool
}

// newDecoder creates a new decoder instance. The parameter size provides
// the expected byte size of the decompressed data. If the size is
// unknown use a negative value. In that case the decoder will look for
// a terminating end-of-stream marker.
func newDecoder(br io.ByteReader, state *state, dict *decoderDict, size int64) (d *decoder, err error) {
    rd, err := newRangeDecoder(br)
    if err != nil {
        return nil, err
    }
    d = &decoder{
        State: state,
        Dict:  dict,
        rd:    rd,
        size:  size,
        start: dict.pos(),
    }
    return d, nil
}

// Reopen restarts the decoder with a new byte reader and a new size. Reopen
// resets the Decompressed counter to zero.
func (d *decoder) Reopen(br io.ByteReader, size int64) error {
    var err error
    if d.rd, err = newRangeDecoder(br); err != nil {
        return err
    }
    d.start = d.Dict.pos()
    d.size = size
    d.eos = false
    return nil
}

// decodeLiteral decodes a single literal from the LZMA stream.
func (d *decoder) decodeLiteral() (op operation, err error) {
    litState := d.State.litState(d.Dict.byteAt(1), d.Dict.head)
    match := d.Dict.byteAt(int(d.State.rep[0]) + 1)
    s, err := d.State.litCodec.Decode(d.rd, d.State.state, match, litState)
    if err != nil {
        return nil, err
    }
    return lit{s}, nil
}

// errEOS indicates that an EOS marker has been found.
var errEOS = errors.New("EOS marker found")

// readOp decodes the next operation from the compressed stream. It
// returns the operation. If an explicit end of stream marker is
// identified the errEOS error is returned.
func (d *decoder) readOp() (op operation, err error) {
    // Value of the end of stream (EOS) marker
    const eosDist = 1<<32 - 1

    state, state2, posState := d.State.states(d.Dict.head)

    b, err := d.State.isMatch[state2].Decode(d.rd)
    if err != nil {
        return nil, err
    }
    if b == 0 {
        // literal
        op, err := d.decodeLiteral()
        if err != nil {
            return nil, err
        }
        d.State.updateStateLiteral()
        return op, nil
    }
    b, err = d.State.isRep[state].Decode(d.rd)
    if err != nil {
        return nil, err
    }
    if b == 0 {
        // simple match
        d.State.rep[3], d.State.rep[2], d.State.rep[1] =
            d.State.rep[2], d.State.rep[1], d.State.rep[0]

        d.State.updateStateMatch()
        // The length decoder returns the length offset.
        n, err := d.State.lenCodec.Decode(d.rd, posState)
        if err != nil {
            return nil, err
        }
        // The dist decoder returns the distance offset. The actual
        // distance is 1 higher.
        d.State.rep[0], err = d.State.distCodec.Decode(d.rd, n)
        if err != nil {
            return nil, err
        }
        if d.State.rep[0] == eosDist {
            d.eosMarker = true
            return nil, errEOS
        }
        op = match{n: int(n) + minMatchLen,
            distance: int64(d.State.rep[0]) + minDistance}
        return op, nil
    }
    b, err = d.State.isRepG0[state].Decode(d.rd)
    if err != nil {
        return nil, err
    }
    dist := d.State.rep[0]
    if b == 0 {
        // rep match 0
        b, err = d.State.isRepG0Long[state2].Decode(d.rd)
        if err != nil {
            return nil, err
        }
        if b == 0 {
            d.State.updateStateShortRep()
            op = match{n: 1, distance: int64(dist) + minDistance}
            return op, nil
        }
    } else {
        b, err = d.State.isRepG1[state].Decode(d.rd)
        if err != nil {
            return nil, err
        }
        if b == 0 {
            dist = d.State.rep[1]
        } else {
            b, err = d.State.isRepG2[state].Decode(d.rd)
            if err != nil {
                return nil, err
            }
            if b == 0 {
                dist = d.State.rep[2]
            } else {
                dist = d.State.rep[3]
                d.State.rep[3] = d.State.rep[2]
            }
            d.State.rep[2] = d.State.rep[1]
        }
        d.State.rep[1] = d.State.rep[0]
        d.State.rep[0] = dist
    }
    n, err := d.State.repLenCodec.Decode(d.rd, posState)
    if err != nil {
        return nil, err
    }
    d.State.updateStateRep()
    op = match{n: int(n) + minMatchLen, distance: int64(dist) + minDistance}
    return op, nil
}

// apply takes the operation and transforms the decoder dictionary accordingly.
func (d *decoder) apply(op operation) error {
    var err error
    switch x := op.(type) {
    case match:
        err = d.Dict.writeMatch(x.distance, x.n)
    case lit:
        err = d.Dict.WriteByte(x.b)
    default:
        panic("op is neither a match nor a literal")
    }
    return err
}

// decompress fills the dictionary unless no space for new data is
// available. If the end of the LZMA stream has been reached io.EOF will
// be returned.
func (d *decoder) decompress() error {
    if d.eos {
        return io.EOF
    }
    for d.Dict.Available() >= maxMatchLen {
        op, err := d.readOp()
        switch err {
        case nil:
        case errEOS:
            d.eos = true
            if !d.rd.possiblyAtEnd() {
                return errDataAfterEOS
            }
            if d.size >= 0 && d.size != d.Decompressed() {
                return errSize
            }
            return io.EOF
        case io.EOF:
            d.eos = true
            return io.ErrUnexpectedEOF
        default:
            return err
        }
        if err = d.apply(op); err != nil {
            return err
        }
        if d.size >= 0 && d.Decompressed() >= d.size {
            d.eos = true
            if d.Decompressed() > d.size {
                return errSize
            }
            if !d.rd.possiblyAtEnd() {
                switch _, err = d.readOp(); err {
                case nil:
                    return errSize
                case io.EOF:
                    return io.ErrUnexpectedEOF
                case errEOS:
                default:
                    return err
                }
            }
            return io.EOF
        }
    }
    return nil
}

// Errors that may be returned while decoding data.
var (
    errDataAfterEOS = errors.New("lzma: data after end of stream marker")
    errSize         = errors.New("lzma: wrong uncompressed data size")
)

// Read reads data from the buffer. If no more data is available io.EOF is
// returned.
func (d *decoder) Read(p []byte) (n int, err error) {
    var k int
    for {
        // Read of decoder dict never returns an error.
        k, err = d.Dict.Read(p[n:])
        if err != nil {
            panic(fmt.Errorf("dictionary read error %s", err))
        }
        if k == 0 && d.eos {
            return n, io.EOF
        }
        n += k
        if n >= len(p) {
            return n, nil
        }
        if err = d.decompress(); err != nil && err != io.EOF {
            return n, err
        }
    }
}

// Decompressed returns the number of bytes decompressed by the decoder.
func (d *decoder) Decompressed() int64 {
    return d.Dict.pos() - d.start
}
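readOp keeps rep[0..3] as a most-recently-used list of match distances. A standalone sketch of that update rule with a hypothetical helper, mirroring the isRepG1/isRepG2 branches above:

package main

import "fmt"

// useRep moves the selected rep distance to the front, exactly as the
// rep-match branches of readOp shuffle d.State.rep.
func useRep(rep *[4]uint32, g int) uint32 {
    dist := rep[g]
    switch g {
    case 1:
        rep[1] = rep[0]
    case 2:
        rep[2], rep[1] = rep[1], rep[0]
    case 3:
        rep[3], rep[2], rep[1] = rep[2], rep[1], rep[0]
    }
    rep[0] = dist
    return dist
}

func main() {
    rep := [4]uint32{10, 20, 30, 40}
    useRep(&rep, 2)
    fmt.Println(rep) // [30 10 20 40]
}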
135
vendor/github.com/ulikunitz/xz/lzma/decoderdict.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
    "errors"
    "fmt"
)

// decoderDict provides the dictionary for the decoder. The whole
// dictionary is used as reader buffer.
type decoderDict struct {
    buf  buffer
    head int64
}

// newDecoderDict creates a new decoder dictionary. The whole dictionary
// will be used as reader buffer.
func newDecoderDict(dictCap int) (d *decoderDict, err error) {
    // lower limit supports easy test cases
    if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
        return nil, errors.New("lzma: dictCap out of range")
    }
    d = &decoderDict{buf: *newBuffer(dictCap)}
    return d, nil
}

// Reset clears the dictionary. The read buffer is not changed, so the
// buffered data can still be read.
func (d *decoderDict) Reset() {
    d.head = 0
}

// WriteByte writes a single byte into the dictionary. It is used to
// write literals into the dictionary.
func (d *decoderDict) WriteByte(c byte) error {
    if err := d.buf.WriteByte(c); err != nil {
        return err
    }
    d.head++
    return nil
}

// pos returns the position of the dictionary head.
func (d *decoderDict) pos() int64 { return d.head }

// dictLen returns the actual length of the dictionary.
func (d *decoderDict) dictLen() int {
    capacity := d.buf.Cap()
    if d.head >= int64(capacity) {
        return capacity
    }
    return int(d.head)
}

// byteAt returns a byte stored in the dictionary. If the distance is
// non-positive or exceeds the current length of the dictionary the zero
// byte is returned.
func (d *decoderDict) byteAt(dist int) byte {
    if !(0 < dist && dist <= d.dictLen()) {
        return 0
    }
    i := d.buf.front - dist
    if i < 0 {
        i += len(d.buf.data)
    }
    return d.buf.data[i]
}

// writeMatch writes the match at the top of the dictionary. The given
// distance must point in the current dictionary and the length must not
// exceed the maximum length 273 supported in LZMA.
//
// The error value ErrNoSpace indicates that no space is available in
// the dictionary for writing. You need to read from the dictionary
// first.
func (d *decoderDict) writeMatch(dist int64, length int) error {
    if !(0 < dist && dist <= int64(d.dictLen())) {
        return errors.New("writeMatch: distance out of range")
    }
    if !(0 < length && length <= maxMatchLen) {
        return errors.New("writeMatch: length out of range")
    }
    if length > d.buf.Available() {
        return ErrNoSpace
    }
    d.head += int64(length)

    i := d.buf.front - int(dist)
    if i < 0 {
        i += len(d.buf.data)
    }
    for length > 0 {
        var p []byte
        if i >= d.buf.front {
            p = d.buf.data[i:]
            i = 0
        } else {
            p = d.buf.data[i:d.buf.front]
            i = d.buf.front
        }
        if len(p) > length {
            p = p[:length]
        }
        if _, err := d.buf.Write(p); err != nil {
            panic(fmt.Errorf("d.buf.Write returned error %s", err))
        }
        length -= len(p)
    }
    return nil
}

// Write writes the given bytes into the dictionary and advances the
// head.
func (d *decoderDict) Write(p []byte) (n int, err error) {
    n, err = d.buf.Write(p)
    d.head += int64(n)
    return n, err
}

// Available returns the number of available bytes for writing into the
// decoder dictionary.
func (d *decoderDict) Available() int { return d.buf.Available() }

// Read reads data from the buffer contained in the decoder dictionary.
func (d *decoderDict) Read(p []byte) (n int, err error) { return d.buf.Read(p) }

// buffered returns the number of bytes currently buffered in the
// decoder dictionary.
func (d *decoderDict) buffered() int { return d.buf.Buffered() }

// peek gets data from the buffer without advancing the rear index.
func (d *decoderDict) peek(p []byte) (n int, err error) { return d.buf.Peek(p) }
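writeMatch copies from the already-decoded window in chunks bounded by the front index, so a distance smaller than the length correctly replicates bytes just written. The same LZ77 semantics on a flat slice, as a hypothetical one-byte-at-a-time helper:

package main

import "fmt"

// lzCopy appends length bytes copied from dist bytes back; copying byte by
// byte makes overlapping matches (dist < length) repeat recent output.
func lzCopy(out []byte, dist, length int) []byte {
    for i := 0; i < length; i++ {
        out = append(out, out[len(out)-dist])
    }
    return out
}

func main() {
    out := lzCopy([]byte("ab"), 2, 6) // distance 2, length 6
    fmt.Println(string(out))          // abababab
}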
49
vendor/github.com/ulikunitz/xz/lzma/directcodec.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import "fmt"

// directCodec allows the encoding and decoding of values with a fixed number
// of bits. The number of bits must be in the range [1,32].
type directCodec byte

// makeDirectCodec creates a directCodec. The function panics if the number of
// bits is not in the range [1,32].
func makeDirectCodec(bits int) directCodec {
    if !(1 <= bits && bits <= 32) {
        panic(fmt.Errorf("bits=%d out of range", bits))
    }
    return directCodec(bits)
}

// Bits returns the number of bits supported by this codec.
func (dc directCodec) Bits() int {
    return int(dc)
}

// Encode uses the range encoder to encode a value with the fixed number of
// bits. The most-significant bit is encoded first.
func (dc directCodec) Encode(e *rangeEncoder, v uint32) error {
    for i := int(dc) - 1; i >= 0; i-- {
        if err := e.DirectEncodeBit(v >> uint(i)); err != nil {
            return err
        }
    }
    return nil
}

// Decode uses the range decoder to decode a value with the fixed number of
// bits. The most-significant bit is decoded first.
func (dc directCodec) Decode(d *rangeDecoder) (v uint32, err error) {
    for i := int(dc) - 1; i >= 0; i-- {
        x, err := d.DirectDecodeBit()
        if err != nil {
            return 0, err
        }
        v = (v << 1) | x
    }
    return v, nil
}
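Stripped of the range coder, the MSB-first traversal in Encode and Decode is a plain shift loop that round-trips any fixed-width value; a standalone sketch (masking with &1 where DirectEncodeBit uses the low bit):

package main

import "fmt"

func main() {
    const bits = 6
    v := uint32(0b101101)

    // Encode: emit the most-significant bit first, as directCodec.Encode does.
    var stream []uint32
    for i := bits - 1; i >= 0; i-- {
        stream = append(stream, (v>>uint(i))&1)
    }

    // Decode: shift the bits back in, as directCodec.Decode does.
    var w uint32
    for _, b := range stream {
        w = (w << 1) | b
    }
    fmt.Println(v == w) // true
}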
156
vendor/github.com/ulikunitz/xz/lzma/distcodec.go
generated
vendored
Normal file
@ -0,0 +1,156 @@
|
||||
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package lzma
|
||||
|
||||
// Constants used by the distance codec.
|
||||
const (
|
||||
// minimum supported distance
|
||||
minDistance = 1
|
||||
// maximum supported distance, value is used for the eos marker.
|
||||
maxDistance = 1 << 32
|
||||
// number of the supported len states
|
||||
lenStates = 4
|
||||
// start for the position models
|
||||
startPosModel = 4
|
||||
// first index with align bits support
|
||||
endPosModel = 14
|
||||
// bits for the position slots
|
||||
posSlotBits = 6
|
||||
// number of align bits
|
||||
alignBits = 4
|
||||
// maximum position slot
|
||||
maxPosSlot = 63
|
||||
)
|
||||
|
||||
// distCodec provides encoding and decoding of distance values.
|
||||
type distCodec struct {
|
||||
posSlotCodecs [lenStates]treeCodec
|
||||
posModel [endPosModel - startPosModel]treeReverseCodec
|
||||
alignCodec treeReverseCodec
|
||||
}
|
||||
|
||||
// deepcopy initializes dc as deep copy of the source.
|
||||
func (dc *distCodec) deepcopy(src *distCodec) {
|
||||
if dc == src {
|
||||
return
|
||||
}
|
||||
for i := range dc.posSlotCodecs {
|
||||
dc.posSlotCodecs[i].deepcopy(&src.posSlotCodecs[i])
|
||||
}
|
||||
for i := range dc.posModel {
|
||||
dc.posModel[i].deepcopy(&src.posModel[i])
|
||||
}
|
||||
dc.alignCodec.deepcopy(&src.alignCodec)
|
||||
}
|
||||
|
||||
// distBits returns the number of bits required to encode dist.
|
||||
func distBits(dist uint32) int {
|
||||
if dist < startPosModel {
|
||||
return 6
|
||||
}
|
||||
// slot s > 3, dist d
|
||||
// s = 2(bits(d)-1) + bit(d, bits(d)-2)
|
||||
// s>>1 = bits(d)-1
|
||||
// bits(d) = 32-nlz32(d)
|
||||
// s>>1=31-nlz32(d)
|
||||
// n = 5 + (s>>1) = 36 - nlz32(d)
|
||||
return 36 - nlz32(dist)
|
||||
}
// init initializes the distance codec.
func (dc *distCodec) init() {
	for i := range dc.posSlotCodecs {
		dc.posSlotCodecs[i] = makeTreeCodec(posSlotBits)
	}
	for i := range dc.posModel {
		posSlot := startPosModel + i
		bits := (posSlot >> 1) - 1
		dc.posModel[i] = makeTreeReverseCodec(bits)
	}
	dc.alignCodec = makeTreeReverseCodec(alignBits)
}

// lenState converts the value l to a supported lenState value.
func lenState(l uint32) uint32 {
	if l >= lenStates {
		l = lenStates - 1
	}
	return l
}

// Encode encodes the distance using the parameter l. Dist can have values from
// the full range of uint32 values. To get the distance offset the actual match
// distance has to be decreased by 1. A distance offset of 0xffffffff (eos)
// indicates the end of the stream.
func (dc *distCodec) Encode(e *rangeEncoder, dist uint32, l uint32) (err error) {
	// Compute the posSlot using nlz32
	var posSlot uint32
	var bits uint32
	if dist < startPosModel {
		posSlot = dist
	} else {
		bits = uint32(30 - nlz32(dist))
		posSlot = startPosModel - 2 + (bits << 1)
		posSlot += (dist >> uint(bits)) & 1
	}

	if err = dc.posSlotCodecs[lenState(l)].Encode(e, posSlot); err != nil {
		return
	}

	switch {
	case posSlot < startPosModel:
		return nil
	case posSlot < endPosModel:
		tc := &dc.posModel[posSlot-startPosModel]
		return tc.Encode(dist, e)
	}
	dic := directCodec(bits - alignBits)
	if err = dic.Encode(e, dist>>alignBits); err != nil {
		return
	}
	return dc.alignCodec.Encode(dist, e)
}

// Decode decodes the distance offset using the parameter l. The dist value
// 0xffffffff (eos) indicates the end of the stream. Add one to the distance
// offset to get the actual match distance.
func (dc *distCodec) Decode(d *rangeDecoder, l uint32) (dist uint32, err error) {
	posSlot, err := dc.posSlotCodecs[lenState(l)].Decode(d)
	if err != nil {
		return
	}

	// posSlot equals distance
	if posSlot < startPosModel {
		return posSlot, nil
	}

	// posSlot uses the individual models
	bits := (posSlot >> 1) - 1
	dist = (2 | (posSlot & 1)) << bits
	var u uint32
	if posSlot < endPosModel {
		tc := &dc.posModel[posSlot-startPosModel]
		if u, err = tc.Decode(d); err != nil {
			return 0, err
		}
		dist += u
		return dist, nil
	}

	// posSlots use direct encoding and a single model for the four align
	// bits.
	dic := directCodec(bits - alignBits)
	if u, err = dic.Decode(d); err != nil {
		return 0, err
	}
	dist += u << alignBits
	if u, err = dc.alignCodec.Decode(d); err != nil {
		return 0, err
	}
	dist += u
	return dist, nil
}
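
Taking the range coder out of the picture, Decode assembles a distance offset from the slot plus extra bits. A minimal sketch of just that assembly; u stands for whatever the tree model produced (or, for slots >= endPosModel, the direct bits shifted up plus the align bits):

package main

import "fmt"

// assemble mirrors Decode's arithmetic without the range coder.
func assemble(posSlot, u uint32) uint32 {
	if posSlot < 4 { // startPosModel: the slot is the offset itself
		return posSlot
	}
	bits := posSlot>>1 - 1
	return (2|posSlot&1)<<bits + u
}

func main() {
	// Slot 13 covers offsets [96,128): base (2|1)<<5 = 96 plus 5 extra bits.
	fmt.Println(assemble(13, 4)) // 100
}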
268
vendor/github.com/ulikunitz/xz/lzma/encoder.go
generated
vendored
Normal file
@ -0,0 +1,268 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"fmt"
	"io"
)

// opLenMargin provides the upper limit of the number of bytes required
// to encode a single operation.
const opLenMargin = 10

// compressFlags control the compression process.
type compressFlags uint32

// Values for compressFlags.
const (
	// all data should be compressed, even if compression is not
	// optimal.
	all compressFlags = 1 << iota
)

// encoderFlags provide the flags for an encoder.
type encoderFlags uint32

// Flags for the encoder.
const (
	// eosMarker requests an EOS marker to be written.
	eosMarker encoderFlags = 1 << iota
)

// encoder compresses data buffered in the encoder dictionary and writes
// it into a byte writer.
type encoder struct {
	dict  *encoderDict
	state *state
	re    *rangeEncoder
	start int64
	// generate eos marker
	marker bool
	limit  bool
	margin int
}

// newEncoder creates a new encoder. If the byte writer must be
// limited use LimitedByteWriter provided by this package. The flags
// argument supports the eosMarker flag, controlling whether a
// terminating end-of-stream marker must be written.
func newEncoder(bw io.ByteWriter, state *state, dict *encoderDict,
	flags encoderFlags) (e *encoder, err error) {

	re, err := newRangeEncoder(bw)
	if err != nil {
		return nil, err
	}
	e = &encoder{
		dict:   dict,
		state:  state,
		re:     re,
		marker: flags&eosMarker != 0,
		start:  dict.Pos(),
		margin: opLenMargin,
	}
	if e.marker {
		e.margin += 5
	}
	return e, nil
}

// Write writes the bytes from p into the dictionary. If not enough
// space is available the data in the dictionary buffer will be
// compressed to make additional space available. If the limit of the
// underlying writer has been reached ErrLimit will be returned.
func (e *encoder) Write(p []byte) (n int, err error) {
	for {
		k, err := e.dict.Write(p[n:])
		n += k
		if err == ErrNoSpace {
			if err = e.compress(0); err != nil {
				return n, err
			}
			continue
		}
		return n, err
	}
}
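
The loop above is a fill/flush retry pattern: write until ErrNoSpace, compress to drain the dictionary buffer, then continue with the remaining bytes. A self-contained sketch of the same control flow against a hypothetical fixed-capacity sink (errNoSpace, boundedBuf, and flush are illustrative stand-ins, not the package's types):

package main

import (
	"errors"
	"fmt"
)

// errNoSpace stands in for the package's ErrNoSpace.
var errNoSpace = errors.New("no space")

// boundedBuf is a hypothetical fixed-capacity sink used only to
// illustrate the fill/flush retry loop in Write above.
type boundedBuf struct{ buf, out []byte }

// Write accepts bytes until capacity is reached, then reports errNoSpace.
func (b *boundedBuf) Write(p []byte) (int, error) {
	n := cap(b.buf) - len(b.buf)
	if n > len(p) {
		n = len(p)
	}
	b.buf = append(b.buf, p[:n]...)
	if n < len(p) {
		return n, errNoSpace
	}
	return n, nil
}

// flush plays the role of compress(0): it drains the buffer to make space.
func (b *boundedBuf) flush() {
	b.out = append(b.out, b.buf...)
	b.buf = b.buf[:0]
}

// writeAll has the same shape as encoder.Write: fill, flush on
// errNoSpace, retry with the remaining bytes.
func writeAll(b *boundedBuf, p []byte) (n int, err error) {
	for {
		k, err := b.Write(p[n:])
		n += k
		if err == errNoSpace {
			b.flush()
			continue
		}
		return n, err
	}
}

func main() {
	b := &boundedBuf{buf: make([]byte, 0, 4)}
	n, err := writeAll(b, []byte("hello, lzma"))
	fmt.Println(n, err, len(b.out)+len(b.buf)) // 11 <nil> 11
}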
// Reopen reopens the encoder with a new byte writer.
func (e *encoder) Reopen(bw io.ByteWriter) error {
	var err error
	if e.re, err = newRangeEncoder(bw); err != nil {
		return err
	}
	e.start = e.dict.Pos()
	e.limit = false
	return nil
}

// writeLiteral writes a literal into the LZMA stream.
func (e *encoder) writeLiteral(l lit) error {
	var err error
	state, state2, _ := e.state.states(e.dict.Pos())
	if err = e.state.isMatch[state2].Encode(e.re, 0); err != nil {
		return err
	}
	litState := e.state.litState(e.dict.ByteAt(1), e.dict.Pos())
	match := e.dict.ByteAt(int(e.state.rep[0]) + 1)
	err = e.state.litCodec.Encode(e.re, l.b, state, match, litState)
	if err != nil {
		return err
	}
	e.state.updateStateLiteral()
	return nil
}

// iverson implements the Iverson operator as proposed by Donald Knuth in his
// book Concrete Mathematics.
func iverson(ok bool) uint32 {
	if ok {
		return 1
	}
	return 0
}

// writeMatch writes a match operation into the LZMA stream.
func (e *encoder) writeMatch(m match) error {
	var err error
	if !(minDistance <= m.distance && m.distance <= maxDistance) {
		panic(fmt.Errorf("match distance %d out of range", m.distance))
	}
	dist := uint32(m.distance - minDistance)
	if !(minMatchLen <= m.n && m.n <= maxMatchLen) &&
		!(dist == e.state.rep[0] && m.n == 1) {
		panic(fmt.Errorf(
			"match length %d out of range; dist %d rep[0] %d",
			m.n, dist, e.state.rep[0]))
	}
	state, state2, posState := e.state.states(e.dict.Pos())
	if err = e.state.isMatch[state2].Encode(e.re, 1); err != nil {
		return err
	}
	g := 0
	for ; g < 4; g++ {
		if e.state.rep[g] == dist {
			break
		}
	}
	b := iverson(g < 4)
	if err = e.state.isRep[state].Encode(e.re, b); err != nil {
		return err
	}
	n := uint32(m.n - minMatchLen)
	if b == 0 {
		// simple match
		e.state.rep[3], e.state.rep[2], e.state.rep[1], e.state.rep[0] =
			e.state.rep[2], e.state.rep[1], e.state.rep[0], dist
		e.state.updateStateMatch()
		if err = e.state.lenCodec.Encode(e.re, n, posState); err != nil {
			return err
		}
		return e.state.distCodec.Encode(e.re, dist, n)
	}
	b = iverson(g != 0)
	if err = e.state.isRepG0[state].Encode(e.re, b); err != nil {
		return err
	}
	if b == 0 {
		// g == 0
		b = iverson(m.n != 1)
		if err = e.state.isRepG0Long[state2].Encode(e.re, b); err != nil {
			return err
		}
		if b == 0 {
			e.state.updateStateShortRep()
			return nil
		}
	} else {
		// g in {1,2,3}
		b = iverson(g != 1)
		if err = e.state.isRepG1[state].Encode(e.re, b); err != nil {
			return err
		}
		if b == 1 {
			// g in {2,3}
			b = iverson(g != 2)
			err = e.state.isRepG2[state].Encode(e.re, b)
			if err != nil {
				return err
			}
			if b == 1 {
				e.state.rep[3] = e.state.rep[2]
			}
			e.state.rep[2] = e.state.rep[1]
		}
		e.state.rep[1] = e.state.rep[0]
		e.state.rep[0] = dist
	}
	e.state.updateStateRep()
	return e.state.repLenCodec.Encode(e.re, n, posState)
}
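
The rep-distance bookkeeping above implements a four-slot most-recently-used list: a fresh distance shifts everything down, and a hit on slot g moves that distance to the front. A standalone sketch of just the update rule, with the range-coder calls dropped (g == 4 stands for a fresh distance):

package main

import "fmt"

// updateRep applies the MRU update that writeMatch encodes above:
// g is the index of the matched rep slot, or 4 for a fresh distance.
func updateRep(rep [4]uint32, g int, dist uint32) [4]uint32 {
	switch {
	case g == 4: // simple match: shift everything down
		rep[3], rep[2], rep[1], rep[0] = rep[2], rep[1], rep[0], dist
	case g == 0: // shortrep / rep0: order unchanged
	default: // g in {1,2,3}: move rep[g] to the front
		if g == 3 {
			rep[3] = rep[2]
		}
		if g >= 2 {
			rep[2] = rep[1]
		}
		rep[1] = rep[0]
		rep[0] = dist
	}
	return rep
}

func main() {
	rep := [4]uint32{10, 20, 30, 40}
	fmt.Println(updateRep(rep, 4, 5))  // [5 10 20 30]
	fmt.Println(updateRep(rep, 2, 30)) // [30 10 20 40]
}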
// writeOp writes a single operation to the range encoder. The function
// checks whether there is enough space available to close the LZMA
// stream.
func (e *encoder) writeOp(op operation) error {
	if e.re.Available() < int64(e.margin) {
		return ErrLimit
	}
	switch x := op.(type) {
	case lit:
		return e.writeLiteral(x)
	case match:
		return e.writeMatch(x)
	default:
		panic("unexpected operation")
	}
}

// compress compresses data from the dictionary buffer. If the flag all
// is set, all data in the dictionary buffer will be compressed. The
// function returns ErrLimit if the underlying writer has reached its
// limit.
func (e *encoder) compress(flags compressFlags) error {
	n := 0
	if flags&all == 0 {
		n = maxMatchLen - 1
	}
	d := e.dict
	m := d.m
	for d.Buffered() > n {
		op := m.NextOp(e.state.rep)
		if err := e.writeOp(op); err != nil {
			return err
		}
		d.Discard(op.Len())
	}
	return nil
}

// eosMatch is a pseudo operation that indicates the end of the stream.
var eosMatch = match{distance: maxDistance, n: minMatchLen}

// Close terminates the LZMA stream. If requested the end-of-stream
// marker will be written. If the byte writer limit has been or will be
// reached during compression of the remaining data in the buffer the
// LZMA stream will be closed and data will remain in the buffer.
func (e *encoder) Close() error {
	err := e.compress(all)
	if err != nil && err != ErrLimit {
		return err
	}
	if e.marker {
		if err := e.writeMatch(eosMatch); err != nil {
			return err
		}
	}
	err = e.re.Close()
	return err
}

// Compressed returns the number of bytes of the input data that have
// been compressed.
func (e *encoder) Compressed() int64 {
	return e.dict.Pos() - e.start
}
149
vendor/github.com/ulikunitz/xz/lzma/encoderdict.go
generated
vendored
Normal file
@ -0,0 +1,149 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"
	"io"
)

// matcher is an interface that supports the identification of the next
// operation.
type matcher interface {
	io.Writer
	SetDict(d *encoderDict)
	NextOp(rep [4]uint32) operation
}

// encoderDict provides the dictionary of the encoder. It includes an
// additional buffer atop the actual dictionary.
type encoderDict struct {
	buf      buffer
	m        matcher
	head     int64
	capacity int
	// preallocated array
	data [maxMatchLen]byte
}

// newEncoderDict creates the encoder dictionary. The argument bufSize
// defines the size of the additional buffer.
func newEncoderDict(dictCap, bufSize int, m matcher) (d *encoderDict, err error) {
	if !(1 <= dictCap && int64(dictCap) <= MaxDictCap) {
		return nil, errors.New(
			"lzma: dictionary capacity out of range")
	}
	if bufSize < 1 {
		return nil, errors.New(
			"lzma: buffer size must be larger than zero")
	}
	d = &encoderDict{
		buf:      *newBuffer(dictCap + bufSize),
		capacity: dictCap,
		m:        m,
	}
	m.SetDict(d)
	return d, nil
}

// Discard discards n bytes. Note that n must not be larger than
// maxMatchLen.
func (d *encoderDict) Discard(n int) {
	p := d.data[:n]
	k, _ := d.buf.Read(p)
	if k < n {
		panic(fmt.Errorf("lzma: can't discard %d bytes", n))
	}
	d.head += int64(n)
	d.m.Write(p)
}

// Len returns the data available in the encoder dictionary.
func (d *encoderDict) Len() int {
	n := d.buf.Available()
	if int64(n) > d.head {
		return int(d.head)
	}
	return n
}

// DictLen returns the actual length of data in the dictionary.
func (d *encoderDict) DictLen() int {
	if d.head < int64(d.capacity) {
		return int(d.head)
	}
	return d.capacity
}

// Available returns the number of bytes that can be written by a
// following Write call.
func (d *encoderDict) Available() int {
	return d.buf.Available() - d.DictLen()
}

// Write writes data into the dictionary buffer. Note that the position
// of the dictionary head will not be moved. If there is not enough
// space in the buffer ErrNoSpace will be returned.
func (d *encoderDict) Write(p []byte) (n int, err error) {
	m := d.Available()
	if len(p) > m {
		p = p[:m]
		err = ErrNoSpace
	}
	var e error
	if n, e = d.buf.Write(p); e != nil {
		err = e
	}
	return n, err
}

// Pos returns the position of the head.
func (d *encoderDict) Pos() int64 { return d.head }

// ByteAt returns the byte at the given distance.
func (d *encoderDict) ByteAt(distance int) byte {
	if !(0 < distance && distance <= d.Len()) {
		return 0
	}
	i := d.buf.rear - distance
	if i < 0 {
		i += len(d.buf.data)
	}
	return d.buf.data[i]
}

// CopyN copies the last n bytes from the dictionary into the provided
// writer. This is used for copying uncompressed data into an
// uncompressed segment.
func (d *encoderDict) CopyN(w io.Writer, n int) (written int, err error) {
	if n <= 0 {
		return 0, nil
	}
	m := d.Len()
	if n > m {
		n = m
		err = ErrNoSpace
	}
	i := d.buf.rear - n
	var e error
	if i < 0 {
		i += len(d.buf.data)
		if written, e = w.Write(d.buf.data[i:]); e != nil {
			return written, e
		}
		i = 0
	}
	var k int
	k, e = w.Write(d.buf.data[i:d.buf.rear])
	written += k
	if e != nil {
		err = e
	}
	return written, err
}

// Buffered returns the number of bytes in the buffer.
func (d *encoderDict) Buffered() int { return d.buf.Buffered() }
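
ByteAt and CopyN above both index backwards from buf.rear with wrap-around. A minimal standalone ring sketch of the same arithmetic (ring is a hypothetical stand-in for the package's buffer type):

package main

import "fmt"

// ring is a hypothetical minimal stand-in for the package's buffer type:
// rear marks the write position; data wraps around.
type ring struct {
	data []byte
	rear int
}

// byteAt returns the byte `distance` positions behind rear, with the
// same wrap-around used by encoderDict.ByteAt above.
func (r *ring) byteAt(distance int) byte {
	i := r.rear - distance
	if i < 0 {
		i += len(r.data)
	}
	return r.data[i]
}

func main() {
	// After writing "abcdef" into a 4-byte ring, rear sits past 'f'
	// and the live window is "cdef" (oldest to newest).
	r := &ring{data: []byte{'e', 'f', 'c', 'd'}, rear: 2}
	fmt.Printf("%c %c\n", r.byteAt(1), r.byteAt(4)) // f c
}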
309
vendor/github.com/ulikunitz/xz/lzma/hashtable.go
generated
vendored
Normal file
@ -0,0 +1,309 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"

	"github.com/ulikunitz/xz/internal/hash"
)

/* For compression we need to find byte sequences that match the byte
 * sequence at the dictionary head. A hash table is a simple method to
 * provide this capability.
 */

// maxMatches limits the number of matches requested from the Matches
// function. This controls the speed of the overall encoding.
const maxMatches = 16

// shortDists defines the number of short distances supported by the
// implementation.
const shortDists = 8

// The minimum is somewhat arbitrary but the maximum is limited by the
// memory requirements of the hash table.
const (
	minTableExponent = 9
	maxTableExponent = 20
)

// newRoller contains the function used to create an instance of the
// hash.Roller.
var newRoller = func(n int) hash.Roller { return hash.NewCyclicPoly(n) }

// hashTable stores the hash table including the rolling hash method.
//
// We implement chained hashing into a circular buffer. Each entry in
// the circular buffer stores the delta distance to the next position with a
// word that has the same hash value.
type hashTable struct {
	dict *encoderDict
	// actual hash table
	t []int64
	// circular list data with the offset to the next word
	data  []uint32
	front int
	// mask for computing the index for the hash table
	mask uint64
	// hash offset; initial value is -int64(wordLen)
	hoff int64
	// length of the hashed word
	wordLen int
	// hash roller for computing the hash values for the Write
	// method
	wr hash.Roller
	// hash roller for computing arbitrary hashes
	hr hash.Roller
	// preallocated slices
	p         [maxMatches]int64
	distances [maxMatches + shortDists]int
}

// hashTableExponent derives the hash table exponent from the dictionary
// capacity.
func hashTableExponent(n uint32) int {
	e := 30 - nlz32(n)
	switch {
	case e < minTableExponent:
		e = minTableExponent
	case e > maxTableExponent:
		e = maxTableExponent
	}
	return e
}
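
In other words, the table gets roughly capacity/2 slots, clamped between 2^9 and 2^20. A quick standalone check (math/bits.LeadingZeros32 stands in for nlz32):

package main

import (
	"fmt"
	"math/bits"
)

// exponent mirrors hashTableExponent: roughly floor(log2(capacity))-1,
// clamped to [minTableExponent, maxTableExponent].
func exponent(n uint32) int {
	e := 30 - bits.LeadingZeros32(n)
	if e < 9 {
		e = 9
	}
	if e > 20 {
		e = 20
	}
	return e
}

func main() {
	for _, c := range []uint32{1 << 10, 1 << 16, 1 << 20, 1 << 26} {
		fmt.Println(c, exponent(c), 1<<uint(exponent(c))) // capacity, exponent, table size
	}
}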
// newHashTable creates a new hash table for words of length wordLen.
func newHashTable(capacity int, wordLen int) (t *hashTable, err error) {
	if !(0 < capacity) {
		return nil, errors.New(
			"newHashTable: capacity must be larger than zero")
	}
	exp := hashTableExponent(uint32(capacity))
	if !(1 <= wordLen && wordLen <= 4) {
		return nil, errors.New("newHashTable: " +
			"argument wordLen out of range")
	}
	n := 1 << uint(exp)
	if n <= 0 {
		panic("newHashTable: exponent is too large")
	}
	t = &hashTable{
		t:       make([]int64, n),
		data:    make([]uint32, capacity),
		mask:    (uint64(1) << uint(exp)) - 1,
		hoff:    -int64(wordLen),
		wordLen: wordLen,
		wr:      newRoller(wordLen),
		hr:      newRoller(wordLen),
	}
	return t, nil
}

func (t *hashTable) SetDict(d *encoderDict) { t.dict = d }

// buffered returns the number of bytes that are currently hashed.
func (t *hashTable) buffered() int {
	n := t.hoff + 1
	switch {
	case n <= 0:
		return 0
	case n >= int64(len(t.data)):
		return len(t.data)
	}
	return int(n)
}

// addIndex adds n to an index ensuring that it stays inside the
// circular buffer for the hash chain.
func (t *hashTable) addIndex(i, n int) int {
	i += n - len(t.data)
	if i < 0 {
		i += len(t.data)
	}
	return i
}

// putDelta puts the delta instance at the current front of the circular
// chain buffer.
func (t *hashTable) putDelta(delta uint32) {
	t.data[t.front] = delta
	t.front = t.addIndex(t.front, 1)
}

// putEntry puts a new entry into the hash table. If there is already a
// value stored it is moved into the circular chain buffer.
func (t *hashTable) putEntry(h uint64, pos int64) {
	if pos < 0 {
		return
	}
	i := h & t.mask
	old := t.t[i] - 1
	t.t[i] = pos + 1
	var delta int64
	if old >= 0 {
		delta = pos - old
		if delta > 1<<32-1 || delta > int64(t.buffered()) {
			delta = 0
		}
	}
	t.putDelta(uint32(delta))
}

// WriteByte converts a single byte into a hash and puts it into the hash
// table.
func (t *hashTable) WriteByte(b byte) error {
	h := t.wr.RollByte(b)
	t.hoff++
	t.putEntry(h, t.hoff)
	return nil
}

// Write converts the bytes provided into hashes and stores the
// abbreviated offsets into the hash table. The method will never return an
// error.
func (t *hashTable) Write(p []byte) (n int, err error) {
	for _, b := range p {
		// WriteByte doesn't generate an error.
		t.WriteByte(b)
	}
	return len(p), nil
}

// getMatches returns the positions of the matches for a specific hash.
// The function returns the number of positions found.
//
// TODO: Make a getDistances because that is what we are actually
// interested in.
func (t *hashTable) getMatches(h uint64, positions []int64) (n int) {
	if t.hoff < 0 || len(positions) == 0 {
		return 0
	}
	buffered := t.buffered()
	tailPos := t.hoff + 1 - int64(buffered)
	rear := t.front - buffered
	if rear >= 0 {
		rear -= len(t.data)
	}
	// get the slot for the hash
	pos := t.t[h&t.mask] - 1
	delta := pos - tailPos
	for {
		if delta < 0 {
			return n
		}
		positions[n] = tailPos + delta
		n++
		if n >= len(positions) {
			return n
		}
		i := rear + int(delta)
		if i < 0 {
			i += len(t.data)
		}
		u := t.data[i]
		if u == 0 {
			return n
		}
		delta -= int64(u)
	}
}
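
The chain walk above is easier to see without the circular buffer: each stored delta points from a position back to the previous position with the same hash, and a zero delta terminates the chain. A flattened, standalone sketch of the traversal (walk and the flat delta slice are illustrative, not the package's layout):

package main

import "fmt"

// walk follows a delta chain: delta[i] is the distance from position i
// back to the previous position with the same hash (0 = end of chain).
func walk(head int64, delta []uint32, max int) (positions []int64) {
	pos := head
	for pos >= 0 && len(positions) < max {
		positions = append(positions, pos)
		d := delta[pos]
		if d == 0 {
			break
		}
		pos -= int64(d)
	}
	return positions
}

func main() {
	// A hash last seen at position 9; earlier occurrences at 5 and 2.
	delta := make([]uint32, 10)
	delta[9], delta[5], delta[2] = 4, 3, 0
	fmt.Println(walk(9, delta, 16)) // [9 5 2]
}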
// hash computes the rolling hash for the word stored in p. For correct
// results its length must be equal to t.wordLen.
func (t *hashTable) hash(p []byte) uint64 {
	var h uint64
	for _, b := range p {
		h = t.hr.RollByte(b)
	}
	return h
}

// Matches fills the positions slice with potential matches. The
// function returns the number of positions filled into positions. The
// byte slice p must have the word length of the hash table.
func (t *hashTable) Matches(p []byte, positions []int64) int {
	if len(p) != t.wordLen {
		panic(fmt.Errorf(
			"byte slice must have length %d", t.wordLen))
	}
	h := t.hash(p)
	return t.getMatches(h, positions)
}

// NextOp identifies the next operation using the hash table.
//
// TODO: Use all repetitions to find matches.
func (t *hashTable) NextOp(rep [4]uint32) operation {
	// get positions
	data := t.dict.data[:maxMatchLen]
	n, _ := t.dict.buf.Peek(data)
	data = data[:n]
	var p []int64
	if n < t.wordLen {
		p = t.p[:0]
	} else {
		p = t.p[:maxMatches]
		n = t.Matches(data[:t.wordLen], p)
		p = p[:n]
	}

	// convert positions into potential distances
	head := t.dict.head
	dists := append(t.distances[:0], 1, 2, 3, 4, 5, 6, 7, 8)
	for _, pos := range p {
		dis := int(head - pos)
		if dis > shortDists {
			dists = append(dists, dis)
		}
	}

	// check distances
	var m match
	dictLen := t.dict.DictLen()
	for _, dist := range dists {
		if dist > dictLen {
			continue
		}

		// Here comes a trick. We are only interested in matches
		// that are longer than the matches we have found before.
		// So before we test the whole byte sequence at the given
		// distance, we test the first byte that would make the
		// match longer. If it doesn't match the byte to match,
		// we don't need to care about this distance any longer.
		i := t.dict.buf.rear - dist + m.n
		if i < 0 {
			i += len(t.dict.buf.data)
		}
		if t.dict.buf.data[i] != data[m.n] {
			// We can't get a longer match. Jump to the next
			// distance.
			continue
		}

		n := t.dict.buf.matchLen(dist, data)
		switch n {
		case 0:
			continue
		case 1:
			if uint32(dist-minDistance) != rep[0] {
				continue
			}
		}
		if n > m.n {
			m = match{int64(dist), n}
			if n == len(data) {
				// No better match will be found.
				break
			}
		}
	}

	if m.n == 0 {
		return lit{data[0]}
	}
	return m
}
167
vendor/github.com/ulikunitz/xz/lzma/header.go
generated
vendored
Normal file
@ -0,0 +1,167 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lzma

import (
	"errors"
	"fmt"
)

// uint32LE reads a uint32 integer from a byte slice.
func uint32LE(b []byte) uint32 {
	x := uint32(b[3]) << 24
	x |= uint32(b[2]) << 16
	x |= uint32(b[1]) << 8
	x |= uint32(b[0])
	return x
}

// uint64LE converts the uint64 value stored as little endian to a uint64
// value.
func uint64LE(b []byte) uint64 {
	x := uint64(b[7]) << 56
	x |= uint64(b[6]) << 48
	x |= uint64(b[5]) << 40
	x |= uint64(b[4]) << 32
	x |= uint64(b[3]) << 24
	x |= uint64(b[2]) << 16
	x |= uint64(b[1]) << 8
	x |= uint64(b[0])
	return x
}

// putUint32LE puts a uint32 integer into a byte slice that must have at least
// a length of 4 bytes.
func putUint32LE(b []byte, x uint32) {
	b[0] = byte(x)
	b[1] = byte(x >> 8)
	b[2] = byte(x >> 16)
	b[3] = byte(x >> 24)
}

// putUint64LE puts the uint64 value into the byte slice as a little endian
// value. The byte slice b must have room for at least 8 bytes.
func putUint64LE(b []byte, x uint64) {
	b[0] = byte(x)
	b[1] = byte(x >> 8)
	b[2] = byte(x >> 16)
	b[3] = byte(x >> 24)
	b[4] = byte(x >> 32)
	b[5] = byte(x >> 40)
	b[6] = byte(x >> 48)
	b[7] = byte(x >> 56)
}
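
These helpers are byte-for-byte equivalent to encoding/binary's little-endian routines; a standalone check of the 32-bit case:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// The hand-rolled layout of putUint32LE above matches
	// binary.LittleEndian.PutUint32.
	a := make([]byte, 4)
	b := make([]byte, 4)
	x := uint32(0xdeadbeef)
	a[0], a[1], a[2], a[3] = byte(x), byte(x>>8), byte(x>>16), byte(x>>24)
	binary.LittleEndian.PutUint32(b, x)
	fmt.Println(bytes.Equal(a, b)) // true
}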
// noHeaderSize defines the value of the uncompressed-size field in the
// LZMA header when no size is given.
const noHeaderSize uint64 = 1<<64 - 1

// HeaderLen provides the length of the LZMA file header.
const HeaderLen = 13

// header represents the header of an LZMA file.
type header struct {
	properties Properties
	dictCap    int
	// uncompressed size; negative value if no size is given
	size int64
}

// marshalBinary marshals the header.
func (h *header) marshalBinary() (data []byte, err error) {
	if err = h.properties.verify(); err != nil {
		return nil, err
	}
	if !(0 <= h.dictCap && int64(h.dictCap) <= MaxDictCap) {
		return nil, fmt.Errorf("lzma: DictCap %d out of range",
			h.dictCap)
	}

	data = make([]byte, 13)

	// property byte
	data[0] = h.properties.Code()

	// dictionary capacity
	putUint32LE(data[1:5], uint32(h.dictCap))

	// uncompressed size
	var s uint64
	if h.size > 0 {
		s = uint64(h.size)
	} else {
		s = noHeaderSize
	}
	putUint64LE(data[5:], s)

	return data, nil
}

// unmarshalBinary unmarshals the header.
func (h *header) unmarshalBinary(data []byte) error {
	if len(data) != HeaderLen {
		return errors.New("lzma.unmarshalBinary: data has wrong length")
	}

	// properties
	var err error
	if h.properties, err = PropertiesForCode(data[0]); err != nil {
		return err
	}

	// dictionary capacity
	h.dictCap = int(uint32LE(data[1:]))
	if h.dictCap < 0 {
		return errors.New(
			"LZMA header: dictionary capacity exceeds maximum " +
				"integer")
	}

	// uncompressed size
	s := uint64LE(data[5:])
	if s == noHeaderSize {
		h.size = -1
	} else {
		h.size = int64(s)
		if h.size < 0 {
			return errors.New(
				"LZMA header: uncompressed size " +
					"out of int64 range")
		}
	}

	return nil
}
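
The wire layout handled above is the classic 13-byte LZMA header: one properties byte, a 4-byte little-endian dictionary capacity, and an 8-byte little-endian uncompressed size where all ones means unknown. A standalone parse sketch; the lc/lp/pb split uses the standard LZMA formula code = (pb*5+lp)*9+lc, which is an assumption here since this diff shows Properties only opaquely:

package main

import (
	"encoding/binary"
	"fmt"
)

// parseHeader decodes the 13-byte classic LZMA header laid out by
// marshalBinary above. The lc/lp/pb decoding of the properties byte is
// the standard LZMA formula, assumed here for illustration.
func parseHeader(data []byte) (lc, lp, pb int, dictCap uint32, size int64, err error) {
	if len(data) != 13 {
		return 0, 0, 0, 0, 0, fmt.Errorf("lzma: header must be 13 bytes")
	}
	c := int(data[0])
	lc, c = c%9, c/9
	lp, pb = c%5, c/5
	dictCap = binary.LittleEndian.Uint32(data[1:5])
	s := binary.LittleEndian.Uint64(data[5:])
	if s == 1<<64-1 { // noHeaderSize: size unknown
		size = -1
	} else {
		size = int64(s)
	}
	return lc, lp, pb, dictCap, size, nil
}

func main() {
	hdr := []byte{0x5d, 0, 0, 0x10, 0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
	lc, lp, pb, dc, sz, _ := parseHeader(hdr)
	fmt.Println(lc, lp, pb, dc, sz) // 3 0 2 1048576 -1
}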
// validDictCap checks whether the dictionary capacity is correct. This
// is used to weed out wrong file headers.
func validDictCap(dictcap int) bool {
	if int64(dictcap) == MaxDictCap {
		return true
	}
	for n := uint(10); n < 32; n++ {
		if dictcap == 1<<n {
			return true
		}
		if dictcap == 1<<n+1<<(n-1) {
			return true
		}
	}
	return false
}
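
The accepted capacities are therefore 2^n and 2^n+2^(n-1), i.e. 3*2^(n-1), for n >= 10 (plus MaxDictCap itself). The first few, enumerated standalone:

package main

import "fmt"

func main() {
	// validDictCap accepts 2^n and 2^n+2^(n-1) for n >= 10:
	// 1024, 1536, 2048, 3072, 4096, 6144, ...
	for n := uint(10); n < 14; n++ {
		fmt.Println(1<<n, 1<<n+1<<(n-1))
	}
}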
// ValidHeader checks for a valid LZMA file header. It allows only
// dictionary sizes of 2^n or 2^n+2^(n-1) with n >= 10 or 2^32-1. If
// there is an explicit size it must not exceed 256 GiB. The length of
// the data argument must be HeaderLen.
func ValidHeader(data []byte) bool {
	var h header
	if err := h.unmarshalBinary(data); err != nil {
		return false
	}
	if !validDictCap(h.dictCap) {
		return false
	}
	return h.size < 0 || h.size <= 1<<38
}
Some files were not shown because too many files have changed in this diff.