Compare commits

...

32 Commits

Author SHA1 Message Date
Antonio Murdaca
7add6fc80b version: bump v0.1.29
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2018-03-29 15:03:14 +02:00
Miloslav Trmač
eb9d74090e Merge pull request #485 from nlewo/pr/docker-archive-legacy
Add Docker legacy archive support
2018-03-28 22:38:49 +02:00
Antoine Eiche
61351d44d7 Vendor after merging https://github.com/containers/image/pull/370
Signed-off-by: Antoine Eiche <lewo@abesis.fr>
2018-03-28 18:46:26 +02:00
Miloslav Trmač
aa73bd9d0d Update for changed PutBlob API
Signed-off-by: Antoine Eiche <lewo@abesis.fr>
2018-03-28 18:46:14 +02:00
Miloslav Trmač
b08350db15 Merge pull request #477 from mtrmac/305-cleanup
Vendor mtrmac/image:305-cleanup
2018-03-15 16:17:46 +01:00
Miloslav Trmač
f63f78225d Update for types.Image.Inspect output change
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2018-03-15 15:26:00 +01:00
Miloslav Trmač
60aa4aa82d Vendor after merging mtrmac/image:305-cleanup
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
2018-03-15 15:25:31 +01:00
Miloslav Trmač
37264e21fb Merge pull request #483 from lsm5/contrib-storage
add storage.conf and manpage in contrib/
2018-03-12 19:07:12 +01:00
Lokesh Mandvekar
fe2591054c add storage.conf and manpage in contrib/
These files are used by deb and rpm packages, so I'd rather have them
upstream than maintain in 2 separate places.

Signed-off-by: Lokesh Mandvekar <lsm5@fedoraproject.org>
2018-03-12 13:28:43 -04:00
Miloslav Trmač
fd0c3d7f08 Merge pull request #482 from umohnani8/gzip
Vendor in latest containers/image
2018-03-09 04:08:37 +01:00
umohnani8
b325cc22b8 Vendor in latest containers/image
Adds support to handle compressed docker-archive files

Signed-off-by: umohnani8 <umohnani@redhat.com>
2018-03-08 15:42:28 -05:00
Miloslav Trmač
5f754820da Merge pull request #479 from umohnani8/dir
Fix skopeo tests with changes to dir transport
2018-02-22 17:08:40 +01:00
umohnani8
43acc747d5 Fix skopeo tests with changes to dir transport
The dir transport has been changed to save the blobs without the .tar extension
Fixes the skopeo tests failing due to this change

Signed-off-by: umohnani8 <umohnani@redhat.com>
2018-02-22 10:50:22 -05:00
Daniel J Walsh
b3dec98757 Merge pull request #476 from jonboulle/fixbuild
Dockerfile: bump to ubuntu 17.10
2018-02-12 14:36:15 -05:00
Jonathan Boulle
b1795a08fb Dockerfile: bump to ubuntu 17.10
17.04 is EOLed and no longer works.

Signed-off-by: Jonathan Boulle <jonathanboulle@gmail.com>
2018-02-12 19:58:11 +01:00
Antonio Murdaca
1307cac0c2 Merge pull request #468 from mtrmac/oci-schema-rebase
Re-vendor, notably opencontainers/image-spec to fix tests
2018-02-09 20:16:42 +01:00
Miloslav Trmač
dc1567c8bc Re-vendor, and use mtrmac/image-spec:id-based-loader to fix tests
Anyone running (vndr) currently ends up with failing tests in OCI schema
validation because gojsonschema has fixed its "$ref" interpretation, exposing
inconsistent URI usage inside image-spec/schema.

So, this runs (vndr), and uses mtrmac/image-spec:id-based-loader
( https://github.com/opencontainers/image-spec/pull/739 ) to make the tests pass
again.  As soon as that PR is merged we should revert to using the upstream
image-spec repo again.
2018-02-09 18:34:31 +01:00
Antonio Murdaca
22c524b0e0 Merge pull request #474 from runcom/bump-0.1.28
Bump 0.1.28
2018-01-31 16:23:15 +01:00
Antonio Murdaca
9a225c3968 version: bump to v0.1.29-dev
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2018-01-31 16:01:51 +01:00
Antonio Murdaca
0270e5694c version: bump to v0.1.28
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2018-01-31 16:01:27 +01:00
Miloslav Trmač
4ff902dab9 Merge pull request #470 from giuseppe/revendor-containers-image-2
vendor: bump containers/image and containers/storage
2018-01-22 16:38:19 +01:00
Giuseppe Scrivano
64b3bd28e3 vendor: bump containers/image and containers/storage
Update containers/image and containers/storage to the current master
revisions.

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
2018-01-17 15:47:07 +01:00
Miloslav Trmač
d8e506c648 Merge pull request #372 from nalind/storage-update
Bump containers/storage and containers/image
2018-01-04 16:39:23 +01:00
Nalin Dahyabhai
aa6c809e5a Bump containers/storage and containers/image
Update containers/image and containers/storage to the current master
revisions.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2017-12-15 13:36:23 -05:00
Miloslav Trmač
1c27d6918f Merge pull request #466 from nalind/update-storage
Bump containers/storage and containers/image
2017-12-14 12:21:08 +01:00
Nalin Dahyabhai
9f2491694d Bump containers/storage and containers/image
Re-vendor containers/storage to current revision
0d32dfce498e06c132c60dac945081bf44c22464, and containers/image to
current revision c8bcd6aa11c62637c5a7da1420f43dd6a15f0e8d.

Signed-off-by: Nalin Dahyabhai <nalin@redhat.com>
2017-12-13 11:03:37 -05:00
Miloslav Trmač
14245f2e24 Merge pull request #461 from jonjohnsonjr/patch-1
Update README.md
2017-11-30 16:36:54 +01:00
jonjohnsonjr
8a1d480274 Update README.md
Fix OCI image spec link.
2017-11-29 14:08:38 -08:00
Miloslav Trmač
78b29a5c2f Merge pull request #460 from giuseppe/revendor-containers-image
vendor: revendor containers/image
2017-11-25 13:30:45 +01:00
Giuseppe Scrivano
20d31daec0 vendor: revendor containers/image
Include last changes in the ostree driver.

Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
2017-11-24 22:23:47 +01:00
Antonio Murdaca
5a8f212630 Merge pull request #458 from runcom/bump-v0.1.27
Bump v0.1.27
2017-11-22 02:27:18 +01:00
Antonio Murdaca
34e77f9897 version: bump to v0.1.28-dev
Signed-off-by: Antonio Murdaca <runcom@redhat.com>
2017-11-22 01:53:39 +01:00
442 changed files with 45739 additions and 11387 deletions


@@ -1,4 +1,4 @@
-FROM ubuntu:17.04
+FROM ubuntu:17.10
RUN apt-get update && apt-get install -y \
golang \


@@ -7,7 +7,7 @@ skopeo [![Build Status](https://travis-ci.org/projectatomic/skopeo.svg?branch=ma
`skopeo` is a command line utility that performs various operations on container images and image repositories.
-`skopeo` can work with (OCI images)[https://github.com/opencontainers/image-spec] as well as the original Docker v2 images.
+`skopeo` can work with [OCI images](https://github.com/opencontainers/image-spec) as well as the original Docker v2 images.
Skopeo works with API V2 registries such as Docker registries, the Atomic registry, private registries, local directories and local OCI-layout directories. Skopeo does not require a daemon to be running to perform these operations which consist of:
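For example, a daemon-less inspection is as simple as `skopeo inspect docker://docker.io/library/alpine:latest`, which prints the remote image's configuration and tags without pulling it (an illustrative invocation; exact output varies by skopeo version).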


@@ -21,7 +21,7 @@ type inspectOutput struct {
Tag string `json:",omitempty"`
Digest digest.Digest
RepoTags []string
-Created time.Time
+Created *time.Time
DockerVersion string
Labels map[string]string
Architecture string
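The switch from `time.Time` to `*time.Time` lets `skopeo inspect` distinguish "no creation time recorded" from the zero timestamp. A minimal sketch (local struct mirroring the fragment above, not skopeo's actual code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// With a pointer, a missing creation time marshals as null instead of
// the misleading zero value "0001-01-01T00:00:00Z".
type inspectOutput struct {
	Tag     string `json:",omitempty"`
	Created *time.Time
}

func main() {
	out, _ := json.Marshal(inspectOutput{Tag: "latest"}) // Created left nil
	fmt.Println(string(out))                             // {"Tag":"latest","Created":null}
}
```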


@@ -46,7 +46,11 @@ var layersCmd = cli.Command{
}
}()
-var blobDigests []digest.Digest
+type blobDigest struct {
+digest digest.Digest
+isConfig bool
+}
+var blobDigests []blobDigest
for _, dString := range c.Args().Tail() {
if !strings.HasPrefix(dString, "sha256:") {
dString = "sha256:" + dString
@@ -55,7 +59,7 @@ var layersCmd = cli.Command{
if err != nil {
return err
}
-blobDigests = append(blobDigests, d)
+blobDigests = append(blobDigests, blobDigest{digest: d, isConfig: false})
}
if len(blobDigests) == 0 {
@@ -63,13 +67,13 @@ var layersCmd = cli.Command{
seenLayers := map[digest.Digest]struct{}{}
for _, info := range layers {
if _, ok := seenLayers[info.Digest]; !ok {
-blobDigests = append(blobDigests, info.Digest)
+blobDigests = append(blobDigests, blobDigest{digest: info.Digest, isConfig: false})
seenLayers[info.Digest] = struct{}{}
}
}
configInfo := src.ConfigInfo()
if configInfo.Digest != "" {
-blobDigests = append(blobDigests, configInfo.Digest)
+blobDigests = append(blobDigests, blobDigest{digest: configInfo.Digest, isConfig: true})
}
}
@@ -92,12 +96,12 @@ var layersCmd = cli.Command{
}
}()
-for _, digest := range blobDigests {
-r, blobSize, err := rawSource.GetBlob(types.BlobInfo{Digest: digest, Size: -1})
+for _, bd := range blobDigests {
+r, blobSize, err := rawSource.GetBlob(types.BlobInfo{Digest: bd.digest, Size: -1})
if err != nil {
return err
}
-if _, err := dest.PutBlob(r, types.BlobInfo{Digest: digest, Size: blobSize}); err != nil {
+if _, err := dest.PutBlob(r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, bd.isConfig); err != nil {
if closeErr := r.Close(); closeErr != nil {
return errors.Wrapf(err, " (close error: %v)", closeErr)
}
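These hunks track the containers/image API change in which `PutBlob` gained an `isConfig` flag, letting destinations treat config blobs differently from layers. A self-contained sketch of the new calling convention (local types, not the real containers/image interfaces):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
)

// BlobInfo is a stand-in for types.BlobInfo; Size == -1 means unknown.
type BlobInfo struct {
	Digest string
	Size   int64
}

type printDest struct{}

// PutBlob mirrors the updated signature: the extra isConfig flag tells the
// destination whether the stream is the image config or a layer.
func (printDest) PutBlob(stream io.Reader, info BlobInfo, isConfig bool) (BlobInfo, error) {
	n, err := io.Copy(ioutil.Discard, stream)
	if err != nil {
		return BlobInfo{}, err
	}
	kind := "layer"
	if isConfig {
		kind = "config"
	}
	fmt.Printf("wrote %s blob %s (%d bytes)\n", kind, info.Digest, n)
	return BlobInfo{Digest: info.Digest, Size: n}, nil
}

func main() {
	d := printDest{}
	d.PutBlob(bytes.NewReader([]byte(`{"architecture":"amd64"}`)), BlobInfo{Digest: "sha256:...", Size: -1}, true)
}
```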


@@ -0,0 +1,60 @@
% storage.conf(5) Container Storage Configuration File
% Dan Walsh
% May 2017
# NAME
storage.conf - Syntax of Container Storage configuration file
# DESCRIPTION
The STORAGE configuration file specifies all of the available container storage options
for tools using shared container storage.
# FORMAT
The [TOML format][toml] is used as the encoding of the configuration file.
Every option and subtable listed here is nested under a global "storage" table.
No bare options are used. The format of TOML can be simplified to:
[table]
option = value
[table.subtable1]
option = value
[table.subtable2]
option = value
## STORAGE TABLE
The `storage` table supports the following options:
**graphroot**=""
container storage graph dir (default: "/var/lib/containers/storage")
Default directory to store all writable content created by container storage programs.
**runroot**=""
container storage run dir (default: "/var/run/containers/storage")
Default directory to store all temporary writable content created by container storage programs.
**driver**=""
container storage driver (default is "overlay")
Default Copy On Write (COW) container storage driver.
### STORAGE OPTIONS TABLE
The `storage.options` table supports the following options:
**additionalimagestores**=[]
Paths to additional container image stores. Usually these are read-only and stored on remote network shares.
**size**=""
Maximum size of a container image. Default is 10GB. This flag can be used to set quota
on the size of container images.
**override_kernel_check**=""
Tell storage drivers to ignore kernel version checks. Some storage drivers assume that if a kernel is too
old, the driver is not supported. But for kernels that have had the drivers backported, this flag
allows users to override the checks.
# HISTORY
May 2017, Originally compiled by Dan Walsh <dwalsh@redhat.com>
Format copied from crio.conf man page created by Aleksa Sarai <asarai@suse.de>

contrib/storage.conf (new file, 28 lines)

@@ -0,0 +1,28 @@
# storage.conf is the configuration file for all tools
# that share the containers/storage libraries
# See man 5 containers-storage.conf for more information
# The "container storage" table contains all of the server options.
[storage]
# Default Storage Driver
driver = "overlay"
# Temporary storage location
runroot = "/var/run/containers/storage"
# Primary read-write location of container storage
graphroot = "/var/lib/containers/storage"
[storage.options]
# AdditionalImageStores is used to pass paths to additional read-only image stores
# Must be comma separated list.
additionalimagestores = [
]
# Size is used to set a maximum size of the container image. Only supported by
# certain container storage drivers (currently overlay, zfs, vfs, btrfs)
size = ""
# OverrideKernelCheck tells the driver to ignore kernel checks based on kernel version
override_kernel_check = "true"
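For tools that consume this file, parsing is a straightforward TOML decode. A hedged sketch using the github.com/BurntSushi/toml parser (an assumption; containers/storage ships its own loader), with struct fields matching the keys above:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type storageConfig struct {
	Storage struct {
		Driver    string `toml:"driver"`
		RunRoot   string `toml:"runroot"`
		GraphRoot string `toml:"graphroot"`
		Options   struct {
			AdditionalImageStores []string `toml:"additionalimagestores"`
			Size                  string   `toml:"size"`
			OverrideKernelCheck   string   `toml:"override_kernel_check"`
		} `toml:"options"`
	} `toml:"storage"`
}

func main() {
	var cfg storageConfig
	// Path assumed; the deb/rpm packages discussed here install the file under /etc/containers/.
	if _, err := toml.DecodeFile("/etc/containers/storage.conf", &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("driver=%s graphroot=%s\n", cfg.Storage.Driver, cfg.Storage.GraphRoot)
}
```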


@@ -379,7 +379,7 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
// Compression during copy
func (s *CopySuite) TestCopyCompression(c *check.C) {
-const uncompresssedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710.tar"
+const uncompresssedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"
topDir, err := ioutil.TempDir("", "compression-top")
c.Assert(err, check.IsNil)
@@ -411,9 +411,7 @@ func (s *CopySuite) TestCopyCompression(c *check.C) {
fis, err := dirf.Readdir(-1)
c.Assert(err, check.IsNil)
for _, fi := range fis {
-if strings.HasSuffix(fi.Name(), ".tar") {
-c.Assert(fi.Size() < 2048, check.Equals, true)
-}
+c.Assert(fi.Size() < 2048, check.Equals, true)
}
}
}


@@ -22,10 +22,20 @@ github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
# end docker deps
golang.org/x/text master
github.com/docker/distribution master
# docker/distributions dependencies
github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564
github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
# end of docker/distribution dependencies
github.com/docker/libtrust master
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/opencontainers/runc master
-github.com/opencontainers/image-spec v1.0.0
+github.com/opencontainers/image-spec 149252121d044fddff670adcdc67f33148e16226
# -- start OCI image validation requirements.
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/image-tools 6d941547fa1df31900990b3fb47ec2468c9c6469

vendor/github.com/beorn7/perks/LICENSE (generated, vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/beorn7/perks/README.md (generated, vendored, new file, 31 lines)

@@ -0,0 +1,31 @@
# Perks for Go (golang.org)
Perks contains the Go package quantile that computes approximate quantiles over
an unbounded data stream within low memory and CPU bounds.
For more information and examples, see:
http://godoc.org/github.com/bmizerany/perks
A very special thank you and shout out to Graham Cormode (Rutgers University),
Flip Korn (AT&T Labs-Research), S. Muthukrishnan (Rutgers University), and
Divesh Srivastava (AT&T Labs-Research) for their research and publication of
[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf)
Thank you, also:
* Armon Dadgar (@armon)
* Andrew Gerrand (@nf)
* Brad Fitzpatrick (@bradfitz)
* Keith Rarick (@kr)
FAQ:
Q: Why not move the quantile package into the project root?
A: I want to add more packages to perks later.
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/beorn7/perks/quantile/stream.go (generated, vendored, new file, 292 lines)

@@ -0,0 +1,292 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
Value float64 `json:",string"`
Width float64 `json:",string"`
Delta float64 `json:",string"`
}
// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample
func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * r
}
return newStream(ƒ)
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * (s.n - r)
}
return newStream(ƒ)
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for quantile, epsilon := range targets {
if quantile*s.n <= r {
f = (2 * epsilon * r) / quantile
} else {
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
}
if f < m {
m = f
}
}
return m
}
return newStream(ƒ)
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
*stream
b Samples
sorted bool
}
func newStream(ƒ invariant) *Stream {
x := &stream{ƒ: ƒ}
return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
s.insert(Sample{Value: v, Width: 1})
}
func (s *Stream) insert(sample Sample) {
s.b = append(s.b, sample)
s.sorted = false
if len(s.b) == cap(s.b) {
s.flush()
}
}
// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
if !s.flushed() {
// Fast path when there hasn't been enough data for a flush;
// this also yields better accuracy for small sets of data.
l := len(s.b)
if l == 0 {
return 0
}
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}
s.maybeSort()
return s.b[i].Value
}
s.flush()
return s.stream.query(q)
}
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
sort.Sort(samples)
s.stream.merge(samples)
}
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
s.stream.reset()
s.b = s.b[:0]
}
// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
if !s.flushed() {
return s.b
}
s.flush()
return s.stream.samples()
}
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
return len(s.b) + s.stream.count()
}
func (s *Stream) flush() {
s.maybeSort()
s.stream.merge(s.b)
s.b = s.b[:0]
}
func (s *Stream) maybeSort() {
if !s.sorted {
s.sorted = true
sort.Sort(s.b)
}
}
func (s *Stream) flushed() bool {
return len(s.stream.l) > 0
}
type stream struct {
n float64
l []Sample
ƒ invariant
}
func (s *stream) reset() {
s.l = s.l[:0]
s.n = 0
}
func (s *stream) insert(v float64) {
s.merge(Samples{{v, 1, 0}})
}
func (s *stream) merge(samples Samples) {
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
var r float64
i := 0
for _, sample := range samples {
for ; i < len(s.l); i++ {
c := s.l[i]
if c.Value > sample.Value {
// Insert at position i.
s.l = append(s.l, Sample{})
copy(s.l[i+1:], s.l[i:])
s.l[i] = Sample{
sample.Value,
sample.Width,
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
// TODO(beorn7): How to calculate delta correctly?
}
i++
goto inserted
}
r += c.Width
}
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
i++
inserted:
s.n += sample.Width
r += sample.Width
}
s.compress()
}
func (s *stream) count() int {
return int(s.n)
}
func (s *stream) query(q float64) float64 {
t := math.Ceil(q * s.n)
t += math.Ceil(s.ƒ(s, t) / 2)
p := s.l[0]
var r float64
for _, c := range s.l[1:] {
r += p.Width
if r+c.Width+c.Delta > t {
return p.Value
}
p = c
}
return p.Value
}
func (s *stream) compress() {
if len(s.l) < 2 {
return
}
x := s.l[len(s.l)-1]
xi := len(s.l) - 1
r := s.n - 1 - x.Width
for i := len(s.l) - 2; i >= 0; i-- {
c := s.l[i]
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
x.Width += c.Width
s.l[xi] = x
// Remove element at i.
copy(s.l[i:], s.l[i+1:])
s.l = s.l[:len(s.l)-1]
xi -= 1
} else {
x = c
xi = i
}
r -= c.Width
}
}
func (s *stream) samples() Samples {
samples := make(Samples, len(s.l))
copy(samples, s.l)
return samples
}
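A short usage sketch for this vendored quantile package, based on the exported API above (NewTargeted, Insert, Query, Count):

```go
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and the 99th percentile with per-quantile
	// absolute error bounds, as documented for NewTargeted above.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.99: 0.001,
	})
	for i := 0; i < 100000; i++ {
		q.Insert(rand.NormFloat64())
	}
	fmt.Printf("n=%d p50=%.3f p99=%.3f\n", q.Count(), q.Query(0.50), q.Query(0.99))
}
```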


@@ -368,6 +368,18 @@ func (ic *imageCopier) copyLayers() error {
srcInfos := ic.src.LayerInfos()
destInfos := []types.BlobInfo{}
diffIDs := []digest.Digest{}
updatedSrcInfos, err := ic.src.LayerInfosForCopy()
if err != nil {
return err
}
srcInfosUpdated := false
if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
if !ic.canModifyManifest {
return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
}
srcInfos = updatedSrcInfos
srcInfosUpdated = true
}
for _, srcLayer := range srcInfos {
var (
destInfo types.BlobInfo
@@ -396,7 +408,7 @@ func (ic *imageCopier) copyLayers() error {
if ic.diffIDsAreNeeded {
ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
}
-if layerDigestsDiffer(srcInfos, destInfos) {
+if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
ic.manifestUpdates.LayerInfos = destInfos
}
return nil
@@ -463,7 +475,7 @@ func (c *copier) copyConfig(src types.Image) error {
if err != nil {
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
}
-destInfo, err := c.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false)
+destInfo, err := c.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false, true)
if err != nil {
return err
}
@@ -561,7 +573,7 @@ func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.Bl
return pipeWriter
}
}
-blobInfo, err := ic.c.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success
+blobInfo, err := ic.c.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false) // Sets err to nil on success
return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
@@ -597,7 +609,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
-canCompress bool) (types.BlobInfo, error) {
+canModifyBlob bool, isConfig bool) (types.BlobInfo, error) {
// The copying happens through a pipeline of connected io.Readers.
// === Input: srcStream
@@ -638,12 +650,9 @@ func (c *copier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
originalLayerReader = destStream
}
-// === Compress the layer if it is uncompressed and compression is desired
+// === Deal with layer compression/decompression if necessary
var inputInfo types.BlobInfo
-if !canCompress || isCompressed || !c.dest.ShouldCompressLayers() {
-logrus.Debugf("Using original blob without modification")
-inputInfo = srcInfo
-} else {
+if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
logrus.Debugf("Compressing blob on the fly")
pipeReader, pipeWriter := io.Pipe()
defer pipeReader.Close()
@@ -655,6 +664,17 @@ func (c *copier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
destStream = pipeReader
inputInfo.Digest = ""
inputInfo.Size = -1
+} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
+logrus.Debugf("Blob will be decompressed")
+destStream, err = decompressor(destStream)
+if err != nil {
+return types.BlobInfo{}, err
+}
+inputInfo.Digest = ""
+inputInfo.Size = -1
+} else {
+logrus.Debugf("Using original blob without modification")
+inputInfo = srcInfo
}
// === Report progress using the c.progress channel, if required.
@@ -669,7 +689,7 @@ func (c *copier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
}
// === Finally, send the layer stream to dest.
-uploadedInfo, err := c.dest.PutBlob(destStream, inputInfo)
+uploadedInfo, err := c.dest.PutBlob(destStream, inputInfo, isConfig)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
}
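The "Compressing blob on the fly" branch above relies on an `io.Pipe` so the recompressed layer is streamed straight into `PutBlob` without being buffered whole. A simplified standalone sketch of that pattern (gzip assumed as the compressor; the real code plugs in its own compression package and progress reporting):

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// compressOnTheFly returns a reader producing the gzip-compressed bytes of
// src; a goroutine feeds the pipe while the caller consumes it.
func compressOnTheFly(src io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		gz := gzip.NewWriter(pw)
		_, err := io.Copy(gz, src)
		if cerr := gz.Close(); err == nil {
			err = cerr
		}
		pw.CloseWithError(err) // a nil err closes the pipe with io.EOF
	}()
	return pr
}

func main() {
	out, err := ioutil.ReadAll(compressOnTheFly(strings.NewReader("layer bytes, layer bytes")))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(out), "compressed bytes")
}
```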


@@ -46,6 +46,11 @@ func (ic *imageCopier) determineManifestConversion(destSupportedManifestMIMEType
if err != nil { // This should have been cached?!
return "", nil, errors.Wrap(err, "Error reading manifest")
}
normalizedSrcType := manifest.NormalizedMIMEType(srcType)
if srcType != normalizedSrcType {
logrus.Debugf("Source manifest MIME type %s, treating it as %s", srcType, normalizedSrcType)
srcType = normalizedSrcType
}
if forceManifestMIMEType != "" {
destSupportedManifestMIMETypes = []string{forceManifestMIMEType}


@@ -12,7 +12,7 @@ import (
"github.com/sirupsen/logrus"
)
const version = "Directory Transport Version: 1.0\n"
const version = "Directory Transport Version: 1.1\n"
// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created
// using the 'dir' transport
@@ -70,7 +70,7 @@ func newImageDestination(ref dirReference, compress bool) (types.ImageDestinatio
}
}
// create version file
-err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0755)
+err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
if err != nil {
return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath())
}
@@ -98,9 +98,11 @@ func (d *dirImageDestination) SupportsSignatures() error {
return nil
}
-// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
-func (d *dirImageDestination) ShouldCompressLayers() bool {
-return d.compress
+func (d *dirImageDestination) DesiredLayerCompression() types.LayerCompression {
+if d.compress {
+return types.Compress
+}
+return types.PreserveOriginal
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
@@ -120,7 +122,7 @@ func (d *dirImageDestination) MustMatchRuntimeOS() bool {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
if err != nil {
return types.BlobInfo{}, err
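The `ShouldCompressLayers() bool` to `DesiredLayerCompression() types.LayerCompression` migration running through these files replaces a yes/no answer with a three-way one. A local sketch of how the copy pipeline interprets it (names local to the example; the real constants live in containers/image/types):

```go
package main

import "fmt"

type LayerCompression int

const (
	PreserveOriginal LayerCompression = iota // e.g. the docker-daemon destination
	Compress                                 // e.g. the docker registry destination
	Decompress                               // e.g. the docker-archive destination
)

// plan mirrors the branch structure in the copy.go hunk above.
func plan(want LayerCompression, srcIsCompressed bool) string {
	switch {
	case want == Compress && !srcIsCompressed:
		return "compress blob on the fly"
	case want == Decompress && srcIsCompressed:
		return "decompress blob on the fly"
	default:
		return "use original blob without modification"
	}
}

func main() {
	fmt.Println(plan(Decompress, true))
}
```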


@@ -52,11 +52,11 @@ func (s *dirImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, str
func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
r, err := os.Open(s.ref.layerPath(info.Digest))
if err != nil {
-return nil, 0, nil
+return nil, -1, err
}
fi, err := r.Stat()
if err != nil {
-return nil, 0, nil
+return nil, -1, err
}
return r, fi.Size(), nil
}
@@ -82,3 +82,8 @@ func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *dige
}
return signatures, nil
}
// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified.
func (s *dirImageSource) LayerInfosForCopy() ([]types.BlobInfo, error) {
return nil, nil
}


@@ -5,14 +5,13 @@ import (
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/containers/image/directory/explicitfilepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
func init() {
@@ -173,7 +172,7 @@ func (ref dirReference) manifestPath() string {
// layerPath returns a path for a layer tarball within a directory using our conventions.
func (ref dirReference) layerPath(digest digest.Digest) string {
// FIXME: Should we keep the digest identification?
-return filepath.Join(ref.path, digest.Hex()+".tar")
+return filepath.Join(ref.path, digest.Hex())
}
// signaturePath returns a path for a signature within a directory using our conventions.
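In practice this means a `dir:` image written by the new code stores each layer as a bare hex digest, e.g. `160d823f…` instead of `160d823f….tar` (hypothetical digest), next to `manifest.json`; the Directory Transport Version bump to 1.1 above records the new layout.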


@@ -46,6 +46,11 @@ func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types.
}, nil
}
// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
func (d *archiveImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.Decompress
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *archiveImageDestination) Reference() types.ImageReference {


@@ -13,15 +13,18 @@ type archiveImageSource struct {
// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
-func newImageSource(ctx *types.SystemContext, ref archiveReference) types.ImageSource {
+func newImageSource(ctx *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
if ref.destinationRef != nil {
logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
}
-src := tarfile.NewSource(ref.path)
+src, err := tarfile.NewSourceFromFile(ref.path)
+if err != nil {
+return nil, err
+}
return &archiveImageSource{
Source: src,
ref: ref,
-}
+}, nil
}
// Reference returns the reference used to set up this source, _as specified by the user_
@@ -30,7 +33,7 @@ func (s *archiveImageSource) Reference() types.ImageReference {
return s.ref
}
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *archiveImageSource) Close() error {
-return nil
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *archiveImageSource) LayerInfosForCopy() ([]types.BlobInfo, error) {
+return nil, nil
}


@@ -131,14 +131,17 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
-src := newImageSource(ctx, ref)
+src, err := newImageSource(ctx, ref)
+if err != nil {
+return nil, err
+}
return ctrImage.FromSource(ctx, src)
}
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref archiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
-return newImageSource(ctx, ref), nil
+return newImageSource(ctx, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.


@@ -14,6 +14,7 @@ import (
type daemonImageDestination struct {
ref daemonReference
mustMatchRuntimeOS bool
*tarfile.Destination // Implements most of types.ImageDestination
// For talking to imageLoadGoroutine
goroutineCancel context.CancelFunc
@@ -33,6 +34,11 @@ func newImageDestination(ctx *types.SystemContext, ref daemonReference) (types.I
return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
}
var mustMatchRuntimeOS = true
if ctx != nil && ctx.DockerDaemonHost != client.DefaultDockerHost {
mustMatchRuntimeOS = false
}
c, err := newDockerClient(ctx)
if err != nil {
return nil, errors.Wrap(err, "Error initializing docker engine client")
@@ -46,12 +52,13 @@ func newImageDestination(ctx *types.SystemContext, ref daemonReference) (types.I
go imageLoadGoroutine(goroutineContext, c, reader, statusChannel)
return &daemonImageDestination{
-ref: ref,
-Destination: tarfile.NewDestination(writer, namedTaggedRef),
-goroutineCancel: goroutineCancel,
-statusChannel: statusChannel,
-writer: writer,
-committed: false,
+ref: ref,
+mustMatchRuntimeOS: mustMatchRuntimeOS,
+Destination: tarfile.NewDestination(writer, namedTaggedRef),
+goroutineCancel: goroutineCancel,
+statusChannel: statusChannel,
+writer: writer,
+committed: false,
}, nil
}
@@ -78,9 +85,14 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
defer resp.Body.Close()
}
// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.PreserveOriginal
}
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
-return true
+return d.mustMatchRuntimeOS
}
// Close removes resources associated with an initialized ImageDestination, if any.


@@ -1,22 +1,15 @@
package daemon
import (
"io"
"io/ioutil"
"os"
"github.com/containers/image/docker/tarfile"
"github.com/containers/image/types"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
-const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
type daemonImageSource struct {
ref daemonReference
*tarfile.Source // Implements most of types.ImageSource
-tarCopyPath string
}
type layerInfo struct {
@@ -46,29 +39,13 @@ func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageS
}
defer inputStream.Close()
// FIXME: use SystemContext here.
-tarCopyFile, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-tar")
+src, err := tarfile.NewSourceFromStream(inputStream)
if err != nil {
return nil, err
}
-defer tarCopyFile.Close()
-succeeded := false
-defer func() {
-if !succeeded {
-os.Remove(tarCopyFile.Name())
-}
-}()
-if _, err := io.Copy(tarCopyFile, inputStream); err != nil {
-return nil, err
-}
-succeeded = true
return &daemonImageSource{
-ref: ref,
-Source: tarfile.NewSource(tarCopyFile.Name()),
-tarCopyPath: tarCopyFile.Name(),
+ref: ref,
+Source: src,
}, nil
}
@@ -78,7 +55,7 @@ func (s *daemonImageSource) Reference() types.ImageReference {
return s.ref
}
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *daemonImageSource) Close() error {
-return os.Remove(s.tarCopyPath)
+// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
+func (s *daemonImageSource) LayerInfosForCopy() ([]types.BlobInfo, error) {
+return nil, nil
}


@@ -8,7 +8,10 @@ import (
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
@@ -24,10 +27,9 @@ import (
)
const (
dockerHostname = "docker.io"
dockerRegistry = "registry-1.docker.io"
systemPerHostCertDirPath = "/etc/docker/certs.d"
dockerHostname = "docker.io"
dockerV1Hostname = "index.docker.io"
dockerRegistry = "registry-1.docker.io"
resolvedPingV2URL = "%s://%s/v2/"
resolvedPingV1URL = "%s://%s/v1/_ping"
@@ -49,6 +51,7 @@ var (
ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
// ErrUnauthorizedForCredentials is returned when the status code returned is 401
ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password")
+systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"}
)
// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
@@ -66,9 +69,10 @@ type extensionSignatureList struct {
}
type bearerToken struct {
-Token string `json:"token"`
-ExpiresIn int `json:"expires_in"`
-IssuedAt time.Time `json:"issued_at"`
+Token string `json:"token"`
+AccessToken string `json:"access_token"`
+ExpiresIn int `json:"expires_in"`
+IssuedAt time.Time `json:"issued_at"`
}
// dockerClient is configuration for dealing with a single Docker registry.
@@ -96,6 +100,24 @@ type authScope struct {
actions string
}
func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
token := new(bearerToken)
if err := json.Unmarshal(blob, &token); err != nil {
return nil, err
}
if token.Token == "" {
token.Token = token.AccessToken
}
if token.ExpiresIn < minimumTokenLifetimeSeconds {
token.ExpiresIn = minimumTokenLifetimeSeconds
logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
}
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
return token, nil
}
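A standalone illustration of what this normalization buys: registries that speak plain OAuth2 return `access_token` rather than `token`, and the helper folds both spellings into one field (local struct mirroring the hunk above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type bearerToken struct {
	Token       string `json:"token"`
	AccessToken string `json:"access_token"`
	ExpiresIn   int    `json:"expires_in"`
}

func main() {
	blob := []byte(`{"access_token":"abc123","expires_in":60}`)
	var t bearerToken
	if err := json.Unmarshal(blob, &t); err != nil {
		panic(err)
	}
	if t.Token == "" {
		t.Token = t.AccessToken // the normalization step from newBearerTokenFromJSONBlob
	}
	fmt.Println(t.Token, t.ExpiresIn) // abc123 60
}
```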
// this is cloned from docker/go-connections because upstream docker has changed
// it and make deps here fails otherwise.
// We'll drop this once we upgrade to docker 1.13.x deps.
@@ -109,19 +131,42 @@ func serverDefault() *tls.Config {
}
// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
-func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
+func dockerCertDir(ctx *types.SystemContext, hostPort string) (string, error) {
if ctx != nil && ctx.DockerCertPath != "" {
-return ctx.DockerCertPath
+return ctx.DockerCertPath, nil
}
-var hostCertDir string
if ctx != nil && ctx.DockerPerHostCertDirPath != "" {
-hostCertDir = ctx.DockerPerHostCertDirPath
-} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
-hostCertDir = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
-} else {
-hostCertDir = systemPerHostCertDirPath
+return filepath.Join(ctx.DockerPerHostCertDirPath, hostPort), nil
}
-return filepath.Join(hostCertDir, hostPort)
+var (
+hostCertDir string
+fullCertDirPath string
+)
+for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {
+if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
+hostCertDir = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
+} else {
+hostCertDir = systemPerHostCertDirPath
+}
+fullCertDirPath = filepath.Join(hostCertDir, hostPort)
+_, err := os.Stat(fullCertDirPath)
+if err == nil {
+break
+}
+if os.IsNotExist(err) {
+continue
+}
+if os.IsPermission(err) {
+logrus.Debugf("error accessing certs directory due to permissions: %v", err)
+continue
+}
+if err != nil {
+return "", err
+}
+}
+return fullCertDirPath, nil
}
// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
@@ -155,7 +200,10 @@ func newDockerClientWithDetails(ctx *types.SystemContext, registry, username, pa
// dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
// generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
// undocumented and may change if docker/docker changes.
-certDir := dockerCertDir(ctx, hostName)
+certDir, err := dockerCertDir(ctx, hostName)
+if err != nil {
+return nil, err
+}
if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil {
return nil, err
}
@@ -202,6 +250,100 @@ func CheckAuth(ctx context.Context, sCtx *types.SystemContext, username, passwor
}
}
// SearchResult holds the information of each matching image
// It matches the output returned by the v1 endpoint
type SearchResult struct {
Name string `json:"name"`
Description string `json:"description"`
// StarCount states the number of stars the image has
StarCount int `json:"star_count"`
IsTrusted bool `json:"is_trusted"`
// IsAutomated states whether the image is an automated build
IsAutomated bool `json:"is_automated"`
// IsOfficial states whether the image is an official build
IsOfficial bool `json:"is_official"`
}
// SearchRegistry queries a registry for images that contain "image" in their name
// The limit is the max number of results desired
// Note: The limit value doesn't work with all registries
// for example registry.access.redhat.com returns all the results without limiting it to the limit value
func SearchRegistry(ctx context.Context, sCtx *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) {
type V2Results struct {
// Repositories holds the results returned by the /v2/_catalog endpoint
Repositories []string `json:"repositories"`
}
type V1Results struct {
// Results holds the results returned by the /v1/search endpoint
Results []SearchResult `json:"results"`
}
v2Res := &V2Results{}
v1Res := &V1Results{}
// The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
// So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
if registry == dockerHostname {
registry = dockerV1Hostname
}
client, err := newDockerClientWithDetails(sCtx, registry, "", "", "", nil, "")
if err != nil {
return nil, errors.Wrapf(err, "error creating new docker client")
}
logrus.Debugf("trying to talk to v2 search endpoint\n")
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Debugf("error getting search results from v2 endpoint %q, status code %q", registry, resp.StatusCode)
} else {
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
return nil, err
}
searchRes := []SearchResult{}
for _, repo := range v2Res.Repositories {
if strings.Contains(repo, image) {
res := SearchResult{
Name: repo,
}
searchRes = append(searchRes, res)
}
}
return searchRes, nil
}
}
// set up the query values for the v1 endpoint
u := url.URL{
Path: "/v1/search",
}
q := u.Query()
q.Set("q", image)
q.Set("n", strconv.Itoa(limit))
u.RawQuery = q.Encode()
logrus.Debugf("trying to talk to v1 search endpoint\n")
resp, err = client.makeRequest(ctx, "GET", u.String(), nil, nil)
if err != nil {
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
} else {
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
logrus.Debugf("error getting search results from v1 endpoint %q, status code %q", registry, resp.StatusCode)
} else {
if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
return nil, err
}
return v1Res.Results, nil
}
}
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
}
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
@@ -332,18 +474,8 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
if err != nil {
return nil, err
}
-var token bearerToken
-if err := json.Unmarshal(tokenBlob, &token); err != nil {
-return nil, err
-}
-if token.ExpiresIn < minimumTokenLifetimeSeconds {
-token.ExpiresIn = minimumTokenLifetimeSeconds
-logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
-}
-if token.IssuedAt.IsZero() {
-token.IssuedAt = time.Now().UTC()
-}
-return &token, nil
+return newBearerTokenFromJSONBlob(tokenBlob)
}
// detectProperties detects various properties of the registry.
@@ -363,7 +495,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
defer resp.Body.Close()
logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
return errors.Errorf("error pinging repository, response code %d", resp.StatusCode)
return errors.Errorf("error pinging registry %s, response code %d", c.registry, resp.StatusCode)
}
c.challenges = parseAuthHeader(resp.Header)
c.scheme = scheme
@@ -415,7 +547,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
-return nil, client.HandleErrorResponse(res)
+return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {


@@ -80,9 +80,8 @@ func (d *dockerImageDestination) SupportsSignatures() error {
}
}
-// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
-func (d *dockerImageDestination) ShouldCompressLayers() bool {
-return true
+func (d *dockerImageDestination) DesiredLayerCompression() types.LayerCompression {
+return types.Compress
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
@@ -110,7 +109,7 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
if inputInfo.Digest.String() != "" {
haveBlob, size, err := d.HasBlob(inputInfo)
if err != nil {
@@ -131,7 +130,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, errors.Errorf("Error initiating layer upload to %s, status %d", uploadPath, res.StatusCode)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s in %s", uploadPath, d.c.registry)
}
uploadLocation, err := res.Location()
if err != nil {
@@ -167,7 +166,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, errors.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation)
}
logrus.Debugf("Upload of layer %s complete", computedDigest)
@@ -196,7 +195,7 @@ func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, erro
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
-return false, -1, errors.Errorf("not authorized to read from destination repository %s", reference.Path(d.ref.ref))
+return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
@@ -237,7 +236,7 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
}
defer res.Body.Close()
if !successStatus(res.StatusCode) {
err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest to %s", path)
err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest %s to %s", refTail, d.ref.ref.Name())
if isManifestInvalidError(errors.Cause(err)) {
err = types.ManifestTypeRejectedError{Err: err}
}
@@ -447,7 +446,7 @@ sigExists:
logrus.Debugf("Error body %s", string(body))
}
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
return errors.Errorf("Error uploading signature to %s, status %d", path, res.StatusCode)
return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s in %s", path, d.c.registry)
}
}


@@ -52,6 +52,11 @@ func (s *dockerImageSource) Close() error {
return nil
}
// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
func (s *dockerImageSource) LayerInfosForCopy() ([]types.BlobInfo, error) {
return nil, nil
}
// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
func simplifyContentType(contentType string) string {
@@ -90,7 +95,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
}
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, "", client.HandleErrorResponse(res)
return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name())
}
manblob, err := ioutil.ReadAll(res.Body)
if err != nil {


@@ -8,9 +8,11 @@ import (
"io"
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/containers/image/docker/reference"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
@@ -18,41 +20,23 @@ import (
"github.com/sirupsen/logrus"
)
-const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
-writer io.Writer
-tar *tar.Writer
-repoTag string
+writer io.Writer
+tar *tar.Writer
+reference reference.NamedTagged
// Other state.
-blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
+blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
+config []byte
}
// NewDestination returns a tarfile.Destination for the specified io.Writer.
func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
-// For github.com/docker/docker consumers, this works just as well as
-//    refString := ref.String()
-// because when reading the RepoTags strings, github.com/docker/docker/reference
-// normalizes both of them to the same value.
-//
-// Doing it this way to include the normalized-out `docker.io[/library]` does make
-// a difference for github.com/projectatomic/docker consumers, with the
-// “Add --add-registry and --block-registry options to docker daemon” patch.
-// These consumers treat reference strings which include a hostname and reference
-// strings without a hostname differently.
-//
-// Using the host name here is more explicit about the intent, and it has the same
-// effect as (docker pull) in projectatomic/docker, which tags the result using
-// a hostname-qualified reference.
-// See https://github.com/containers/image/issues/72 for a more detailed
-// analysis and explanation.
-refString := fmt.Sprintf("%s:%s", ref.Name(), ref.Tag())
return &Destination{
-writer: dest,
-tar: tar.NewWriter(dest),
-repoTag: refString,
-blobs: make(map[digest.Digest]types.BlobInfo),
+writer: dest,
+tar: tar.NewWriter(dest),
+reference: ref,
+blobs: make(map[digest.Digest]types.BlobInfo),
}
}
@@ -70,11 +54,6 @@ func (d *Destination) SupportsSignatures() error {
return errors.Errorf("Storing signatures for docker tar files is not supported")
}
-// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
-func (d *Destination) ShouldCompressLayers() bool {
-return false
-}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
@@ -92,29 +71,21 @@ func (d *Destination) MustMatchRuntimeOS() bool {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
-if inputInfo.Digest.String() == "" {
-return types.BlobInfo{}, errors.Errorf("Can not stream a blob with unknown digest to docker tarfile")
-}
-ok, size, err := d.HasBlob(inputInfo)
-if err != nil {
-return types.BlobInfo{}, err
-}
-if ok {
-return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
-}
-if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
+func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
+// Ouch, we need to stream the blob into a temporary file just to determine the size.
+// When the layer is decompressed, we also have to generate the digest on uncompressed data.
+if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
-streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob")
+streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob")
if err != nil {
return types.BlobInfo{}, err
}
defer os.Remove(streamCopy.Name())
defer streamCopy.Close()
size, err := io.Copy(streamCopy, stream)
digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash())
size, err := io.Copy(streamCopy, tee)
if err != nil {
return types.BlobInfo{}, err
}
@@ -123,17 +94,43 @@ func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types
return types.BlobInfo{}, err
}
inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
if inputInfo.Digest == "" {
inputInfo.Digest = digester.Digest()
}
stream = streamCopy
logrus.Debugf("... streaming done")
}
digester := digest.Canonical.Digester()
tee := io.TeeReader(stream, digester.Hash())
if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
// Maybe the blob has already been sent
ok, size, err := d.HasBlob(inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
if ok {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
}
if isConfig {
buf, err := ioutil.ReadAll(stream)
if err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
}
d.config = buf
if err := d.sendFile(inputInfo.Digest.Hex()+".json", inputInfo.Size, bytes.NewReader(buf)); err != nil {
return types.BlobInfo{}, errors.Wrap(err, "Error writing Config file")
}
} else {
// Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
// writeLegacyLayerMetadata constructs layer IDs differently from inputInfo.Digest values (as described
// inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
// tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
// in the root of the tarball.
if err := d.sendFile(inputInfo.Digest.Hex()+".tar", inputInfo.Size, stream); err != nil {
return types.BlobInfo{}, err
}
}
d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
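The unknown-size/unknown-digest path above computes both values in a single pass by teeing the stream into a digester while copying it to disk. A standalone sketch of that idiom, with made-up input:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	var stream io.Reader = strings.NewReader("pretend this is a layer tarball")

	streamCopy, err := ioutil.TempFile("", "docker-tarfile-blob")
	if err != nil {
		panic(err)
	}
	defer os.Remove(streamCopy.Name())
	defer streamCopy.Close()

	// Everything read through tee is also fed into the digester's hash,
	// so a single io.Copy yields both the size and the digest.
	digester := digest.Canonical.Digester()
	tee := io.TeeReader(stream, digester.Hash())
	size, err := io.Copy(streamCopy, tee)
	if err != nil {
		panic(err)
	}
	fmt.Printf("size=%d digest=%s\n", size, digester.Digest())
}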
// HasBlob returns true iff the image destination already contains a blob with
@@ -161,6 +158,19 @@ func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
}
func (d *Destination) createRepositoriesFile(rootLayerID string) error {
repositories := map[string]map[string]string{
d.reference.Name(): {d.reference.Tag(): rootLayerID}}
b, err := json.Marshal(repositories)
if err != nil {
return errors.Wrap(err, "Error marshaling repositories")
}
if err := d.sendBytes(legacyRepositoriesFileName, b); err != nil {
return errors.Wrap(err, "Error writing config json file")
}
return nil
}
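The legacy repositories file created here is just a two-level map, image name to tag to layer ID. A standalone sketch showing the JSON it marshals to (the name and ID are made up):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	rootLayerID := "cafe1234" // made up; really a chain-ID hex from writeLegacyLayerMetadata
	repositories := map[string]map[string]string{
		"docker.io/library/busybox": {"latest": rootLayerID},
	}
	b, err := json.Marshal(repositories)
	if err != nil {
		panic(err)
	}
	// {"docker.io/library/busybox":{"latest":"cafe1234"}}
	fmt.Println(string(b))
}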
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
@@ -168,7 +178,7 @@ func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
func (d *Destination) PutManifest(m []byte) error {
// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
// so the caller trying a different manifest kind would be pointless.
var man schema2Manifest
var man manifest.Schema2
if err := json.Unmarshal(m, &man); err != nil {
return errors.Wrap(err, "Error parsing manifest")
}
@@ -176,14 +186,37 @@ func (d *Destination) PutManifest(m []byte) error {
return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
}
layerPaths := []string{}
for _, l := range man.Layers {
layerPaths = append(layerPaths, l.Digest.String())
layerPaths, lastLayerID, err := d.writeLegacyLayerMetadata(man.LayersDescriptors)
if err != nil {
return err
}
if len(man.LayersDescriptors) > 0 {
if err := d.createRepositoriesFile(lastLayerID); err != nil {
return err
}
}
// For github.com/docker/docker consumers, this works just as well as
// refString := ref.String()
// because when reading the RepoTags strings, github.com/docker/docker/reference
// normalizes both of them to the same value.
//
// Doing it this way to include the normalized-out `docker.io[/library]` does make
// a difference for github.com/projectatomic/docker consumers, with the
// “Add --add-registry and --block-registry options to docker daemon” patch.
// These consumers treat reference strings which include a hostname and reference
// strings without a hostname differently.
//
// Using the host name here is more explicit about the intent, and it has the same
// effect as (docker pull) in projectatomic/docker, which tags the result using
// a hostname-qualified reference.
// See https://github.com/containers/image/issues/72 for a more detailed
// analysis and explanation.
refString := fmt.Sprintf("%s:%s", d.reference.Name(), d.reference.Tag())
items := []ManifestItem{{
Config: man.Config.Digest.String(),
RepoTags: []string{d.repoTag},
Config: man.ConfigDescriptor.Digest.Hex() + ".json",
RepoTags: []string{refString},
Layers: layerPaths,
Parent: "",
LayerSources: nil,
@@ -194,12 +227,81 @@ func (d *Destination) PutManifest(m []byte) error {
}
// FIXME? Do we also need to support the legacy format?
return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
return d.sendBytes(manifestFileName, itemsBytes)
}
// writeLegacyLayerMetadata writes legacy VERSION and configuration files for all layers
func (d *Destination) writeLegacyLayerMetadata(layerDescriptors []manifest.Schema2Descriptor) (layerPaths []string, lastLayerID string, err error) {
var chainID digest.Digest
lastLayerID = ""
for i, l := range layerDescriptors {
// This chainID value matches the computation in docker/docker/layer.CreateChainID …
if chainID == "" {
chainID = l.Digest
} else {
chainID = digest.Canonical.FromString(chainID.String() + " " + l.Digest.String())
}
// … but note that this image ID does not match docker/docker/image/v1.CreateID. At least recent
// versions allocate new IDs on load, as long as the IDs we use are unique / cannot loop.
//
// Overall, the goal of computing a digest dependent on the full history is to avoid reusing an image ID
// (and possibly creating a loop in the "parent" links) if a layer with the same DiffID appears two or more
// times in layerDescriptors. The ChainID values are sufficient for this; the v1.CreateID computation,
// which also mixes in the full image configuration, seems unnecessary, at least as long as we are storing
// only a single image per tarball, i.e. all DiffID prefixes are unique (can't differ only in
// configuration).
layerID := chainID.Hex()
physicalLayerPath := l.Digest.Hex() + ".tar"
// The layer itself has already been stored into physicalLayerPath by PutBlob.
// So, use that path for layerPaths used in the non-legacy manifest.
layerPaths = append(layerPaths, physicalLayerPath)
// ... and create a symlink for the legacy format;
if err := d.sendSymlink(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
return nil, "", errors.Wrap(err, "Error creating layer symbolic link")
}
b := []byte("1.0")
if err := d.sendBytes(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
return nil, "", errors.Wrap(err, "Error writing VERSION file")
}
// The legacy format requires a config file per layer
layerConfig := make(map[string]interface{})
layerConfig["id"] = layerID
// The root layer doesn't have any parent
if lastLayerID != "" {
layerConfig["parent"] = lastLayerID
}
// The top layer's configuration file is generated from a subset of the image configuration
if i == len(layerDescriptors)-1 {
var config map[string]*json.RawMessage
err := json.Unmarshal(d.config, &config)
if err != nil {
return nil, "", errors.Wrap(err, "Error unmarshaling config")
}
for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
layerConfig[attr] = config[attr]
}
}
b, err := json.Marshal(layerConfig)
if err != nil {
return nil, "", errors.Wrap(err, "Error marshaling layer config")
}
if err := d.sendBytes(filepath.Join(layerID, legacyConfigFileName), b); err != nil {
return nil, "", errors.Wrap(err, "Error writing config json file")
}
lastLayerID = layerID
}
return layerPaths, lastLayerID, nil
}
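The chain-ID recurrence used above (the first chain ID is the first DiffID; each subsequent one is the canonical digest of the previous chain ID, a space, and the next DiffID) is small enough to demonstrate standalone; the diff IDs below are made up:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// chainIDs mirrors the recurrence above: chain[0] = diffIDs[0];
// chain[i] = sha256(chain[i-1] + " " + diffIDs[i]).
func chainIDs(diffIDs []digest.Digest) []digest.Digest {
	var out []digest.Digest
	var chainID digest.Digest
	for _, d := range diffIDs {
		if chainID == "" {
			chainID = d
		} else {
			chainID = digest.Canonical.FromString(chainID.String() + " " + d.String())
		}
		out = append(out, chainID)
	}
	return out
}

func main() {
	diffIDs := []digest.Digest{
		digest.FromString("layer A"), // made-up stand-ins for real DiffIDs
		digest.FromString("layer B"),
	}
	for i, c := range chainIDs(diffIDs) {
		fmt.Printf("layer %d ID: %s\n", i, c.Hex())
	}
}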
type tarFI struct {
path string
size int64
path string
size int64
isSymlink bool
}
func (t *tarFI) Name() string {
@@ -209,6 +311,9 @@ func (t *tarFI) Size() int64 {
return t.size
}
func (t *tarFI) Mode() os.FileMode {
if t.isSymlink {
return os.ModeSymlink
}
return 0444
}
func (t *tarFI) ModTime() time.Time {
@@ -221,6 +326,21 @@ func (t *tarFI) Sys() interface{} {
return nil
}
// sendSymlink sends a symlink into the tar stream.
func (d *Destination) sendSymlink(path string, target string) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: 0, isSymlink: true}, target)
if err != nil {
return err
}
logrus.Debugf("Sending as tar link %s -> %s", path, target)
return d.tar.WriteHeader(hdr)
}
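Because tarFI reports os.ModeSymlink, archive/tar records the second FileInfoHeader argument as the link target and writes no file body. A standalone sketch with a hypothetical fakeFI stand-in for tarFI:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"os"
	"time"
)

// fakeFI is a hypothetical stand-in for the tarFI type above.
type fakeFI struct{ name string }

func (f fakeFI) Name() string       { return f.name }
func (f fakeFI) Size() int64        { return 0 }
func (f fakeFI) Mode() os.FileMode  { return os.ModeSymlink }
func (f fakeFI) ModTime() time.Time { return time.Unix(0, 0) }
func (f fakeFI) IsDir() bool        { return false }
func (f fakeFI) Sys() interface{}   { return nil }

func main() {
	buf := &bytes.Buffer{}
	tw := tar.NewWriter(buf)
	// The link target goes into hdr.Linkname; the entry has size 0.
	hdr, err := tar.FileInfoHeader(fakeFI{name: "cafe1234/layer.tar"}, "../cafe1234.tar")
	if err != nil {
		panic(err)
	}
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	tw.Close()

	h, err := tar.NewReader(buf).Next()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %s (typeflag %c)\n", h.Name, h.Linkname, h.Typeflag) // typeflag 2 = symlink
}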
// sendBytes writes b as a file at path into the tar stream.
func (d *Destination) sendBytes(path string, b []byte) error {
return d.sendFile(path, int64(len(b)), bytes.NewReader(b))
}
// sendFile sends a file into the tar stream.
func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")

View File

@@ -3,6 +3,7 @@ package tarfile
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"encoding/json"
"io"
@@ -10,6 +11,7 @@ import (
"os"
"path"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/compression"
"github.com/containers/image/types"
@@ -19,13 +21,14 @@ import (
// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
tarPath string
tarPath string
removeTarPathOnClose bool // Remove temp file on close if true
// The following data is only available after ensureCachedDataIsPresent() succeeds
tarManifest *ManifestItem // nil if not available yet.
configBytes []byte
configDigest digest.Digest
orderedDiffIDList []diffID
knownLayers map[diffID]*layerInfo
orderedDiffIDList []digest.Digest
knownLayers map[digest.Digest]*layerInfo
// Other state
generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
}
@@ -35,14 +38,58 @@ type layerInfo struct {
size int64
}
// NewSource returns a tarfile.Source for the specified path.
func NewSource(path string) *Source {
// TODO: We could add support for multiple images in a single archive, so
// that people could use docker-archive:opensuse.tar:opensuse:leap as
// the source of an image.
return &Source{
tarPath: path,
// TODO: We could add support for multiple images in a single archive, so
// that people could use docker-archive:opensuse.tar:opensuse:leap as
// the source of an image.
// This TODO applies to both the NewSourceFromFile and NewSourceFromStream functions.
// NewSourceFromFile returns a tarfile.Source for the specified path.
// NewSourceFromFile supports both compressed and uncompressed input.
func NewSourceFromFile(path string) (*Source, error) {
file, err := os.Open(path)
if err != nil {
return nil, errors.Wrapf(err, "error opening file %q", path)
}
defer file.Close()
reader, err := gzip.NewReader(file)
if err != nil {
return &Source{
tarPath: path,
}, nil
}
defer reader.Close()
return NewSourceFromStream(reader)
}
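The gzip probe above works because gzip.NewReader reads the magic header immediately and fails fast on anything that is not gzip. A standalone sketch of the same fallback, using a hypothetical maybeDecompress helper:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
)

// maybeDecompress returns a decompressing reader for gzip input
// and falls back to the raw bytes otherwise.
func maybeDecompress(data []byte) (io.Reader, error) {
	zr, err := gzip.NewReader(bytes.NewReader(data))
	if err != nil {
		// Not gzip (e.g. a plain tar archive); use the stream as-is.
		return bytes.NewReader(data), nil
	}
	return zr, nil
}

func main() {
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	zw.Write([]byte("hello"))
	zw.Close()

	for _, input := range [][]byte{[]byte("hello"), compressed.Bytes()} {
		r, err := maybeDecompress(input)
		if err != nil {
			panic(err)
		}
		out, err := ioutil.ReadAll(r)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", out) // "hello" both times
	}
}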
// NewSourceFromStream returns a tarfile.Source for the specified inputStream, which must be uncompressed.
// The caller can close the inputStream immediately after NewSourceFromStream returns.
func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
// FIXME: use SystemContext here.
// Save inputStream to a temporary file
tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tar")
if err != nil {
return nil, errors.Wrap(err, "error creating temporary file")
}
defer tarCopyFile.Close()
succeeded := false
defer func() {
if !succeeded {
os.Remove(tarCopyFile.Name())
}
}()
if _, err := io.Copy(tarCopyFile, inputStream); err != nil {
return nil, errors.Wrapf(err, "error copying contents to temporary file %q", tarCopyFile.Name())
}
succeeded = true
return &Source{
tarPath: tarCopyFile.Name(),
removeTarPathOnClose: true,
}, nil
}
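The succeeded flag plus deferred removal above is a common Go idiom for undoing partial work on any early return. A stripped-down standalone sketch (copyToTempFile is hypothetical):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"
)

// copyToTempFile writes inputStream to a temp file, removing the file
// again if any step after its creation fails.
func copyToTempFile(inputStream io.Reader) (string, error) {
	f, err := ioutil.TempFile("", "docker-tar")
	if err != nil {
		return "", err
	}
	defer f.Close()

	succeeded := false
	defer func() {
		if !succeeded {
			os.Remove(f.Name()) // any early return below triggers this cleanup
		}
	}()

	if _, err := io.Copy(f, inputStream); err != nil {
		return "", err
	}
	succeeded = true
	return f.Name(), nil
}

func main() {
	name, err := copyToTempFile(strings.NewReader("archive bytes"))
	if err != nil {
		panic(err)
	}
	fmt.Println("saved to", name)
	os.Remove(name)
}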
// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
@@ -156,7 +203,7 @@ func (s *Source) ensureCachedDataIsPresent() error {
if err != nil {
return err
}
var parsedConfig image // Most fields ommitted, we only care about layer DiffIDs.
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
}
@@ -189,17 +236,25 @@ func (s *Source) loadTarManifest() ([]ManifestItem, error) {
return items, nil
}
// Close removes resources associated with an initialized Source, if any.
func (s *Source) Close() error {
if s.removeTarPathOnClose {
return os.Remove(s.tarPath)
}
return nil
}
// LoadTarManifest loads and decodes the manifest.json
func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
return s.loadTarManifest()
}
func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
// Collect layer data available in manifest and config.
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
}
knownLayers := map[diffID]*layerInfo{}
knownLayers := map[digest.Digest]*layerInfo{}
unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
for i, diffID := range parsedConfig.RootFS.DiffIDs {
if _, ok := knownLayers[diffID]; ok {
@@ -260,23 +315,23 @@ func (s *Source) GetManifest(instanceDigest *digest.Digest) ([]byte, string, err
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, "", err
}
m := schema2Manifest{
m := manifest.Schema2{
SchemaVersion: 2,
MediaType: manifest.DockerV2Schema2MediaType,
Config: distributionDescriptor{
ConfigDescriptor: manifest.Schema2Descriptor{
MediaType: manifest.DockerV2Schema2ConfigMediaType,
Size: int64(len(s.configBytes)),
Digest: s.configDigest,
},
Layers: []distributionDescriptor{},
LayersDescriptors: []manifest.Schema2Descriptor{},
}
for _, diffID := range s.orderedDiffIDList {
li, ok := s.knownLayers[diffID]
if !ok {
return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
}
m.Layers = append(m.Layers, distributionDescriptor{
Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
Digest: diffID, // diffID is a digest of the uncompressed tarball
MediaType: manifest.DockerV2Schema2LayerMediaType,
Size: li.size,
})
@@ -312,7 +367,7 @@ func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
}
if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball,
if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
stream, err := s.openTarComponent(li.path)
if err != nil {
return nil, 0, err

View File

@@ -1,16 +1,19 @@
package tarfile
import "github.com/opencontainers/go-digest"
import (
"github.com/containers/image/manifest"
"github.com/opencontainers/go-digest"
)
// Various data structures.
// Based on github.com/docker/docker/image/tarexport/tarexport.go
const (
manifestFileName = "manifest.json"
// legacyLayerFileName = "layer.tar"
// legacyConfigFileName = "json"
// legacyVersionFileName = "VERSION"
// legacyRepositoriesFileName = "repositories"
manifestFileName = "manifest.json"
legacyLayerFileName = "layer.tar"
legacyConfigFileName = "json"
legacyVersionFileName = "VERSION"
legacyRepositoriesFileName = "repositories"
)
// ManifestItem is an element of the array stored in the top-level manifest.json file.
@@ -18,37 +21,8 @@ type ManifestItem struct {
Config string
RepoTags []string
Layers []string
Parent imageID `json:",omitempty"`
LayerSources map[diffID]distributionDescriptor `json:",omitempty"`
Parent imageID `json:",omitempty"`
LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"`
}
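ManifestItem corresponds to one entry of the top-level manifest.json that (docker save) produces. A standalone sketch with a local copy of the three always-present fields and made-up file names:

package main

import (
	"encoding/json"
	"fmt"
)

// manifestItem is a local, trimmed-down copy of ManifestItem for this sketch.
type manifestItem struct {
	Config   string
	RepoTags []string
	Layers   []string
}

func main() {
	items := []manifestItem{{
		Config:   "aaaa.json",                                  // config blob: digest hex + ".json"
		RepoTags: []string{"docker.io/library/busybox:latest"}, // hostname-qualified, as above
		Layers:   []string{"bbbb.tar", "cccc.tar"},             // layer blobs: digest hex + ".tar"
	}}
	b, err := json.MarshalIndent(items, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}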
type imageID string
type diffID digest.Digest
// Based on github.com/docker/distribution/blobs.go
type distributionDescriptor struct {
MediaType string `json:"mediaType,omitempty"`
Size int64 `json:"size,omitempty"`
Digest digest.Digest `json:"digest,omitempty"`
URLs []string `json:"urls,omitempty"`
}
// Based on github.com/docker/distribution/manifest/schema2/manifest.go
// FIXME: We are repeating this all over the place; make a public copy?
type schema2Manifest struct {
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType,omitempty"`
Config distributionDescriptor `json:"config"`
Layers []distributionDescriptor `json:"layers"`
}
// Based on github.com/docker/docker/image/image.go
// MOST CONTENT OMITTED AS UNNECESSARY
type image struct {
RootFS *rootFS `json:"rootfs,omitempty"`
}
type rootFS struct {
Type string `json:"type"`
DiffIDs []diffID `json:"diff_ids,omitempty"`
}

View File

@@ -22,7 +22,7 @@ type platformSpec struct {
// A manifestDescriptor references a platform-specific manifest.
type manifestDescriptor struct {
descriptor
manifest.Schema2Descriptor
Platform platformSpec `json:"platform"`
}

View File

@@ -2,9 +2,6 @@ package image
import (
"encoding/json"
"regexp"
"strings"
"time"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
@@ -14,87 +11,25 @@ import (
"github.com/pkg/errors"
)
var (
validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
)
type fsLayersSchema1 struct {
BlobSum digest.Digest `json:"blobSum"`
}
type historySchema1 struct {
V1Compatibility string `json:"v1Compatibility"`
}
// historySchema1 is a string containing this. It is similar to v1Image but not the same, in particular note the ThrowAway field.
type v1Compatibility struct {
ID string `json:"id"`
Parent string `json:"parent,omitempty"`
Comment string `json:"comment,omitempty"`
Created time.Time `json:"created"`
ContainerConfig struct {
Cmd []string
} `json:"container_config,omitempty"`
Author string `json:"author,omitempty"`
ThrowAway bool `json:"throwaway,omitempty"`
}
type manifestSchema1 struct {
Name string `json:"name"`
Tag string `json:"tag"`
Architecture string `json:"architecture"`
FSLayers []fsLayersSchema1 `json:"fsLayers"`
History []historySchema1 `json:"history"`
SchemaVersion int `json:"schemaVersion"`
m *manifest.Schema1
}
func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
mschema1 := &manifestSchema1{}
if err := json.Unmarshal(manifest, mschema1); err != nil {
return nil, err
}
if mschema1.SchemaVersion != 1 {
return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
}
if len(mschema1.FSLayers) != len(mschema1.History) {
return nil, errors.New("length of history not equal to number of layers")
}
if len(mschema1.FSLayers) == 0 {
return nil, errors.New("no FSLayers in manifest")
}
if err := fixManifestLayers(mschema1); err != nil {
return nil, err
}
return mschema1, nil
}
// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest {
var name, tag string
if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
name = reference.Path(ref)
if tagged, ok := ref.(reference.NamedTagged); ok {
tag = tagged.Tag()
}
}
return &manifestSchema1{
Name: name,
Tag: tag,
Architecture: architecture,
FSLayers: fsLayers,
History: history,
SchemaVersion: 1,
}
}
func (m *manifestSchema1) serialize() ([]byte, error) {
// docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
unsigned, err := json.Marshal(*m)
func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) {
m, err := manifest.Schema1FromManifest(manifestBlob)
if err != nil {
return nil, err
}
return manifest.AddDummyV2S1Signature(unsigned)
return &manifestSchema1{m: m}, nil
}
// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest {
return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)}
}
func (m *manifestSchema1) serialize() ([]byte, error) {
return m.m.Serialize()
}
func (m *manifestSchema1) manifestMIMEType() string {
@@ -104,7 +39,7 @@ func (m *manifestSchema1) manifestMIMEType() string {
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
return types.BlobInfo{}
return m.m.ConfigInfo()
}
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
@@ -128,11 +63,7 @@ func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
layers := make([]types.BlobInfo, len(m.FSLayers))
for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
}
return layers
return m.m.LayerInfos()
}
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -153,56 +84,36 @@ func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named)
} else {
tag = ""
}
return m.Name != name || m.Tag != tag
return m.m.Name != name || m.m.Tag != tag
}
func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) {
v1 := &v1Image{}
if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil {
return nil, err
}
i := &types.ImageInspectInfo{
Tag: m.Tag,
DockerVersion: v1.DockerVersion,
Created: v1.Created,
Architecture: v1.Architecture,
Os: v1.OS,
}
if v1.Config != nil {
i.Labels = v1.Config.Labels
}
return i, nil
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *manifestSchema1) Inspect() (*types.ImageInspectInfo, error) {
return m.m.Inspect(nil)
}
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
// (most importantly it forces us to download the full layers even if they are already present at the destination).
func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
return options.ManifestMIMEType == manifest.DockerV2Schema2MediaType
return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest)
}
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
copy := *m
copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
if options.LayerInfos != nil {
// Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
if len(copy.FSLayers) != len(options.LayerInfos) {
return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
}
for i, info := range options.LayerInfos {
// (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest
if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
return nil, err
}
}
if options.EmbeddedDockerReference != nil {
copy.Name = reference.Path(options.EmbeddedDockerReference)
copy.m.Name = reference.Path(options.EmbeddedDockerReference)
if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
copy.Tag = tagged.Tag()
copy.m.Tag = tagged.Tag()
} else {
copy.Tag = ""
copy.m.Tag = ""
}
}
@@ -234,102 +145,32 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
return memoryImageFromManifest(&copy), nil
}
// fixManifestLayers, after validating the supplied manifest
// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
// both from manifest.History and manifest.FSLayers).
// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries
// (for Dockerfile operations which change the configuration but not the filesystem).
func fixManifestLayers(manifest *manifestSchema1) error {
type imageV1 struct {
ID string
Parent string
}
// Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History)
imgs := make([]*imageV1, len(manifest.FSLayers))
for i := range manifest.FSLayers {
img := &imageV1{}
if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
return err
}
imgs[i] = img
if err := validateV1ID(img.ID); err != nil {
return err
}
}
if imgs[len(imgs)-1].Parent != "" {
return errors.New("Invalid parent ID in the base layer of the image")
}
// check general duplicates to error instead of a deadlock
idmap := make(map[string]struct{})
var lastID string
for _, img := range imgs {
// skip IDs that appear after each other, we handle those later
if _, exists := idmap[img.ID]; img.ID != lastID && exists {
return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
}
lastID = img.ID
idmap[lastID] = struct{}{}
}
// backwards loop so that we keep the remaining indexes after removing items
for i := len(imgs) - 2; i >= 0; i-- {
if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
} else if imgs[i].Parent != imgs[i+1].ID {
return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
}
}
return nil
}
func validateV1ID(id string) error {
if ok := validHex.MatchString(id); !ok {
return errors.Errorf("image ID %q is invalid", id)
}
return nil
}
// Based on github.com/docker/docker/distribution/pull_v2.go
func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) {
if len(m.History) == 0 {
if len(m.m.History) == 0 {
// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
}
if len(m.History) != len(m.FSLayers) {
return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
if len(m.m.History) != len(m.m.FSLayers) {
return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers))
}
if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) {
return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) {
return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers))
}
if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) {
return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) {
return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers))
}
rootFS := rootFS{
Type: "layers",
DiffIDs: []digest.Digest{},
BaseLayer: "",
}
var layers []descriptor
history := make([]imageHistory, len(m.History))
for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- {
v2Index := (len(m.History) - 1) - v1Index
// Build a list of the diffIDs for the non-empty layers.
diffIDs := []digest.Digest{}
var layers []manifest.Schema2Descriptor
for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- {
v2Index := (len(m.m.History) - 1) - v1Index
var v1compat v1Compatibility
if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil {
var v1compat manifest.Schema1V1Compatibility
if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil {
return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
}
history[v2Index] = imageHistory{
Created: v1compat.Created,
Author: v1compat.Author,
CreatedBy: strings.Join(v1compat.ContainerConfig.Cmd, " "),
Comment: v1compat.Comment,
EmptyLayer: v1compat.ThrowAway,
}
if !v1compat.ThrowAway {
var size int64
if uploadedLayerInfos != nil {
@@ -339,19 +180,19 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
if layerDiffIDs != nil {
d = layerDiffIDs[v2Index]
}
layers = append(layers, descriptor{
layers = append(layers, manifest.Schema2Descriptor{
MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
Size: size,
Digest: m.FSLayers[v1Index].BlobSum,
Digest: m.m.FSLayers[v1Index].BlobSum,
})
rootFS.DiffIDs = append(rootFS.DiffIDs, d)
diffIDs = append(diffIDs, d)
}
}
configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history)
configJSON, err := m.m.ToSchema2Config(diffIDs)
if err != nil {
return nil, err
}
configDescriptor := descriptor{
configDescriptor := manifest.Schema2Descriptor{
MediaType: "application/vnd.docker.container.image.v1+json",
Size: int64(len(configJSON)),
Digest: digest.FromBytes(configJSON),
@@ -359,33 +200,3 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil
}
func configJSONFromV1Config(v1ConfigJSON []byte, rootFS rootFS, history []imageHistory) ([]byte, error) {
// github.com/docker/docker/image/v1/imagev1.go:MakeConfigFromV1Config unmarshals and re-marshals the input if docker_version is < 1.8.3 to remove blank fields;
// we don't do that here. FIXME? Should we? AFAICT it would only affect the digest value of the schema2 manifest, and we don't particularly need that to be
// a consistently reproducible value.
// Preserve everything we don't specifically know about.
// (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
rawContents := map[string]*json.RawMessage{}
if err := json.Unmarshal(v1ConfigJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
return nil, err
}
delete(rawContents, "id")
delete(rawContents, "parent")
delete(rawContents, "Size")
delete(rawContents, "parent_id")
delete(rawContents, "layer_id")
delete(rawContents, "throwaway")
updates := map[string]interface{}{"rootfs": rootFS, "history": history}
for field, value := range updates {
encoded, err := json.Marshal(value)
if err != nil {
return nil, err
}
rawContents[field] = (*json.RawMessage)(&encoded)
}
return json.Marshal(rawContents)
}

View File

@@ -29,54 +29,44 @@ var gzippedEmptyLayer = []byte{
// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
type descriptor struct {
MediaType string `json:"mediaType"`
Size int64 `json:"size"`
Digest digest.Digest `json:"digest"`
URLs []string `json:"urls,omitempty"`
}
type manifestSchema2 struct {
src types.ImageSource // May be nil if configBlob is not nil
configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType"`
ConfigDescriptor descriptor `json:"config"`
LayersDescriptors []descriptor `json:"layers"`
src types.ImageSource // May be nil if configBlob is not nil
configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
m *manifest.Schema2
}
func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
v2s2 := manifestSchema2{src: src}
if err := json.Unmarshal(manifest, &v2s2); err != nil {
func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
m, err := manifest.Schema2FromManifest(manifestBlob)
if err != nil {
return nil, err
}
return &v2s2, nil
return &manifestSchema2{
src: src,
m: m,
}, nil
}
// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest {
func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest {
return &manifestSchema2{
src: src,
configBlob: configBlob,
SchemaVersion: 2,
MediaType: manifest.DockerV2Schema2MediaType,
ConfigDescriptor: config,
LayersDescriptors: layers,
src: src,
configBlob: configBlob,
m: manifest.Schema2FromComponents(config, layers),
}
}
func (m *manifestSchema2) serialize() ([]byte, error) {
return json.Marshal(*m)
return m.m.Serialize()
}
func (m *manifestSchema2) manifestMIMEType() string {
return m.MediaType
return m.m.MediaType
}
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
return m.m.ConfigInfo()
}
// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
@@ -105,9 +95,9 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
stream, _, err := m.src.GetBlob(types.BlobInfo{
Digest: m.ConfigDescriptor.Digest,
Size: m.ConfigDescriptor.Size,
URLs: m.ConfigDescriptor.URLs,
Digest: m.m.ConfigDescriptor.Digest,
Size: m.m.ConfigDescriptor.Size,
URLs: m.m.ConfigDescriptor.URLs,
})
if err != nil {
return nil, err
@@ -118,8 +108,8 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
return nil, err
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.ConfigDescriptor.Digest {
return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
if computedDigest != m.m.ConfigDescriptor.Digest {
return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
}
m.configBlob = blob
}
@@ -130,15 +120,7 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
blobs := []types.BlobInfo{}
for _, layer := range m.LayersDescriptors {
blobs = append(blobs, types.BlobInfo{
Digest: layer.Digest,
Size: layer.Size,
URLs: layer.URLs,
})
}
return blobs
return m.m.LayerInfos()
}
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -148,25 +130,20 @@ func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named)
return false
}
func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) {
config, err := m.ConfigBlob()
if err != nil {
return nil, err
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *manifestSchema2) Inspect() (*types.ImageInspectInfo, error) {
getter := func(info types.BlobInfo) ([]byte, error) {
if info.Digest != m.ConfigInfo().Digest {
// Shouldn't ever happen
return nil, errors.New("asked for a different config blob")
}
config, err := m.ConfigBlob()
if err != nil {
return nil, err
}
return config, nil
}
v1 := &v1Image{}
if err := json.Unmarshal(config, v1); err != nil {
return nil, err
}
i := &types.ImageInspectInfo{
DockerVersion: v1.DockerVersion,
Created: v1.Created,
Architecture: v1.Architecture,
Os: v1.OS,
}
if v1.Config != nil {
i.Labels = v1.Config.Labels
}
return i, nil
return m.m.Inspect(getter)
}
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
@@ -179,17 +156,14 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
src: m.src,
configBlob: m.configBlob,
m: manifest.Schema2Clone(m.m),
}
if options.LayerInfos != nil {
if len(copy.LayersDescriptors) != len(options.LayerInfos) {
return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
}
copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
for i, info := range options.LayerInfos {
copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
copy.LayersDescriptors[i].Digest = info.Digest
copy.LayersDescriptors[i].Size = info.Size
copy.LayersDescriptors[i].URLs = info.URLs
if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
return nil, err
}
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
@@ -207,6 +181,15 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
return memoryImageFromManifest(&copy), nil
}
func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
return imgspecv1.Descriptor{
MediaType: d.MediaType,
Size: d.Size,
Digest: d.Digest,
URLs: d.URLs,
}
}
func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
configOCI, err := m.OCIConfig()
if err != nil {
@@ -217,18 +200,16 @@ func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
return nil, err
}
config := descriptorOCI1{
descriptor: descriptor{
MediaType: imgspecv1.MediaTypeImageConfig,
Size: int64(len(configOCIBytes)),
Digest: digest.FromBytes(configOCIBytes),
},
config := imgspecv1.Descriptor{
MediaType: imgspecv1.MediaTypeImageConfig,
Size: int64(len(configOCIBytes)),
Digest: digest.FromBytes(configOCIBytes),
}
layers := make([]descriptorOCI1, len(m.LayersDescriptors))
layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors))
for idx := range layers {
layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]}
if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx])
if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
} else {
// we assume layers are gzip'ed because docker v2s2 only deals with
@@ -247,14 +228,14 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
if err != nil {
return nil, err
}
imageConfig := &image{}
imageConfig := &manifest.Schema2Image{}
if err := json.Unmarshal(configBytes, imageConfig); err != nil {
return nil, err
}
// Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
fsLayers := make([]fsLayersSchema1, len(imageConfig.History))
history := make([]historySchema1, len(imageConfig.History))
fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History))
history := make([]manifest.Schema1History, len(imageConfig.History))
nonemptyLayerIndex := 0
var parentV1ID string // Set in the loop
v1ID := ""
@@ -271,7 +252,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
if historyEntry.EmptyLayer {
if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1")
info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))})
info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))}, false)
if err != nil {
return nil, errors.Wrap(err, "Error uploading empty layer")
}
@@ -282,10 +263,10 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
}
blobDigest = gzippedEmptyLayerDigest
} else {
if nonemptyLayerIndex >= len(m.LayersDescriptors) {
return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
if nonemptyLayerIndex >= len(m.m.LayersDescriptors) {
return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors))
}
blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest
blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest
nonemptyLayerIndex++
}
@@ -296,7 +277,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
}
v1ID = v
fakeImage := v1Compatibility{
fakeImage := manifest.Schema1V1Compatibility{
ID: v1ID,
Parent: parentV1ID,
Comment: historyEntry.Comment,
@@ -310,8 +291,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
}
fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest}
history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)}
fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest}
history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)}
// Note that parentV1ID of the top layer is preserved when exiting this loop
}

View File

@@ -1,57 +1,14 @@
package image
import (
"time"
"fmt"
"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/strslice"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
type config struct {
Cmd strslice.StrSlice
Labels map[string]string
}
type v1Image struct {
ID string `json:"id,omitempty"`
Parent string `json:"parent,omitempty"`
Comment string `json:"comment,omitempty"`
Created time.Time `json:"created"`
ContainerConfig *config `json:"container_config,omitempty"`
DockerVersion string `json:"docker_version,omitempty"`
Author string `json:"author,omitempty"`
// Config is the configuration of the container received from the client
Config *config `json:"config,omitempty"`
// Architecture is the hardware that the image is build and runs on
Architecture string `json:"architecture,omitempty"`
// OS is the operating system used to build and run the image
OS string `json:"os,omitempty"`
}
type image struct {
v1Image
History []imageHistory `json:"history,omitempty"`
RootFS *rootFS `json:"rootfs,omitempty"`
}
type imageHistory struct {
Created time.Time `json:"created"`
Author string `json:"author,omitempty"`
CreatedBy string `json:"created_by,omitempty"`
Comment string `json:"comment,omitempty"`
EmptyLayer bool `json:"empty_layer,omitempty"`
}
type rootFS struct {
Type string `json:"type"`
DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
BaseLayer string `json:"base_layer,omitempty"`
}
// genericManifest is an interface for parsing, modifying image manifests and related data.
// Note that the public methods are intended to be a subset of types.Image
// so that embedding a genericManifest into structs works.
@@ -77,7 +34,8 @@ type genericManifest interface {
// It returns false if the manifest does not embed a Docker reference.
// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
EmbeddedDockerReferenceConflicts(ref reference.Named) bool
imageInspectInfo() (*types.ImageInspectInfo, error) // To be called by inspectManifest
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
Inspect() (*types.ImageInspectInfo, error)
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
// (most importantly it forces us to download the full layers even if they are already present at the destination).
@@ -90,11 +48,8 @@ type genericManifest interface {
// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
// If manblob is a manifest list, it implicitly chooses an appropriate image from the list.
func manifestInstanceFromBlob(ctx *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
switch mt {
// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
// need to happen within the ImageSource.
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json":
switch manifest.NormalizedMIMEType(mt) {
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
return manifestSchema1FromManifest(manblob)
case imgspecv1.MediaTypeImageManifest:
return manifestOCI1FromManifest(src, manblob)
@@ -102,30 +57,7 @@ func manifestInstanceFromBlob(ctx *types.SystemContext, src types.ImageSource, m
return manifestSchema2FromManifest(src, manblob)
case manifest.DockerV2ListMediaType:
return manifestSchema2FromManifestList(ctx, src, manblob)
default:
// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
//
// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
// This makes no real sense, but it happens
// because requests for manifests are
// redirected to a content distribution
// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
return manifestSchema1FromManifest(manblob)
default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
}
}
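For context on the new dispatch: NormalizedMIMEType passes recognized manifest types through and maps "application/json" and unknown values to the signed schema 1 type, which is why the default case above should be unreachable. A standalone sketch, assuming the vendored github.com/containers/image/manifest API:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	// Recognized types pass through unchanged ...
	fmt.Println(manifest.NormalizedMIMEType(manifest.DockerV2Schema2MediaType))
	// ... while "application/json" and unrecognized values normalize to the
	// signed schema 1 type, preserving the old fallback behavior.
	fmt.Println(manifest.NormalizedMIMEType("application/json"))
	fmt.Println(manifest.NormalizedMIMEType("text/plain"))
}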
// inspectManifest is an implementation of types.Image.Inspect
func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) {
info, err := m.imageInspectInfo()
if err != nil {
return nil, err
}
layers := m.LayerInfos()
info.Layers = make([]string, len(layers))
for i, layer := range layers {
info.Layers[i] = layer.Digest.String()
}
return info, nil
}

View File

@@ -57,7 +57,9 @@ func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory")
}
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) {
return inspectManifest(i.genericManifest)
// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest.
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (i *memoryImage) LayerInfosForCopy() ([]types.BlobInfo, error) {
return nil, nil
}

View File

@@ -12,41 +12,34 @@ import (
"github.com/pkg/errors"
)
type descriptorOCI1 struct {
descriptor
Annotations map[string]string `json:"annotations,omitempty"`
}
type manifestOCI1 struct {
src types.ImageSource // May be nil if configBlob is not nil
configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
SchemaVersion int `json:"schemaVersion"`
ConfigDescriptor descriptorOCI1 `json:"config"`
LayersDescriptors []descriptorOCI1 `json:"layers"`
Annotations map[string]string `json:"annotations,omitempty"`
src types.ImageSource // May be nil if configBlob is not nil
configBlob []byte // If set, corresponds to contents of m.Config.
m *manifest.OCI1
}
func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
oci := manifestOCI1{src: src}
if err := json.Unmarshal(manifest, &oci); err != nil {
func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
m, err := manifest.OCI1FromManifest(manifestBlob)
if err != nil {
return nil, err
}
return &oci, nil
return &manifestOCI1{
src: src,
m: m,
}, nil
}
// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest {
func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
return &manifestOCI1{
src: src,
configBlob: configBlob,
SchemaVersion: 2,
ConfigDescriptor: config,
LayersDescriptors: layers,
src: src,
configBlob: configBlob,
m: manifest.OCI1FromComponents(config, layers),
}
}
func (m *manifestOCI1) serialize() ([]byte, error) {
return json.Marshal(*m)
return m.m.Serialize()
}
func (m *manifestOCI1) manifestMIMEType() string {
@@ -56,7 +49,7 @@ func (m *manifestOCI1) manifestMIMEType() string {
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, Annotations: m.ConfigDescriptor.Annotations}
return m.m.ConfigInfo()
}
// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
@@ -67,9 +60,9 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
stream, _, err := m.src.GetBlob(types.BlobInfo{
Digest: m.ConfigDescriptor.Digest,
Size: m.ConfigDescriptor.Size,
URLs: m.ConfigDescriptor.URLs,
Digest: m.m.Config.Digest,
Size: m.m.Config.Size,
URLs: m.m.Config.URLs,
})
if err != nil {
return nil, err
@@ -80,8 +73,8 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
return nil, err
}
computedDigest := digest.FromBytes(blob)
if computedDigest != m.ConfigDescriptor.Digest {
return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
if computedDigest != m.m.Config.Digest {
return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
}
m.configBlob = blob
}
@@ -107,11 +100,7 @@ func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) {
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
blobs := []types.BlobInfo{}
for _, layer := range m.LayersDescriptors {
blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
}
return blobs
return m.m.LayerInfos()
}
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
@@ -121,25 +110,20 @@ func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) boo
return false
}
func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) {
config, err := m.ConfigBlob()
if err != nil {
return nil, err
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *manifestOCI1) Inspect() (*types.ImageInspectInfo, error) {
getter := func(info types.BlobInfo) ([]byte, error) {
if info.Digest != m.ConfigInfo().Digest {
// Shouldn't ever happen
return nil, errors.New("asked for a different config blob")
}
config, err := m.ConfigBlob()
if err != nil {
return nil, err
}
return config, nil
}
v1 := &v1Image{}
if err := json.Unmarshal(config, v1); err != nil {
return nil, err
}
i := &types.ImageInspectInfo{
DockerVersion: v1.DockerVersion,
Created: v1.Created,
Architecture: v1.Architecture,
Os: v1.OS,
}
if v1.Config != nil {
i.Labels = v1.Config.Labels
}
return i, nil
return m.m.Inspect(getter)
}
// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
@@ -152,24 +136,30 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat
// UpdatedImage returns a types.Image modified according to options.
// This does not change the state of the original Image object.
func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
src: m.src,
configBlob: m.configBlob,
m: manifest.OCI1Clone(m.m),
}
if options.LayerInfos != nil {
if len(copy.LayersDescriptors) != len(options.LayerInfos) {
return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
}
copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos))
for i, info := range options.LayerInfos {
copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
copy.LayersDescriptors[i].Digest = info.Digest
copy.LayersDescriptors[i].Size = info.Size
copy.LayersDescriptors[i].Annotations = info.Annotations
copy.LayersDescriptors[i].URLs = info.URLs
if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
return nil, err
}
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
switch options.ManifestMIMEType {
case "": // No conversion, OK
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
// We can't directly convert to V1, but we can transitively convert via a V2 image
m2, err := copy.convertToManifestSchema2()
if err != nil {
return nil, err
}
return m2.UpdatedImage(types.ManifestUpdateOptions{
ManifestMIMEType: options.ManifestMIMEType,
InformationOnly: options.InformationOnly,
})
case manifest.DockerV2Schema2MediaType:
return copy.convertToManifestSchema2()
default:
@@ -179,17 +169,26 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
return memoryImageFromManifest(&copy), nil
}
func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
return manifest.Schema2Descriptor{
MediaType: d.MediaType,
Size: d.Size,
Digest: d.Digest,
URLs: d.URLs,
}
}
func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
// Create a copy of the descriptor.
config := m.ConfigDescriptor.descriptor
config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
// The only difference between OCI and DockerSchema2 is the mediatypes. The
// media type of the manifest is handled by manifestSchema2FromComponents.
config.MediaType = manifest.DockerV2Schema2ConfigMediaType
layers := make([]descriptor, len(m.LayersDescriptors))
layers := make([]manifest.Schema2Descriptor, len(m.m.Layers))
for idx := range layers {
layers[idx] = m.LayersDescriptors[idx].descriptor
layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx])
layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
}

View File

@@ -97,6 +97,6 @@ func (i *sourcedImage) Manifest() ([]byte, string, error) {
return i.manifestBlob, i.manifestMIMEType, nil
}
func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) {
return inspectManifest(i.genericManifest)
func (i *sourcedImage) LayerInfosForCopy() ([]types.BlobInfo, error) {
return i.UnparsedImage.src.LayerInfosForCopy()
}

View File

@@ -0,0 +1,19 @@
package tmpdir
import (
"os"
"runtime"
)
// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
// On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp,
// which on systemd-based systems could be an unsuitable tmpfs filesystem.
func TemporaryDirectoryForBigFiles() string {
var temporaryDirectoryForBigFiles string
if runtime.GOOS == "windows" {
temporaryDirectoryForBigFiles = os.TempDir()
} else {
temporaryDirectoryForBigFiles = "/var/tmp"
}
return temporaryDirectoryForBigFiles
}
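As a usage sketch: callers stage large downloads under this directory instead of os.TempDir(). Note that internal/... packages compile only inside containers/image itself, and the "example" prefix below is arbitrary.

package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/containers/image/internal/tmpdir"
)

func main() {
	// Stage big blobs under /var/tmp (or os.TempDir() on Windows) rather than
	// a possibly RAM-backed /tmp.
	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "example")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer os.RemoveAll(dir)
	fmt.Println("staging big files in", dir)
}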

View File

@@ -0,0 +1,306 @@
package manifest
import (
"encoding/json"
"regexp"
"strings"
"time"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/docker/docker/api/types/versions"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1.
type Schema1FSLayers struct {
BlobSum digest.Digest `json:"blobSum"`
}
// Schema1History is an entry of the "history" array in docker/distribution schema 1.
type Schema1History struct {
V1Compatibility string `json:"v1Compatibility"`
}
// Schema1 is a manifest in docker/distribution schema 1.
type Schema1 struct {
Name string `json:"name"`
Tag string `json:"tag"`
Architecture string `json:"architecture"`
FSLayers []Schema1FSLayers `json:"fsLayers"`
History []Schema1History `json:"history"`
SchemaVersion int `json:"schemaVersion"`
}
// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1.
type Schema1V1Compatibility struct {
ID string `json:"id"`
Parent string `json:"parent,omitempty"`
Comment string `json:"comment,omitempty"`
Created time.Time `json:"created"`
ContainerConfig struct {
Cmd []string
} `json:"container_config,omitempty"`
Author string `json:"author,omitempty"`
ThrowAway bool `json:"throwaway,omitempty"`
}
// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob.
// (NOTE: The instance is not necessarily a literal representation of the original blob;
// layers with duplicate IDs are eliminated.)
func Schema1FromManifest(manifest []byte) (*Schema1, error) {
s1 := Schema1{}
if err := json.Unmarshal(manifest, &s1); err != nil {
return nil, err
}
if s1.SchemaVersion != 1 {
return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
}
if len(s1.FSLayers) != len(s1.History) {
return nil, errors.New("length of history not equal to number of layers")
}
if len(s1.FSLayers) == 0 {
return nil, errors.New("no FSLayers in manifest")
}
if err := s1.fixManifestLayers(); err != nil {
return nil, err
}
return &s1, nil
}
// Schema1FromComponents creates a Schema1 manifest instance from the supplied data.
func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 {
var name, tag string
if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
name = reference.Path(ref)
if tagged, ok := ref.(reference.NamedTagged); ok {
tag = tagged.Tag()
}
}
return &Schema1{
Name: name,
Tag: tag,
Architecture: architecture,
FSLayers: fsLayers,
History: history,
SchemaVersion: 1,
}
}
// Schema1Clone creates a copy of the supplied Schema1 manifest.
func Schema1Clone(src *Schema1) *Schema1 {
copy := *src
return &copy
}
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *Schema1) ConfigInfo() types.BlobInfo {
return types.BlobInfo{}
}
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *Schema1) LayerInfos() []types.BlobInfo {
layers := make([]types.BlobInfo, len(m.FSLayers))
for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
}
return layers
}
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
if len(m.FSLayers) != len(layerInfos) {
return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos))
}
m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
for i, info := range layerInfos {
// (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest
}
return nil
}
// Serialize returns the manifest in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (m *Schema1) Serialize() ([]byte, error) {
// docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
unsigned, err := json.Marshal(*m)
if err != nil {
return nil, err
}
return AddDummyV2S1Signature(unsigned)
}
// fixManifestLayers, after validating the supplied manifest
// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History),
// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates,
// both from m.History and m.FSLayers).
// Note that even after this succeeds, m.FSLayers may contain duplicate entries
// (for Dockerfile operations which change the configuration but not the filesystem).
func (m *Schema1) fixManifestLayers() error {
type imageV1 struct {
ID string
Parent string
}
// Per the specification, we can assume that len(m.FSLayers) == len(m.History)
imgs := make([]*imageV1, len(m.FSLayers))
for i := range m.FSLayers {
img := &imageV1{}
if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {
return err
}
imgs[i] = img
if err := validateV1ID(img.ID); err != nil {
return err
}
}
if imgs[len(imgs)-1].Parent != "" {
return errors.New("Invalid parent ID in the base layer of the image")
}
// check for duplicates in general, erroring out instead of deadlocking later
idmap := make(map[string]struct{})
var lastID string
for _, img := range imgs {
// skip IDs that appear consecutively; we handle those later
if _, exists := idmap[img.ID]; img.ID != lastID && exists {
return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
}
lastID = img.ID
idmap[lastID] = struct{}{}
}
// backwards loop so that we keep the remaining indexes after removing items
for i := len(imgs) - 2; i >= 0; i-- {
if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
m.History = append(m.History[:i], m.History[i+1:]...)
} else if imgs[i].Parent != imgs[i+1].ID {
return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
}
}
return nil
}
var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
func validateV1ID(id string) error {
if ok := validHex.MatchString(id); !ok {
return errors.Errorf("image ID %q is invalid", id)
}
return nil
}
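To make the deduplication concrete, here is a sketch of an in-package test (fixManifestLayers is unexported, so this only works inside the manifest package); the IDs and blob digest are made up, and the two newest history entries deliberately share one ID, as happens for configuration-only Dockerfile steps:

package manifest

import (
	"strings"
	"testing"

	"github.com/opencontainers/go-digest"
)

func TestFixManifestLayersExample(t *testing.T) {
	aID := strings.Repeat("a", 64) // hypothetical v1 layer IDs
	bID := strings.Repeat("b", 64)
	blob := digest.Digest("sha256:" + strings.Repeat("0", 64))
	m := &Schema1{
		FSLayers: []Schema1FSLayers{{BlobSum: blob}, {BlobSum: blob}, {BlobSum: blob}},
		History: []Schema1History{ // newest first, as schema1 stores them
			{V1Compatibility: `{"id":"` + aID + `","parent":"` + bID + `"}`},
			{V1Compatibility: `{"id":"` + aID + `","parent":"` + bID + `"}`},
			{V1Compatibility: `{"id":"` + bID + `"}`},
		},
	}
	if err := m.fixManifestLayers(); err != nil {
		t.Fatal(err)
	}
	// The consecutive duplicate was collapsed into a single entry.
	if len(m.History) != 2 || len(m.FSLayers) != 2 {
		t.Fatalf("unexpected lengths: %d history, %d fsLayers", len(m.History), len(m.FSLayers))
	}
}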
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
s1 := &Schema2V1Image{}
if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil {
return nil, err
}
i := &types.ImageInspectInfo{
Tag: m.Tag,
Created: &s1.Created,
DockerVersion: s1.DockerVersion,
Architecture: s1.Architecture,
Os: s1.OS,
Layers: LayerInfosToStrings(m.LayerInfos()),
}
if s1.Config != nil {
i.Labels = s1.Config.Labels
}
return i, nil
}
// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs.
func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
// Convert the schema 1 compat info into a schema 2 config, constructing some of the fields
// that aren't directly comparable using info from the manifest.
if len(m.History) == 0 {
return nil, errors.New("image has no layers")
}
s1 := Schema2V1Image{}
config := []byte(m.History[0].V1Compatibility)
err := json.Unmarshal(config, &s1)
if err != nil {
return nil, errors.Wrapf(err, "error decoding configuration")
}
// Images created with versions prior to 1.8.3 require us to re-encode the encoded object,
// adding some fields that aren't "omitempty".
if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") {
config, err = json.Marshal(&s1)
if err != nil {
return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s1)
}
}
// Build the history.
convertedHistory := []Schema2History{}
for _, h := range m.History {
compat := Schema1V1Compatibility{}
if err := json.Unmarshal([]byte(h.V1Compatibility), &compat); err != nil {
return nil, errors.Wrapf(err, "error decoding history information")
}
hitem := Schema2History{
Created: compat.Created,
CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "),
Author: compat.Author,
Comment: compat.Comment,
EmptyLayer: compat.ThrowAway,
}
convertedHistory = append([]Schema2History{hitem}, convertedHistory...)
}
// Build the rootfs information. We need the decompressed sums that we've been
// calculating to fill in the DiffIDs. It's expected (but not enforced by us)
// that the number of diffIDs corresponds to the number of non-EmptyLayer
// entries in the history.
rootFS := &Schema2RootFS{
Type: "layers",
DiffIDs: diffIDs,
}
// And now for some raw manipulation.
raw := make(map[string]*json.RawMessage)
err = json.Unmarshal(config, &raw)
if err != nil {
return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1)
}
// Drop some fields.
delete(raw, "id")
delete(raw, "parent")
delete(raw, "parent_id")
delete(raw, "layer_id")
delete(raw, "throwaway")
delete(raw, "Size")
// Add the history and rootfs information.
rootfs, err := json.Marshal(rootFS)
if err != nil {
return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err)
}
rawRootfs := json.RawMessage(rootfs)
raw["rootfs"] = &rawRootfs
history, err := json.Marshal(convertedHistory)
if err != nil {
return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err)
}
rawHistory := json.RawMessage(history)
raw["history"] = &rawHistory
// Encode the result.
config, err = json.Marshal(raw)
if err != nil {
return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s1, err)
}
return config, nil
}
// ImageID computes an ID which can uniquely identify this image by its contents.
func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) {
image, err := m.ToSchema2Config(diffIDs)
if err != nil {
return "", err
}
return digest.FromBytes(image).Hex(), nil
}
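A hedged sketch of using the conversion from outside the package, assuming the caller collected the uncompressed diffIDs (root layer first) while copying the layers; the helper name is hypothetical:

package example

import (
	"github.com/containers/image/manifest"
	"github.com/opencontainers/go-digest"
)

// schema2ConfigAndID builds the schema2-style config blob for a schema1
// manifest and derives the content-based image ID from it.
func schema2ConfigAndID(s1 *manifest.Schema1, diffIDs []digest.Digest) ([]byte, string, error) {
	config, err := s1.ToSchema2Config(diffIDs)
	if err != nil {
		return nil, "", err
	}
	id, err := s1.ImageID(diffIDs) // equals digest.FromBytes(config).Hex()
	if err != nil {
		return nil, "", err
	}
	return config, id, nil
}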

View File

@@ -0,0 +1,244 @@
package manifest
import (
"encoding/json"
"time"
"github.com/containers/image/pkg/strslice"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
// Schema2Descriptor is a “descriptor” in docker/distribution schema 2.
type Schema2Descriptor struct {
MediaType string `json:"mediaType"`
Size int64 `json:"size"`
Digest digest.Digest `json:"digest"`
URLs []string `json:"urls,omitempty"`
}
// Schema2 is a manifest in docker/distribution schema 2.
type Schema2 struct {
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType"`
ConfigDescriptor Schema2Descriptor `json:"config"`
LayersDescriptors []Schema2Descriptor `json:"layers"`
}
// Schema2Port is a Port, a string containing port number and protocol in the
// format "80/tcp", from docker/go-connections/nat.
type Schema2Port string
// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from
// docker/go-connections/nat.
type Schema2PortSet map[Schema2Port]struct{}
// Schema2HealthConfig is a HealthConfig, which holds configuration settings
// for the HEALTHCHECK feature, from docker/docker/api/types/container.
type Schema2HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}
// Schema2Config is a Config in docker/docker/api/types/container.
type Schema2Config struct {
Hostname string // Hostname
Domainname string // Domainname
User string // User that will run the command(s) inside the container, also support user:group
AttachStdin bool // Attach the standard input, makes possible user interaction
AttachStdout bool // Attach the standard output
AttachStderr bool // Attach the standard error
ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the first attached client disconnects.
Env []string // List of environment variables to set in the container
Cmd strslice.StrSlice // Command to run when starting the container
Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
Volumes map[string]struct{} // List of volumes (mounts) used for the container
WorkingDir string // Current directory (PWD) in which the command will be launched
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
NetworkDisabled bool `json:",omitempty"` // Is network disabled
MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
Labels map[string]string // List of labels set to this container
StopSignal string `json:",omitempty"` // Signal to stop a container
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}
// Schema2V1Image is a V1Image in docker/docker/image.
type Schema2V1Image struct {
// ID is a unique 64 character identifier of the image
ID string `json:"id,omitempty"`
// Parent is the ID of the parent image
Parent string `json:"parent,omitempty"`
// Comment is the commit message that was set when committing the image
Comment string `json:"comment,omitempty"`
// Created is the timestamp at which the image was created
Created time.Time `json:"created"`
// Container is the id of the container used to commit
Container string `json:"container,omitempty"`
// ContainerConfig is the configuration of the container that is committed into the image
ContainerConfig Schema2Config `json:"container_config,omitempty"`
// DockerVersion specifies the version of Docker that was used to build the image
DockerVersion string `json:"docker_version,omitempty"`
// Author is the name of the author that was specified when committing the image
Author string `json:"author,omitempty"`
// Config is the configuration of the container received from the client
Config *Schema2Config `json:"config,omitempty"`
// Architecture is the hardware that the image is built and runs on
Architecture string `json:"architecture,omitempty"`
// OS is the operating system used to build and run the image
OS string `json:"os,omitempty"`
// Size is the total size of the image including all layers it is composed of
Size int64 `json:",omitempty"`
}
// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image.
type Schema2RootFS struct {
Type string `json:"type"`
DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
}
// Schema2History stores build commands that were used to create an image, from docker/docker/image.
type Schema2History struct {
// Created is the timestamp at which the image was created
Created time.Time `json:"created"`
// Author is the name of the author that was specified when committing the image
Author string `json:"author,omitempty"`
// CreatedBy keeps the Dockerfile command used while building the image
CreatedBy string `json:"created_by,omitempty"`
// Comment is the commit message that was set when committing the image
Comment string `json:"comment,omitempty"`
// EmptyLayer is set to true if this history item did not generate a
// layer. Otherwise, the history item is associated with the next
// layer in the RootFS section.
EmptyLayer bool `json:"empty_layer,omitempty"`
}
// Schema2Image is an Image in docker/docker/image.
type Schema2Image struct {
Schema2V1Image
Parent digest.Digest `json:"parent,omitempty"`
RootFS *Schema2RootFS `json:"rootfs,omitempty"`
History []Schema2History `json:"history,omitempty"`
OSVersion string `json:"os.version,omitempty"`
OSFeatures []string `json:"os.features,omitempty"`
}
// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob.
func Schema2FromManifest(manifest []byte) (*Schema2, error) {
s2 := Schema2{}
if err := json.Unmarshal(manifest, &s2); err != nil {
return nil, err
}
return &s2, nil
}
// Schema2FromComponents creates a Schema2 manifest instance from the supplied data.
func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
return &Schema2{
SchemaVersion: 2,
MediaType: DockerV2Schema2MediaType,
ConfigDescriptor: config,
LayersDescriptors: layers,
}
}
// Schema2Clone creates a copy of the supplied Schema2 manifest.
func Schema2Clone(src *Schema2) *Schema2 {
copy := *src
return &copy
}
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *Schema2) ConfigInfo() types.BlobInfo {
return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
}
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *Schema2) LayerInfos() []types.BlobInfo {
blobs := []types.BlobInfo{}
for _, layer := range m.LayersDescriptors {
blobs = append(blobs, types.BlobInfo{
Digest: layer.Digest,
Size: layer.Size,
URLs: layer.URLs,
})
}
return blobs
}
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.LayersDescriptors) != len(layerInfos) {
return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
}
original := m.LayersDescriptors
m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
for i, info := range layerInfos {
m.LayersDescriptors[i].MediaType = original[i].MediaType
m.LayersDescriptors[i].Digest = info.Digest
m.LayersDescriptors[i].Size = info.Size
m.LayersDescriptors[i].URLs = info.URLs
}
return nil
}
// Serialize returns the manifest in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (m *Schema2) Serialize() ([]byte, error) {
return json.Marshal(*m)
}
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
config, err := configGetter(m.ConfigInfo())
if err != nil {
return nil, err
}
s2 := &Schema2Image{}
if err := json.Unmarshal(config, s2); err != nil {
return nil, err
}
i := &types.ImageInspectInfo{
Tag: "",
Created: &s2.Created,
DockerVersion: s2.DockerVersion,
Architecture: s2.Architecture,
Os: s2.OS,
Layers: LayerInfosToStrings(m.LayerInfos()),
}
if s2.Config != nil {
i.Labels = s2.Config.Labels
}
return i, nil
}
// ImageID computes an ID which can uniquely identify this image by its contents.
func (m *Schema2) ImageID([]digest.Digest) (string, error) {
if err := m.ConfigDescriptor.Digest.Validate(); err != nil {
return "", err
}
return m.ConfigDescriptor.Digest.Hex(), nil
}
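A minimal sketch of the intended edit flow, assuming a copy pipeline recompressed the layers and now needs the schema2 manifest to match (the helper name is hypothetical):

package example

import (
	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
)

// reserializeWithLayers swaps in the updated layer digests/sizes and returns
// the re-serialized manifest blob; the layer count must be unchanged.
func reserializeWithLayers(m *manifest.Schema2, newLayers []types.BlobInfo) ([]byte, error) {
	if err := m.UpdateLayerInfos(newLayers); err != nil {
		return nil, err
	}
	// NOTE: generally not byte-identical to the blob the manifest was loaded from.
	return m.Serialize()
}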

View File

@@ -2,7 +2,9 @@ package manifest
import (
"encoding/json"
"fmt"
"github.com/containers/image/types"
"github.com/docker/libtrust"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -38,6 +40,39 @@ var DefaultRequestedManifestMIMETypes = []string{
DockerV2ListMediaType,
}
// Manifest is an interface for parsing, modifying image manifests in isolation.
// Callers can either use this abstract interface without understanding the details of the formats,
// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members
// directly.
//
// See types.Image for functionality not limited to manifests, including format conversions and config parsing.
// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image.
type Manifest interface {
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
ConfigInfo() types.BlobInfo
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
LayerInfos() []types.BlobInfo
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
UpdateLayerInfos(layerInfos []types.BlobInfo) error
// ImageID computes an ID which can uniquely identify this image by its contents, irrespective
// of which (of possibly more than one simultaneously valid) reference was used to locate the
// image, and unchanged by whether or how the layers are compressed. The result takes the form
// of the hexadecimal portion of a digest.Digest.
ImageID(diffIDs []digest.Digest) (string, error)
// Inspect returns various information for (skopeo inspect) parsed from the manifest,
// incorporating information from a configuration blob returned by configGetter, if
// the underlying image format is expected to include a configuration blob.
Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error)
// Serialize returns the manifest in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
Serialize() ([]byte, error)
}
// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
// but we may not have such metadata available (e.g. when the manifest is a local file).
@@ -147,3 +182,57 @@ func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
func MIMETypeIsMultiImage(mimeType string) bool {
return mimeType == DockerV2ListMediaType
}
// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
func NormalizedMIMEType(input string) string {
switch input {
// "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
// need to happen within the ImageSource.
case "application/json":
return DockerV2Schema1SignedMediaType
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType,
imgspecv1.MediaTypeImageManifest,
DockerV2Schema2MediaType,
DockerV2ListMediaType:
return input
default:
// If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
//
// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
// This makes no real sense, but it happens
// because requests for manifests are
// redirected to a content distribution
// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
return DockerV2Schema1SignedMediaType
}
}
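For instance, both a bare "application/json" reply and an unrecognized type normalize to the signed schema1 type, while recognized types pass through; a small runnable sketch:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	// Some registries report plain JSON for v2s1 manifests.
	fmt.Println(manifest.NormalizedMIMEType("application/json") == manifest.DockerV2Schema1SignedMediaType) // true
	// Unknown types fall back to signed schema1 as a last resort.
	fmt.Println(manifest.NormalizedMIMEType("text/plain") == manifest.DockerV2Schema1SignedMediaType) // true
	// Recognized types are returned unchanged.
	fmt.Println(manifest.NormalizedMIMEType(manifest.DockerV2Schema2MediaType) == manifest.DockerV2Schema2MediaType) // true
}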
// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type
func FromBlob(manblob []byte, mt string) (Manifest, error) {
switch NormalizedMIMEType(mt) {
case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType:
return Schema1FromManifest(manblob)
case imgspecv1.MediaTypeImageManifest:
return OCI1FromManifest(manblob)
case DockerV2Schema2MediaType:
return Schema2FromManifest(manblob)
case DockerV2ListMediaType:
return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented")
default: // Note that this may not be reachable: NormalizedMIMEType has a default for unknown values.
return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt)
}
}
// LayerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
func LayerInfosToStrings(infos []types.BlobInfo) []string {
layers := make([]string, len(infos))
for i, info := range infos {
layers[i] = info.Digest.String()
}
return layers
}
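Putting the pieces together, a hedged sketch of format-agnostic use of the new Manifest interface (the function name is hypothetical):

package example

import (
	"github.com/containers/image/manifest"
)

// layerDigests parses a raw manifest blob of any supported single-image
// format and returns its layer digests as strings.
func layerDigests(blob []byte, mimeType string) ([]string, error) {
	m, err := manifest.FromBlob(blob, mimeType) // the MIME type is normalized internally
	if err != nil {
		return nil, err
	}
	return manifest.LayerInfosToStrings(m.LayerInfos()), nil
}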

vendor/github.com/containers/image/manifest/oci.go generated vendored Normal file
View File

@@ -0,0 +1,115 @@
package manifest
import (
"encoding/json"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// OCI1 is a manifest.Manifest implementation for OCI images.
// The underlying data from imgspecv1.Manifest is also available.
type OCI1 struct {
imgspecv1.Manifest
}
// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob.
func OCI1FromManifest(manifest []byte) (*OCI1, error) {
oci1 := OCI1{}
if err := json.Unmarshal(manifest, &oci1); err != nil {
return nil, err
}
return &oci1, nil
}
// OCI1FromComponents creates an OCI1 manifest instance from the supplied data.
func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 {
return &OCI1{
imgspecv1.Manifest{
Versioned: specs.Versioned{SchemaVersion: 2},
Config: config,
Layers: layers,
},
}
}
// OCI1Clone creates a copy of the supplied OCI1 manifest.
func OCI1Clone(src *OCI1) *OCI1 {
return &OCI1{
Manifest: src.Manifest,
}
}
// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
func (m *OCI1) ConfigInfo() types.BlobInfo {
return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations}
}
// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
func (m *OCI1) LayerInfos() []types.BlobInfo {
blobs := []types.BlobInfo{}
for _, layer := range m.Layers {
blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType})
}
return blobs
}
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
if len(m.Layers) != len(layerInfos) {
return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
}
original := m.Layers
m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
for i, info := range layerInfos {
m.Layers[i].MediaType = original[i].MediaType
m.Layers[i].Digest = info.Digest
m.Layers[i].Size = info.Size
m.Layers[i].Annotations = info.Annotations
m.Layers[i].URLs = info.URLs
}
return nil
}
// Serialize returns the manifest in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (m *OCI1) Serialize() ([]byte, error) {
return json.Marshal(*m)
}
// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
config, err := configGetter(m.ConfigInfo())
if err != nil {
return nil, err
}
v1 := &imgspecv1.Image{}
if err := json.Unmarshal(config, v1); err != nil {
return nil, err
}
d1 := &Schema2V1Image{}
json.Unmarshal(config, d1)
i := &types.ImageInspectInfo{
Tag: "",
Created: v1.Created,
DockerVersion: d1.DockerVersion,
Labels: v1.Config.Labels,
Architecture: v1.Architecture,
Os: v1.OS,
Layers: LayerInfosToStrings(m.LayerInfos()),
}
return i, nil
}
// ImageID computes an ID which can uniquely identify this image by its contents.
func (m *OCI1) ImageID([]digest.Digest) (string, error) {
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}
return m.Config.Digest.Hex(), nil
}
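A short sketch of building and serializing an OCI manifest from descriptors the caller already holds; nothing beyond what OCI1FromComponents itself does is validated here:

package example

import (
	"github.com/containers/image/manifest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// buildOCIManifest assembles an OCI image manifest from a config descriptor
// and layer descriptors, then serializes it to a blob.
func buildOCIManifest(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) ([]byte, error) {
	m := manifest.OCI1FromComponents(config, layers)
	return m.Serialize()
}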

View File

@@ -54,9 +54,8 @@ func (d *ociArchiveImageDestination) SupportsSignatures() error {
return d.unpackedDest.SupportsSignatures()
}
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination
func (d *ociArchiveImageDestination) ShouldCompressLayers() bool {
return d.unpackedDest.ShouldCompressLayers()
func (d *ociArchiveImageDestination) DesiredLayerCompression() types.LayerCompression {
return d.unpackedDest.DesiredLayerCompression()
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
@@ -73,8 +72,8 @@ func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
func (d *ociArchiveImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
return d.unpackedDest.PutBlob(stream, inputInfo)
func (d *ociArchiveImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
return d.unpackedDest.PutBlob(stream, inputInfo, isConfig)
}
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
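Across this vendor bump, the boolean ShouldCompressLayers is replaced by the richer types.LayerCompression value on every destination, and PutBlob gains an isConfig parameter. A hedged sketch of how generic copy code might branch on the new value, assuming the PreserveOriginal/Compress/Decompress constants from containers/image/types:

package example

import (
	"github.com/containers/image/types"
)

// describeCompression maps a destination's stated preference to a note on
// what a copy pipeline should do with each layer stream.
func describeCompression(dest types.ImageDestination) string {
	switch dest.DesiredLayerCompression() {
	case types.Compress:
		return "compress layer streams before PutBlob"
	case types.Decompress:
		return "strip existing compression before PutBlob"
	default: // types.PreserveOriginal
		return "pass layer streams through unchanged"
	}
}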

View File

@@ -88,3 +88,8 @@ func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int
func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
return s.unpackedSrc.GetSignatures(ctx, instanceDigest)
}
// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
func (s *ociArchiveImageSource) LayerInfosForCopy() ([]types.BlobInfo, error) {
return nil, nil
}

View File

@@ -4,13 +4,13 @@ import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/containers/image/directory/explicitfilepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/oci/internal"
ocilayout "github.com/containers/image/oci/layout"
"github.com/containers/image/transports"
"github.com/containers/image/types"
@@ -48,51 +48,12 @@ func (t ociArchiveTransport) ParseReference(reference string) (types.ImageRefere
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error {
var file string
sep := strings.SplitN(scope, ":", 2)
file = sep[0]
if len(sep) == 2 {
image := sep[1]
if !refRegexp.MatchString(image) {
return errors.Errorf("Invalid image %s", image)
}
}
if !strings.HasPrefix(file, "/") {
return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry.
// (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?)
if scope == "/" {
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
}
cleaned := filepath.Clean(file)
if cleaned != file {
return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
return nil
return internal.ValidateScope(scope)
}
// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
const (
separator = `(?:[-._:@+]|--)`
alphanum = `(?:[A-Za-z0-9]+)`
component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
)
var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
func ParseReference(reference string) (types.ImageReference, error) {
var file, image string
sep := strings.SplitN(reference, ":", 2)
file = sep[0]
if len(sep) == 2 {
image = sep[1]
}
file, image := internal.SplitPathAndImage(reference)
return NewReference(file, image)
}
@@ -102,14 +63,15 @@ func NewReference(file, image string) (types.ImageReference, error) {
if err != nil {
return nil, err
}
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", file, image, resolved)
if err := internal.ValidateOCIPath(file); err != nil {
return nil, err
}
if len(image) > 0 && !refRegexp.MatchString(image) {
return nil, errors.Errorf("Invalid image %s", image)
if err := internal.ValidateImageName(image); err != nil {
return nil, err
}
return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil
}
@@ -197,7 +159,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
// createOCIRef creates the oci reference of the image
func createOCIRef(image string) (tempDirOCIRef, error) {
dir, err := ioutil.TempDir("/var/tmp", "oci")
dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci")
if err != nil {
return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
}

View File

@@ -0,0 +1,126 @@
package internal
import (
"github.com/pkg/errors"
"path/filepath"
"regexp"
"runtime"
"strings"
)
// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
const (
separator = `(?:[-._:@+]|--)`
alphanum = `(?:[A-Za-z0-9]+)`
component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
)
var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`)
// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs.
// In any other case an error is returned.
func ValidateImageName(image string) error {
if len(image) == 0 {
return nil
}
var err error
if !refRegexp.MatchString(image) {
err = errors.Errorf("Invalid image %s", image)
}
return err
}
// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image.
// Neither path nor image parts are validated at this stage.
func SplitPathAndImage(reference string) (string, string) {
if runtime.GOOS == "windows" {
return splitPathAndImageWindows(reference)
}
return splitPathAndImageNonWindows(reference)
}
func splitPathAndImageWindows(reference string) (string, string) {
groups := windowsRefRegexp.FindStringSubmatch(reference)
// nil group means no match
if groups == nil {
return reference, ""
}
// we expect three elements: the full match, the capture group for the path, and
// the capture group for the image
if len(groups) != 3 {
return reference, ""
}
return groups[1], groups[2]
}
func splitPathAndImageNonWindows(reference string) (string, string) {
sep := strings.SplitN(reference, ":", 2)
path := sep[0]
var image string
if len(sep) == 2 {
image = sep[1]
}
return path, image
}
// ValidateOCIPath takes the OCI path and validates it.
func ValidateOCIPath(path string) error {
if runtime.GOOS == "windows" {
// On Windows we must allow for a ':' as part of the path
if strings.Count(path, ":") > 1 {
return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path)
}
} else {
if strings.Contains(path, ":") {
return errors.Errorf("Invalid OCI reference: path %s contains a colon", path)
}
}
return nil
}
// ValidateScope validates a policy configuration scope for an OCI transport.
func ValidateScope(scope string) error {
var err error
if runtime.GOOS == "windows" {
err = validateScopeWindows(scope)
} else {
err = validateScopeNonWindows(scope)
}
if err != nil {
return err
}
cleaned := filepath.Clean(scope)
if cleaned != scope {
return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
return nil
}
func validateScopeWindows(scope string) error {
matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
if !matched {
return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
}
return nil
}
func validateScopeNonWindows(scope string) error {
if !strings.HasPrefix(scope, "/") {
return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry.
if scope == "/" {
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
}
return nil
}
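Since this is an internal package, only containers/image itself can call it. A sketch of the parse-and-validate flow the oci and oci-archive transports now share (the helper name is hypothetical):

package internal

// parseAndValidate splits an OCI reference into path and image, then
// validates each part, e.g. "/tmp/layout:latest" -> ("/tmp/layout", "latest");
// on Windows, a leading drive letter like `C:\layout` keeps its colon.
func parseAndValidate(reference string) (string, string, error) {
	path, image := SplitPathAndImage(reference)
	if err := ValidateOCIPath(path); err != nil {
		return "", "", err
	}
	if err := ValidateImageName(image); err != nil {
		return "", "", err
	}
	return path, image, nil
}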

View File

@@ -84,9 +84,8 @@ func (d *ociImageDestination) SupportsSignatures() error {
return errors.Errorf("Pushing signatures for OCI images is not supported")
}
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
func (d *ociImageDestination) ShouldCompressLayers() bool {
return true
func (d *ociImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.Compress
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
@@ -106,14 +105,17 @@ func (d *ociImageDestination) MustMatchRuntimeOS() bool {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
if err != nil {
return types.BlobInfo{}, err
}
succeeded := false
explicitClosed := false
defer func() {
blobFile.Close()
if !explicitClosed {
blobFile.Close()
}
if !succeeded {
os.Remove(blobFile.Name())
}
@@ -133,8 +135,15 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
}
if err := blobFile.Chmod(0644); err != nil {
return types.BlobInfo{}, err
// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
// On Windows, the “permissions of newly created files” argument to syscall.Open is
// ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
// always fails on Windows.
if runtime.GOOS != "windows" {
if err := blobFile.Chmod(0644); err != nil {
return types.BlobInfo{}, err
}
}
blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir)
@@ -144,6 +153,10 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
if err := ensureParentDirectoryExists(blobPath); err != nil {
return types.BlobInfo{}, err
}
// need to explicitly close the file, since the rename won't otherwise work on Windows
blobFile.Close()
explicitClosed = true
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
return types.BlobInfo{}, err
}

View File

@@ -143,6 +143,11 @@ func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, e
return nil, 0, errWrap
}
// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
func (s *ociImageSource) LayerInfosForCopy() ([]types.BlobInfo, error) {
return nil, nil
}
func getBlobSize(resp *http.Response) int64 {
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {

View File

@@ -5,12 +5,12 @@ import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/containers/image/directory/explicitfilepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
"github.com/containers/image/oci/internal"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
@@ -36,45 +36,12 @@ func (t ociTransport) ParseReference(reference string) (types.ImageReference, er
return ParseReference(reference)
}
// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys
const (
separator = `(?:[-._:@+]|--)`
alphanum = `(?:[A-Za-z0-9]+)`
component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)`
)
var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`)
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
var dir string
sep := strings.SplitN(scope, ":", 2)
dir = sep[0]
if len(sep) == 2 {
image := sep[1]
if !refRegexp.MatchString(image) {
return errors.Errorf("Invalid image %s", image)
}
}
if !strings.HasPrefix(dir, "/") {
return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
}
// Refuse also "/", otherwise "/" and "" would have the same semantics,
// and "" could be unexpectedly shadowed by the "/" entry.
// (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?)
if scope == "/" {
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
}
cleaned := filepath.Clean(dir)
if cleaned != dir {
return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
}
return nil
return internal.ValidateScope(scope)
}
// ociReference is an ImageReference for OCI directory paths.
@@ -92,13 +59,7 @@ type ociReference struct {
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
func ParseReference(reference string) (types.ImageReference, error) {
var dir, image string
sep := strings.SplitN(reference, ":", 2)
dir = sep[0]
if len(sep) == 2 {
image = sep[1]
}
dir, image := internal.SplitPathAndImage(reference)
return NewReference(dir, image)
}
@@ -111,14 +72,15 @@ func NewReference(dir, image string) (types.ImageReference, error) {
if err != nil {
return nil, err
}
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, image, resolved)
if err := internal.ValidateOCIPath(dir); err != nil {
return nil, err
}
if len(image) > 0 && !refRegexp.MatchString(image) {
return nil, errors.Errorf("Invalid image %s", image)
if err = internal.ValidateImageName(image); err != nil {
return nil, err
}
return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
}

View File

@@ -246,6 +246,11 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest
return sigs, nil
}
// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
func (s *openshiftImageSource) LayerInfosForCopy() ([]types.BlobInfo, error) {
return nil, nil
}
// ensureImageIsResolved sets up s.docker and s.imageStreamImageName
func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error {
if s.docker != nil {
@@ -349,9 +354,8 @@ func (d *openshiftImageDestination) SupportsSignatures() error {
return nil
}
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
func (d *openshiftImageDestination) ShouldCompressLayers() bool {
return true
func (d *openshiftImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.Compress
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
@@ -371,8 +375,8 @@ func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
return d.docker.PutBlob(stream, inputInfo)
func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
return d.docker.PutBlob(stream, inputInfo, isConfig)
}
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.

View File

@@ -4,6 +4,8 @@ package ostree
import (
"bytes"
"compress/gzip"
"encoding/base64"
"encoding/json"
"fmt"
"io"
@@ -13,17 +15,32 @@ import (
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"unsafe"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/archive"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
selinux "github.com/opencontainers/selinux/go-selinux"
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include <stdlib.h>
// #include <ostree.h>
// #include <gio/ginputstream.h>
// #include <selinux/selinux.h>
// #include <selinux/label.h>
import "C"
type blobToImport struct {
Size int64
Digest digest.Digest
@@ -35,18 +52,24 @@ type descriptor struct {
Digest digest.Digest `json:"digest"`
}
type fsLayersSchema1 struct {
BlobSum digest.Digest `json:"blobSum"`
}
type manifestSchema struct {
ConfigDescriptor descriptor `json:"config"`
LayersDescriptors []descriptor `json:"layers"`
LayersDescriptors []descriptor `json:"layers"`
FSLayers []fsLayersSchema1 `json:"fsLayers"`
}
type ostreeImageDestination struct {
ref ostreeReference
manifest string
schema manifestSchema
tmpDirPath string
blobs map[string]*blobToImport
digest digest.Digest
ref ostreeReference
manifest string
schema manifestSchema
tmpDirPath string
blobs map[string]*blobToImport
digest digest.Digest
signaturesLen int
repo *C.struct_OstreeRepo
}
// newImageDestination returns an ImageDestination for writing to an existing ostree.
@@ -55,7 +78,7 @@ func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDes
if err := ensureDirectoryExists(tmpDirPath); err != nil {
return nil, err
}
return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, ""}, nil
return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil
}
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
@@ -66,6 +89,9 @@ func (d *ostreeImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *ostreeImageDestination) Close() error {
if d.repo != nil {
C.g_object_unref(C.gpointer(d.repo))
}
return os.RemoveAll(d.tmpDirPath)
}
@@ -82,8 +108,8 @@ func (d *ostreeImageDestination) SupportsSignatures() error {
}
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
func (d *ostreeImageDestination) ShouldCompressLayers() bool {
return false
func (d *ostreeImageDestination) DesiredLayerCompression() types.LayerCompression {
return types.PreserveOriginal
}
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
@@ -97,7 +123,7 @@ func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
return true
}
func (d *ostreeImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
func (d *ostreeImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
if err != nil {
return types.BlobInfo{}, err
@@ -130,7 +156,7 @@ func (d *ostreeImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}
func fixFiles(dir string, usermode bool) error {
func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
entries, err := ioutil.ReadDir(dir)
if err != nil {
return err
@@ -144,13 +170,43 @@ func fixFiles(dir string, usermode bool) error {
}
continue
}
if selinuxHnd != nil {
relPath, err := filepath.Rel(root, fullpath)
if err != nil {
return err
}
// Handle /exports/hostfs as a special case. Files under this directory are copied to the host,
// thus we benefit from maintaining the same SELinux label they would have on the host as we could
// use hard links instead of copying the files.
relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/"))
relPathC := C.CString(relPath)
defer C.free(unsafe.Pointer(relPathC))
var context *C.char
res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm))
if int(res) < 0 && err != syscall.ENOENT {
return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath)
}
if int(res) == 0 {
defer C.freecon(context)
fullpathC := C.CString(fullpath)
defer C.free(unsafe.Pointer(fullpathC))
res, err = C.lsetfilecon_raw(fullpathC, context)
if int(res) < 0 {
return errors.Wrapf(err, "cannot setfilecon_raw %s", fullpath)
}
}
}
if info.IsDir() {
if usermode {
if err := os.Chmod(fullpath, info.Mode()|0700); err != nil {
return err
}
}
err = fixFiles(fullpath, usermode)
err = fixFiles(selinuxHnd, root, fullpath, usermode)
if err != nil {
return err
}
@@ -174,7 +230,39 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin
return err
}
func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToImport) error {
func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
mfz := gzip.NewWriter(output)
defer mfz.Close()
metaPacker := storage.NewJSONPacker(mfz)
stream, err := os.OpenFile(file, os.O_RDONLY, 0)
if err != nil {
return "", -1, err
}
defer stream.Close()
gzReader, err := archive.DecompressStream(stream)
if err != nil {
return "", -1, err
}
defer gzReader.Close()
its, err := asm.NewInputTarStream(gzReader, metaPacker, nil)
if err != nil {
return "", -1, err
}
digester := digest.Canonical.Digester()
written, err := io.Copy(digester.Hash(), its)
if err != nil {
return "", -1, err
}
return digester.Digest(), written, nil
}
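A package-internal usage sketch (the path is hypothetical; bytes and fmt are already imported in this file): the returned digest and size describe the uncompressed stream, while the buffer collects gzipped tar-split JSON that lets the original compressed tarball be reassembled bit-for-bit later.

// exampleTarSplit records tar-split metadata for one compressed layer blob.
func exampleTarSplit(layerPath string) error {
	var tarSplitOutput bytes.Buffer
	uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, layerPath)
	if err != nil {
		return err
	}
	fmt.Printf("diffID=%s uncompressed=%d metadata=%d bytes\n",
		uncompressedDigest, uncompressedSize, tarSplitOutput.Len())
	return nil
}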
func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root")
if err := ensureDirectoryExists(destinationPath); err != nil {
@@ -185,11 +273,17 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm
os.RemoveAll(destinationPath)
}()
var tarSplitOutput bytes.Buffer
uncompressedDigest, uncompressedSize, err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath)
if err != nil {
return err
}
if os.Getuid() == 0 {
if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil {
return err
}
if err := fixFiles(destinationPath, false); err != nil {
if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil {
return err
}
} else {
@@ -198,32 +292,51 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm
return err
}
if err := fixFiles(destinationPath, true); err != nil {
if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil {
return err
}
}
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size),
fmt.Sprintf("docker.uncompressed_size=%d", uncompressedSize),
fmt.Sprintf("docker.uncompressed_digest=%s", uncompressedDigest.String()),
fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))})
}
func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
destinationPath := filepath.Dir(blob.BlobPath)
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}
func (d *ostreeImageDestination) importConfig(blob *blobToImport) error {
ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex())
return exec.Command("ostree", "commit",
"--repo", d.ref.repo,
fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size),
"--branch", ostreeBranch, filepath.Dir(blob.BlobPath)).Run()
}
func (d *ostreeImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
output, err := exec.Command("ostree", "show", "--repo", d.ref.repo, "--print-metadata-key=docker.size", branch).CombinedOutput()
if err != nil {
if bytes.Index(output, []byte("not found")) >= 0 || bytes.Index(output, []byte("No such")) >= 0 {
return false, -1, nil
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
return false, 0, err
}
return false, -1, err
d.repo = repo
}
size, err := strconv.ParseInt(strings.Trim(string(output), "'\n"), 10, 64)
branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return found, -1, err
}
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
return found, -1, err
}
found, data, err = readMetadata(d.repo, branch, "docker.size")
if err != nil || !found {
return found, -1, err
}
size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
return false, -1, err
}
@@ -272,6 +385,7 @@ func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error {
return err
}
}
d.signaturesLen = len(signatures)
return nil
}
@@ -286,24 +400,48 @@ func (d *ostreeImageDestination) Commit() error {
return err
}
for _, layer := range d.schema.LayersDescriptors {
hash := layer.Digest.Hex()
var selinuxHnd *C.struct_selabel_handle
if os.Getuid() == 0 && selinux.GetEnabled() {
selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0)
if selinuxHnd == nil {
return errors.Wrapf(err, "cannot open the SELinux DB")
}
defer C.selabel_close(selinuxHnd)
}
checkLayer := func(hash string) error {
blob := d.blobs[hash]
// if the blob is not present in d.blobs then it is already stored in OSTree,
// and we don't need to import it.
if blob == nil {
continue
return nil
}
err := d.importBlob(repo, blob)
err := d.importBlob(selinuxHnd, repo, blob)
if err != nil {
return err
}
delete(d.blobs, hash)
return nil
}
for _, layer := range d.schema.LayersDescriptors {
hash := layer.Digest.Hex()
if err = checkLayer(hash); err != nil {
return err
}
}
for _, layer := range d.schema.FSLayers {
hash := layer.BlobSum.Hex()
if err = checkLayer(hash); err != nil {
return err
}
}
hash := d.schema.ConfigDescriptor.Digest.Hex()
blob := d.blobs[hash]
if blob != nil {
err := d.importConfig(blob)
// Import the other blobs that are not layers
for _, blob := range d.blobs {
err := d.importConfig(repo, blob)
if err != nil {
return err
}
@@ -311,7 +449,9 @@ func (d *ostreeImageDestination) Commit() error {
manifestPath := filepath.Join(d.tmpDirPath, "manifest")
metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), fmt.Sprintf("docker.digest=%s", string(d.digest))}
metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
fmt.Sprintf("signatures=%d", d.signaturesLen),
fmt.Sprintf("docker.digest=%s", string(d.digest))}
err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata)
_, err = repo.CommitTransaction()

vendor/github.com/containers/image/ostree/ostree_src.go (generated, vendored, new file)

@@ -0,0 +1,408 @@
// +build !containers_image_ostree_stub
package ostree
import (
"bytes"
"compress/gzip"
"context"
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"strconv"
"strings"
"unsafe"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/ioutils"
"github.com/opencontainers/go-digest"
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
"github.com/pkg/errors"
"github.com/vbatts/tar-split/tar/asm"
"github.com/vbatts/tar-split/tar/storage"
)
// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1
// #include <glib.h>
// #include <glib-object.h>
// #include <gio/gio.h>
// #include <stdlib.h>
// #include <ostree.h>
// #include <gio/ginputstream.h>
import "C"
type ostreeImageSource struct {
ref ostreeReference
tmpDir string
repo *C.struct_OstreeRepo
// get the compressed layer by its uncompressed checksum
compressed map[digest.Digest]digest.Digest
}
// newImageSource returns an ImageSource for reading from an existing OSTree repository.
func newImageSource(ctx *types.SystemContext, tmpDir string, ref ostreeReference) (types.ImageSource, error) {
return &ostreeImageSource{ref: ref, tmpDir: tmpDir, compressed: nil}, nil
}
// Reference returns the reference used to set up this source.
func (s *ostreeImageSource) Reference() types.ImageReference {
return s.ref
}
// Close removes resources associated with an initialized ImageSource, if any.
func (s *ostreeImageSource) Close() error {
if s.repo != nil {
C.g_object_unref(C.gpointer(s.repo))
}
return nil
}
func (s *ostreeImageSource) getLayerSize(blob string) (int64, error) {
b := fmt.Sprintf("ociimage/%s", blob)
found, data, err := readMetadata(s.repo, b, "docker.size")
if err != nil || !found {
return 0, err
}
return strconv.ParseInt(data, 10, 64)
}
func (s *ostreeImageSource) getLenSignatures() (int64, error) {
b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
found, data, err := readMetadata(s.repo, b, "signatures")
if err != nil {
return -1, err
}
if !found {
// if 'signatures' is not present, just return 0 signatures.
return 0, nil
}
return strconv.ParseInt(data, 10, 64)
}
func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) {
b := fmt.Sprintf("ociimage/%s", blob)
found, out, err := readMetadata(s.repo, b, "tarsplit.output")
if err != nil || !found {
return nil, err
}
return base64.StdEncoding.DecodeString(out)
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
func (s *ostreeImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) {
if instanceDigest != nil {
return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`)
}
if s.repo == nil {
repo, err := openRepo(s.ref.repo)
if err != nil {
return nil, "", err
}
s.repo = repo
}
b := fmt.Sprintf("ociimage/%s", s.ref.branchName)
found, out, err := readMetadata(s.repo, b, "docker.manifest")
if err != nil {
return nil, "", err
}
if !found {
return nil, "", errors.New("manifest not found")
}
m := []byte(out)
return m, manifest.GuessMIMEType(m), nil
}
func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
return nil, "", errors.New("manifest lists are not supported by this transport")
}
func openRepo(path string) (*C.struct_OstreeRepo, error) {
var cerr *C.GError
cpath := C.CString(path)
defer C.free(unsafe.Pointer(cpath))
pathc := C.g_file_new_for_path(cpath)
defer C.g_object_unref(C.gpointer(pathc))
repo := C.ostree_repo_new(pathc)
r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
if !r {
C.g_object_unref(C.gpointer(repo))
return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
return repo, nil
}
type ostreePathFileGetter struct {
repo *C.struct_OstreeRepo
parentRoot *C.GFile
}
type ostreeReader struct {
stream *C.GFileInputStream
}
func (o ostreeReader) Close() error {
C.g_object_unref(C.gpointer(o.stream))
return nil
}
func (o ostreeReader) Read(p []byte) (int, error) {
var cerr *C.GError
instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type())
stream := (*C.GInputStream)(unsafe.Pointer(instanceCast))
b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr)
if b == nil {
return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
defer C.g_bytes_unref(b)
count := int(C.g_bytes_get_size(b))
if count == 0 {
return 0, io.EOF
}
data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count]
copy(p, data)
return count, nil
}
func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) {
var cerr *C.GError
var ref *C.char
defer C.free(unsafe.Pointer(ref))
cCommit := C.CString(commit)
defer C.free(unsafe.Pointer(cCommit))
if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) {
return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
if ref == nil {
return false, "", nil
}
var variant *C.GVariant
if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) {
return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
defer C.g_variant_unref(variant)
if variant != nil {
cKey := C.CString(key)
defer C.free(unsafe.Pointer(cKey))
metadata := C.g_variant_get_child_value(variant, 0)
defer C.g_variant_unref(metadata)
data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil)
if data != nil {
defer C.g_variant_unref(data)
ptr := (*C.char)(C.g_variant_get_string(data, nil))
val := C.GoString(ptr)
return true, val, nil
}
}
return false, "", nil
}
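// Usage sketch for the helper above (mirrors the callers in this file; the
// blobDigest variable is an assumed digest.Digest, error handling elided):
//
//	found, data, err := readMetadata(repo, "ociimage/"+blobDigest.Hex(), "docker.size")
//	if err == nil && found {
//		size, _ := strconv.ParseInt(data, 10, 64)
//		_ = size
//	}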
func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) {
var cerr *C.GError
var parentRoot *C.GFile
cCommit := C.CString(commit)
defer C.free(unsafe.Pointer(cCommit))
if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) {
return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
C.g_object_ref(C.gpointer(repo))
return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil
}
func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) {
var file *C.GFile
if strings.HasPrefix(filename, "./") {
filename = filename[2:]
}
cfilename := C.CString(filename)
defer C.free(unsafe.Pointer(cfilename))
file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename))
var cerr *C.GError
stream := C.g_file_read(file, nil, &cerr)
if stream == nil {
return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr)))
}
return &ostreeReader{stream: stream}, nil
}
func (o ostreePathFileGetter) Close() {
C.g_object_unref(C.gpointer(o.repo))
C.g_object_unref(C.gpointer(o.parentRoot))
}
func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) {
getter, err := newOSTreePathFileGetter(s.repo, commit)
if err != nil {
return nil, err
}
defer getter.Close()
return getter.Get(path)
}
// GetBlob returns a stream for the specified blob, and the blob's size.
func (s *ostreeImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
blob := info.Digest.Hex()
// Ensure s.compressed is initialized. It is built by LayerInfosForCopy.
if s.compressed == nil {
_, err := s.LayerInfosForCopy()
if err != nil {
return nil, -1, err
}
}
compressedBlob, found := s.compressed[info.Digest]
if found {
blob = compressedBlob.Hex()
}
branch := fmt.Sprintf("ociimage/%s", blob)
if s.repo == nil {
repo, err := openRepo(s.ref.repo)
if err != nil {
return nil, 0, err
}
s.repo = repo
}
layerSize, err := s.getLayerSize(blob)
if err != nil {
return nil, 0, err
}
tarsplit, err := s.getTarSplitData(blob)
if err != nil {
return nil, 0, err
}
// if tarsplit is nil, we are looking at the manifest; return the file in /content directly
if tarsplit == nil {
file, err := s.readSingleFile(branch, "/content")
if err != nil {
return nil, 0, err
}
return file, layerSize, nil
}
mf := bytes.NewReader(tarsplit)
mfz, err := gzip.NewReader(mf)
if err != nil {
return nil, 0, err
}
defer mfz.Close()
metaUnpacker := storage.NewJSONUnpacker(mfz)
getter, err := newOSTreePathFileGetter(s.repo, branch)
if err != nil {
return nil, 0, err
}
ots := asm.NewOutputTarStream(getter, metaUnpacker)
pipeReader, pipeWriter := io.Pipe()
go func() {
io.Copy(pipeWriter, ots)
pipeWriter.Close()
}()
rc := ioutils.NewReadCloserWrapper(pipeReader, func() error {
getter.Close()
return ots.Close()
})
return rc, layerSize, nil
}
func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
if instanceDigest != nil {
return nil, errors.New("manifest lists are not supported by this transport")
}
lenSignatures, err := s.getLenSignatures()
if err != nil {
return nil, err
}
branch := fmt.Sprintf("ociimage/%s", s.ref.branchName)
if s.repo == nil {
repo, err := openRepo(s.ref.repo)
if err != nil {
return nil, err
}
s.repo = repo
}
signatures := [][]byte{}
for i := int64(1); i <= lenSignatures; i++ {
sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i))
if err != nil {
return nil, err
}
defer sigReader.Close()
sig, err := ioutil.ReadAll(sigReader)
if err != nil {
return nil, err
}
signatures = append(signatures, sig)
}
return signatures, nil
}
// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
// the image, after they've been decompressed.
func (s *ostreeImageSource) LayerInfosForCopy() ([]types.BlobInfo, error) {
updatedBlobInfos := []types.BlobInfo{}
manifestBlob, manifestType, err := s.GetManifest(nil)
if err != nil {
return nil, err
}
man, err := manifest.FromBlob(manifestBlob, manifestType)
s.compressed = make(map[digest.Digest]digest.Digest)
layerBlobs := man.LayerInfos()
for _, layerBlob := range layerBlobs {
branch := fmt.Sprintf("ociimage/%s", layerBlob.Digest.Hex())
found, uncompressedDigestStr, err := readMetadata(s.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return nil, err
}
found, uncompressedSizeStr, err := readMetadata(s.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
return nil, err
}
uncompressedSize, err := strconv.ParseInt(uncompressedSizeStr, 10, 64)
if err != nil {
return nil, err
}
uncompressedDigest := digest.Digest(uncompressedDigestStr)
blobInfo := types.BlobInfo{
Digest: uncompressedDigest,
Size: uncompressedSize,
MediaType: layerBlob.MediaType,
}
s.compressed[uncompressedDigest] = layerBlob.Digest
updatedBlobInfos = append(updatedBlobInfos, blobInfo)
}
return updatedBlobInfos, nil
}
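// Hypothetical caller sketch (src is an assumed, initialized image source):
// a copy pipeline asks for the substitute layer infos first, then fetches
// blobs by their uncompressed digests; GetBlob maps those back to the stored
// compressed branches through s.compressed.
//
//	infos, err := src.LayerInfosForCopy()
//	if err != nil {
//		return err
//	}
//	for _, info := range infos {
//		rc, size, err := src.GetBlob(info)
//		// ...
//	}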


@@ -10,12 +10,12 @@ import (
"regexp"
"strings"
"github.com/pkg/errors"
"github.com/containers/image/directory/explicitfilepath"
"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/pkg/errors"
)
const defaultOSTreeRepo = "/ostree/repo"
@@ -66,6 +66,11 @@ type ostreeReference struct {
repo string
}
type ostreeImageCloser struct {
types.ImageCloser
size int64
}
func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
var repo = ""
var image = ""
@@ -110,7 +115,7 @@ func NewReference(image string, repo string) (types.ImageReference, error) {
// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
// from being ambiguous with values of PolicyConfigurationIdentity.
if strings.Contains(resolved, ":") {
return nil, errors.Errorf("Invalid OSTreeCI reference %s@%s: path %s contains a colon", image, repo, resolved)
return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
}
return ostreeReference{
@@ -168,19 +173,38 @@ func (ref ostreeReference) PolicyConfigurationNamespaces() []string {
return res
}
func (s *ostreeImageCloser) Size() (int64, error) {
return s.size, nil
}
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned ImageCloser.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
return nil, errors.New("Reading ostree: images is currently not supported")
var tmpDir string
if ctx == nil || ctx.OSTreeTmpDirPath == "" {
tmpDir = os.TempDir()
} else {
tmpDir = ctx.OSTreeTmpDirPath
}
src, err := newImageSource(ctx, tmpDir, ref)
if err != nil {
return nil, err
}
return image.FromSource(ctx, src)
}
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref ostreeReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
return nil, errors.New("Reading ostree: images is currently not supported")
var tmpDir string
if ctx == nil || ctx.OSTreeTmpDirPath == "" {
tmpDir = os.TempDir()
} else {
tmpDir = ctx.OSTreeTmpDirPath
}
return newImageSource(ctx, tmpDir, ref)
}
// NewImageDestination returns a types.ImageDestination for this reference.


@@ -27,7 +27,7 @@ type dockerConfigFile struct {
}
const (
defaultPath = "/run/user"
defaultPath = "/run"
authCfg = "containers"
authCfgFileName = "auth.json"
dockerCfg = ".docker"
@@ -64,7 +64,11 @@ func GetAuthentication(ctx *types.SystemContext, registry string) (string, strin
}
dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyCfg)
paths := [3]string{getPathToAuth(ctx), filepath.Join(homedir.Get(), dockerCfg, dockerCfgFileName), dockerLegacyPath}
pathToAuth, err := getPathToAuth(ctx)
if err != nil {
return "", "", err
}
paths := [3]string{pathToAuth, filepath.Join(homedir.Get(), dockerCfg, dockerCfgFileName), dockerLegacyPath}
for _, path := range paths {
legacyFormat := path == dockerLegacyPath
@@ -82,13 +86,16 @@ func GetAuthentication(ctx *types.SystemContext, registry string) (string, strin
// GetUserLoggedIn returns the username logged in to registry from either
// auth.json or XDG_RUNTIME_DIR
// Used to tell the user if someone is logged in to the registry when logging in
func GetUserLoggedIn(ctx *types.SystemContext, registry string) string {
path := getPathToAuth(ctx)
func GetUserLoggedIn(ctx *types.SystemContext, registry string) (string, error) {
path, err := getPathToAuth(ctx)
if err != nil {
return "", err
}
username, _, _ := findAuthentication(registry, path, false)
if username != "" {
return username
return username, nil
}
return ""
return "", nil
}
// RemoveAuthentication deletes the credentials stored in auth.json
@@ -123,20 +130,30 @@ func RemoveAllAuthentication(ctx *types.SystemContext) error {
// The path can be overridden by the user if the overwrite-path flag is set
// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers
// Otherwise, the auth.json file is stored in /run/containers/UID
func getPathToAuth(ctx *types.SystemContext) string {
func getPathToAuth(ctx *types.SystemContext) (string, error) {
if ctx != nil {
if ctx.AuthFilePath != "" {
return ctx.AuthFilePath
return ctx.AuthFilePath, nil
}
if ctx.RootForImplicitAbsolutePaths != "" {
return filepath.Join(ctx.RootForImplicitAbsolutePaths, defaultPath, strconv.Itoa(os.Getuid()), authCfg, authCfgFileName)
return filepath.Join(ctx.RootForImplicitAbsolutePaths, defaultPath, strconv.Itoa(os.Getuid()), authCfg, authCfgFileName), nil
}
}
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir == "" {
runtimeDir = filepath.Join(defaultPath, strconv.Itoa(os.Getuid()))
if runtimeDir != "" {
_, err := os.Stat(runtimeDir)
if os.IsNotExist(err) {
// This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory
// or made a typo while setting the environment variable
// so we return an error rather than guessing at a path
return "", errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir)
}
runtimeDir = filepath.Join(runtimeDir, authCfg)
} else {
runtimeDir = filepath.Join(defaultPath, authCfg, strconv.Itoa(os.Getuid()))
}
return filepath.Join(runtimeDir, authCfg, authCfgFileName)
return filepath.Join(runtimeDir, authCfgFileName), nil
}
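// Illustrative resolution order (paths assumed, not from this change):
//
//	ctx.AuthFilePath == "/tmp/auth.json"         -> "/tmp/auth.json"
//	XDG_RUNTIME_DIR == "/run/user/1000" (exists) -> "/run/user/1000/containers/auth.json"
//	neither set, running as UID 0                -> "/run/containers/0/auth.json"
//
//	os.Setenv("XDG_RUNTIME_DIR", "/run/user/1000") // the directory must exist
//	path, _ := getPathToAuth(nil)                  // "/run/user/1000/containers/auth.json"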
// readJSONFile unmarshals the authentications stored in the auth.json file and returns it
@@ -167,10 +184,14 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
// modifyJSON writes to auth.json if the dockerConfigFile has been updated
func modifyJSON(ctx *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error {
path := getPathToAuth(ctx)
path, err := getPathToAuth(ctx)
if err != nil {
return err
}
dir := filepath.Dir(path)
if _, err := os.Stat(dir); os.IsNotExist(err) {
if err = os.Mkdir(dir, 0700); err != nil {
if err = os.MkdirAll(dir, 0700); err != nil {
return errors.Wrapf(err, "error creating directory %q", dir)
}
}

File diff suppressed because it is too large.


@@ -8,6 +8,7 @@ import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -20,9 +21,11 @@ type storageReference struct {
reference string
id string
name reference.Named
tag string
digest digest.Digest
}
func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference {
func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference {
// We take a copy of the transport, which contains a pointer to the
// store that it used for resolving this reference, so that the
// transport that we'll return from Transport() won't be affected by
@@ -32,6 +35,8 @@ func newReference(transport storageTransport, reference, id string, name referen
reference: reference,
id: id,
name: name,
tag: tag,
digest: digest,
}
}
@@ -39,25 +44,49 @@ func newReference(transport storageTransport, reference, id string, name referen
// one present with the same name or ID, and return the image.
func (s *storageReference) resolveImage() (*storage.Image, error) {
if s.id == "" {
// Look for an image that has the expanded reference name as an explicit Name value.
image, err := s.transport.store.Image(s.reference)
if image != nil && err == nil {
s.id = image.ID
}
}
if s.id == "" && s.name != nil && s.digest != "" {
// Look for an image with the specified digest that has the same name,
// though possibly with a different tag or digest, as a Name value, so
// that the canonical reference can be implicitly resolved to the image.
images, err := s.transport.store.ImagesByDigest(s.digest)
if images != nil && err == nil {
repo := reference.FamiliarName(reference.TrimNamed(s.name))
search:
for _, image := range images {
for _, name := range image.Names {
if named, err := reference.ParseNormalizedNamed(name); err == nil {
if reference.FamiliarName(reference.TrimNamed(named)) == repo {
s.id = image.ID
break search
}
}
}
}
}
}
if s.id == "" {
logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport())
return nil, ErrNoSuchImage
logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport())
}
img, err := s.transport.store.Image(s.id)
if err != nil {
return nil, errors.Wrapf(err, "error reading image %q", s.id)
}
if s.reference != "" {
if s.name != nil {
repo := reference.FamiliarName(reference.TrimNamed(s.name))
nameMatch := false
for _, name := range img.Names {
if name == s.reference {
nameMatch = true
break
if named, err := reference.ParseNormalizedNamed(name); err == nil {
if reference.FamiliarName(reference.TrimNamed(named)) == repo {
nameMatch = true
break
}
}
}
if !nameMatch {
@@ -78,8 +107,21 @@ func (s storageReference) Transport() types.ImageTransport {
}
}
// Return a name with a tag, if we have a name to base them on.
// Return a name with a tag or digest, if we have either, else return it bare.
func (s storageReference) DockerReference() reference.Named {
if s.name == nil {
return nil
}
if s.tag != "" {
if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil {
return namedTagged
}
}
if s.digest != "" {
if canonical, err := reference.WithDigest(s.name, s.digest); err == nil {
return canonical
}
}
return s.name
}
@@ -93,7 +135,7 @@ func (s storageReference) StringWithinTransport() string {
optionsList = ":" + strings.Join(options, ",")
}
storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]"
if s.name == nil {
if s.reference == "" {
return storeSpec + "@" + s.id
}
if s.id == "" {
@@ -122,11 +164,8 @@ func (s storageReference) PolicyConfigurationNamespaces() []string {
driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]"
namespaces := []string{}
if s.name != nil {
if s.id != "" {
// The reference without the ID is also a valid namespace.
namespaces = append(namespaces, storeSpec+s.reference)
}
components := strings.Split(s.name.Name(), "/")
name := reference.TrimNamed(s.name)
components := strings.Split(name.String(), "/")
for len(components) > 0 {
namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
components = components[:len(components)-1]
@@ -166,5 +205,5 @@ func (s storageReference) NewImageSource(ctx *types.SystemContext) (types.ImageS
}
func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(s)
return newImageDestination(ctx, s)
}


@@ -13,11 +13,14 @@ import (
"github.com/containers/image/types"
"github.com/containers/storage"
"github.com/containers/storage/pkg/idtools"
"github.com/opencontainers/go-digest"
ddigest "github.com/opencontainers/go-digest"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
const (
minimumTruncatedIDLength = 3
)
func init() {
transports.Register(Transport)
}
@@ -103,69 +106,133 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
var name reference.Named
var sum digest.Digest
var err error
if ref == "" {
return nil, ErrInvalidReference
return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref)
}
if ref[0] == '[' {
// Ignore the store specifier.
closeIndex := strings.IndexRune(ref, ']')
if closeIndex < 1 {
return nil, ErrInvalidReference
return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref)
}
ref = ref[closeIndex+1:]
}
refInfo := strings.SplitN(ref, "@", 2)
if len(refInfo) == 1 {
// A name.
name, err = reference.ParseNormalizedNamed(refInfo[0])
if err != nil {
return nil, err
// The last segment, if there's more than one, is either a digest from a reference, or an image ID.
split := strings.LastIndex(ref, "@")
idOrDigest := ""
if split != -1 {
// Peel off that last bit so that we can work on the rest.
idOrDigest = ref[split+1:]
if idOrDigest == "" {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
}
} else if len(refInfo) == 2 {
// An ID, possibly preceded by a name.
if refInfo[0] != "" {
name, err = reference.ParseNormalizedNamed(refInfo[0])
if err != nil {
return nil, err
}
}
sum, err = digest.Parse(refInfo[1])
if err != nil || sum.Validate() != nil {
sum, err = digest.Parse("sha256:" + refInfo[1])
if err != nil || sum.Validate() != nil {
return nil, err
}
}
} else { // Coverage: len(refInfo) is always 1 or 2
// Anything else: store specified in a form we don't
// recognize.
return nil, ErrInvalidReference
ref = ref[:split]
}
// The middle segment (now the last segment), if there is one, is a digest.
split = strings.LastIndex(ref, "@")
sum := digest.Digest("")
if split != -1 {
sum = digest.Digest(ref[split+1:])
if sum == "" {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
}
ref = ref[:split]
}
// If we have something that unambiguously should be a digest, validate it, and then the third part,
// if we have one, as an ID.
id := ""
if sum != "" {
if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest)
}
if err := sum.Validate(); err != nil {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum)
}
id = idOrDigest
if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) {
// The ID is a truncated version of the ID of an image that's present in local storage,
// so we might as well use the expanded value.
id = img.ID
}
} else if idOrDigest != "" {
// There was no middle portion, so the final portion could be either a digest or an ID.
if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil {
// It's an ID.
id = idOrDigest
} else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil {
// It's a digest.
sum = idSum
} else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) {
// It's a truncated version of the ID of an image that's present in local storage,
// and we may need the expanded value.
id = img.ID
} else {
return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest)
}
}
// If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's
// at least what we guess is a reasonable minimum length, because we don't want a really short value
// like "a" matching an image by ID prefix when the input was actually meant to specify an image name.
if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" {
if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) {
// It's a truncated version of the ID of an image that's present in local storage;
// we need to expand it.
id = img.ID
ref = ""
}
}
// The initial portion is probably a name, possibly with a tag.
if ref != "" {
var err error
if name, err = reference.ParseNormalizedNamed(ref); err != nil {
return nil, errors.Wrapf(err, "error parsing named reference %q", ref)
}
}
if name == nil && sum == "" && id == "" {
return nil, errors.Errorf("error parsing reference")
}
// Construct a copy of the store spec.
optionsList := ""
options := store.GraphOptions()
if len(options) > 0 {
optionsList = ":" + strings.Join(options, ",")
}
storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]"
id := ""
if sum.Validate() == nil {
id = sum.Hex()
}
// Convert the name back into a reference string, if we got a name.
refname := ""
tag := ""
if name != nil {
name = reference.TagNameOnly(name)
refname = verboseName(name)
if sum.Validate() == nil {
canonical, err := reference.WithDigest(name, sum)
if err != nil {
return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum)
}
refname = verboseName(canonical)
} else {
name = reference.TagNameOnly(name)
tagged, ok := name.(reference.Tagged)
if !ok {
return nil, errors.Errorf("error parsing possibly-tagless name %q", ref)
}
refname = verboseName(name)
tag = tagged.Tag()
}
}
if refname == "" {
logrus.Debugf("parsed reference into %q", storeSpec+"@"+id)
logrus.Debugf("parsed reference to id into %q", storeSpec+"@"+id)
} else if id == "" {
logrus.Debugf("parsed reference into %q", storeSpec+refname)
logrus.Debugf("parsed reference to refname into %q", storeSpec+refname)
} else {
logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id)
logrus.Debugf("parsed reference to refname@id into %q", storeSpec+refname+"@"+id)
}
return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil
return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil
}
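// Hypothetical inputs the rewritten parser now accepts (store is an assumed,
// opened storage.Store; the digest is a placeholder):
//
//	for _, r := range []string{
//		"busybox",                        // name; tag defaults to :latest
//		"busybox:1.29",                   // name:tag
//		"busybox@sha256:<64 hex digits>", // name@digest
//		"@0123456789abcdef",              // truncated image ID
//	} {
//		if _, err := Transport.ParseStoreReference(store, r); err != nil {
//			// ...
//		}
//	}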
func (s *storageTransport) GetStore() (storage.Store, error) {
@@ -184,11 +251,14 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
return s.store, nil
}
// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"),
// ParseReference takes a name and a tag or digest and/or ID
// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"),
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or
// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]",
// tries to figure out which it is, and returns it in a reference object.
// If _id_ is the ID of an image that's present in local storage, it can be truncated, and
// even be specified as if it were a _name_ value.
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
var store storage.Store
// Check if there's a store location prefix. If there is, then it
@@ -267,17 +337,23 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
dref := ref.DockerReference()
if dref == nil {
if sref, ok := ref.(*storageReference); ok {
if sref.id != "" {
if img, err := store.Image(sref.id); err == nil {
return img, nil
}
if dref != nil {
if img, err := store.Image(verboseName(dref)); err == nil {
return img, nil
}
}
if sref, ok := ref.(*storageReference); ok {
if sref.id != "" {
if img, err := store.Image(sref.id); err == nil {
return img, nil
}
}
return nil, ErrInvalidReference
tmpRef := *sref
if img, err := tmpRef.resolveImage(); err == nil {
return img, nil
}
}
return store.Image(verboseName(dref))
return nil, storage.ErrImageUnknown
}
func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
@@ -337,7 +413,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
if err != nil {
return err
}
_, err = ddigest.Parse("sha256:" + scopeInfo[1])
_, err = digest.Parse("sha256:" + scopeInfo[1])
if err != nil {
return err
}
@@ -347,11 +423,28 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
return nil
}
func verboseName(name reference.Named) string {
name = reference.TagNameOnly(name)
tag := ""
if tagged, ok := name.(reference.NamedTagged); ok {
tag = ":" + tagged.Tag()
func verboseName(r reference.Reference) string {
if r == nil {
return ""
}
return name.Name() + tag
named, isNamed := r.(reference.Named)
digested, isDigested := r.(reference.Digested)
tagged, isTagged := r.(reference.Tagged)
name := ""
tag := ""
sum := ""
if isNamed {
name = (reference.TrimNamed(named)).String()
}
if isTagged {
if tagged.Tag() != "" {
tag = ":" + tagged.Tag()
}
}
if isDigested {
if digested.Digest().Validate() == nil {
sum = "@" + digested.Digest().String()
}
}
return name + tag + sum
}
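// Expected shapes (illustrative; imgDigest is an assumed digest.Digest and
// the printed digests are shortened):
//
//	named, _ := reference.ParseNormalizedNamed("busybox:latest")
//	fmt.Println(verboseName(named)) // "docker.io/library/busybox:latest"
//	canon, _ := reference.WithDigest(reference.TrimNamed(named), imgDigest)
//	fmt.Println(verboseName(canon)) // "docker.io/library/busybox@sha256:..."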


@@ -89,5 +89,5 @@ func (r *tarballReference) DeleteImage(ctx *types.SystemContext) error {
}
func (r *tarballReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
return nil, fmt.Errorf("destination not implemented yet")
return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`)
}


@@ -254,7 +254,7 @@ func (is *tarballImageSource) Reference() types.ImageReference {
return &is.reference
}
// UpdatedLayerInfos() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
func (*tarballImageSource) UpdatedLayerInfos() []types.BlobInfo {
return nil
// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.
func (*tarballImageSource) LayerInfosForCopy() ([]types.BlobInfo, error) {
return nil, nil
}


@@ -1,4 +1,4 @@
// +build !containers_image_ostree_stub
// +build !containers_image_ostree_stub,linux
package alltransports


@@ -1,4 +1,4 @@
// +build containers_image_ostree_stub
// +build containers_image_ostree_stub !linux
package alltransports


@@ -126,8 +126,25 @@ type ImageSource interface {
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
// (e.g. if the source never returns manifest lists).
GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
LayerInfosForCopy() ([]BlobInfo, error)
}
// LayerCompression indicates if layers must be compressed, decompressed or preserved
type LayerCompression int
const (
// PreserveOriginal indicates the layer must be preserved, ie
// no compression or decompression.
PreserveOriginal LayerCompression = iota
// Decompress indicates the layer must be decompressed
Decompress
// Compress indicates the layer must be compressed
Compress
)
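// A minimal sketch (assumed caller, not part of this interface change) of how
// a copy loop can honor the tri-state preference that replaces
// ShouldCompressLayers:
//
//	switch dest.DesiredLayerCompression() {
//	case types.Compress:
//		// re-compress the layer stream before PutBlob
//	case types.Decompress:
//		// decompress it before PutBlob
//	case types.PreserveOriginal:
//		// hand the blob through unchanged
//	}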
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
//
// There is a specific required order for some of the calls:
@@ -150,8 +167,8 @@ type ImageDestination interface {
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
SupportsSignatures() error
// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
ShouldCompressLayers() bool
// DesiredLayerCompression indicates the kind of compression to apply on layers
DesiredLayerCompression() LayerCompression
// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
AcceptsForeignLayerURLs() bool
@@ -164,7 +181,7 @@ type ImageDestination interface {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error)
PutBlob(stream io.Reader, inputInfo BlobInfo, isConfig bool) (BlobInfo, error)
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
@@ -234,6 +251,10 @@ type Image interface {
// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// WARNING: The list may contain duplicates, and they are semantically relevant.
LayerInfos() []BlobInfo
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// WARNING: The list may contain duplicates, and they are semantically relevant.
LayerInfosForCopy() ([]BlobInfo, error)
// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
// It returns false if the manifest does not embed a Docker reference.
// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
@@ -284,7 +305,7 @@ type ManifestUpdateInformation struct {
// for other manifest types.
type ImageInspectInfo struct {
Tag string
Created time.Time
Created *time.Time
DockerVersion string
Labels map[string]string
Architecture string


@@ -1,5 +1,5 @@
github.com/sirupsen/logrus v1.0.0
github.com/containers/storage 47536c89fcc545a87745e1a1573addc439409165
github.com/containers/storage master
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
@@ -36,4 +36,5 @@ github.com/tchap/go-patricia v2.2.6
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/gogo/protobuf/proto fcdc5011193ff531a548e9b0301828d5a5b97fd8
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
github.com/pquerna/ffjson master


@@ -106,10 +106,25 @@ type containerStore struct {
byname map[string]*Container
}
func copyContainer(c *Container) *Container {
return &Container{
ID: c.ID,
Names: copyStringSlice(c.Names),
ImageID: c.ImageID,
LayerID: c.LayerID,
Metadata: c.Metadata,
BigDataNames: copyStringSlice(c.BigDataNames),
BigDataSizes: copyStringInt64Map(c.BigDataSizes),
BigDataDigests: copyStringDigestMap(c.BigDataDigests),
Created: c.Created,
Flags: copyStringInterfaceMap(c.Flags),
}
}
func (r *containerStore) Containers() ([]Container, error) {
containers := make([]Container, len(r.containers))
for i := range r.containers {
containers[i] = *(r.containers[i])
containers[i] = *copyContainer(r.containers[i])
}
return containers, nil
}
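// Why the deep copies matter (illustrative): mutations on the returned values
// no longer leak into the store's in-memory state.
//
//	containers, _ := r.Containers()
//	containers[0].Names = append(containers[0].Names, "scratch") // modifies only the copy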
@@ -277,7 +292,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
}
err = r.Save()
}
return container, err
return copyContainer(container), err
}
func (r *containerStore) Metadata(id string) (string, error) {
@@ -355,7 +370,7 @@ func (r *containerStore) Delete(id string) error {
func (r *containerStore) Get(id string) (*Container, error) {
if container, ok := r.lookup(id); ok {
return container, nil
return copyContainer(container), nil
}
return nil, ErrContainerUnknown
}
@@ -444,7 +459,7 @@ func (r *containerStore) BigDataNames(id string) ([]string, error) {
if !ok {
return nil, ErrContainerUnknown
}
return c.BigDataNames, nil
return copyStringSlice(c.BigDataNames), nil
}
func (r *containerStore) SetBigData(id, key string, data []byte) error {


@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: containers.go
// source: ./containers.go
package storage


@@ -463,9 +463,9 @@ func (a *Driver) isParent(id, parent string) bool {
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (a *Driver) Diff(id, parent string) (io.ReadCloser, error) {
func (a *Driver) Diff(id, parent, mountLabel string) (io.ReadCloser, error) {
if !a.isParent(id, parent) {
return a.naiveDiff.Diff(id, parent)
return a.naiveDiff.Diff(id, parent, mountLabel)
}
// AUFS doesn't need the parent layer to produce a diff.
@@ -502,9 +502,9 @@ func (a *Driver) applyDiff(id string, diff io.Reader) error {
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
func (a *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) {
if !a.isParent(id, parent) {
return a.naiveDiff.DiffSize(id, parent)
return a.naiveDiff.DiffSize(id, parent, mountLabel)
}
// AUFS doesn't need the parent layer to calculate the diff size.
return directory.Size(path.Join(a.rootPath(), "diff", id))
@@ -513,9 +513,9 @@ func (a *Driver) DiffSize(id, parent string) (size int64, err error) {
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
func (a *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) {
if !a.isParent(id, parent) {
return a.naiveDiff.ApplyDiff(id, parent, diff)
return a.naiveDiff.ApplyDiff(id, parent, mountLabel, diff)
}
// AUFS doesn't need the parent id to apply the diff if it is the direct parent.
@@ -523,14 +523,14 @@ func (a *Driver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err e
return
}
return a.DiffSize(id, parent)
return a.DiffSize(id, parent, mountLabel)
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (a *Driver) Changes(id, parent string) ([]archive.Change, error) {
func (a *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) {
if !a.isParent(id, parent) {
return a.naiveDiff.Changes(id, parent)
return a.naiveDiff.Changes(id, parent, mountLabel)
}
// AUFS doesn't have snapshots, so we need to get changes from all parent


@@ -92,19 +92,19 @@ type ProtoDriver interface {
type DiffDriver interface {
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
Diff(id, parent string) (io.ReadCloser, error)
Diff(id, parent, mountLabel string) (io.ReadCloser, error)
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
Changes(id, parent string) ([]archive.Change, error)
Changes(id, parent, mountLabel string) ([]archive.Change, error)
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The io.Reader must be an uncompressed stream.
ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error)
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
DiffSize(id, parent string) (size int64, err error)
DiffSize(id, parent, mountLabel string) (size int64, err error)
}
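// Hypothetical call site for the widened signatures (the label value is
// assumed): the caller's SELinux mount label is now threaded down to the
// driver's internal mounts instead of the previous hard-coded "".
//
//	arch, err := driver.Diff(layerID, parentID, "system_u:object_r:container_file_t:s0")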
// Driver is the interface for layered/snapshot file system drivers.


@@ -31,10 +31,10 @@ type NaiveDiffDriver struct {
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
// Diff(id, parent string) (io.ReadCloser, error)
// Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff(id, parent string, diff io.Reader) (size int64, err error)
// DiffSize(id, parent string) (size int64, err error)
// Diff(id, parent, mountLabel string) (io.ReadCloser, error)
// Changes(id, parent, mountLabel string) ([]archive.Change, error)
// ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error)
// DiffSize(id, parent, mountLabel string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver {
return &NaiveDiffDriver{ProtoDriver: driver,
uidMaps: uidMaps,
@@ -43,11 +43,11 @@ func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Dr
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) {
func (gdw *NaiveDiffDriver) Diff(id, parent, mountLabel string) (arch io.ReadCloser, err error) {
startTime := time.Now()
driver := gdw.ProtoDriver
layerFs, err := driver.Get(id, "")
layerFs, err := driver.Get(id, mountLabel)
if err != nil {
return nil, err
}
@@ -70,7 +70,7 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
}), nil
}
parentFs, err := driver.Get(parent, "")
parentFs, err := driver.Get(parent, mountLabel)
if err != nil {
return nil, err
}
@@ -101,10 +101,10 @@ func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err err
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
func (gdw *NaiveDiffDriver) Changes(id, parent, mountLabel string) ([]archive.Change, error) {
driver := gdw.ProtoDriver
layerFs, err := driver.Get(id, "")
layerFs, err := driver.Get(id, mountLabel)
if err != nil {
return nil, err
}
@@ -113,7 +113,7 @@ func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error)
parentFs := ""
if parent != "" {
parentFs, err = driver.Get(parent, "")
parentFs, err = driver.Get(parent, mountLabel)
if err != nil {
return nil, err
}
@@ -126,11 +126,11 @@ func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) {
func (gdw *NaiveDiffDriver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) {
driver := gdw.ProtoDriver
// Mount the root filesystem so we can apply the diff/layer.
layerFs, err := driver.Get(id, "")
layerFs, err := driver.Get(id, mountLabel)
if err != nil {
return
}
@@ -151,15 +151,15 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size i
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) {
func (gdw *NaiveDiffDriver) DiffSize(id, parent, mountLabel string) (size int64, err error) {
driver := gdw.ProtoDriver
changes, err := gdw.Changes(id, parent)
changes, err := gdw.Changes(id, parent, mountLabel)
if err != nil {
return
}
layerFs, err := driver.Get(id, "")
layerFs, err := driver.Get(id, mountLabel)
if err != nil {
return
}


@@ -3,7 +3,6 @@
package overlay
import (
"bufio"
"fmt"
"io"
"io/ioutil"
@@ -26,7 +25,6 @@ import (
"github.com/containers/storage/pkg/locker"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/parsers/kernel"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -116,30 +114,14 @@ func init() {
}
// Init returns a native diff driver for the overlay filesystem.
// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned.
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
opts, err := parseOptions(options)
if err != nil {
return nil, err
}
if err := supportsOverlay(); err != nil {
return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs")
}
// require kernel 4.0.0 to ensure multiple lower dirs are supported
v, err := kernel.GetKernelVersion()
if err != nil {
return nil, err
}
if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
if !opts.overrideKernelCheck {
return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
}
logrus.Warn("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update")
}
fsMagic, err := graphdriver.GetFSMagic(home)
if err != nil {
return nil, err
@@ -153,40 +135,28 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
logrus.Errorf("'overlay' is not supported over %s", backingFs)
return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs)
case graphdriver.FsMagicBtrfs:
// Support for OverlayFS on BTRFS was added in kernel 4.7
// See https://btrfs.wiki.kernel.org/index.php/Changelog
if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 {
if !opts.overrideKernelCheck {
logrus.Errorf("'overlay' requires kernel 4.7 to use on %s", backingFs)
return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' requires kernel 4.7 to use on %s", backingFs)
}
logrus.Warn("Using pre-4.7.0 kernel for overlay on btrfs, may require kernel update")
}
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
// Create the driver home dir
if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
if err := mount.MakePrivate(home); err != nil {
return nil, err
supportsDType, err := supportsOverlay(home, fsMagic, rootUID, rootGID)
if err != nil {
os.Remove(filepath.Join(home, linkDir))
os.Remove(home)
return nil, errors.Wrap(err, "kernel does not support overlay fs")
}
supportsDType, err := fsutils.SupportsDType(home)
if err != nil {
if err := mount.MakePrivate(home); err != nil {
return nil, err
}
if !supportsDType {
logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs))
// TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4
// return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs)
}
d := &Driver{
name: "overlay",
@@ -210,10 +180,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}
} else if opts.quota.Size > 0 {
// if xfs is not the backing fs then error out if the storage-opt overlay.size is used.
return nil, fmt.Errorf("Storage Option overlay.size only supported for backingFS XFS. Found %v", backingFs)
return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs)
}
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported)
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v", backingFs, projectQuotaSupported, !useNaiveDiff(home))
return d, nil
}
@@ -264,25 +234,57 @@ func parseOptions(options []string) (*overlayOptions, error) {
return o, nil
}
func supportsOverlay() error {
// We can try to modprobe overlay first before looking at
// proc/filesystems for when overlay is supported
func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) {
// We can try to modprobe overlay first
exec.Command("modprobe", "overlay").Run()
f, err := os.Open("/proc/filesystems")
if err != nil {
return err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if s.Text() == "nodev\toverlay" {
return nil
layerDir, err := ioutil.TempDir(home, "compat")
if err == nil {
// Check if reading the directory's contents populates the d_type field, which is required
// for proper operation of the overlay filesystem.
supportsDType, err = fsutils.SupportsDType(layerDir)
if err != nil {
return false, err
}
if !supportsDType {
return false, overlayutils.ErrDTypeNotSupported("overlay", backingFs)
}
// Try a test mount in the specific location we intend to use.
mergedDir := filepath.Join(layerDir, "merged")
lower1Dir := filepath.Join(layerDir, "lower1")
lower2Dir := filepath.Join(layerDir, "lower2")
defer func() {
// Permitted to fail, since the various subdirectories
// can be empty or not even there, and the home might
// legitimately not be empty
_ = unix.Unmount(mergedDir, unix.MNT_DETACH)
_ = os.RemoveAll(layerDir)
_ = os.Remove(home)
}()
_ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID)
_ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID)
_ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID)
flags := fmt.Sprintf("lowerdir=%s:%s", lower1Dir, lower2Dir)
if len(flags) < unix.Getpagesize() {
if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil {
logrus.Debugf("overlay test mount with multiple lowers succeeded")
return supportsDType, nil
}
}
flags = fmt.Sprintf("lowerdir=%s", lower1Dir)
if len(flags) < unix.Getpagesize() {
if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil {
logrus.Errorf("overlay test mount with multiple lowers failed, but succeeded with a single lower")
return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
}
}
logrus.Errorf("'overlay' is not supported over %s at %q", backingFs, home)
return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home)
}
logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.")
}
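The probe above boils down to: build a lowerdir= option string, check that it fits in one page (mount(2) copies its data argument into a single page), and attempt a real mount. A condensed sketch of the same sequence, using unix.Mount directly instead of the vendored mountFrom helper and assuming merged/lower1/lower2 already exist:

// probeMultipleLowers reports whether an overlay mount with two lower
// directories succeeds; the test mount is detached again immediately.
func probeMultipleLowers(merged, lower1, lower2 string) bool {
	flags := fmt.Sprintf("lowerdir=%s:%s", lower1, lower2)
	if len(flags) >= unix.Getpagesize() {
		return false // option string would not fit in mount(2)'s data page
	}
	if err := unix.Mount("overlay", merged, "overlay", 0, flags); err != nil {
		return false
	}
	_ = unix.Unmount(merged, unix.MNT_DETACH)
	return true
}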
func useNaiveDiff(home string) bool {
@@ -424,11 +426,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return err
}
// if no parent directory, done
if parent == "" {
return nil
}
if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
return err
}
@@ -436,6 +433,11 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr
return err
}
// if no parent directory, create a dummy lower directory and skip writing a "lowers" file
if parent == "" {
return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID)
}
lower, err := d.getLower(parent)
if err != nil {
return err
@@ -556,11 +558,7 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
diffDir := path.Join(dir, "diff")
lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
if err != nil {
// If no lower, just return diff directory
if os.IsNotExist(err) {
return diffDir, nil
}
if err != nil && !os.IsNotExist(err) {
return "", err
}
@@ -588,6 +586,10 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) {
newlowers = newlowers + ":" + lower
}
}
if len(lowers) == 0 {
newlowers = path.Join(dir, "empty")
lowers = []byte(newlowers)
}
mergedDir := path.Join(dir, "merged")
if count := d.ctr.Increment(mergedDir); count > 1 {
@@ -658,11 +660,7 @@ func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil {
// If no lower, we used the diff directory, so no work to do
if os.IsNotExist(err) {
return nil
}
if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
return err
}
if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
@@ -699,9 +697,9 @@ func (d *Driver) isParent(id, parent string) bool {
}
// ApplyDiff applies the new layer into a root
func (d *Driver) ApplyDiff(id string, parent string, diff io.Reader) (size int64, err error) {
func (d *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) {
if !d.isParent(id, parent) {
return d.naiveDiff.ApplyDiff(id, parent, diff)
return d.naiveDiff.ApplyDiff(id, parent, mountLabel, diff)
}
applyDir := d.getDiffPath(id)
@@ -728,18 +726,23 @@ func (d *Driver) getDiffPath(id string) string {
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
func (d *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) {
if useNaiveDiff(d.home) || !d.isParent(id, parent) {
return d.naiveDiff.DiffSize(id, parent)
return d.naiveDiff.DiffSize(id, parent, mountLabel)
}
return directory.Size(d.getDiffPath(id))
}
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
func (d *Driver) Diff(id, parent, mountLabel string) (io.ReadCloser, error) {
if useNaiveDiff(d.home) || !d.isParent(id, parent) {
return d.naiveDiff.Diff(id, parent)
return d.naiveDiff.Diff(id, parent, mountLabel)
}
lowerDirs, err := d.getLowerDirs(id)
if err != nil {
return nil, err
}
diffPath := d.getDiffPath(id)
@@ -749,14 +752,15 @@ func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
UIDMaps: d.uidMaps,
GIDMaps: d.gidMaps,
WhiteoutFormat: archive.OverlayWhiteoutFormat,
WhiteoutData: lowerDirs,
})
}
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
func (d *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) {
if useNaiveDiff(d.home) || !d.isParent(id, parent) {
return d.naiveDiff.Changes(id, parent)
return d.naiveDiff.Changes(id, parent, mountLabel)
}
// Overlay doesn't have snapshots, so we need to get changes from all parent
// layers.

View File

@@ -3,8 +3,10 @@
package overlayutils
import (
"errors"
"fmt"
"github.com/containers/storage/drivers"
"github.com/pkg/errors"
)
// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type.
@@ -14,5 +16,5 @@ func ErrDTypeNotSupported(driver, backingFs string) error {
msg += " Reformat the filesystem with ftype=1 to enable d_type support."
}
msg += " Running without d_type is not supported."
return errors.New(msg)
return errors.Wrap(graphdriver.ErrNotSupported, msg)
}

View File

@@ -472,7 +472,7 @@ func (d *Driver) Cleanup() error {
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
// The layer should be mounted when calling this function
func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
func (d *Driver) Diff(id, parent, mountLabel string) (_ io.ReadCloser, err error) {
panicIfUsedByLcow()
rID, err := d.resolveID(id)
if err != nil {
@@ -509,7 +509,7 @@ func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
// The layer should not be mounted when calling this function.
func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
func (d *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) {
panicIfUsedByLcow()
rID, err := d.resolveID(id)
if err != nil {
@@ -565,7 +565,7 @@ func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
// The layer should not be mounted when calling this function
func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
func (d *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (int64, error) {
panicIfUsedByLcow()
var layerChain []string
if parent != "" {
@@ -600,14 +600,14 @@ func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
// DiffSize calculates the changes between the specified layer
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
func (d *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) {
panicIfUsedByLcow()
rPId, err := d.resolveID(parent)
if err != nil {
return
}
changes, err := d.Changes(id, rPId)
changes, err := d.Changes(id, rPId, mountLabel)
if err != nil {
return
}

View File

@@ -27,6 +27,9 @@ type Image struct {
// value which was generated by the library.
ID string `json:"id"`
// Digest is a digest value that we can use to locate the image.
Digest digest.Digest `json:"digest,omitempty"`
// Names is an optional set of user-defined convenience values. The
// image can be referred to by its ID or any of its names. Names are
// unique among images.
@@ -98,7 +101,7 @@ type ImageStore interface {
// Create creates an image that has a specified ID (or a random one) and
// optional names, using the specified layer as its topmost (hopefully
// read-only) layer. That layer can be referenced by multiple images.
Create(id string, names []string, layer, metadata string, created time.Time) (*Image, error)
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)
// SetNames replaces the list of names associated with an image with the
// supplied values.
@@ -121,10 +124,25 @@ type imageStore struct {
bydigest map[digest.Digest][]*Image
}
func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
Metadata: i.Metadata,
BigDataNames: copyStringSlice(i.BigDataNames),
BigDataSizes: copyStringInt64Map(i.BigDataSizes),
BigDataDigests: copyStringDigestMap(i.BigDataDigests),
Created: i.Created,
Flags: copyStringInterfaceMap(i.Flags),
}
}
func (r *imageStore) Images() ([]Image, error) {
images := make([]Image, len(r.images))
for i := range r.images {
images[i] = *(r.images[i])
images[i] = *copyImage(r.images[i])
}
return images, nil
}
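Returning *copyImage(...) rather than the stored struct hands callers defensive copies, so mutating a result can no longer corrupt the store's in-memory indexes. A hypothetical caller, assuming a Store obtained from storage.GetStore:

images, err := store.Images()
if err == nil && len(images) > 0 {
	// Safe: this appends to a copied Names slice, not the store's own.
	images[0].Names = append(images[0].Names, "example.com/scratch:latest")
}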
@@ -165,9 +183,16 @@ func (r *imageStore) Load() error {
}
names[name] = images[n]
}
// Implicit digest
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
digests[digest] = append(digests[digest], images[n])
}
// Explicit digest
if image.Digest == "" {
image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
} else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
digests[image.Digest] = append(digests[image.Digest], images[n])
}
}
}
if shouldSave && !r.IsReadWrite() {
@@ -284,7 +309,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error {
return r.Save()
}
func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time) (image *Image, err error) {
func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) {
if !r.IsReadWrite() {
return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath())
}
@@ -311,6 +336,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
if err == nil {
image = &Image{
ID: id,
Digest: searchableDigest,
Names: names,
TopLayer: layer,
Metadata: metadata,
@@ -323,12 +349,16 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
if searchableDigest != "" {
list := r.bydigest[searchableDigest]
r.bydigest[searchableDigest] = append(list, image)
}
for _, name := range names {
r.byname[name] = image
}
err = r.Save()
}
return image, err
return copyImage(image), err
}
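Both the implicit big-data digest and the new explicit Digest field feed the same bydigest map, so a single digest may resolve to several images. A hypothetical lookup helper, assuming it lives inside the storage package alongside imageStore:

// byDigestSketch returns every image recorded under d, whether the digest
// came from the ImageDigestBigDataKey big-data item or the hard-coded
// Image.Digest field.
func (r *imageStore) byDigestSketch(d digest.Digest) ([]*Image, error) {
	if images, ok := r.bydigest[d]; ok {
		return images, nil
	}
	return nil, ErrImageUnknown
}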
func (r *imageStore) Metadata(id string) (string, error) {
@@ -413,6 +443,17 @@ func (r *imageStore) Delete(id string) error {
}
}
}
if image.Digest != "" {
// remove the image's hard-coded digest from the digest-based index
if list, ok := r.bydigest[image.Digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, image.Digest)
} else {
r.bydigest[image.Digest] = prunedList
}
}
}
if err := r.Save(); err != nil {
return err
}
@@ -424,7 +465,7 @@ func (r *imageStore) Delete(id string) error {
func (r *imageStore) Get(id string) (*Image, error) {
if image, ok := r.lookup(id); ok {
return image, nil
return copyImage(image), nil
}
return nil, ErrImageUnknown
}
@@ -520,7 +561,7 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) {
if !ok {
return nil, ErrImageUnknown
}
return image.BigDataNames, nil
return copyStringSlice(image.BigDataNames), nil
}
func imageSliceWithoutValue(slice []*Image, value *Image) []*Image {
@@ -577,9 +618,10 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
save = true
}
if key == ImageDigestBigDataKey {
if oldDigest != "" && oldDigest != newDigest {
if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest {
// remove the image from the list of images in the digest-based
// index which corresponds to the old digest for this item
// index which corresponds to the old digest for this item, unless
// it's also the hard-coded digest
if list, ok := r.bydigest[oldDigest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {

View File

@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: images.go
// source: ./images.go
package storage
@@ -38,6 +38,11 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error {
buf.WriteString(`{ "id":`)
fflib.WriteJsonString(buf, string(j.ID))
buf.WriteByte(',')
if len(j.Digest) != 0 {
buf.WriteString(`"digest":`)
fflib.WriteJsonString(buf, string(j.Digest))
buf.WriteByte(',')
}
if len(j.Names) != 0 {
buf.WriteString(`"names":`)
if j.Names != nil {
@@ -146,6 +151,8 @@ const (
ffjtImageID
ffjtImageDigest
ffjtImageNames
ffjtImageTopLayer
@@ -165,6 +172,8 @@ const (
var ffjKeyImageID = []byte("id")
var ffjKeyImageDigest = []byte("digest")
var ffjKeyImageNames = []byte("names")
var ffjKeyImageTopLayer = []byte("layer")
@@ -268,6 +277,14 @@ mainparse:
goto mainparse
}
case 'd':
if bytes.Equal(ffjKeyImageDigest, kn) {
currentKey = ffjtImageDigest
state = fflib.FFParse_want_colon
goto mainparse
}
case 'f':
if bytes.Equal(ffjKeyImageFlags, kn) {
@@ -358,6 +375,12 @@ mainparse:
goto mainparse
}
if fflib.EqualFoldRight(ffjKeyImageDigest, kn) {
currentKey = ffjtImageDigest
state = fflib.FFParse_want_colon
goto mainparse
}
if fflib.SimpleLetterEqualFold(ffjKeyImageID, kn) {
currentKey = ffjtImageID
state = fflib.FFParse_want_colon
@@ -384,6 +407,9 @@ mainparse:
case ffjtImageID:
goto handle_ID
case ffjtImageDigest:
goto handle_Digest
case ffjtImageNames:
goto handle_Names
@@ -448,6 +474,32 @@ handle_ID:
state = fflib.FFParse_after_value
goto mainparse
handle_Digest:
/* handler: j.Digest type=digest.Digest kind=string quoted=false*/
{
{
if tok != fflib.FFTok_string && tok != fflib.FFTok_null {
return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok))
}
}
if tok == fflib.FFTok_null {
} else {
outBuf := fs.Output.Bytes()
j.Digest = digest.Digest(string(outBuf))
}
}
state = fflib.FFParse_after_value
goto mainparse
handle_Names:
/* handler: j.Names type=[]string kind=slice quoted=false*/

View File

@@ -223,10 +223,29 @@ type layerStore struct {
byuncompressedsum map[digest.Digest][]string
}
func copyLayer(l *Layer) *Layer {
return &Layer{
ID: l.ID,
Names: copyStringSlice(l.Names),
Parent: l.Parent,
Metadata: l.Metadata,
MountLabel: l.MountLabel,
MountPoint: l.MountPoint,
MountCount: l.MountCount,
Created: l.Created,
CompressedDigest: l.CompressedDigest,
CompressedSize: l.CompressedSize,
UncompressedDigest: l.UncompressedDigest,
UncompressedSize: l.UncompressedSize,
CompressionType: l.CompressionType,
Flags: copyStringInterfaceMap(l.Flags),
}
}
func (r *layerStore) Layers() ([]Layer, error) {
layers := make([]Layer, len(r.layers))
for i := range r.layers {
layers[i] = *(r.layers[i])
layers[i] = *copyLayer(r.layers[i])
}
return layers, nil
}
@@ -558,7 +577,7 @@ func (r *layerStore) Put(id, parent string, names []string, mountLabel string, o
return nil, -1, err
}
}
return layer, size, err
return copyLayer(layer), size, err
}
func (r *layerStore) CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) {
@@ -731,7 +750,7 @@ func (r *layerStore) Exists(id string) bool {
func (r *layerStore) Get(id string) (*Layer, error) {
if layer, ok := r.lookup(id); ok {
return layer, nil
return copyLayer(layer), nil
}
return nil, ErrLayerUnknown
}
@@ -778,11 +797,11 @@ func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID st
}
func (r *layerStore) Changes(from, to string) ([]archive.Change, error) {
from, to, _, err := r.findParentAndLayer(from, to)
from, to, toLayer, err := r.findParentAndLayer(from, to)
if err != nil {
return nil, ErrLayerUnknown
}
return r.driver.Changes(to, from)
return r.driver.Changes(to, from, toLayer.MountLabel)
}
type simpleGetCloser struct {
@@ -855,7 +874,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
}
if from != toLayer.Parent {
diff, err := r.driver.Diff(to, from)
diff, err := r.driver.Diff(to, from, toLayer.MountLabel)
if err != nil {
return nil, err
}
@@ -867,7 +886,7 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
if !os.IsNotExist(err) {
return nil, err
}
diff, err := r.driver.Diff(to, from)
diff, err := r.driver.Diff(to, from, toLayer.MountLabel)
if err != nil {
return nil, err
}
@@ -906,11 +925,12 @@ func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser,
}
func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
from, to, _, err = r.findParentAndLayer(from, to)
var toLayer *Layer
from, to, toLayer, err = r.findParentAndLayer(from, to)
if err != nil {
return -1, ErrLayerUnknown
}
return r.driver.DiffSize(to, from)
return r.driver.DiffSize(to, from, toLayer.MountLabel)
}
func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
@@ -950,7 +970,7 @@ func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error
if err != nil {
return -1, err
}
size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, payload)
size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, layer.MountLabel, payload)
if err != nil {
return -1, err
}
@@ -1002,7 +1022,7 @@ func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Di
if !ok {
return nil, ErrLayerUnknown
}
layers = append(layers, *layer)
layers = append(layers, *copyLayer(layer))
}
return layers, nil
}

View File

@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: layers.go
// source: ./layers.go
package storage

View File

@@ -2,14 +2,11 @@ package storage
import (
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
// A Locker represents a file lock where the file is used to cache an
@@ -33,16 +30,8 @@ type Locker interface {
IsReadWrite() bool
}
type lockfile struct {
mu sync.Mutex
file string
fd uintptr
lw string
locktype int16
}
var (
lockfiles map[string]*lockfile
lockfiles map[string]Locker
lockfilesLock sync.Mutex
)
@@ -52,7 +41,7 @@ func GetLockfile(path string) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
lockfiles = make(map[string]*lockfile)
lockfiles = make(map[string]Locker)
}
cleanPath := filepath.Clean(path)
if locker, ok := lockfiles[cleanPath]; ok {
@@ -61,12 +50,10 @@ func GetLockfile(path string) (Locker, error) {
}
return locker, nil
}
fd, err := unix.Open(cleanPath, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
locker, err := getLockFile(path, false) // platform dependent locker
if err != nil {
return nil, errors.Wrapf(err, "error opening %q", cleanPath)
return nil, err
}
unix.CloseOnExec(fd)
locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK}
lockfiles[filepath.Clean(path)] = locker
return locker, nil
}
@@ -77,7 +64,7 @@ func GetROLockfile(path string) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
lockfiles = make(map[string]*lockfile)
lockfiles = make(map[string]Locker)
}
cleanPath := filepath.Clean(path)
if locker, ok := lockfiles[cleanPath]; ok {
@@ -86,99 +73,10 @@ func GetROLockfile(path string) (Locker, error) {
}
return locker, nil
}
fd, err := unix.Open(cleanPath, os.O_RDONLY, 0)
locker, err := getLockFile(path, true) // platform dependent locker
if err != nil {
return nil, errors.Wrapf(err, "error opening %q", cleanPath)
return nil, err
}
unix.CloseOnExec(fd)
locker := &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK}
lockfiles[filepath.Clean(path)] = locker
return locker, nil
}
// Lock locks the lock file
func (l *lockfile) Lock() {
lk := unix.Flock_t{
Type: l.locktype,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
l.mu.Lock()
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
// Unlock unlocks the lock file
func (l *lockfile) Unlock() {
lk := unix.Flock_t{
Type: unix.F_UNLCK,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
l.mu.Unlock()
}
// Touch updates the lock file with the UID of the user
func (l *lockfile) Touch() error {
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return err
}
n, err := unix.Write(int(l.fd), id)
if err != nil {
return err
}
if n != len(id) {
return unix.ENOSPC
}
err = unix.Fsync(int(l.fd))
if err != nil {
return err
}
return nil
}
// Modified indicates if the lock file has been updated since the last time it was loaded
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return true, err
}
n, err := unix.Read(int(l.fd), id)
if err != nil {
return true, err
}
if n != len(id) {
return true, unix.ENOSPC
}
lw := l.lw
l.lw = string(id)
return l.lw != lw, nil
}
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *lockfile) TouchedSince(when time.Time) bool {
st := unix.Stat_t{}
err := unix.Fstat(int(l.fd), &st)
if err != nil {
return true
}
touched := time.Unix(statTMtimeUnix(st))
return when.Before(touched)
}
// IsRWLock indicates if the lock file is a read-write lock
func (l *lockfile) IsReadWrite() bool {
return (l.locktype == unix.F_WRLCK)
}

View File

@@ -0,0 +1,19 @@
// +build darwin freebsd
package storage
import (
"time"
"golang.org/x/sys/unix"
)
func (l *lockfile) TouchedSince(when time.Time) bool {
st := unix.Stat_t{}
err := unix.Fstat(int(l.fd), &st)
if err != nil {
return true
}
touched := time.Unix(st.Mtimespec.Unix())
return when.Before(touched)
}

vendor/github.com/containers/storage/lockfile_linux.go generated vendored Normal file
View File

@@ -0,0 +1,20 @@
// +build linux solaris
package storage
import (
"time"
"golang.org/x/sys/unix"
)
// TouchedSince indicates if the lock file has been touched since the specified time
func (l *lockfile) TouchedSince(when time.Time) bool {
st := unix.Stat_t{}
err := unix.Fstat(int(l.fd), &st)
if err != nil {
return true
}
touched := time.Unix(st.Mtim.Unix())
return when.Before(touched)
}

vendor/github.com/containers/storage/lockfile_unix.go generated vendored Normal file
View File

@@ -0,0 +1,115 @@
// +build linux solaris darwin freebsd
package storage
import (
"os"
"sync"
"time"
"github.com/containers/storage/pkg/stringid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func getLockFile(path string, ro bool) (Locker, error) {
var fd int
var err error
if ro {
fd, err = unix.Open(path, os.O_RDONLY, 0)
} else {
fd, err = unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR)
}
if err != nil {
return nil, errors.Wrapf(err, "error opening %q", path)
}
unix.CloseOnExec(fd)
if ro {
return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK}, nil
}
return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK}, nil
}
type lockfile struct {
mu sync.Mutex
file string
fd uintptr
lw string
locktype int16
}
// Lock locks the lock file
func (l *lockfile) Lock() {
lk := unix.Flock_t{
Type: l.locktype,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
l.mu.Lock()
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
// Unlock unlocks the lock file
func (l *lockfile) Unlock() {
lk := unix.Flock_t{
Type: unix.F_UNLCK,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
l.mu.Unlock()
}
// Touch writes a new random last-writer ID to the lock file so other processes can detect the update
func (l *lockfile) Touch() error {
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return err
}
n, err := unix.Write(int(l.fd), id)
if err != nil {
return err
}
if n != len(id) {
return unix.ENOSPC
}
err = unix.Fsync(int(l.fd))
if err != nil {
return err
}
return nil
}
// Modified indicates if the lock file has been updated since the last time it was loaded
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return true, err
}
n, err := unix.Read(int(l.fd), id)
if err != nil {
return true, err
}
if n != len(id) {
return true, unix.ENOSPC
}
lw := l.lw
l.lw = string(id)
return l.lw != lw, nil
}
// IsReadWrite indicates if the lock file is a read-write lock
func (l *lockfile) IsReadWrite() bool {
return (l.locktype == unix.F_WRLCK)
}
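Taken together, these primitives form a cross-process change-detection scheme: a writer calls Touch() to record a fresh random last-writer ID after mutating shared state, and other processes call Modified() to learn they must reload. A hypothetical consumer, using the exported package-level GetLockfile and an assumed path:

lock, err := storage.GetLockfile("/var/lib/containers/storage/storage.lock")
if err != nil {
	return err
}
lock.Lock()
defer lock.Unlock()
if modified, err := lock.Modified(); err == nil && modified {
	// another process Touch()ed the file since our last load; reload state here
}
// ... mutate the on-disk state ...
if err := lock.Touch(); err != nil { // record ourselves as the last writer
	return err
}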

View File

@@ -0,0 +1,40 @@
// +build windows
package storage
import (
"os"
"sync"
"time"
)
func getLockFile(path string, ro bool) (Locker, error) {
return &lockfile{}, nil
}
type lockfile struct {
mu sync.Mutex
file string
}
func (l *lockfile) Lock() {
}
func (l *lockfile) Unlock() {
}
func (l *lockfile) Modified() (bool, error) {
return false, nil
}
func (l *lockfile) Touch() error {
return nil
}
func (l *lockfile) IsReadWrite() bool {
return false
}
func (l *lockfile) TouchedSince(when time.Time) bool {
stat, err := os.Stat(l.file)
if err != nil {
return true
}
return when.Before(stat.ModTime())
}

View File

@@ -45,6 +45,10 @@ type (
// This format will be converted to the standard format on pack
// and from the standard format on unpack.
WhiteoutFormat WhiteoutFormat
// This is additional data to be used by the converter. It will
// not survive a round trip through JSON, so it's primarily
// intended for generating archives (i.e., converting writes).
WhiteoutData interface{}
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
@@ -702,7 +706,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
compressWriter,
options.ChownOpts,
)
ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
defer func() {
// Make sure to check the error on Close.
@@ -860,7 +864,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
var dirs []*tar.Header
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
rootIDs := idMappings.RootPair()
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat, options.WhiteoutData)
// Iterate through the files in the archive.
loop:
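The WhiteoutData field added above is plumbed into both the Tar and Unpack paths via getWhiteoutConverter. Its intended producer is the overlay driver's Diff (shown earlier), which passes the layer's read-only lower directories; a hedged sketch of that call, with lowerDirs assumed to be the lower-layer mount points:

rc, err := archive.TarWithOptions(diffPath, &archive.TarOptions{
	Compression:    archive.Uncompressed,
	WhiteoutFormat: archive.OverlayWhiteoutFormat,
	WhiteoutData:   lowerDirs, // []string; deliberately not JSON-serializable state
})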

View File

@@ -5,21 +5,27 @@ import (
"os"
"path/filepath"
"strings"
"syscall"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
func getWhiteoutConverter(format WhiteoutFormat, data interface{}) tarWhiteoutConverter {
if format == OverlayWhiteoutFormat {
return overlayWhiteoutConverter{}
if rolayers, ok := data.([]string); ok && len(rolayers) > 0 {
return overlayWhiteoutConverter{rolayers: rolayers}
}
return overlayWhiteoutConverter{rolayers: nil}
}
return nil
}
type overlayWhiteoutConverter struct{}
type overlayWhiteoutConverter struct {
rolayers []string
}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
@@ -31,7 +37,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
}
if fi.Mode()&os.ModeDir != 0 {
// convert opaque dirs to AUFS format by writing an empty file with the prefix
// convert opaque dirs to AUFS format by writing an empty file with the whiteout prefix
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
if err != nil {
return nil, err
@@ -40,20 +46,64 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
if hdr.Xattrs != nil {
delete(hdr.Xattrs, "trusted.overlay.opaque")
}
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
wo = &tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
// If there are no lower layers, then it can't have been deleted in this layer.
if len(o.rolayers) == 0 {
return nil, nil
}
// At this point, we have a directory that's opaque. If it appears in one of the lower
// layers, then it was newly-created here, so it wasn't also deleted here.
for _, rolayer := range o.rolayers {
stat, statErr := os.Stat(filepath.Join(rolayer, hdr.Name))
if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) {
// Not sure what happened here.
return nil, statErr
}
if statErr == nil {
if stat.Mode()&os.ModeCharDevice != 0 {
// It's a whiteout for this directory, so it can't have been
// both deleted and recreated in the layer we're diffing.
s := stat.Sys().(*syscall.Stat_t)
if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
return nil, nil
}
}
// It's not whiteout, so it was there in the older layer, so we need to
// add a whiteout for this item in this layer.
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
wo = &tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
break
}
for dir := filepath.Dir(hdr.Name); dir != "" && dir != "." && dir != string(os.PathSeparator); dir = filepath.Dir(dir) {
// Check for whiteout for a parent directory in a parent layer.
stat, statErr := os.Stat(filepath.Join(rolayer, dir))
if statErr != nil && !os.IsNotExist(statErr) && !isENOTDIR(statErr) {
// Not sure what happened here.
return nil, statErr
}
if statErr == nil {
if stat.Mode()&os.ModeCharDevice != 0 {
// If it's whiteout for a parent directory, then the
// original directory wasn't inherited into this layer,
// so we don't need to emit whiteout for it.
s := stat.Sys().(*syscall.Stat_t)
if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
return nil, nil
}
}
}
}
}
}
}
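Both branches above reduce to the same test: an overlay whiteout is a character device whose device number is 0:0. A hypothetical helper capturing it (major and minor are the vendored package's own bit-shift helpers):

func isOverlayWhiteout(fi os.FileInfo) bool {
	if fi.Mode()&os.ModeCharDevice == 0 {
		return false
	}
	s, ok := fi.Sys().(*syscall.Stat_t)
	return ok && major(s.Rdev) == 0 && minor(s.Rdev) == 0
}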

View File

@@ -2,6 +2,6 @@
package archive
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
func getWhiteoutConverter(format WhiteoutFormat, data interface{}) tarWhiteoutConverter {
return nil
}

View File

@@ -81,7 +81,7 @@ func sameFsTimeSpec(a, b syscall.Timespec) bool {
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip, aufsWhiteoutPresent)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
@@ -104,10 +104,35 @@ func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
return "", nil
}
func aufsWhiteoutPresent(root, path string) (bool, error) {
f := filepath.Join(filepath.Dir(path), WhiteoutPrefix+filepath.Base(path))
_, err := os.Stat(filepath.Join(root, f))
if err == nil {
return true, nil
}
if os.IsNotExist(err) || isENOTDIR(err) {
return false, nil
}
return false, err
}
func isENOTDIR(err error) bool {
if err == nil {
return false
}
if perror, ok := err.(*os.PathError); ok {
if errno, ok := perror.Err.(syscall.Errno); ok {
return errno == syscall.ENOTDIR
}
}
return false
}
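Concretely, deleting etc/passwd in an AUFS layer leaves a sibling marker etc/.wh.passwd, and aufsWhiteoutPresent simply stats that path; isENOTDIR treats a parent component having become a regular file the same as "not present". For example (paths assumed):

// checks for /layers/ro1/etc/.wh.passwd
present, err := aufsWhiteoutPresent("/layers/ro1", "etc/passwd")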
type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
type whiteoutChange func(string, string) (bool, error)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
func changes(layers []string, rw string, dc deleteChange, sc skipChange, wc whiteoutChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
@@ -156,7 +181,28 @@ func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Chan
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
layerScan:
for _, layer := range layers {
if wc != nil {
// ...Unless a lower layer also had whiteout for this directory or one of its parents,
// in which case, it's new
ignore, err := wc(layer, path)
if err != nil {
return err
}
if ignore {
break layerScan
}
for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) {
ignore, err = wc(layer, dir)
if err != nil {
return err
}
if ignore {
break layerScan
}
}
}
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
@@ -187,10 +233,15 @@ func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Chan
}
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
parent := filepath.Dir(path)
if _, ok := changedDirs[parent]; !ok && parent != "/" {
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
changedDirs[parent] = struct{}{}
tail := []Change{}
for parent != "/" {
if _, ok := changedDirs[parent]; !ok && parent != "/" {
tail = append([]Change{{Path: parent, Kind: ChangeModify}}, tail...)
changedDirs[parent] = struct{}{}
}
parent = filepath.Dir(parent)
}
changes = append(changes, tail...)
}
// Record change

View File

@@ -288,26 +288,96 @@ func clen(n []byte) int {
// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, overlayDeletedFile, nil)
dc := func(root, path string, fi os.FileInfo) (string, error) {
return overlayDeletedFile(layers, root, path, fi)
}
return changes(layers, rw, dc, nil, overlayLowerContainsWhiteout)
}
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
func overlayLowerContainsWhiteout(root, path string) (bool, error) {
// Whiteout for a file or directory has the same name, but is for a character
// device with major/minor of 0/0.
stat, err := os.Stat(filepath.Join(root, path))
if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) {
// Not sure what happened here.
return false, err
}
if err == nil && stat.Mode()&os.ModeCharDevice != 0 {
// Check if there's whiteout for the specified item in the specified layer.
s := stat.Sys().(*syscall.Stat_t)
if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
return true, nil
}
}
return false, nil
}
func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) {
// If it's a whiteout item, then a file or directory with that name is removed by this layer.
if fi.Mode()&os.ModeCharDevice != 0 {
s := fi.Sys().(*syscall.Stat_t)
if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
return path, nil
}
}
if fi.Mode()&os.ModeDir != 0 {
opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
if err != nil {
// After this we only need to pay attention to directories.
if !fi.IsDir() {
return "", nil
}
// If the directory isn't marked as opaque, then it's just a normal directory.
opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
if err != nil {
return "", err
}
if len(opaque) != 1 || opaque[0] != 'y' {
return "", err
}
// If there are no lower layers, then it can't have been deleted and recreated in this layer.
if len(layers) == 0 {
return "", err
}
// At this point, we have a directory that's opaque. If it appears in one of the lower
// layers, then it was newly-created here, so it wasn't also deleted here.
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) {
// Not sure what happened here.
return "", err
}
if len(opaque) == 1 && opaque[0] == 'y' {
if err == nil {
if stat.Mode()&os.ModeCharDevice != 0 {
// It's a whiteout for this directory, so it can't have been
// deleted in this layer.
s := stat.Sys().(*syscall.Stat_t)
if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
return "", nil
}
}
// It's not whiteout, so it was there in the older layer, so it has to be
// marked as deleted in this layer.
return path, nil
}
for dir := filepath.Dir(path); dir != "" && dir != string(os.PathSeparator); dir = filepath.Dir(dir) {
// Check for whiteout for a parent directory.
stat, err := os.Stat(filepath.Join(layer, dir))
if err != nil && !os.IsNotExist(err) && !isENOTDIR(err) {
// Not sure what happened here.
return "", err
}
if err == nil {
if stat.Mode()&os.ModeCharDevice != 0 {
// If it's whiteout for a parent directory, then the
// original directory wasn't inherited into the top layer.
s := stat.Sys().(*syscall.Stat_t)
if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
return "", nil
}
}
}
}
}
// We didn't find the same path in any older layers, so it was new in this one.
return "", nil
}

View File

@@ -77,10 +77,7 @@ func (idx *TruncIndex) addID(id string) error {
func (idx *TruncIndex) Add(id string) error {
idx.Lock()
defer idx.Unlock()
if err := idx.addID(id); err != nil {
return err
}
return nil
return idx.addID(id)
}
// Delete removes an ID from the TruncIndex. If there are multiple IDs
@@ -128,8 +125,13 @@ func (idx *TruncIndex) Get(s string) (string, error) {
return "", ErrNotExist
}
// Iterate iterates over all stored IDs, and passes each of them to the given handler.
// Iterate iterates over all stored IDs and passes each of them to the given
// handler. Take care that the handler does not call any public
// method on TruncIndex, as the internal locking is not reentrant/recursive
// and doing so will deadlock.
func (idx *TruncIndex) Iterate(handler func(id string)) {
idx.Lock()
defer idx.Unlock()
idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
handler(string(prefix))
return nil
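A safe pattern under that constraint is to collect IDs inside the handler and only call back into the index afterwards (hypothetical caller):

var ids []string
idx.Iterate(func(id string) {
	ids = append(ids, id) // no TruncIndex calls in here
})
for _, id := range ids {
	if full, err := idx.Get(id); err == nil {
		fmt.Println(full)
	}
}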

View File

@@ -1,11 +0,0 @@
// +build linux solaris
package storage
import (
"golang.org/x/sys/unix"
)
func statTMtimeUnix(st unix.Stat_t) (int64, int64) {
return st.Mtim.Unix()
}

View File

@@ -1,11 +0,0 @@
// +build !linux,!solaris
package storage
import (
"golang.org/x/sys/unix"
)
func statTMtimeUnix(st unix.Stat_t) (int64, int64) {
return st.Mtimespec.Unix()
}

View File

@@ -434,6 +434,8 @@ type ImageOptions struct {
// CreationDate, if not zero, will override the default behavior of marking the image as having been
// created when CreateImage() was called, recording CreationDate instead.
CreationDate time.Time
// Digest is a hard-coded digest value that we can use to look up the image. It is optional.
Digest digest.Digest
}
// ContainerOptions is used for passing options to a Store's CreateContainer() method.
@@ -460,6 +462,12 @@ type store struct {
// GetStore attempts to find an already-created Store object matching the
// specified location and graph driver, and if it can't, it creates and
// initializes a new Store object, and the underlying storage that it controls.
//
// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
//
// These defaults observe environment variables:
// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use
// * `STORAGE_OPTS` for the string of options to pass to the driver
func GetStore(options StoreOptions) (Store, error) {
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
options = DefaultStoreOptions
@@ -491,11 +499,6 @@ func GetStore(options StoreOptions) (Store, error) {
if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
for _, subdir := range []string{} {
if err := os.MkdirAll(filepath.Join(options.RunRoot, subdir), 0700); err != nil && !os.IsExist(err) {
return nil, err
}
}
if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
@@ -838,11 +841,11 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o
}
creationDate := time.Now().UTC()
if options != nil {
if options != nil && !options.CreationDate.IsZero() {
creationDate = options.CreationDate
}
return ristore.Create(id, names, layer, metadata, creationDate)
return ristore.Create(id, names, layer, metadata, creationDate, options.Digest)
}
func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
@@ -2273,14 +2276,15 @@ func (s *store) Shutdown(force bool) ([]string, error) {
return mounted, err
}
s.graphLock.Lock()
defer s.graphLock.Unlock()
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
rlstore.Load()
}
s.graphLock.Lock()
defer s.graphLock.Unlock()
layers, err := rlstore.Layers()
if err != nil {
return mounted, err
@@ -2346,6 +2350,41 @@ func stringSliceWithoutValue(slice []string, value string) []string {
return modified
}
func copyStringSlice(slice []string) []string {
if len(slice) == 0 {
return nil
}
ret := make([]string, len(slice))
copy(ret, slice)
return ret
}
func copyStringInt64Map(m map[string]int64) map[string]int64 {
ret := make(map[string]int64, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
ret := make(map[string]digest.Digest, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
// copyStringInterfaceMap still forces us to assume that the interface{} is
// a non-pointer scalar value
func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
ret := make(map[string]interface{}, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
const configFile = "/etc/containers/storage.conf"
// OptionsConfig represents the "storage.options" TOML config table.

View File

@@ -16,7 +16,9 @@
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which makes the implementation of unsafeReflectValue more complex.
// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
@@ -34,80 +36,49 @@ const (
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. They are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = ptrSize
offsetScalar = uintptr(0)
offsetFlag = ptrSize * 2
type flag uintptr
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = flagKindWidth - 1
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
var (
// flagRO indicates whether the value field of a reflect.Value
// is read-only.
flagRO flag
// flagAddr indicates whether the address of the reflect.Value's
// value may be taken.
flagAddr flag
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
ro, addr flag
}{{
// From Go 1.4 to 1.5
ro: 1 << 5,
addr: 1 << 7,
}, {
// Up to Go tip.
ro: 1<<5 | 1<<6,
addr: 1 << 8,
}}
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
var flagValOffset = func() uintptr {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
return field.Offset
}()
// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@@ -119,34 +90,56 @@ func init() {
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
func unsafeReflectValue(v reflect.Value) reflect.Value {
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
return v
}
flagFieldPtr := flagField(&v)
*flagFieldPtr &^= flagRO
*flagFieldPtr |= flagAddr
return v
}
// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
func init() {
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
if !ok {
panic("reflect.Value has no flag field")
}
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
panic("reflect.Value flag field has changed kind")
}
type t0 int
var t struct {
A t0
// t0 will have flagEmbedRO set.
t0
// a will have flagStickyRO set
a t0
}
vA := reflect.ValueOf(t).FieldByName("A")
va := reflect.ValueOf(t).FieldByName("a")
vt0 := reflect.ValueOf(t).FieldByName("t0")
// Infer flagRO from the difference between the flags
// for the (otherwise identical) fields in t.
flagPublic := *flagField(&vA)
flagWithRO := *flagField(&va) | *flagField(&vt0)
flagRO = flagPublic ^ flagWithRO
// Infer flagAddr from the difference between a value
// taken from a pointer and not.
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
flagNoPtr := *flagField(&vA)
flagPtr := *flagField(&vPtrA)
flagAddr = flagNoPtr ^ flagPtr
// Check that the inferred flags tally with one of the known versions.
for _, f := range okFlags {
if flagRO == f.ro && flagAddr == f.addr {
return
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
panic("reflect.Value read-only flag has changed semantics")
}
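What the patched flags buy, in one hedged example that would only compile inside the spew package (unsafeReflectValue is unexported):

type wrapper struct{ hidden string }
w := wrapper{hidden: "not normally printable"}
v := reflect.ValueOf(w).Field(0) // unexported field: CanInterface() == false
v = unsafeReflectValue(v)        // clears flagRO, sets flagAddr
fmt.Println(v.Interface())       // prints "not normally printable"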

View File

@@ -16,7 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
// +build js appengine safe disableunsafe !go1.4
package spew

View File

@@ -0,0 +1,13 @@
package metrics
import "github.com/docker/go-metrics"
const (
// NamespacePrefix is the namespace of prometheus metrics
NamespacePrefix = "registry"
)
var (
// StorageNamespace is the prometheus namespace of blob/cache related operations
StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
)

View File

@@ -45,13 +45,13 @@ type Manager interface {
// to a backend.
func NewSimpleManager() Manager {
return &simpleManager{
Challanges: make(map[string][]Challenge),
Challenges: make(map[string][]Challenge),
}
}
type simpleManager struct {
sync.RWMutex
Challanges map[string][]Challenge
Challenges map[string][]Challenge
}
func normalizeURL(endpoint *url.URL) {
@@ -64,7 +64,7 @@ func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
m.RLock()
defer m.RUnlock()
challenges := m.Challanges[endpoint.String()]
challenges := m.Challenges[endpoint.String()]
return challenges, nil
}
@@ -82,7 +82,7 @@ func (m *simpleManager) AddResponse(resp *http.Response) error {
m.Lock()
defer m.Unlock()
m.Challanges[urlCopy.String()] = challenges
m.Challenges[urlCopy.String()] = challenges
return nil
}
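Typical usage of the corrected manager: probe the registry's /v2/ endpoint once, record the WWW-Authenticate challenges from the response, and replay them for later requests to the same endpoint (hypothetical caller, assuming the challenge package import):

cm := challenge.NewSimpleManager()
resp, err := http.Get("https://registry.example.com/v2/")
if err != nil {
	return err
}
defer resp.Body.Close()
if err := cm.AddResponse(resp); err != nil {
	return err
}
challenges, _ := cm.GetChallenges(*resp.Request.URL)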

View File

@@ -205,13 +205,18 @@ type tags struct {
func (t *tags) All(ctx context.Context) ([]string, error) {
var tags []string
u, err := t.ub.BuildTagsURL(t.name)
listURLStr, err := t.ub.BuildTagsURL(t.name)
if err != nil {
return tags, err
}
listURL, err := url.Parse(listURLStr)
if err != nil {
return tags, err
}
for {
resp, err := t.client.Get(u)
resp, err := t.client.Get(listURL.String())
if err != nil {
return tags, err
}
@@ -231,7 +236,13 @@ func (t *tags) All(ctx context.Context) ([]string, error) {
}
tags = append(tags, tagsResponse.Tags...)
if link := resp.Header.Get("Link"); link != "" {
u = strings.Trim(strings.Split(link, ";")[0], "<>")
linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
linkURL, err := url.Parse(linkURLStr)
if err != nil {
return tags, err
}
listURL = listURL.ResolveReference(linkURL)
} else {
return tags, nil
}
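The point of parsing and resolving the Link header is that registries may return a relative next-page URL; ResolveReference interprets it against the current page. A self-contained illustration:

base, _ := url.Parse("https://registry.example.com/v2/busybox/tags/list")
next, _ := url.Parse("/v2/busybox/tags/list?last=1.29&n=50")
fmt.Println(base.ResolveReference(next))
// https://registry.example.com/v2/busybox/tags/list?last=1.29&n=50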

View File

@@ -4,6 +4,7 @@ import (
"context"
"github.com/docker/distribution"
prometheus "github.com/docker/distribution/metrics"
"github.com/opencontainers/go-digest"
)
@@ -38,6 +39,11 @@ type cachedBlobStatter struct {
tracker MetricsTracker
}
var (
// cacheCount counts cache requests received, hits, and misses, distinguished by the "type" label
cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache requests received", "type")
)
// NewCachedBlobStatter creates a new statter which prefers a cache and
// falls back to a backend.
func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
@@ -58,6 +64,7 @@ func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, b
}
func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
cacheCount.WithValues("Request").Inc(1)
desc, err := cbds.cache.Stat(ctx, dgst)
if err != nil {
if err != distribution.ErrBlobUnknown {
@@ -66,12 +73,13 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di
goto fallback
}
cacheCount.WithValues("Hit").Inc(1)
if cbds.tracker != nil {
cbds.tracker.Hit()
}
return desc, nil
fallback:
cacheCount.WithValues("Miss").Inc(1)
if cbds.tracker != nil {
cbds.tracker.Miss()
}
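Because requests, hits, and misses share one labeled counter, the hit rate falls out of the label values. A hedged sketch of equivalent go-metrics usage (metric names and registration assumed):

ns := metrics.NewNamespace("registry", "storage", nil)
cacheCount := ns.NewLabeledCounter("cache", "The number of cache requests received", "type")
prometheus.MustRegister(ns) // *Namespace implements prometheus.Collector
cacheCount.WithValues("Request").Inc(1)
cacheCount.WithValues("Hit").Inc(1)
// PromQL hit rate: registry_storage_cache_total{type="Hit"} / registry_storage_cache_total{type="Request"}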

View File

@@ -1,26 +1,32 @@
github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e
github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356
github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716
github.com/aws/aws-sdk-go 5bcc0a238d880469f949fc7cd24e35f32ab80cbd
github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85
github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c
github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a
github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6
github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564
github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064
github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842
github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870
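For reference, each line of this vendor.conf follows the vndr vendoring tool's format: an import path, a pinned commit (or tag), and an optional alternate repository URL, e.g.:

# <import path> <commit-ish> [<alternate repo URL>]   (illustrative)
github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab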

View File

@@ -1,228 +0,0 @@
#include "osxkeychain_darwin.h"
#include <CoreFoundation/CoreFoundation.h>
#include <Foundation/NSValue.h>
#include <stdio.h>
#include <string.h>
char *get_error(OSStatus status) {
char *buf = malloc(128);
CFStringRef str = SecCopyErrorMessageString(status, NULL);
int success = CFStringGetCString(str, buf, 128, kCFStringEncodingUTF8);
CFRelease(str); /* release the copied error string (Create/Copy rule) */
if (!success) {
strncpy(buf, "Unknown error", 128);
}
return buf;
}
char *keychain_add(struct Server *server, char *label, char *username, char *secret) {
SecKeychainItemRef item;
OSStatus status = SecKeychainAddInternetPassword(
NULL,
strlen(server->host), server->host,
0, NULL,
strlen(username), username,
strlen(server->path), server->path,
server->port,
server->proto,
kSecAuthenticationTypeDefault,
strlen(secret), secret,
&item
);
if (status) {
return get_error(status);
}
SecKeychainAttribute attribute;
SecKeychainAttributeList attrs;
attribute.tag = kSecLabelItemAttr;
attribute.data = label;
attribute.length = strlen(label);
attrs.count = 1;
attrs.attr = &attribute;
status = SecKeychainItemModifyContent(item, &attrs, 0, NULL);
if (status) {
return get_error(status);
}
return NULL;
}
char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret) {
char *tmp;
SecKeychainItemRef item;
OSStatus status = SecKeychainFindInternetPassword(
NULL,
strlen(server->host), server->host,
0, NULL,
0, NULL,
strlen(server->path), server->path,
server->port,
server->proto,
kSecAuthenticationTypeDefault,
secret_l, (void **)&tmp,
&item);
if (status) {
return get_error(status);
}
*secret = strdup(tmp);
SecKeychainItemFreeContent(NULL, tmp);
SecKeychainAttributeList list;
SecKeychainAttribute attr;
list.count = 1;
list.attr = &attr;
attr.tag = kSecAccountItemAttr;
status = SecKeychainItemCopyContent(item, NULL, &list, NULL, NULL);
if (status) {
return get_error(status);
}
*username = strdup(attr.data);
*username_l = attr.length;
SecKeychainItemFreeContent(&list, NULL);
return NULL;
}
char *keychain_delete(struct Server *server) {
SecKeychainItemRef item;
OSStatus status = SecKeychainFindInternetPassword(
NULL,
strlen(server->host), server->host,
0, NULL,
0, NULL,
strlen(server->path), server->path,
server->port,
server->proto,
kSecAuthenticationTypeDefault,
0, NULL,
&item);
if (status) {
return get_error(status);
}
status = SecKeychainItemDelete(item);
if (status) {
return get_error(status);
}
return NULL;
}
char * CFStringToCharArr(CFStringRef aString) {
if (aString == NULL) {
return NULL;
}
CFIndex length = CFStringGetLength(aString);
CFIndex maxSize =
CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
char *buffer = (char *)malloc(maxSize);
if (CFStringGetCString(aString, buffer, maxSize,
kCFStringEncodingUTF8)) {
return buffer;
}
return NULL;
}
char *keychain_list(char *credsLabel, char *** paths, char *** accts, unsigned int *list_l) {
CFStringRef credsLabelCF = CFStringCreateWithCString(NULL, credsLabel, kCFStringEncodingUTF8);
CFMutableDictionaryRef query = CFDictionaryCreateMutable(NULL, 0, NULL, NULL); /* capacity 0 = unbounded; four values are added below */
CFDictionaryAddValue(query, kSecClass, kSecClassInternetPassword);
CFDictionaryAddValue(query, kSecReturnAttributes, kCFBooleanTrue);
CFDictionaryAddValue(query, kSecMatchLimit, kSecMatchLimitAll);
CFDictionaryAddValue(query, kSecAttrLabel, credsLabelCF);
//Use this query dictionary
CFTypeRef result= NULL;
OSStatus status = SecItemCopyMatching(
query,
&result);
CFRelease(credsLabelCF);
//The search has run; its matches are now stored in result
if (status) {
return get_error(status);
}
CFIndex numKeys = CFArrayGetCount(result);
*paths = (char **) malloc((int)sizeof(char *)*numKeys);
*accts = (char **) malloc((int)sizeof(char *)*numKeys);
//result is of type CFArray
for(CFIndex i=0; i<numKeys; i++) {
CFDictionaryRef currKey = CFArrayGetValueAtIndex(result,i);
CFStringRef protocolTmp = CFDictionaryGetValue(currKey, CFSTR("ptcl"));
if (protocolTmp != NULL) {
CFStringRef protocolStr = CFStringCreateWithFormat(NULL, NULL, CFSTR("%@"), protocolTmp);
if (CFStringCompare(protocolStr, CFSTR("htps"), 0) == kCFCompareEqualTo) {
protocolTmp = CFSTR("https://");
}
else {
protocolTmp = CFSTR("http://");
}
CFRelease(protocolStr);
}
else {
char * path = "0";
char * acct = "0";
//Copy strlen+1 bytes so the placeholder strings stay NUL-terminated
(*paths)[i] = (char *) malloc(sizeof(char)*(strlen(path)+1));
memcpy((*paths)[i], path, sizeof(char)*(strlen(path)+1));
(*accts)[i] = (char *) malloc(sizeof(char)*(strlen(acct)+1));
memcpy((*accts)[i], acct, sizeof(char)*(strlen(acct)+1));
continue;
}
CFMutableStringRef str = CFStringCreateMutableCopy(NULL, 0, protocolTmp);
CFStringRef serverTmp = CFDictionaryGetValue(currKey, CFSTR("srvr"));
if (serverTmp != NULL) {
CFStringAppend(str, serverTmp);
}
CFStringRef pathTmp = CFDictionaryGetValue(currKey, CFSTR("path"));
if (pathTmp != NULL) {
CFStringAppend(str, pathTmp);
}
const NSNumber * portTmp = CFDictionaryGetValue(currKey, CFSTR("port"));
if (portTmp != NULL && portTmp.integerValue != 0) {
CFStringRef portStr = CFStringCreateWithFormat(NULL, NULL, CFSTR("%@"), portTmp);
CFStringAppend(str, CFSTR(":"));
CFStringAppend(str, portStr);
CFRelease(portStr);
}
CFStringRef acctTmp = CFDictionaryGetValue(currKey, CFSTR("acct"));
if (acctTmp == NULL) {
acctTmp = CFSTR("account not defined");
}
char * path = CFStringToCharArr(str);
char * acct = CFStringToCharArr(acctTmp);
//We now have everything we need: the username and the server URL. Export these to the Go side.
(*paths)[i] = (char *) malloc(sizeof(char)*(strlen(path)+1));
memcpy((*paths)[i], path, sizeof(char)*(strlen(path)+1));
(*accts)[i] = (char *) malloc(sizeof(char)*(strlen(acct)+1));
memcpy((*accts)[i], acct, sizeof(char)*(strlen(acct)+1));
CFRelease(str);
}
*list_l = (int)numKeys;
return NULL;
}
void freeListData(char *** data, unsigned int length) {
for(int i=0; i<length; i++) {
free((*data)[i]);
}
free(*data);
}

View File

@@ -1,196 +0,0 @@
package osxkeychain
/*
#cgo CFLAGS: -x objective-c -mmacosx-version-min=10.10
#cgo LDFLAGS: -framework Security -framework Foundation -mmacosx-version-min=10.10
#include "osxkeychain_darwin.h"
#include <stdlib.h>
*/
import "C"
import (
"errors"
"net/url"
"strconv"
"strings"
"unsafe"
"github.com/docker/docker-credential-helpers/credentials"
)
// errCredentialsNotFound is the specific error message returned by OS X
// when the credentials are not in the keychain.
const errCredentialsNotFound = "The specified item could not be found in the keychain."
// Osxkeychain handles secrets using the OS X Keychain as store.
type Osxkeychain struct{}
// Add adds new credentials to the keychain.
func (h Osxkeychain) Add(creds *credentials.Credentials) error {
h.Delete(creds.ServerURL)
s, err := splitServer(creds.ServerURL)
if err != nil {
return err
}
defer freeServer(s)
label := C.CString(credentials.CredsLabel)
defer C.free(unsafe.Pointer(label))
username := C.CString(creds.Username)
defer C.free(unsafe.Pointer(username))
secret := C.CString(creds.Secret)
defer C.free(unsafe.Pointer(secret))
errMsg := C.keychain_add(s, label, username, secret)
if errMsg != nil {
defer C.free(unsafe.Pointer(errMsg))
return errors.New(C.GoString(errMsg))
}
return nil
}
// Delete removes credentials from the keychain.
func (h Osxkeychain) Delete(serverURL string) error {
s, err := splitServer(serverURL)
if err != nil {
return err
}
defer freeServer(s)
errMsg := C.keychain_delete(s)
if errMsg != nil {
defer C.free(unsafe.Pointer(errMsg))
return errors.New(C.GoString(errMsg))
}
return nil
}
// Get returns the username and secret to use for a given registry server URL.
func (h Osxkeychain) Get(serverURL string) (string, string, error) {
s, err := splitServer(serverURL)
if err != nil {
return "", "", err
}
defer freeServer(s)
var usernameLen C.uint
var username *C.char
var secretLen C.uint
var secret *C.char
// Free the C buffers via a closure so the pointer values are read after
// keychain_get has populated them, not when the defer statement runs
// (a bare defer C.free(unsafe.Pointer(username)) would capture nil here).
defer func() {
C.free(unsafe.Pointer(username))
C.free(unsafe.Pointer(secret))
}()
errMsg := C.keychain_get(s, &usernameLen, &username, &secretLen, &secret)
if errMsg != nil {
defer C.free(unsafe.Pointer(errMsg))
goMsg := C.GoString(errMsg)
if goMsg == errCredentialsNotFound {
return "", "", credentials.NewErrCredentialsNotFound()
}
return "", "", errors.New(goMsg)
}
user := C.GoStringN(username, C.int(usernameLen))
pass := C.GoStringN(secret, C.int(secretLen))
return user, pass, nil
}
// List returns the stored URLs and corresponding usernames.
func (h Osxkeychain) List() (map[string]string, error) {
credsLabelC := C.CString(credentials.CredsLabel)
defer C.free(unsafe.Pointer(credsLabelC))
var pathsC **C.char
var acctsC **C.char
// No C.free here: the deferred freeListData calls below release both the
// element strings and the arrays once keychain_list has populated them;
// a defer at this point would capture the still-nil pointer values.
var listLenC C.uint
errMsg := C.keychain_list(credsLabelC, &pathsC, &acctsC, &listLenC)
if errMsg != nil {
defer C.free(unsafe.Pointer(errMsg))
goMsg := C.GoString(errMsg)
return nil, errors.New(goMsg)
}
defer C.freeListData(&pathsC, listLenC)
defer C.freeListData(&acctsC, listLenC)
listLen := int(listLenC)
pathTmp := (*[1 << 30]*C.char)(unsafe.Pointer(pathsC))[:listLen:listLen]
acctTmp := (*[1 << 30]*C.char)(unsafe.Pointer(acctsC))[:listLen:listLen]
//Convert the C string arrays to Go, skipping the "0" placeholder entries that are irrelevant to the credential helper
resp := make(map[string]string)
for i := 0; i < listLen; i++ {
if C.GoString(pathTmp[i]) == "0" {
continue
}
resp[C.GoString(pathTmp[i])] = C.GoString(acctTmp[i])
}
return resp, nil
}
func splitServer(serverURL string) (*C.struct_Server, error) {
u, err := parseURL(serverURL)
if err != nil {
return nil, err
}
proto := C.kSecProtocolTypeHTTPS
if u.Scheme == "http" {
proto = C.kSecProtocolTypeHTTP
}
var port int
p := getPort(u)
if p != "" {
port, err = strconv.Atoi(p)
if err != nil {
return nil, err
}
}
return &C.struct_Server{
proto: C.SecProtocolType(proto),
host: C.CString(getHostname(u)),
port: C.uint(port),
path: C.CString(u.Path),
}, nil
}
func freeServer(s *C.struct_Server) {
C.free(unsafe.Pointer(s.host))
C.free(unsafe.Pointer(s.path))
}
// parseURL parses and validates a given serverURL into a url.URL, and
// returns an error if validation fails. Query-string parameters are
// omitted from the resulting URL because they are not used by the helper.
//
// If serverURL does not have a valid scheme, `//` is prepended before
// parsing. This prevents the hostname from being parsed as a path, and
// the credentials from being stored without a host.
func parseURL(serverURL string) (*url.URL, error) {
// Check if serverURL has a scheme, otherwise add `//` as scheme.
if !strings.Contains(serverURL, "://") && !strings.HasPrefix(serverURL, "//") {
serverURL = "//" + serverURL
}
u, err := url.Parse(serverURL)
if err != nil {
return nil, err
}
if u.Scheme != "" && u.Scheme != "https" && u.Scheme != "http" {
return nil, errors.New("unsupported scheme: " + u.Scheme)
}
if getHostname(u) == "" {
return nil, errors.New("no hostname in URL")
}
u.RawQuery = ""
return u, nil
}
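A quick usage sketch of the helper above, against the public docker-credential-helpers API (illustrative only; error handling kept minimal, and it only builds on macOS where the osxkeychain package is available):

package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/credentials"
	"github.com/docker/docker-credential-helpers/osxkeychain"
)

func main() {
	h := osxkeychain.Osxkeychain{}

	// Store credentials; a bare hostname such as "registry.example.com"
	// is normalized by parseURL ("//registry.example.com") before storage.
	err := h.Add(&credentials.Credentials{
		ServerURL: "https://registry.example.com",
		Username:  "user",
		Secret:    "s3cret",
	})
	if err != nil {
		panic(err)
	}

	// Retrieve them again by server URL.
	user, secret, err := h.Get("https://registry.example.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(user, secret)
}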

View File

@@ -1,14 +0,0 @@
#include <Security/Security.h>
struct Server {
SecProtocolType proto;
char *host;
char *path;
unsigned int port;
};
char *keychain_add(struct Server *server, char *label, char *username, char *secret);
char *keychain_get(struct Server *server, unsigned int *username_l, char **username, unsigned int *secret_l, char **secret);
char *keychain_delete(struct Server *server);
char *keychain_list(char *credsLabel, char *** data, char *** accts, unsigned int *list_l);
void freeListData(char *** data, unsigned int length);

View File

@@ -1,13 +0,0 @@
//+build go1.8
package osxkeychain
import "net/url"
func getHostname(u *url.URL) string {
return u.Hostname()
}
func getPort(u *url.URL) string {
return u.Port()
}
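The //+build go1.8 constraint restricts this shim to Go 1.8 and later, where url.URL gained the Hostname() and Port() methods. A counterpart file for older Go would have to split u.Host by hand; a rough sketch of what that presumably looks like (hypothetical, and ignoring IPv6 literals for brevity):

//+build !go1.8

package osxkeychain

import (
	"net/url"
	"strings"
)

// getHostname returns the host part of u.Host, dropping any ":port" suffix.
func getHostname(u *url.URL) string {
	if i := strings.LastIndex(u.Host, ":"); i >= 0 {
		return u.Host[:i]
	}
	return u.Host
}

// getPort returns the port part of u.Host, or "" if none is present.
func getPort(u *url.URL) string {
	if i := strings.LastIndex(u.Host, ":"); i >= 0 {
		return u.Host[i+1:]
	}
	return ""
}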

Some files were not shown because too many files have changed in this diff.