Add skopeo registry mirror integration tests

- Update toml to latest release
- Update containers/image
- Add integration tests
- Add hidden `--registries-conf` flag used by the integration tests

Signed-off-by: Sascha Grunert <sgrunert@suse.com>
Sascha Grunert, 2019-03-14 12:38:29 +01:00
commit 6b5bdb7563 (parent 2bdffc89c2)
22 changed files with 1014 additions and 419 deletions


@ -18,14 +18,15 @@ import (
var gitCommit = "" var gitCommit = ""
type globalOptions struct { type globalOptions struct {
debug bool // Enable debug output debug bool // Enable debug output
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:) tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
policyPath string // Path to a signature verification policy file policyPath string // Path to a signature verification policy file
insecurePolicy bool // Use an "allow everything" signature verification policy insecurePolicy bool // Use an "allow everything" signature verification policy
registriesDirPath string // Path to a "registries.d" registry configuratio directory registriesDirPath string // Path to a "registries.d" registry configuratio directory
overrideArch string // Architecture to use for choosing images, instead of the runtime one overrideArch string // Architecture to use for choosing images, instead of the runtime one
overrideOS string // OS to use for choosing images, instead of the runtime one overrideOS string // OS to use for choosing images, instead of the runtime one
commandTimeout time.Duration // Timeout for the command execution commandTimeout time.Duration // Timeout for the command execution
registriesConfPath string // Path to the "registries.conf" file
} }
// createApp returns a cli.App, and the underlying globalOptions object, to be run or tested. // createApp returns a cli.App, and the underlying globalOptions object, to be run or tested.
@ -83,6 +84,12 @@ func createApp() (*cli.App, *globalOptions) {
Usage: "timeout for the command execution", Usage: "timeout for the command execution",
Destination: &opts.commandTimeout, Destination: &opts.commandTimeout,
}, },
cli.StringFlag{
Name: "registries-conf",
Usage: "path to the registries.conf file",
Destination: &opts.registriesConfPath,
Hidden: true,
},
} }
app.Before = opts.before app.Before = opts.before
app.Commands = []cli.Command{ app.Commands = []cli.Command{
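The flag stays out of `--help` output because urfave/cli (vendored here at v1.20.0) skips flags marked `Hidden: true` when rendering usage, while still parsing them. A minimal standalone sketch of the same pattern outside skopeo; the app name and action are made up for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

func main() {
	var registriesConfPath string

	app := cli.NewApp()
	app.Name = "demo"
	app.Flags = []cli.Flag{
		cli.StringFlag{
			Name:        "registries-conf",
			Usage:       "path to the registries.conf file",
			Destination: &registriesConfPath,
			Hidden:      true, // parsed as usual, just omitted from --help
		},
	}
	app.Action = func(c *cli.Context) error {
		fmt.Println("registries.conf:", registriesConfPath)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		os.Exit(1)
	}
}
```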


@ -112,14 +112,15 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, c
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it. // It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) { func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
ctx := &types.SystemContext{ ctx := &types.SystemContext{
RegistriesDirPath: opts.global.registriesDirPath, RegistriesDirPath: opts.global.registriesDirPath,
ArchitectureChoice: opts.global.overrideArch, ArchitectureChoice: opts.global.overrideArch,
OSChoice: opts.global.overrideOS, OSChoice: opts.global.overrideOS,
DockerCertPath: opts.dockerCertPath, DockerCertPath: opts.dockerCertPath,
OCISharedBlobDirPath: opts.sharedBlobDir, OCISharedBlobDirPath: opts.sharedBlobDir,
AuthFilePath: opts.shared.authFilePath, AuthFilePath: opts.shared.authFilePath,
DockerDaemonHost: opts.dockerDaemonHost, DockerDaemonHost: opts.dockerDaemonHost,
DockerDaemonCertPath: opts.dockerCertPath, DockerDaemonCertPath: opts.dockerCertPath,
SystemRegistriesConfPath: opts.global.registriesConfPath,
} }
if opts.tlsVerify.present { if opts.tlsVerify.present {
ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
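Setting `SystemRegistriesConfPath` on the `types.SystemContext` is what lets the integration tests point containers/image at a fixture file instead of the system-wide `/etc/containers/registries.conf`. A small sketch, assuming the pre-module import path vendored here (`github.com/containers/image/types`); the helper name is the sketch's own, not skopeo's:

```go
package main

import (
	"fmt"

	"github.com/containers/image/types"
)

// newTestSystemContext mirrors the field this commit wires up in
// imageOptions.newSystemContext (illustration only).
func newTestSystemContext(registriesConfPath string) *types.SystemContext {
	return &types.SystemContext{
		// Overrides the default registries.conf lookup location.
		SystemRegistriesConfPath: registriesConfPath,
	}
}

func main() {
	sys := newTestSystemContext("./fixtures/registries.conf")
	fmt.Println("using registries.conf at:", sys.SystemRegistriesConfPath)
}
```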


@ -662,3 +662,37 @@ func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
mimeType := manifest.GuessMIMEType(manifestBlob) mimeType := manifest.GuessMIMEType(manifestBlob)
c.Assert(mimeType, check.Equals, expectedMIMEType) c.Assert(mimeType, check.Equals, expectedMIMEType)
} }
const regConfFixture = "./fixtures/registries.conf"
func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {
dir, err := ioutil.TempDir("", "copy-mirror")
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
"docker://mirror.invalid/busybox", "dir:"+dir)
}
func (s *SkopeoSuite) TestFailureCopySrcWithMirrorsUnavailable(c *check.C) {
dir, err := ioutil.TempDir("", "copy-mirror")
c.Assert(err, check.IsNil)
assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
"docker://invalid.invalid/busybox", "dir:"+dir)
}
func (s *SkopeoSuite) TestSuccessCopySrcWithMirrorAndPrefix(c *check.C) {
dir, err := ioutil.TempDir("", "copy-mirror")
c.Assert(err, check.IsNil)
assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
"docker://gcr.invalid/foo/bar/busybox", "dir:"+dir)
}
func (s *SkopeoSuite) TestFailureCopySrcWithMirrorAndPrefixUnavailable(c *check.C) {
dir, err := ioutil.TempDir("", "copy-mirror")
c.Assert(err, check.IsNil)
assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
"docker://gcr.invalid/wrong/prefix/busybox", "dir:"+dir)
}
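These tests go through skopeo's gocheck helpers (`assertSkopeoSucceeds`/`assertSkopeoFails`), which run the built binary and match its output against a regexp. A rough standalone approximation of the first test, assuming a `skopeo` binary on `PATH` and a reachable mirror; this is not how the suite itself is wired:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
)

func main() {
	// Hypothetical re-run of TestSuccessCopySrcWithMirror: copy through the
	// configured mirror into a throwaway dir: destination.
	dir, err := ioutil.TempDir("", "copy-mirror")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	cmd := exec.Command("skopeo",
		"--registries-conf=./fixtures/registries.conf",
		"copy", "docker://mirror.invalid/busybox", "dir:"+dir)
	out, err := cmd.CombinedOutput()
	if err != nil {
		fmt.Printf("copy failed: %v\n%s", err, out)
		os.Exit(1)
	}
	fmt.Println("copy via mirror succeeded")
}
```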


@ -0,0 +1,28 @@
[[registry]]
location = "mirror.invalid"
mirror = [
{ location = "mirror-0.invalid" },
{ location = "mirror-1.invalid" },
{ location = "gcr.io/google-containers" },
]
# This entry is currently unused and exists only to ensure
# that the mirror.invalid/busybox is not rewritten twice.
[[registry]]
location = "gcr.io"
prefix = "gcr.io/google-containers"
[[registry]]
location = "invalid.invalid"
mirror = [
{ location = "invalid-mirror-0.invalid" },
{ location = "invalid-mirror-1.invalid" },
]
[[registry]]
location = "gcr.invalid"
prefix = "gcr.invalid/foo/bar"
mirror = [
{ location = "wrong-mirror-0.invalid" },
{ location = "gcr.io/google-containers" },
]
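The fixture uses `[[registry]]` tables with an optional `prefix` and an array of inline-table `mirror` entries, a shape that needs the inline-table support added in the toml v0.3.1 bump further down in this commit. A sketch that decodes the same shape directly with BurntSushi/toml; the struct names are invented for illustration and are not the sysregistriesv2 types that containers/image actually uses:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type mirror struct {
	Location string `toml:"location"`
}

type registry struct {
	Location string   `toml:"location"`
	Prefix   string   `toml:"prefix"`
	Mirror   []mirror `toml:"mirror"`
}

type registriesConf struct {
	Registry []registry `toml:"registry"`
}

func main() {
	var conf registriesConf
	if _, err := toml.DecodeFile("./fixtures/registries.conf", &conf); err != nil {
		panic(err)
	}
	for _, r := range conf.Registry {
		fmt.Printf("%s (prefix %q): %d mirror(s)\n", r.Location, r.Prefix, len(r.Mirror))
	}
}
```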


@ -2,7 +2,7 @@
github.com/urfave/cli v1.20.0 github.com/urfave/cli v1.20.0
github.com/kr/pretty v0.1.0 github.com/kr/pretty v0.1.0
github.com/kr/text v0.1.0 github.com/kr/text v0.1.0
github.com/containers/image e9c3d17ddb8cb5d48450bc0588525b17b181c049 github.com/containers/image ff926d3c79684793a2135666a2cb738f44ba33dc
github.com/containers/buildah 810efa340ab43753034e2ed08ec290e4abab7e72 github.com/containers/buildah 810efa340ab43753034e2ed08ec290e4abab7e72
github.com/vbauerster/mpb v3.3.4 github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4 github.com/mattn/go-isatty v0.0.4
@ -59,7 +59,7 @@ github.com/pborman/uuid v1.0
github.com/opencontainers/selinux v1.1 github.com/opencontainers/selinux v1.1
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08 golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
github.com/tchap/go-patricia v2.2.6 github.com/tchap/go-patricia v2.2.6
github.com/BurntSushi/toml v0.2.0 github.com/BurntSushi/toml v0.3.1
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2 github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
github.com/klauspost/pgzip v1.2.1 github.com/klauspost/pgzip v1.2.1


@ -1,14 +1,21 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE The MIT License (MIT)
Version 2, December 2004
Copyright (C) 2004 Sam Hocevar <sam@hocevar.net> Copyright (c) 2013 TOML authors
Everyone is permitted to copy and distribute verbatim or modified Permission is hereby granted, free of charge, to any person obtaining a copy
copies of this license document, and changing it is allowed as long of this software and associated documentation files (the "Software"), to deal
as the name is changed. in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE The above copyright notice and this permission notice shall be included in
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION all copies or substantial portions of the Software.
0. You just DO WHAT THE FUCK YOU WANT TO.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@ -1,17 +1,17 @@
## TOML parser and encoder for Go with reflection ## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml` reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data `encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.) representations. (There is an example of this below.)
Spec: https://github.com/mojombo/toml Spec: https://github.com/toml-lang/toml
Compatible with TOML version Compatible with TOML version
[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md) [v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: http://godoc.org/github.com/BurntSushi/toml Documentation: https://godoc.org/github.com/BurntSushi/toml
Installation: Installation:
@ -26,8 +26,7 @@ go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml tomlv some-toml-file.toml
``` ```
[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml) [![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing ### Testing
@ -87,7 +86,7 @@ type TOML struct {
### Using the `encoding.TextUnmarshaler` interface ### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into Here's an example that automatically parses duration strings into
`time.Duration` values: `time.Duration` values:
```toml ```toml
@ -120,7 +119,7 @@ for _, s := range favorites.Song {
} }
``` ```
And you'll also need a `duration` type that satisfies the And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface: `encoding.TextUnmarshaler` interface:
```go ```go
@ -217,4 +216,3 @@ Note that a case insensitive match will be tried if an exact match can't be
found. found.
A working example of the above can be found in `_examples/example.{go,toml}`. A working example of the above can be found in `_examples/example.{go,toml}`.


@ -10,7 +10,9 @@ import (
"time" "time"
) )
var e = fmt.Errorf func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a // Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves. // TOML description of themselves.
@ -103,6 +105,13 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
// This decoder will not handle cyclic types. If a cyclic type is passed, // This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate. // `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) { func Decode(data string, v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
p, err := parse(data) p, err := parse(data)
if err != nil { if err != nil {
return MetaData{}, err return MetaData{}, err
@ -111,7 +120,7 @@ func Decode(data string, v interface{}) (MetaData, error) {
p.mapping, p.types, p.ordered, p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil, make(map[string]bool, len(p.ordered)), nil,
} }
return md, md.unify(p.mapping, rvalue(v)) return md, md.unify(p.mapping, indirect(rv))
} }
// DecodeFile is just like Decode, except it will automatically read the // DecodeFile is just like Decode, except it will automatically read the
@ -211,7 +220,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Interface: case reflect.Interface:
// we only support empty interfaces. // we only support empty interfaces.
if rv.NumMethod() > 0 { if rv.NumMethod() > 0 {
return e("Unsupported type '%s'.", rv.Kind()) return e("unsupported type %s", rv.Type())
} }
return md.unifyAnything(data, rv) return md.unifyAnything(data, rv)
case reflect.Float32: case reflect.Float32:
@ -219,7 +228,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Float64: case reflect.Float64:
return md.unifyFloat64(data, rv) return md.unifyFloat64(data, rv)
} }
return e("Unsupported type '%s'.", rv.Kind()) return e("unsupported type %s", rv.Kind())
} }
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
@ -228,7 +237,8 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
if mapping == nil { if mapping == nil {
return nil return nil
} }
return mismatch(rv, "map", mapping) return e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
} }
for key, datum := range tmap { for key, datum := range tmap {
@ -253,14 +263,13 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
md.decoded[md.context.add(key).String()] = true md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key) md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil { if err := md.unify(datum, subv); err != nil {
return e("Type mismatch for '%s.%s': %s", return err
rv.Type().String(), f.name, err)
} }
md.context = md.context[0 : len(md.context)-1] md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" { } else if f.name != "" {
// Bad user! No soup for you! // Bad user! No soup for you!
return e("Field '%s.%s' is unexported, and therefore cannot "+ return e("cannot write unexported field %s.%s",
"be loaded with reflection.", rv.Type().String(), f.name) rv.Type().String(), f.name)
} }
} }
} }
@ -378,15 +387,15 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
// No bounds checking necessary. // No bounds checking necessary.
case reflect.Int8: case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 { if num < math.MinInt8 || num > math.MaxInt8 {
return e("Value '%d' is out of range for int8.", num) return e("value %d is out of range for int8", num)
} }
case reflect.Int16: case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 { if num < math.MinInt16 || num > math.MaxInt16 {
return e("Value '%d' is out of range for int16.", num) return e("value %d is out of range for int16", num)
} }
case reflect.Int32: case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 { if num < math.MinInt32 || num > math.MaxInt32 {
return e("Value '%d' is out of range for int32.", num) return e("value %d is out of range for int32", num)
} }
} }
rv.SetInt(num) rv.SetInt(num)
@ -397,15 +406,15 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
// No bounds checking necessary. // No bounds checking necessary.
case reflect.Uint8: case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 { if num < 0 || unum > math.MaxUint8 {
return e("Value '%d' is out of range for uint8.", num) return e("value %d is out of range for uint8", num)
} }
case reflect.Uint16: case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 { if num < 0 || unum > math.MaxUint16 {
return e("Value '%d' is out of range for uint16.", num) return e("value %d is out of range for uint16", num)
} }
case reflect.Uint32: case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 { if num < 0 || unum > math.MaxUint32 {
return e("Value '%d' is out of range for uint32.", num) return e("value %d is out of range for uint32", num)
} }
} }
rv.SetUint(unum) rv.SetUint(unum)
@ -471,7 +480,7 @@ func rvalue(v interface{}) reflect.Value {
// interest to us (like encoding.TextUnmarshaler). // interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value { func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr { if v.Kind() != reflect.Ptr {
if v.CanAddr() { if v.CanSet() {
pv := v.Addr() pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok { if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv return pv
@ -496,10 +505,5 @@ func isUnifiable(rv reflect.Value) bool {
} }
func badtype(expected string, data interface{}) error { func badtype(expected string, data interface{}) error {
return e("Expected %s but found '%T'.", expected, data) return e("cannot load TOML value of type %T into a Go %s", data, expected)
}
func mismatch(user reflect.Value, expected string, data interface{}) error {
return e("Type mismatch for %s. Expected %s but found '%T'.",
user.Type().String(), expected, data)
} }
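With this update, `Decode` rejects non-pointer and nil destinations up front, and decode errors carry a `toml:` prefix. A minimal usage sketch; the struct and values are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type server struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	const data = `
host = "mirror-0.invalid"
port = 5000
`
	var s server
	if _, err := toml.Decode(data, &s); err != nil { // destination must be a non-nil pointer
		panic(err)
	}
	fmt.Printf("%s:%d\n", s.Host, s.Port)

	// Passing the value instead of a pointer is now rejected up front.
	if _, err := toml.Decode(data, s); err != nil {
		fmt.Println(err) // e.g. "toml: Decode of non-pointer main.server"
	}
}
```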


@ -77,9 +77,8 @@ func (k Key) maybeQuoted(i int) string {
} }
if quote { if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
} else {
return k[i]
} }
return k[i]
} }
func (k Key) add(piece string) Key { func (k Key) add(piece string) Key {


@ -4,7 +4,7 @@ files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the the Primitive type, and querying the set of keys in a TOML document with the
MetaData type. MetaData type.
The specification implemented: https://github.com/mojombo/toml The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the whether a file is a valid TOML document. It can also be used to print the


@ -16,17 +16,17 @@ type tomlEncodeError struct{ error }
var ( var (
errArrayMixedElementTypes = errors.New( errArrayMixedElementTypes = errors.New(
"can't encode array with mixed element types") "toml: cannot encode array with mixed element types")
errArrayNilElement = errors.New( errArrayNilElement = errors.New(
"can't encode array with nil element") "toml: cannot encode array with nil element")
errNonString = errors.New( errNonString = errors.New(
"can't encode a map with non-string key type") "toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New( errAnonNonStruct = errors.New(
"can't encode an anonymous field that is not a struct") "toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New( errArrayNoTable = errors.New(
"TOML array element can't contain a table") "toml: TOML array element cannot contain a table")
errNoKey = errors.New( errNoKey = errors.New(
"top-level values must be a Go map or struct") "toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing errAnything = errors.New("") // used in testing
) )
@ -148,7 +148,7 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
case reflect.Struct: case reflect.Struct:
enc.eTable(key, rv) enc.eTable(key, rv)
default: default:
panic(e("Unsupported type for key '%s': %s", key, k)) panic(e("unsupported type for key '%s': %s", key, k))
} }
} }
@ -160,7 +160,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
// Special case time.Time as a primitive. Has to come before // Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements // TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC. // encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z")) enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
return return
case TextMarshaler: case TextMarshaler:
// Special case. Use text marshaler if it's available for this value. // Special case. Use text marshaler if it's available for this value.
@ -191,7 +191,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.String: case reflect.String:
enc.writeQuoted(rv.String()) enc.writeQuoted(rv.String())
default: default:
panic(e("Unexpected primitive type: %s", rv.Kind())) panic(e("unexpected primitive type: %s", rv.Kind()))
} }
} }
@ -241,7 +241,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
func (enc *Encoder) eTable(key Key, rv reflect.Value) { func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key) panicIfInvalidKey(key)
if len(key) == 1 { if len(key) == 1 {
// Output an extra new line between top-level tables. // Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.) // (The newline isn't written if nothing else has been written though.)
enc.newline() enc.newline()
} }
@ -315,10 +315,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
t := f.Type t := f.Type
switch t.Kind() { switch t.Kind() {
case reflect.Struct: case reflect.Struct:
addFields(t, frv, f.Index) // Treat anonymous struct fields with
continue // tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index)
continue
}
case reflect.Ptr: case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct { if t.Elem().Kind() == reflect.Struct &&
getOptions(f.Tag).name == "" {
if !frv.IsNil() { if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index) addFields(t.Elem(), frv.Elem(), f.Index)
} }
@ -347,17 +353,18 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
continue continue
} }
tag := sft.Tag.Get("toml") opts := getOptions(sft.Tag)
if tag == "-" { if opts.skip {
continue continue
} }
keyName, opts := getOptions(tag) keyName := sft.Name
if keyName == "" { if opts.name != "" {
keyName = sft.Name keyName = opts.name
} }
if _, ok := opts["omitempty"]; ok && isEmpty(sf) { if opts.omitempty && isEmpty(sf) {
continue continue
} else if _, ok := opts["omitzero"]; ok && isZero(sf) { }
if opts.omitzero && isZero(sf) {
continue continue
} }
@ -392,9 +399,8 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
case reflect.Array, reflect.Slice: case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) { if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash return tomlArrayHash
} else {
return tomlArray
} }
return tomlArray
case reflect.Ptr, reflect.Interface: case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem()) return tomlTypeOfGo(rv.Elem())
case reflect.String: case reflect.String:
@ -451,17 +457,30 @@ func tomlArrayType(rv reflect.Value) tomlType {
return firstType return firstType
} }
func getOptions(keyName string) (string, map[string]struct{}) { type tagOptions struct {
opts := make(map[string]struct{}) skip bool // "-"
ss := strings.Split(keyName, ",") name string
name := ss[0] omitempty bool
if len(ss) > 1 { omitzero bool
for _, opt := range ss { }
opts[opt] = struct{}{}
func getOptions(tag reflect.StructTag) tagOptions {
t := tag.Get("toml")
if t == "-" {
return tagOptions{skip: true}
}
var opts tagOptions
parts := strings.Split(t, ",")
opts.name = parts[0]
for _, s := range parts[1:] {
switch s {
case "omitempty":
opts.omitempty = true
case "omitzero":
opts.omitzero = true
} }
} }
return opts
return name, opts
} }
func isZero(rv reflect.Value) bool { func isZero(rv reflect.Value) bool {
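Struct tags on the encoder side are now parsed into a `tagOptions` value covering `-`, a name override, `omitempty`, and `omitzero`, and anonymous struct fields with an explicit tag name are no longer flattened (matching `encoding/json`). A small sketch using the package's `NewEncoder` API; the field names are made up:

```go
package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

type mirrorEntry struct {
	Location string `toml:"location"`
	Prefix   string `toml:"prefix,omitempty"` // dropped while empty
	Weight   int    `toml:"weight,omitzero"`  // dropped while zero
	Note     string `toml:"-"`                // never encoded
}

func main() {
	m := mirrorEntry{Location: "mirror-0.invalid", Note: "internal only"}
	if err := toml.NewEncoder(os.Stdout).Encode(m); err != nil {
		panic(err)
	}
	// Prints just: location = "mirror-0.invalid"
}
```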


@ -3,6 +3,7 @@ package toml
import ( import (
"fmt" "fmt"
"strings" "strings"
"unicode"
"unicode/utf8" "unicode/utf8"
) )
@ -29,24 +30,28 @@ const (
itemArrayTableEnd itemArrayTableEnd
itemKeyStart itemKeyStart
itemCommentStart itemCommentStart
itemInlineTableStart
itemInlineTableEnd
) )
const ( const (
eof = 0 eof = 0
tableStart = '[' comma = ','
tableEnd = ']' tableStart = '['
arrayTableStart = '[' tableEnd = ']'
arrayTableEnd = ']' arrayTableStart = '['
tableSep = '.' arrayTableEnd = ']'
keySep = '=' tableSep = '.'
arrayStart = '[' keySep = '='
arrayEnd = ']' arrayStart = '['
arrayValTerm = ',' arrayEnd = ']'
commentStart = '#' commentStart = '#'
stringStart = '"' stringStart = '"'
stringEnd = '"' stringEnd = '"'
rawStringStart = '\'' rawStringStart = '\''
rawStringEnd = '\'' rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
) )
type stateFn func(lx *lexer) stateFn type stateFn func(lx *lexer) stateFn
@ -55,11 +60,18 @@ type lexer struct {
input string input string
start int start int
pos int pos int
width int
line int line int
state stateFn state stateFn
items chan item items chan item
// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
// A stack of state functions used to maintain context. // A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places. // The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily // For example, values can appear at the top level or within arbitrarily
@ -87,7 +99,7 @@ func (lx *lexer) nextItem() item {
func lex(input string) *lexer { func lex(input string) *lexer {
lx := &lexer{ lx := &lexer{
input: input + "\n", input: input,
state: lexTop, state: lexTop,
line: 1, line: 1,
items: make(chan item, 10), items: make(chan item, 10),
@ -102,7 +114,7 @@ func (lx *lexer) push(state stateFn) {
func (lx *lexer) pop() stateFn { func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 { if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop.") return lx.errorf("BUG in lexer: no states to pop")
} }
last := lx.stack[len(lx.stack)-1] last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1] lx.stack = lx.stack[0 : len(lx.stack)-1]
@ -124,16 +136,25 @@ func (lx *lexer) emitTrim(typ itemType) {
} }
func (lx *lexer) next() (r rune) { func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) { if lx.pos >= len(lx.input) {
lx.width = 0 lx.atEOF = true
return eof return eof
} }
if lx.input[lx.pos] == '\n' { if lx.input[lx.pos] == '\n' {
lx.line++ lx.line++
} }
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) lx.prevWidths[2] = lx.prevWidths[1]
lx.pos += lx.width lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
return r return r
} }
@ -142,9 +163,20 @@ func (lx *lexer) ignore() {
lx.start = lx.pos lx.start = lx.pos
} }
// backup steps back one rune. Can be called only once per call of next. // backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() { func (lx *lexer) backup() {
lx.pos -= lx.width if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line-- lx.line--
} }
@ -166,9 +198,22 @@ func (lx *lexer) peek() rune {
return r return r
} }
// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
for {
r := lx.next()
if pred(r) {
continue
}
lx.backup()
lx.ignore()
return
}
}
// errorf stops all lexing by emitting an error and returning `nil`. // errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special // Note that any value that is a character is escaped if it's a special
// character (new lines, tabs, etc.). // character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn { func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{ lx.items <- item{
itemError, itemError,
@ -184,7 +229,6 @@ func lexTop(lx *lexer) stateFn {
if isWhitespace(r) || isNL(r) { if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop) return lexSkip(lx, lexTop)
} }
switch r { switch r {
case commentStart: case commentStart:
lx.push(lexTop) lx.push(lexTop)
@ -193,7 +237,7 @@ func lexTop(lx *lexer) stateFn {
return lexTableStart return lexTableStart
case eof: case eof:
if lx.pos > lx.start { if lx.pos > lx.start {
return lx.errorf("Unexpected EOF.") return lx.errorf("unexpected EOF")
} }
lx.emit(itemEOF) lx.emit(itemEOF)
return nil return nil
@ -208,12 +252,12 @@ func lexTop(lx *lexer) stateFn {
// lexTopEnd is entered whenever a top-level item has been consumed. (A value // lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop // or a table.) It must see only whitespace, and will turn back to lexTop
// upon a new line. If it sees EOF, it will quit the lexer successfully. // upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn { func lexTopEnd(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { switch {
case r == commentStart: case r == commentStart:
// a comment will read to a new line for us. // a comment will read to a newline for us.
lx.push(lexTop) lx.push(lexTop)
return lexCommentStart return lexCommentStart
case isWhitespace(r): case isWhitespace(r):
@ -222,11 +266,11 @@ func lexTopEnd(lx *lexer) stateFn {
lx.ignore() lx.ignore()
return lexTop return lexTop
case r == eof: case r == eof:
lx.ignore() lx.emit(itemEOF)
return lexTop return nil
} }
return lx.errorf("Expected a top-level item to end with a new line, "+ return lx.errorf("expected a top-level item to end with a newline, "+
"comment or EOF, but got %q instead.", r) "comment, or EOF, but got %q instead", r)
} }
// lexTable lexes the beginning of a table. Namely, it makes sure that // lexTable lexes the beginning of a table. Namely, it makes sure that
@ -253,21 +297,22 @@ func lexTableEnd(lx *lexer) stateFn {
func lexArrayTableEnd(lx *lexer) stateFn { func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd { if r := lx.next(); r != arrayTableEnd {
return lx.errorf("Expected end of table array name delimiter %q, "+ return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead.", arrayTableEnd, r) "but got %q instead", arrayTableEnd, r)
} }
lx.emit(itemArrayTableEnd) lx.emit(itemArrayTableEnd)
return lexTopEnd return lexTopEnd
} }
func lexTableNameStart(lx *lexer) stateFn { func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); { switch r := lx.peek(); {
case r == tableEnd || r == eof: case r == tableEnd || r == eof:
return lx.errorf("Unexpected end of table name. (Table names cannot " + return lx.errorf("unexpected end of table name " +
"be empty.)") "(table names cannot be empty)")
case r == tableSep: case r == tableSep:
return lx.errorf("Unexpected table separator. (Table names cannot " + return lx.errorf("unexpected table separator " +
"be empty.)") "(table names cannot be empty)")
case r == stringStart || r == rawStringStart: case r == stringStart || r == rawStringStart:
lx.ignore() lx.ignore()
lx.push(lexTableNameEnd) lx.push(lexTableNameEnd)
@ -277,24 +322,22 @@ func lexTableNameStart(lx *lexer) stateFn {
} }
} }
// lexTableName lexes the name of a table. It assumes that at least one // lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read. // valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn { func lexBareTableName(lx *lexer) stateFn {
switch r := lx.next(); { r := lx.next()
case isBareKeyChar(r): if isBareKeyChar(r) {
return lexBareTableName return lexBareTableName
case r == tableSep || r == tableEnd:
lx.backup()
lx.emitTrim(itemText)
return lexTableNameEnd
default:
return lx.errorf("Bare keys cannot contain %q.", r)
} }
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
} }
// lexTableNameEnd reads the end of a piece of a table name, optionally // lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace. // consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn { func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); { switch r := lx.next(); {
case isWhitespace(r): case isWhitespace(r):
return lexTableNameEnd return lexTableNameEnd
@ -304,8 +347,8 @@ func lexTableNameEnd(lx *lexer) stateFn {
case r == tableEnd: case r == tableEnd:
return lx.pop() return lx.pop()
default: default:
return lx.errorf("Expected '.' or ']' to end table name, but got %q "+ return lx.errorf("expected '.' or ']' to end table name, "+
"instead.", r) "but got %q instead", r)
} }
} }
@ -315,7 +358,7 @@ func lexKeyStart(lx *lexer) stateFn {
r := lx.peek() r := lx.peek()
switch { switch {
case r == keySep: case r == keySep:
return lx.errorf("Unexpected key separator %q.", keySep) return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r): case isWhitespace(r) || isNL(r):
lx.next() lx.next()
return lexSkip(lx, lexKeyStart) return lexSkip(lx, lexKeyStart)
@ -338,14 +381,15 @@ func lexBareKey(lx *lexer) stateFn {
case isBareKeyChar(r): case isBareKeyChar(r):
return lexBareKey return lexBareKey
case isWhitespace(r): case isWhitespace(r):
lx.emitTrim(itemText) lx.backup()
lx.emit(itemText)
return lexKeyEnd return lexKeyEnd
case r == keySep: case r == keySep:
lx.backup() lx.backup()
lx.emitTrim(itemText) lx.emit(itemText)
return lexKeyEnd return lexKeyEnd
default: default:
return lx.errorf("Bare keys cannot contain %q.", r) return lx.errorf("bare keys cannot contain %q", r)
} }
} }
@ -358,7 +402,7 @@ func lexKeyEnd(lx *lexer) stateFn {
case isWhitespace(r): case isWhitespace(r):
return lexSkip(lx, lexKeyEnd) return lexSkip(lx, lexKeyEnd)
default: default:
return lx.errorf("Expected key separator %q, but got %q instead.", return lx.errorf("expected key separator %q, but got %q instead",
keySep, r) keySep, r)
} }
} }
@ -367,20 +411,26 @@ func lexKeyEnd(lx *lexer) stateFn {
// lexValue will ignore whitespace. // lexValue will ignore whitespace.
// After a value is lexed, the last state on the next is popped and returned. // After a value is lexed, the last state on the next is popped and returned.
func lexValue(lx *lexer) stateFn { func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT new lines. // We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring new // In array syntax, the array states are responsible for ignoring newlines.
// lines.
r := lx.next() r := lx.next()
if isWhitespace(r) {
return lexSkip(lx, lexValue)
}
switch { switch {
case r == arrayStart: case isWhitespace(r):
return lexSkip(lx, lexValue)
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
}
switch r {
case arrayStart:
lx.ignore() lx.ignore()
lx.emit(itemArray) lx.emit(itemArray)
return lexArrayValue return lexArrayValue
case r == stringStart: case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) { if lx.accept(stringStart) {
if lx.accept(stringStart) { if lx.accept(stringStart) {
lx.ignore() // Ignore """ lx.ignore() // Ignore """
@ -390,7 +440,7 @@ func lexValue(lx *lexer) stateFn {
} }
lx.ignore() // ignore the '"' lx.ignore() // ignore the '"'
return lexString return lexString
case r == rawStringStart: case rawStringStart:
if lx.accept(rawStringStart) { if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) { if lx.accept(rawStringStart) {
lx.ignore() // Ignore """ lx.ignore() // Ignore """
@ -400,23 +450,24 @@ func lexValue(lx *lexer) stateFn {
} }
lx.ignore() // ignore the "'" lx.ignore() // ignore the "'"
return lexRawString return lexRawString
case r == 't': case '+', '-':
return lexTrue
case r == 'f':
return lexFalse
case r == '-':
return lexNumberStart return lexNumberStart
case isDigit(r): case '.': // special error case, be kind to users
lx.backup() // avoid an extra state and use the same as above return lx.errorf("floats must start with a digit, not '.'")
return lexNumberOrDateStart
case r == '.': // special error case, be kind to users
return lx.errorf("Floats must start with a digit, not '.'.")
} }
return lx.errorf("Expected value but found %q instead.", r) if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
// user wrote something like
// x = foo
// (i.e. not 'true' or 'false' but is something else word-like.)
lx.backup()
return lexBool
}
return lx.errorf("expected value but found %q instead", r)
} }
// lexArrayValue consumes one value in an array. It assumes that '[' or ',' // lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and new lines are ignored. // have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn { func lexArrayValue(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { switch {
@ -425,10 +476,11 @@ func lexArrayValue(lx *lexer) stateFn {
case r == commentStart: case r == commentStart:
lx.push(lexArrayValue) lx.push(lexArrayValue)
return lexCommentStart return lexCommentStart
case r == arrayValTerm: case r == comma:
return lx.errorf("Unexpected array value terminator %q.", return lx.errorf("unexpected comma")
arrayValTerm)
case r == arrayEnd: case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd return lexArrayEnd
} }
@ -437,8 +489,9 @@ func lexArrayValue(lx *lexer) stateFn {
return lexValue return lexValue
} }
// lexArrayValueEnd consumes the cruft between values of an array. Namely, // lexArrayValueEnd consumes everything between the end of an array value and
// it ignores whitespace and expects either a ',' or a ']'. // the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn { func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { switch {
@ -447,31 +500,88 @@ func lexArrayValueEnd(lx *lexer) stateFn {
case r == commentStart: case r == commentStart:
lx.push(lexArrayValueEnd) lx.push(lexArrayValueEnd)
return lexCommentStart return lexCommentStart
case r == arrayValTerm: case r == comma:
lx.ignore() lx.ignore()
return lexArrayValue // move on to the next value return lexArrayValue // move on to the next value
case r == arrayEnd: case r == arrayEnd:
return lexArrayEnd return lexArrayEnd
} }
return lx.errorf("Expected an array value terminator %q or an array "+ return lx.errorf(
"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) "expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
} }
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has // lexArrayEnd finishes the lexing of an array.
// just been consumed. // It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn { func lexArrayEnd(lx *lexer) stateFn {
lx.ignore() lx.ignore()
lx.emit(itemArrayEnd) lx.emit(itemArrayEnd)
return lx.pop() return lx.pop()
} }
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the // lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored. // beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn { func lexString(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r): case isNL(r):
return lx.errorf("Strings cannot contain new lines.") return lx.errorf("strings cannot contain newlines")
case r == '\\': case r == '\\':
lx.push(lexString) lx.push(lexString)
return lexStringEscape return lexStringEscape
@ -488,11 +598,12 @@ func lexString(lx *lexer) stateFn {
// lexMultilineString consumes the inner contents of a string. It assumes that // lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored. // the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn { func lexMultilineString(lx *lexer) stateFn {
r := lx.next() switch lx.next() {
switch { case eof:
case r == '\\': return lx.errorf("unexpected EOF")
case '\\':
return lexMultilineStringEscape return lexMultilineStringEscape
case r == stringEnd: case stringEnd:
if lx.accept(stringEnd) { if lx.accept(stringEnd) {
if lx.accept(stringEnd) { if lx.accept(stringEnd) {
lx.backup() lx.backup()
@ -516,8 +627,10 @@ func lexMultilineString(lx *lexer) stateFn {
func lexRawString(lx *lexer) stateFn { func lexRawString(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r): case isNL(r):
return lx.errorf("Strings cannot contain new lines.") return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd: case r == rawStringEnd:
lx.backup() lx.backup()
lx.emit(itemRawString) lx.emit(itemRawString)
@ -529,12 +642,13 @@ func lexRawString(lx *lexer) stateFn {
} }
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such // lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'" has already been consumed and // a string. It assumes that the beginning "'''" has already been consumed and
// ignored. // ignored.
func lexMultilineRawString(lx *lexer) stateFn { func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next() switch lx.next() {
switch { case eof:
case r == rawStringEnd: return lx.errorf("unexpected EOF")
case rawStringEnd:
if lx.accept(rawStringEnd) { if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) { if lx.accept(rawStringEnd) {
lx.backup() lx.backup()
@ -559,11 +673,10 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first: // Handle the special case first:
if isNL(lx.next()) { if isNL(lx.next()) {
return lexMultilineString return lexMultilineString
} else {
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
} }
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
} }
func lexStringEscape(lx *lexer) stateFn { func lexStringEscape(lx *lexer) stateFn {
@ -588,10 +701,9 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U': case 'U':
return lexLongUnicodeEscape return lexLongUnicodeEscape
} }
return lx.errorf("Invalid escape character %q. Only the following "+ return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+ "escape characters are allowed: "+
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+ `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
"\\uXXXX and \\UXXXXXXXX.", r)
} }
func lexShortUnicodeEscape(lx *lexer) stateFn { func lexShortUnicodeEscape(lx *lexer) stateFn {
@ -599,8 +711,8 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 4; i++ { for i := 0; i < 4; i++ {
r = lx.next() r = lx.next()
if !isHexadecimal(r) { if !isHexadecimal(r) {
return lx.errorf("Expected four hexadecimal digits after '\\u', "+ return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got '%s' instead.", lx.current()) "but got %q instead", lx.current())
} }
} }
return lx.pop() return lx.pop()
@ -611,40 +723,43 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 8; i++ { for i := 0; i < 8; i++ {
r = lx.next() r = lx.next()
if !isHexadecimal(r) { if !isHexadecimal(r) {
return lx.errorf("Expected eight hexadecimal digits after '\\U', "+ return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got '%s' instead.", lx.current()) "but got %q instead", lx.current())
} }
} }
return lx.pop() return lx.pop()
} }
// lexNumberOrDateStart consumes either a (positive) integer, float or // lexNumberOrDateStart consumes either an integer, a float, or datetime.
// datetime. It assumes that NO negative sign has been consumed.
func lexNumberOrDateStart(lx *lexer) stateFn { func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next() r := lx.next()
if !isDigit(r) { if isDigit(r) {
if r == '.' { return lexNumberOrDate
return lx.errorf("Floats must start with a digit, not '.'.")
} else {
return lx.errorf("Expected a digit but got %q.", r)
}
} }
return lexNumberOrDate switch r {
case '_':
return lexNumber
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
} }
// lexNumberOrDate consumes either a (positive) integer, float or datetime. // lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn { func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { if isDigit(r) {
case r == '-':
if lx.pos-lx.start != 5 {
return lx.errorf("All ISO8601 dates must be in full Zulu form.")
}
return lexDateAfterYear
case isDigit(r):
return lexNumberOrDate return lexNumberOrDate
case r == '.': }
return lexFloatStart switch r {
case '-':
return lexDatetime
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
} }
lx.backup() lx.backup()
@ -652,46 +767,34 @@ func lexNumberOrDate(lx *lexer) stateFn {
return lx.pop() return lx.pop()
} }
// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format. // lexDatetime consumes a Datetime, to a first approximation.
// It assumes that "YYYY-" has already been consumed. // The parser validates that it matches one of the accepted formats.
func lexDateAfterYear(lx *lexer) stateFn { func lexDatetime(lx *lexer) stateFn {
formats := []rune{ r := lx.next()
// digits are '0'. if isDigit(r) {
// everything else is direct equality. return lexDatetime
'0', '0', '-', '0', '0',
'T',
'0', '0', ':', '0', '0', ':', '0', '0',
'Z',
} }
for _, f := range formats { switch r {
r := lx.next() case '-', 'T', ':', '.', 'Z', '+':
if f == '0' { return lexDatetime
if !isDigit(r) {
return lx.errorf("Expected digit in ISO8601 datetime, "+
"but found %q instead.", r)
}
} else if f != r {
return lx.errorf("Expected %q in ISO8601 datetime, "+
"but found %q instead.", f, r)
}
} }
lx.backup()
lx.emit(itemDatetime) lx.emit(itemDatetime)
return lx.pop() return lx.pop()
} }
// lexNumberStart consumes either an integer or a float. It assumes that // lexNumberStart consumes either an integer or a float. It assumes that a sign
// a negative sign has already been read, but that *no* digits have been // has already been read, but that *no* digits have been consumed.
// consumed. lexNumberStart will move to the appropriate integer or float // lexNumberStart will move to the appropriate integer or float states.
// states.
func lexNumberStart(lx *lexer) stateFn { func lexNumberStart(lx *lexer) stateFn {
// we MUST see a digit. Even floats have to start with a digit. // We MUST see a digit. Even floats have to start with a digit.
r := lx.next() r := lx.next()
if !isDigit(r) { if !isDigit(r) {
if r == '.' { if r == '.' {
return lx.errorf("Floats must start with a digit, not '.'.") return lx.errorf("floats must start with a digit, not '.'")
} else {
return lx.errorf("Expected a digit but got %q.", r)
} }
return lx.errorf("expected a digit but got %q", r)
} }
return lexNumber return lexNumber
} }
@ -699,11 +802,14 @@ func lexNumberStart(lx *lexer) stateFn {
// lexNumber consumes an integer or a float after seeing the first digit. // lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn { func lexNumber(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { if isDigit(r) {
case isDigit(r):
return lexNumber return lexNumber
case r == '.': }
return lexFloatStart switch r {
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
} }
lx.backup() lx.backup()
@ -711,60 +817,42 @@ func lexNumber(lx *lexer) stateFn {
return lx.pop() return lx.pop()
} }
// lexFloatStart starts the consumption of digits of a float after a '.'. // lexFloat consumes the elements of a float. It allows any sequence of
// Namely, at least one digit is required. // float-like characters, so floats emitted by the lexer are only a first
func lexFloatStart(lx *lexer) stateFn { // approximation and must be validated by the parser.
r := lx.next()
if !isDigit(r) {
return lx.errorf("Floats must have a digit after the '.', but got "+
"%q instead.", r)
}
return lexFloat
}
// lexFloat consumes the digits of a float after a '.'.
// Assumes that one digit has been consumed after a '.' already.
func lexFloat(lx *lexer) stateFn { func lexFloat(lx *lexer) stateFn {
r := lx.next() r := lx.next()
if isDigit(r) { if isDigit(r) {
return lexFloat return lexFloat
} }
switch r {
case '_', '.', '-', '+', 'e', 'E':
return lexFloat
}
lx.backup() lx.backup()
lx.emit(itemFloat) lx.emit(itemFloat)
return lx.pop() return lx.pop()
} }
// lexConst consumes the s[1:] in s. It assumes that s[0] has already been // lexBool consumes a bool string: 'true' or 'false.
// consumed. func lexBool(lx *lexer) stateFn {
func lexConst(lx *lexer, s string) stateFn { var rs []rune
for i := range s[1:] { for {
if r := lx.next(); r != rune(s[i+1]) { r := lx.next()
return lx.errorf("Expected %q, but found %q instead.", s[:i+1], if !unicode.IsLetter(r) {
s[:i]+string(r)) lx.backup()
break
} }
rs = append(rs, r)
} }
return nil s := string(rs)
} switch s {
case "true", "false":
// lexTrue consumes the "rue" in "true". It assumes that 't' has already lx.emit(itemBool)
// been consumed. return lx.pop()
func lexTrue(lx *lexer) stateFn {
if fn := lexConst(lx, "true"); fn != nil {
return fn
} }
lx.emit(itemBool) return lx.errorf("expected value but found %q instead", s)
return lx.pop()
}
// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
// been consumed.
func lexFalse(lx *lexer) stateFn {
if fn := lexConst(lx, "false"); fn != nil {
return fn
}
lx.emit(itemBool)
return lx.pop()
} }
// lexCommentStart begins the lexing of a comment. It will emit // lexCommentStart begins the lexing of a comment. It will emit
@ -776,7 +864,7 @@ func lexCommentStart(lx *lexer) stateFn {
} }
// lexComment lexes an entire comment. It assumes that '#' has been consumed. // lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first new line character, and pass control // It will consume *up to* the first newline character, and pass control
// back to the last state on the stack. // back to the last state on the stack.
func lexComment(lx *lexer) stateFn { func lexComment(lx *lexer) stateFn {
r := lx.peek() r := lx.peek()
@ -834,13 +922,7 @@ func (itype itemType) String() string {
return "EOF" return "EOF"
case itemText: case itemText:
return "Text" return "Text"
case itemString: case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemRawString:
return "String"
case itemMultilineString:
return "String"
case itemRawMultilineString:
return "String" return "String"
case itemBool: case itemBool:
return "Bool" return "Bool"


@ -2,7 +2,6 @@ package toml
import ( import (
"fmt" "fmt"
"log"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -81,7 +80,7 @@ func (p *parser) next() item {
} }
func (p *parser) bug(format string, v ...interface{}) { func (p *parser) bug(format string, v ...interface{}) {
log.Panicf("BUG: %s\n\n", fmt.Sprintf(format, v...)) panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
} }
func (p *parser) expect(typ itemType) item { func (p *parser) expect(typ itemType) item {
@ -179,10 +178,18 @@ func (p *parser) value(it item) (interface{}, tomlType) {
} }
p.bug("Expected boolean value, but got '%s'.", it.val) p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger: case itemInteger:
num, err := strconv.ParseInt(it.val, 10, 64) if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil { if err != nil {
// See comment below for floats describing why we make a // Distinguish integer values. Normally, it'd be a bug if the lexer
// distinction between a bug and a user error. // provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok && if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange { e.Err == strconv.ErrRange {
@ -194,29 +201,57 @@ func (p *parser) value(it item) (interface{}, tomlType) {
} }
return num, p.typeOfPrimitive(it) return num, p.typeOfPrimitive(it)
case itemFloat: case itemFloat:
num, err := strconv.ParseFloat(it.val, 64) parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil { if err != nil {
// Distinguish float values. Normally, it'd be a bug if the lexer
// provides an invalid float, but it's possible that the float is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
//
// This is also true for integers.
if e, ok := err.(*strconv.NumError); ok && if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange { e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+ p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val) "IEEE-754 floating-point numbers.", it.val)
} else { } else {
p.bug("Expected float value, but got '%s'.", it.val) p.panicf("Invalid float value: %q", it.val)
} }
} }
return num, p.typeOfPrimitive(it) return num, p.typeOfPrimitive(it)
case itemDatetime: case itemDatetime:
t, err := time.Parse("2006-01-02T15:04:05Z", it.val) var t time.Time
if err != nil { var ok bool
p.panicf("Invalid RFC3339 Zulu DateTime: '%s'.", it.val) var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
} }
return t, p.typeOfPrimitive(it) return t, p.typeOfPrimitive(it)
case itemArray: case itemArray:
@ -234,11 +269,75 @@ func (p *parser) value(it item) (interface{}, tomlType) {
types = append(types, typ) types = append(types, typ)
} }
return array, p.typeOfArray(types) return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
} }
p.bug("Unexpected value type: %s", it.typ) p.bug("Unexpected value type: %s", it.typ)
panic("unreachable") panic("unreachable")
} }
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
accept = false
continue
}
accept = true
}
return accept
}
// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
period := false
for _, r := range s {
if period && !isDigit(r) {
return false
}
period = r == '.'
}
return !period
}
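
As a rough illustration of how the two validators above surface to callers, here is a minimal sketch against the vendored BurntSushi/toml decoder; the struct, keys, and literal values are made up for this example.

```
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct {
		Count int64   `toml:"count"`
		Ratio float64 `toml:"ratio"`
	}
	// Underscores are accepted only between digits; inputs such as
	// "count = _1000" or "ratio = 1._5" are rejected with a decode error,
	// as is a float like "123." without digits after the period.
	doc := "count = 1_000_000\nratio = 3.14_15"
	if _, err := toml.Decode(doc, &cfg); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(cfg.Count, cfg.Ratio) // 1000000 3.1415
}
```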
// establishContext sets the current context of the parser, // establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is // where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter. // set depends on the value of the `array` parameter.

View File

@ -95,8 +95,8 @@ func typeFields(t reflect.Type) []field {
if sf.PkgPath != "" && !sf.Anonymous { // unexported if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue continue
} }
name, _ := getOptions(sf.Tag.Get("toml")) opts := getOptions(sf.Tag)
if name == "-" { if opts.skip {
continue continue
} }
index := make([]int, len(f.index)+1) index := make([]int, len(f.index)+1)
@ -110,8 +110,9 @@ func typeFields(t reflect.Type) []field {
} }
// Record found field and index sequence. // Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != "" tagged := opts.name != ""
name := opts.name
if name == "" { if name == "" {
name = sf.Name name = sf.Name
} }

View File

@ -81,10 +81,8 @@ type bearerToken struct {
// dockerClient is configuration for dealing with a single Docker registry. // dockerClient is configuration for dealing with a single Docker registry.
type dockerClient struct { type dockerClient struct {
// The following members are set by newDockerClient and do not change afterwards. // The following members are set by newDockerClient and do not change afterwards.
sys *types.SystemContext sys *types.SystemContext
registry string registry string
client *http.Client
insecureSkipTLSVerify bool
// The following members are not set by newDockerClient and must be set by callers if needed. // The following members are not set by newDockerClient and must be set by callers if needed.
username string username string
@ -96,6 +94,10 @@ type dockerClient struct {
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded. scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
challenges []challenge challenges []challenge
supportsSignatures bool supportsSignatures bool
// The tlsClientConfig is set up during the creation of the dockerClient and
// will be updated by detectPropertiesHelper(). Any HTTP request the
// dockerClient makes will use this TLS client configuration.
tlsClientConfig *tls.Config
// Private state for setupRequestAuth (key: string, value: bearerToken) // Private state for setupRequestAuth (key: string, value: bearerToken)
tokenCache sync.Map tokenCache sync.Map
@ -229,8 +231,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
if registry == dockerHostname { if registry == dockerHostname {
registry = dockerRegistry registry = dockerRegistry
} }
tr := tlsclientconfig.NewTransport() tlsClientConfig := serverDefault()
tr.TLSClientConfig = serverDefault()
// It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
// because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
@ -241,38 +242,31 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
if err != nil { if err != nil {
return nil, err return nil, err
} }
if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil { if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil {
return nil, err return nil, err
} }
// Check if TLS verification shall be skipped (default=false) which can // Check if TLS verification shall be skipped (default=false) which can
// either be specified in the sysregistriesv2 configuration or via the // be specified in the sysregistriesv2 configuration.
// SystemContext, whereas the SystemContext is prioritized.
skipVerify := false skipVerify := false
if sys != nil && sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined { reg, err := sysregistriesv2.FindRegistry(sys, reference)
// Only use the SystemContext if the actual value is defined. if err != nil {
skipVerify = sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue return nil, errors.Wrapf(err, "error loading registries")
} else {
reg, err := sysregistriesv2.FindRegistry(sys, reference)
if err != nil {
return nil, errors.Wrapf(err, "error loading registries")
}
if reg != nil {
skipVerify = reg.Insecure
}
} }
tr.TLSClientConfig.InsecureSkipVerify = skipVerify if reg != nil {
skipVerify = reg.Insecure
}
tlsClientConfig.InsecureSkipVerify = skipVerify
return &dockerClient{ return &dockerClient{
sys: sys, sys: sys,
registry: registry, registry: registry,
client: &http.Client{Transport: tr}, tlsClientConfig: tlsClientConfig,
insecureSkipTLSVerify: skipVerify,
}, nil }, nil
} }
// CheckAuth validates the credentials by attempting to log into the registry // CheckAuth validates the credentials by attempting to log into the registry
// returns an error if an error occcured while making the http request or the status code received was 401 // returns an error if an error occurred while making the http request or the status code received was 401
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
client, err := newDockerClient(sys, registry, registry) client, err := newDockerClient(sys, registry, registry)
if err != nil { if err != nil {
@ -445,11 +439,18 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
} }
} }
logrus.Debugf("%s %s", method, url) logrus.Debugf("%s %s", method, url)
res, err := c.client.Do(req)
if err != nil { // Build the transport and do the request by using the client's tlsClientConfig
return nil, err return c.doHTTP(req)
} }
return res, nil
// doHTTP uses the client's internal TLS configuration to perform the
// provided HTTP request. It returns the response and an error on failure.
func (c *dockerClient) doHTTP(req *http.Request) (*http.Response, error) {
tr := tlsclientconfig.NewTransport()
tr.TLSClientConfig = c.tlsClientConfig
httpClient := &http.Client{Transport: tr}
return httpClient.Do(req)
} }
// we're using the challenges from the /v2/ ping response and not the one from the destination // we're using the challenges from the /v2/ ping response and not the one from the destination
@ -561,6 +562,12 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
return nil return nil
} }
// We overwrite the TLS client's `InsecureSkipVerify` only if explicitly
// specified by the system context.
if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
}
ping := func(scheme string) error { ping := func(scheme string) error {
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil) resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
@ -579,7 +586,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
return nil return nil
} }
err := ping("https") err := ping("https")
if err != nil && c.insecureSkipTLSVerify { if err != nil && c.tlsClientConfig.InsecureSkipVerify {
err = ping("http") err = ping("http")
} }
if err != nil { if err != nil {
@ -603,7 +610,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
return true return true
} }
isV1 := pingV1("https") isV1 := pingV1("https")
if !isV1 && c.insecureSkipTLSVerify { if !isV1 && c.tlsClientConfig.InsecureSkipVerify {
isV1 = pingV1("http") isV1 = pingV1("http")
} }
if isV1 { if isV1 {

View File

@ -25,7 +25,7 @@ type Image struct {
// a client to the registry hosting the given image. // a client to the registry hosting the given image.
// The caller must call .Close() on the returned Image. // The caller must call .Close() on the returned Image.
func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) { func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) {
s, err := newImageSource(sys, ref) s, err := newImageSource(ctx, sys, ref)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -13,6 +13,7 @@ import (
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/pkg/sysregistriesv2"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
@ -28,17 +29,89 @@ type dockerImageSource struct {
cachedManifestMIMEType string // Only valid if cachedManifest != nil cachedManifestMIMEType string // Only valid if cachedManifest != nil
} }
// newImageSource creates a new ImageSource for the specified image reference. // newImageSource creates a new `ImageSource` for the specified image reference
// The caller must call .Close() on the returned ImageSource. // `ref`.
func newImageSource(sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { //
c, err := newDockerClientFromRef(sys, ref, false, "pull") // The following steps will be done during the instance creation:
//
// - Lookup the registry within the configured location in
// `sys.SystemRegistriesConfPath`. If there is no configured registry available,
// we fall back to the provided docker reference `ref`.
//
// - References which contain a configured prefix will be automatically rewritten
// to the correct target reference. For example, if the configured
// `prefix = "example.com/foo"`, `location = "example.com"` and the image will be
// pulled from the ref `example.com/foo/image`, then the resulting pull will
// effectively point to `example.com/image`.
//
// - If the rewrite succeeds, the rewritten reference will be used as the
// `dockerRef` in the client. If the rewrite fails, the function immediately returns an error.
//
// - Each mirror will be used (in the configured order) to test the
// availability of the image manifest on the remote location. For example,
// if the manifest is not reachable due to connectivity issues, then the next
// mirror will be tested instead. If no mirror is configured or contains the
// target manifest, then the initial `ref` will be tested as fallback. The
// creation of the new `dockerImageSource` only succeeds if a remote
// location with the available manifest was found.
//
// A cleanup call to `.Close()` is needed when the caller is done using the returned
// `ImageSource`.
func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) {
registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
if err != nil { if err != nil {
return nil, err return nil, errors.Wrapf(err, "error loading registries configuration")
} }
return &dockerImageSource{
ref: ref, if registry == nil {
c: c, // No configuration was found for the provided reference, so we create
}, nil // a fallback registry by hand to make the client creation below work
// as intended.
registry = &sysregistriesv2.Registry{
Endpoint: sysregistriesv2.Endpoint{
Location: ref.ref.String(),
},
}
}
// Found the registry within the sysregistriesv2 configuration. Now we test
// all endpoints for the manifest availability. If a working image source
// was found, it will be used for all future pull actions.
var (
imageSource *dockerImageSource
manifestLoadErr error
)
for _, endpoint := range append(registry.Mirrors, registry.Endpoint) {
logrus.Debugf("Trying to pull %q from endpoint %q", ref.ref, endpoint.Location)
newRef, err := endpoint.RewriteReference(ref.ref, registry.Prefix)
if err != nil {
return nil, err
}
dockerRef, err := newReference(newRef)
if err != nil {
return nil, err
}
client, err := newDockerClientFromRef(sys, dockerRef, false, "pull")
if err != nil {
return nil, err
}
client.tlsClientConfig.InsecureSkipVerify = endpoint.Insecure
testImageSource := &dockerImageSource{
ref: dockerRef,
c: client,
}
manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
if manifestLoadErr == nil {
imageSource = testImageSource
break
}
}
return imageSource, manifestLoadErr
} }
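
For orientation, a hedged sketch of how a library caller could exercise this mirror-aware lookup; the registries.conf path and image reference are placeholders, not values used by this change.

```
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/docker"
	"github.com/containers/image/types"
)

func main() {
	sys := &types.SystemContext{
		// Point the lookup at a custom registries.conf, e.g. one that
		// configures mirrors for the "example.com/foo" prefix.
		SystemRegistriesConfPath: "/tmp/registries.conf",
	}
	ref, err := docker.ParseReference("//example.com/foo/image:latest")
	if err != nil {
		panic(err)
	}
	// NewImageSource runs the lookup above: it resolves the registry,
	// rewrites the reference for each mirror and picks the first endpoint
	// that serves the manifest.
	src, err := ref.NewImageSource(context.Background(), sys)
	if err != nil {
		panic(err)
	}
	defer src.Close()
	fmt.Println("image source reference:", src.Reference().DockerReference())
}
```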
// Reference returns the reference used to set up this source, _as specified by the user_ // Reference returns the reference used to set up this source, _as specified by the user_
@ -274,7 +347,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
return nil, false, err return nil, false, err
} }
req = req.WithContext(ctx) req = req.WithContext(ctx)
res, err := s.c.client.Do(req) res, err := s.c.doHTTP(req)
if err != nil { if err != nil {
return nil, false, err return nil, false, err
} }

View File

@ -61,8 +61,13 @@ func ParseReference(refString string) (types.ImageReference, error) {
// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). // NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) { func NewReference(ref reference.Named) (types.ImageReference, error) {
return newReference(ref)
}
// newReference returns a dockerReference for a named reference.
func newReference(ref reference.Named) (dockerReference, error) {
if reference.IsNameOnly(ref) { if reference.IsNameOnly(ref) {
return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) return dockerReference{}, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
} }
// A github.com/distribution/reference value can have a tag and a digest at the same time! // A github.com/distribution/reference value can have a tag and a digest at the same time!
// The docker/distribution API does not really support that (we cant ask for an image with a specific // The docker/distribution API does not really support that (we cant ask for an image with a specific
@ -72,8 +77,9 @@ func NewReference(ref reference.Named) (types.ImageReference, error) {
_, isTagged := ref.(reference.NamedTagged) _, isTagged := ref.(reference.NamedTagged)
_, isDigested := ref.(reference.Canonical) _, isDigested := ref.(reference.Canonical)
if isTagged && isDigested { if isTagged && isDigested {
return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported") return dockerReference{}, errors.Errorf("Docker references with both a tag and digest are currently not supported")
} }
return dockerReference{ return dockerReference{
ref: ref, ref: ref,
}, nil }, nil
@ -135,7 +141,7 @@ func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContex
// NewImageSource returns a types.ImageSource for this reference. // NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource. // The caller must call .Close() on the returned ImageSource.
func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
return newImageSource(sys, ref) return newImageSource(ctx, sys, ref)
} }
// NewImageDestination returns a types.ImageDestination for this reference. // NewImageDestination returns a types.ImageDestination for this reference.

View File

@ -7,14 +7,68 @@ containers-registries.conf - Syntax of System Registry Configuration File
# DESCRIPTION # DESCRIPTION
The CONTAINERS-REGISTRIES configuration file is a system-wide configuration The CONTAINERS-REGISTRIES configuration file is a system-wide configuration
file for container image registries. The file format is TOML. The valid file for container image registries. The file format is TOML.
categories are: 'registries.search', 'registries.insecure', and
'registries.block'.
By default, the configuration file is located at `/etc/containers/registries.conf`. By default, the configuration file is located at `/etc/containers/registries.conf`.
# FORMAT # FORMATS
The TOML_format is used to build a simple list format for registries under three
## VERSION 2
VERSION 2 is the latest format of `registries.conf` and is currently in beta.
This means that, in general, VERSION 1 should still be used in production
environments for now.
Every registry can have its own mirrors configured. The mirrors will be tested
in order for the availability of the remote manifest. Currently this happens
only during an image pull. If the manifest is not reachable due to connectivity
issues or because it is not available on the remote, then the next mirror will
be tested instead. If no mirror is configured or none contains the manifest to
be pulled, then the initially provided reference will be used as a fallback. It
is also possible to set the `insecure` option per mirror.
Furthermore, it is possible to specify a `prefix` for a registry. The `prefix`
is used to find the relevant target registry from which the image has to be
pulled. During the test for the availability of the image, the prefixed
location will be rewritten to the correct remote location. This applies to
mirrors as well as the fallback `location`. If no prefix is specified, it
defaults to the specified `location`. For example, if
`prefix = "example.com/foo"`, `location = "example.com"` and the image is
pulled from `example.com/foo/image`, then the resulting pull will effectively
point to `example.com/image`.
By default, container runtimes use TLS when retrieving images from a registry.
If the registry is not set up with TLS, then the container runtime will fail to
pull images from the registry. If you set `insecure = true` for a registry or a
mirror, you override the `insecure` flag for that specific entry. This means
that the container runtime will attempt to use unencrypted HTTP to pull the
image. It also allows you to pull from a registry with self-signed certificates.
If you set `unqualified-search = true` for a registry, then it is possible
to omit the registry hostname when pulling images. This feature does not work
together with a specified `prefix`.
If `blocked = true`, then pulling images from that registry is not allowed.
### EXAMPLE
```
[[registry]]
location = "example.com"
insecure = false
prefix = "example.com/foo"
unqualified-search = false
blocked = false
mirror = [
{ location = "example-mirror-0.local", insecure = false },
{ location = "example-mirror-1.local", insecure = true }
]
```
## VERSION 1
VERSION 1 can be used as an alternative to VERSION 2, but it does not support
registry mirrors or a prefix.
The TOML format is used to build a simple list for registries under three
categories: `registries.search`, `registries.insecure`, and `registries.block`. categories: `registries.search`, `registries.insecure`, and `registries.block`.
You can list multiple registries using a comma separated list. You can list multiple registries using a comma separated list.
@ -22,18 +76,13 @@ Search registries are used when the caller of a container runtime does not fully
container image that they want to execute. These registries are prepended onto the front container image that they want to execute. These registries are prepended onto the front
of the specified container image until the named image is found at a registry. of the specified container image until the named image is found at a registry.
Insecure Registries. By default container runtimes use TLS when retrieving images
from a registry. If the registry is not setup with TLS, then the container runtime
will fail to pull images from the registry. If you add the registry to the list of
insecure registries then the container runtime will attempt use standard web protocols to
pull the image. It also allows you to pull from a registry with self-signed certificates.
Note insecure registries can be used for any registry, not just the registries listed Note insecure registries can be used for any registry, not just the registries listed
under search. under search.
Block Registries. The registries in this category are are not pulled from when The fields `registries.insecure` and `registries.block` work in the same way as
retrieving images. the `insecure` and `blocked` fields from VERSION 2.
# EXAMPLE ### EXAMPLE
The following example configuration defines two searchable registries, one The following example configuration defines two searchable registries, one
insecure registry, and two blocked registries. insecure registry, and two blocked registries.
@ -49,6 +98,8 @@ registries = ['registry.untrusted.com', 'registry.unsafe.com']
``` ```
# HISTORY # HISTORY
Mar 2019, Added additional configuration format by Sascha Grunert <sgrunert@suse.com>
Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg <vrothberg@suse.com> Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg <vrothberg@suse.com>
Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com> Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com>

View File

@ -0,0 +1,109 @@
% CONTAINERS-TRANSPORTS(5) Containers Transports Man Page
% Valentin Rothberg
% April 2019
## NAME
containers-transports - description of supported transports for copying and storing container images
## DESCRIPTION
Tools which use the containers/image library, including skopeo(1), buildah(1), and podman(1), all share a common syntax for referring to container images in various locations.
The general form of the syntax is _transport:details_, where the details depend on the specified transport; the supported transports are documented below.
### **containers-storage:** [storage-specifier]{image-id|docker-reference[@image-id]}
An image located in a local containers storage.
The format of _docker-reference_ is described in detail in the **docker** transport.
The _storage-specifier_ allows for referencing storage locations on the file system and has the format `[[driver@]root[+run-root][:options]]` where the optional `driver` refers to the storage driver (e.g., overlay or btrfs) and where `root` is an absolute path to the storage's root directory.
The optional `run-root` can be used to specify the run directory of the storage where all temporary writable content is stored.
The optional `options` are a comma-separated list of driver-specific options.
Please refer to containers-storage.conf(5) for further information on the drivers and supported options.
### **dir:**_path_
An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files.
This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.
### **docker://**_docker-reference_
An image in a registry implementing the "Docker Registry HTTP API V2".
By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using podman-login(1).
If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using docker-login(1).
The containers-registries.conf(5) further allows for configuring various settings of a registry.
Note that a _docker-reference_ has the following format: `name[:tag|@digest]`.
While the docker transport does not support both a tag and a digest at the same time, some formats like containers-storage do.
Digests can also be used in an image destination as long as the manifest matches the provided digest.
The digest of images can be explored with skopeo-inspect(1).
If `name` does not contain a slash, it is treated as `docker.io/library/name`.
Otherwise, the component before the first slash is checked to see whether it is recognized as a `hostname[:port]` (i.e., it contains either a . or a :, or the component is exactly localhost).
If the first component of name is not recognized as a `hostname[:port]`, `name` is treated as `docker.io/name`.
### **docker-archive:**_path[:docker-reference]_
An image stored in a file in the docker-save(1) format.
_docker-reference_ is only used when creating such a file, and it must not contain a digest.
It is further possible to copy data to stdin by specifying `docker-archive:/dev/stdin`, but note that the file must be seekable.
### **docker-daemon:**_docker-reference|algo:digest_
An image stored in the docker daemon's internal storage.
The image must be specified as a _docker-reference_ or in an alternative _algo:digest_ format when being used as an image source.
The _algo:digest_ refers to the image ID reported by docker-inspect(1).
### **oci:**_path[:tag]_
An image compliant with the "Open Container Image Layout Specification" at _path_.
Using a _tag_ is optional and allows for storing multiple images at the same _path_.
### **oci-archive:**_path[:tag]_
An image compliant with the "Open Container Image Layout Specification" stored as a tar(1) archive at _path_.
### **ostree:**_docker-reference[@/absolute/repo/path]_
An image in the local ostree(1) repository.
_/absolute/repo/path_ defaults to _/ostree/repo_.
## Examples
The following examples demonstrate how some of the containers transports can be used.
The examples use skopeo-copy(1) for copying container images.
**Copying an image from one registry to another**:
```
$ skopeo copy docker://docker.io/library/alpine:latest docker://localhost:5000/alpine:latest
```
**Copying an image from a running Docker daemon to a directory in the OCI layout**:
```
$ mkdir alpine-oci
$ skopeo copy docker-daemon:alpine:latest oci:alpine-oci
$ tree alpine-oci
alpine-oci/
├── blobs
│   └── sha256
│   ├── 83ef92b73cf4595aa7fe214ec6747228283d585f373d8f6bc08d66bebab531b7
│   ├── 9a6259e911dcd0a53535a25a9760ad8f2eded3528e0ad5604c4488624795cecc
│   └── ff8df268d29ccbe81cdf0a173076dcfbbea4bb2b6df1dd26766a73cb7b4ae6f7
├── index.json
└── oci-layout
2 directories, 5 files
```
**Copying an image from a registry to the local storage**:
```
$ skopeo copy docker://docker.io/library/alpine:latest containers-storage:alpine:latest
```
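
For programs built on containers/image, transport strings like the ones above can also be parsed programmatically via the alltransports package; the following is a small illustrative sketch, with an arbitrarily chosen image name.

```
package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Parse a transport-qualified image name into a types.ImageReference.
	ref, err := alltransports.ParseImageName("docker://docker.io/library/alpine:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Transport().Name())      // the transport, here "docker"
	fmt.Println(ref.StringWithinTransport()) // the transport-specific part of the reference
}
```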
## SEE ALSO
docker-login(1), docker-save(1), ostree(1), podman-login(1), skopeo-copy(1), skopeo-inspect(1), tar(1), container-registries.conf(5), containers-storage.conf(5)
## AUTHORS
Miloslav Trmač <mitr@redhat.com>
Valentin Rothberg <rothberg@redhat.com>

View File

@ -10,6 +10,10 @@ import (
"github.com/BurntSushi/toml" "github.com/BurntSushi/toml"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/containers/image/docker/reference"
) )
// systemRegistriesConfPath is the path to the system-wide registry // systemRegistriesConfPath is the path to the system-wide registry
@ -22,34 +26,56 @@ var systemRegistriesConfPath = builtinRegistriesConfPath
// DO NOT change this, instead see systemRegistriesConfPath above. // DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/etc/containers/registries.conf" const builtinRegistriesConfPath = "/etc/containers/registries.conf"
// Mirror represents a mirror. Mirrors can be used as pull-through caches for // Endpoint describes a remote location of a registry.
// registries. type Endpoint struct {
type Mirror struct { // The endpoint's remote location.
// The mirror's URL. Location string `toml:"location"`
URL string `toml:"url"`
// If true, certs verification will be skipped and HTTP (non-TLS) // If true, certs verification will be skipped and HTTP (non-TLS)
// connections will be allowed. // connections will be allowed.
Insecure bool `toml:"insecure"` Insecure bool `toml:"insecure"`
} }
// RewriteReference substitutes the provided `prefix` in `ref` with the
// endpoint's `location` and creates a new named reference from it.
// The function errors if the newly created reference is not parsable.
func (e *Endpoint) RewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
if ref == nil {
return nil, fmt.Errorf("provided reference is nil")
}
if prefix == "" {
return ref, nil
}
refString := ref.String()
if refMatchesPrefix(refString, prefix) {
newNamedRef := strings.Replace(refString, prefix, e.Location, 1)
newParsedRef, err := reference.ParseNamed(newNamedRef)
if newParsedRef != nil {
logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String())
}
if err != nil {
return nil, errors.Wrapf(err, "error rewriting reference")
}
return newParsedRef, nil
}
return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
}
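
To make the rewrite rule concrete, here is a small hedged sketch of calling the method above; the prefix, mirror location, and reference are example values only.

```
package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/pkg/sysregistriesv2"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("example.com/foo/image:latest")
	if err != nil {
		panic(err)
	}
	mirror := sysregistriesv2.Endpoint{Location: "example-mirror-0.local"}
	// The configured prefix is replaced by the endpoint location:
	// example.com/foo/image:latest -> example-mirror-0.local/image:latest
	rewritten, err := mirror.RewriteReference(ref, "example.com/foo")
	if err != nil {
		panic(err)
	}
	fmt.Println(rewritten.String())
}
```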
// Registry represents a registry. // Registry represents a registry.
type Registry struct { type Registry struct {
// Serializable registry URL. // A registry is an Endpoint too
URL string `toml:"url"` Endpoint
// The registry's mirrors. // The registry's mirrors.
Mirrors []Mirror `toml:"mirror"` Mirrors []Endpoint `toml:"mirror"`
// If true, pulling from the registry will be blocked. // If true, pulling from the registry will be blocked.
Blocked bool `toml:"blocked"` Blocked bool `toml:"blocked"`
// If true, certs verification will be skipped and HTTP (non-TLS)
// connections will be allowed.
Insecure bool `toml:"insecure"`
// If true, the registry can be used when pulling an unqualified image. // If true, the registry can be used when pulling an unqualified image.
Search bool `toml:"unqualified-search"` Search bool `toml:"unqualified-search"`
// Prefix is used for matching images, and to translate one namespace to // Prefix is used for matching images, and to translate one namespace to
// another. If `Prefix="example.com/bar"`, `URL="example.com/foo/bar"` // another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
// and we pull from "example.com/bar/myimage:latest", the image will // and we pull from "example.com/bar/myimage:latest", the image will
// effectively be pulled from "example.com/foo/bar/myimage:latest". // effectively be pulled from "example.com/foo/bar/myimage:latest".
// If no Prefix is specified, it defaults to the specified URL. // If no Prefix is specified, it defaults to the specified location.
Prefix string `toml:"prefix"` Prefix string `toml:"prefix"`
} }
@ -84,18 +110,18 @@ func (e *InvalidRegistries) Error() string {
return e.s return e.s
} }
// parseURL parses the input string, performs some sanity checks and returns // parseLocation parses the input string, performs some sanity checks and returns
// the sanitized input string. An error is returned if the input string is // the sanitized input string. An error is returned if the input string is
// empty or if it contains an "http{s,}://" prefix. // empty or if it contains an "http{s,}://" prefix.
func parseURL(input string) (string, error) { func parseLocation(input string) (string, error) {
trimmed := strings.TrimRight(input, "/") trimmed := strings.TrimRight(input, "/")
if trimmed == "" { if trimmed == "" {
return "", &InvalidRegistries{s: "invalid URL: cannot be empty"} return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
} }
if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
msg := fmt.Sprintf("invalid URL '%s': URI schemes are not supported", input) msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input)
return "", &InvalidRegistries{s: msg} return "", &InvalidRegistries{s: msg}
} }
@ -111,21 +137,21 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) {
// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations. // to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
registryOrder := []string{} registryOrder := []string{}
getRegistry := func(url string) (*Registry, error) { // Note: _pointer_ to a long-lived object getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object
var err error var err error
url, err = parseURL(url) location, err = parseLocation(location)
if err != nil { if err != nil {
return nil, err return nil, err
} }
reg, exists := regMap[url] reg, exists := regMap[location]
if !exists { if !exists {
reg = &Registry{ reg = &Registry{
URL: url, Endpoint: Endpoint{Location: location},
Mirrors: []Mirror{}, Mirrors: []Endpoint{},
Prefix: url, Prefix: location,
} }
regMap[url] = reg regMap[location] = reg
registryOrder = append(registryOrder, url) registryOrder = append(registryOrder, location)
} }
return reg, nil return reg, nil
} }
@ -155,15 +181,15 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) {
} }
registries := []Registry{} registries := []Registry{}
for _, url := range registryOrder { for _, location := range registryOrder {
reg := regMap[url] reg := regMap[location]
registries = append(registries, *reg) registries = append(registries, *reg)
} }
return registries, nil return registries, nil
} }
// postProcessRegistries checks the consistency of all registries (e.g., set // postProcessRegistries checks the consistency of all registries (e.g., set
// the Prefix to URL if not set) and applies conflict checks. It returns an // the Prefix to Location if not set) and applies conflict checks. It returns an
// array of cleaned registries and error in case of conflicts. // array of cleaned registries and error in case of conflicts.
func postProcessRegistries(regs []Registry) ([]Registry, error) { func postProcessRegistries(regs []Registry) ([]Registry, error) {
var registries []Registry var registries []Registry
@ -172,16 +198,16 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
for _, reg := range regs { for _, reg := range regs {
var err error var err error
// make sure URL and Prefix are valid // make sure Location and Prefix are valid
reg.URL, err = parseURL(reg.URL) reg.Location, err = parseLocation(reg.Location)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if reg.Prefix == "" { if reg.Prefix == "" {
reg.Prefix = reg.URL reg.Prefix = reg.Location
} else { } else {
reg.Prefix, err = parseURL(reg.Prefix) reg.Prefix, err = parseLocation(reg.Prefix)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -189,13 +215,13 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
// make sure mirrors are valid // make sure mirrors are valid
for _, mir := range reg.Mirrors { for _, mir := range reg.Mirrors {
mir.URL, err = parseURL(mir.URL) mir.Location, err = parseLocation(mir.Location)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
registries = append(registries, reg) registries = append(registries, reg)
regMap[reg.URL] = append(regMap[reg.URL], reg) regMap[reg.Location] = append(regMap[reg.Location], reg)
} }
// Given a registry can be mentioned multiple times (e.g., to have // Given a registry can be mentioned multiple times (e.g., to have
@ -205,15 +231,15 @@ func postProcessRegistries(regs []Registry) ([]Registry, error) {
// Note: we need to iterate over the registries array to ensure a // Note: we need to iterate over the registries array to ensure a
// deterministic behavior which is not guaranteed by maps. // deterministic behavior which is not guaranteed by maps.
for _, reg := range registries { for _, reg := range registries {
others, _ := regMap[reg.URL] others, _ := regMap[reg.Location]
for _, other := range others { for _, other := range others {
if reg.Insecure != other.Insecure { if reg.Insecure != other.Insecure {
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.URL) msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location)
return nil, &InvalidRegistries{s: msg} return nil, &InvalidRegistries{s: msg}
} }
if reg.Blocked != other.Blocked { if reg.Blocked != other.Blocked {
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.URL) msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location)
return nil, &InvalidRegistries{s: msg} return nil, &InvalidRegistries{s: msg}
} }
} }

View File

@ -1,5 +1,11 @@
# For more information on this configuration file, see containers-registries.conf(5). # For more information on this configuration file, see containers-registries.conf(5).
# #
# There are multiple versions of the configuration syntax available, where the
# second iteration is backwards compatible with the first one. Mixing both
# formats will result in a runtime error.
#
# The initial configuration format looks like this:
#
# Registries to search for images that are not fully-qualified. # Registries to search for images that are not fully-qualified.
# i.e. foobar.com/my_image:latest vs my_image:latest # i.e. foobar.com/my_image:latest vs my_image:latest
[registries.search] [registries.search]
@ -19,3 +25,41 @@ registries = []
# The atomic CLI `atomic trust` can be used to easily configure the policy.json file. # The atomic CLI `atomic trust` can be used to easily configure the policy.json file.
[registries.block] [registries.block]
registries = [] registries = []
# The second version of the configuration format allows specifying registry
# mirrors:
#
# [[registry]]
# # The main location of the registry
# location = "example.com"
#
# # If true, certs verification will be skipped and HTTP (non-TLS) connections
# # will be allowed.
# insecure = false
#
# # Prefix is used for matching images, and to translate one namespace to
# # another. If `prefix = "example.com/foo"`, `location = "example.com"` and
# # we pull from "example.com/foo/myimage:latest", the image will effectively be
# # pulled from "example.com/myimage:latest". If no Prefix is specified,
# # it defaults to the specified `location`. When a prefix is used, a pull
# # without specifying the prefix is no longer possible.
# prefix = "example.com/foo"
#
# # If true, the registry can be used when pulling an unqualified image. If a
# # prefix is specified, an unqualified pull is no longer possible.
# unqualified-search = false
#
# # If true, pulling from the registry will be blocked.
# blocked = false
#
# # All available mirrors of the registry. The mirrors will be evaluated in
# # order during an image pull. Furthermore, it is possible to specify the
# # `insecure` flag per registry mirror, too.
# mirror = [
# { location = "example-mirror-0.local", insecure = false },
# { location = "example-mirror-1.local", insecure = true },
# # It is also possible to specify an additional path within the `location`.
# # A pull to `example.com/foo/image:latest` will then result in
# # `example-mirror-2.local/path/image:latest`.
# { location = "example-mirror-2.local/path" },
# ]
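
As a hedged illustration of how a program might consume such a configuration (the path and reference below are placeholders), the sysregistriesv2 package exposes lookup helpers:

```
package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	sys := &types.SystemContext{
		SystemRegistriesConfPath: "/etc/containers/registries.conf",
	}
	// FindRegistry looks up the registry whose configured prefix matches the
	// given reference; it returns nil if nothing matches.
	reg, err := sysregistriesv2.FindRegistry(sys, "example.com/foo/image:latest")
	if err != nil {
		panic(err)
	}
	if reg == nil {
		fmt.Println("no registry configured for this reference")
		return
	}
	fmt.Println("location:", reg.Location, "blocked:", reg.Blocked)
	for _, m := range reg.Mirrors {
		fmt.Println("mirror:", m.Location, "insecure:", m.Insecure)
	}
}
```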