Bump github.com/containers/image/v5 from 5.2.1 to 5.3.0

Bumps [github.com/containers/image/v5](https://github.com/containers/image) from 5.2.1 to 5.3.0.
- [Release notes](https://github.com/containers/image/releases)
- [Commits](https://github.com/containers/image/compare/v5.2.1...v5.3.0)

Signed-off-by: dependabot-preview[bot] <support@dependabot.com>
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Author: dependabot-preview[bot]
Date: 2020-03-19 19:57:04 +00:00
Committed by: Miloslav Trmač
Commit: 970af7d1b4 (parent 12865fdfb8)
199 changed files with 4309 additions and 1398 deletions

go.mod

@@ -5,7 +5,7 @@ go 1.12
 require (
   github.com/containers/buildah v1.13.1 // indirect
   github.com/containers/common v0.6.1
-  github.com/containers/image/v5 v5.2.1
+  github.com/containers/image/v5 v5.3.0
   github.com/containers/ocicrypt v0.0.0-20190930154801-b87a4a69c741
   github.com/containers/storage v1.16.5
   github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23

go.sum

@@ -91,6 +91,8 @@ github.com/containers/image/v5 v5.2.0 h1:DowY5OII5x9Pb6Pt76vnHU79BgG4/jdwhZjeAj2
 github.com/containers/image/v5 v5.2.0/go.mod h1:IAub4gDGvXoxaIAdNy4e3FbVTDPVNMv9F0UfVVFbYCU=
 github.com/containers/image/v5 v5.2.1 h1:rQR6QSUneWBoW1bTFpP9EJJTevQFv27YsKYQVJIzg+s=
 github.com/containers/image/v5 v5.2.1/go.mod h1:TfhmLwH+v1/HBVPIWH7diLs8XwcOkP3c7t7JFgqaUEc=
+github.com/containers/image/v5 v5.3.0 h1:m16khjCxqo5KnjkpWHnQLxi1Iza+U68sfX7mN3c+6bs=
+github.com/containers/image/v5 v5.3.0/go.mod h1:AUpxRzTM+7DObq2ja8UE1sxtfmMZ1KlW/qOJS0+sQw0=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v0.0.0-20190930154801-b87a4a69c741 h1:8tQkOcednLJtUcZgK7sPglscXtxvMOnFOa6wd09VWLM=
@@ -456,6 +458,8 @@ github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
 github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=
 github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
+github.com/ulikunitz/xz v0.5.7 h1:YvTNdFzX6+W5m9msiYg/zpkSURPPtOlzbqYjrFn7Yt4=
+github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
@@ -467,6 +471,8 @@ github.com/vbauerster/mpb/v4 v4.11.1 h1:ZOYQSVHgmeanXsbyC44aDg76tBGCS/54Rk8VkL8d
 github.com/vbauerster/mpb/v4 v4.11.1/go.mod h1:vMLa1J/ZKC83G2lB/52XpqT+ZZtFG4aZOdKhmpRL1uM=
 github.com/vbauerster/mpb/v4 v4.11.2 h1:ynkUoKzi65DZ1UsQPx7sgi/KN6G9f7br+Us2nKm35AM=
 github.com/vbauerster/mpb/v4 v4.11.2/go.mod h1:jIuIRCltGJUnm6DCyPVkwjlLUk4nHTH+m4eD14CdFF0=
+github.com/vbauerster/mpb/v4 v4.12.2 h1:TsBs1nWRYF0m8cUH13pxNhOUqY6yKcOr2PeSYxp2L3I=
+github.com/vbauerster/mpb/v4 v4.12.2/go.mod h1:LVRGvMch8T4HQO3eg2pFPsACH9kO/O6fT/7vhGje3QE=
 github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -494,6 +500,8 @@ golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5
 golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4=
 golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6 h1:Sy5bstxEqwwbYs6n0/pBuxKENqOeZUgD45Gp3Q3pqLg=
+golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -551,6 +559,8 @@ golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 h1:/J2nHFg1MTqaRLFO7M+J78ASNsJoz3r0cvHBPQ77fsE=
 golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867 h1:JoRuNIf+rpHl+VhScRQQvzbHed86tKkqwPMV34T8myw=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=


@@ -8,13 +8,13 @@ import (
   "io/ioutil"
   "os"
   "reflect"
-  "runtime"
   "strings"
   "sync"
   "time"
 
   "github.com/containers/image/v5/docker/reference"
   "github.com/containers/image/v5/image"
+  "github.com/containers/image/v5/internal/pkg/platform"
   "github.com/containers/image/v5/manifest"
   "github.com/containers/image/v5/pkg/blobinfocache"
   "github.com/containers/image/v5/pkg/compression"
@@ -356,11 +356,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
   if err != nil {
     return nil, "", errors.Wrapf(err, "Error reading manifest list")
   }
-  list, err := manifest.ListFromBlob(manifestList, manifestType)
+  originalList, err := manifest.ListFromBlob(manifestList, manifestType)
   if err != nil {
     return nil, "", errors.Wrapf(err, "Error parsing manifest list %q", string(manifestList))
   }
-  originalList := list.Clone()
+  updatedList := originalList.Clone()
 
   // Read and/or clear the set of signatures for this list.
   var sigs [][]byte
@@ -390,18 +390,18 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
   case imgspecv1.MediaTypeImageManifest:
     forceListMIMEType = imgspecv1.MediaTypeImageIndex
   }
-  selectedListType, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
+  selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
   if err != nil {
     return nil, "", errors.Wrapf(err, "Error determining manifest list type to write to destination")
   }
-  if selectedListType != list.MIMEType() {
+  if selectedListType != originalList.MIMEType() {
     if !canModifyManifestList {
       return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType)
     }
   }
 
   // Copy each image, or just the ones we want to copy, in turn.
-  instanceDigests := list.Instances()
+  instanceDigests := updatedList.Instances()
   imagesToCopy := len(instanceDigests)
   if options.ImageListSelection == CopySpecificImages {
     imagesToCopy = len(options.Instances)
@@ -419,7 +419,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
       }
     }
     if skip {
-      update, err := list.Instance(instanceDigest)
+      update, err := updatedList.Instance(instanceDigest)
       if err != nil {
         return nil, "", err
       }
@@ -447,42 +447,58 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
   }
   // Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
-  if err = list.UpdateInstances(updates); err != nil {
+  if err = updatedList.UpdateInstances(updates); err != nil {
     return nil, "", errors.Wrapf(err, "Error updating manifest list")
   }
 
-  // Perform the list conversion.
-  if selectedListType != list.MIMEType() {
-    list, err = list.ConvertToMIMEType(selectedListType)
-    if err != nil {
-      return nil, "", errors.Wrapf(err, "Error converting manifest list to list with MIME type %q", selectedListType)
-    }
-  }
-
-  // Check if the updates or a type conversion meaningfully changed the list of images
-  // by serializing them both so that we can compare them.
-  updatedManifestList, err := list.Serialize()
-  if err != nil {
-    return nil, "", errors.Wrapf(err, "Error encoding updated manifest list (%q: %#v)", list.MIMEType(), list.Instances())
-  }
-  originalManifestList, err := originalList.Serialize()
-  if err != nil {
-    return nil, "", errors.Wrapf(err, "Error encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances())
-  }
-
-  // If we can't just use the original value, but we have to change it, flag an error.
-  if !bytes.Equal(updatedManifestList, originalManifestList) {
-    if !canModifyManifestList {
-      return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", selectedListType)
-    }
-    manifestList = updatedManifestList
-    logrus.Debugf("Manifest list has been updated")
-  }
-
-  // Save the manifest list.
+  // Iterate through supported list types, preferred format first.
   c.Printf("Writing manifest list to image destination\n")
-  if err = c.dest.PutManifest(ctx, manifestList, nil); err != nil {
-    return nil, "", errors.Wrapf(err, "Error writing manifest list %q", string(manifestList))
+  var errs []string
+  for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) {
+    attemptedList := updatedList
+
+    logrus.Debugf("Trying to use manifest list type %s…", thisListType)
+
+    // Perform the list conversion, if we need one.
+    if thisListType != updatedList.MIMEType() {
+      attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
+      if err != nil {
+        return nil, "", errors.Wrapf(err, "Error converting manifest list to list with MIME type %q", thisListType)
+      }
+    }
+
+    // Check if the updates or a type conversion meaningfully changed the list of images
+    // by serializing them both so that we can compare them.
+    attemptedManifestList, err := attemptedList.Serialize()
+    if err != nil {
+      return nil, "", errors.Wrapf(err, "Error encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances())
+    }
+    originalManifestList, err := originalList.Serialize()
+    if err != nil {
+      return nil, "", errors.Wrapf(err, "Error encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances())
+    }
+
+    // If we can't just use the original value, but we have to change it, flag an error.
+    if !bytes.Equal(attemptedManifestList, originalManifestList) {
+      if !canModifyManifestList {
+        return nil, "", errors.Errorf("Error: manifest list must be converted to type %q to be written to destination, but that would invalidate signatures", thisListType)
+      }
+      logrus.Debugf("Manifest list has been updated")
+    }
+
+    // Save the manifest list.
+    err = c.dest.PutManifest(ctx, attemptedManifestList, nil)
+    if err != nil {
+      logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err)
+      errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err))
+      continue
+    }
+    errs = nil
+    manifestList = attemptedManifestList
+    break
+  }
+  if errs != nil {
+    return nil, "", fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
   }
 
   // Sign the manifest list.
@@ -527,15 +543,6 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
     return nil, "", "", errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
   }
 
-  // TODO: Remove src.SupportsEncryption call and interface once copyUpdatedConfigAndManifest does not depend on source Image manifest type
-  // Currently, the way copyUpdatedConfigAndManifest updates the manifest is to apply updates to the source manifest and call PutManifest
-  // of the modified source manifest. The implication is that schemas like docker2 cannot be encrypted even though the destination
-  // supports encryption because docker2 struct does not have annotations, which are required.
-  // Reference to issue: https://github.com/containers/image/issues/746
-  if options.OciEncryptLayers != nil && !src.SupportsEncryption(ctx) {
-    return nil, "", "", errors.Errorf("Encryption request but not supported by source transport %s", src.Reference().Transport().Name())
-  }
-
   // If the destination is a digested reference, make a note of that, determine what digest value we're
   // expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
   // one item from a manifest list that matches it, accept that as a match.
@@ -708,21 +715,26 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
     if err != nil {
       return errors.Wrapf(err, "Error parsing image configuration")
     }
-
-    wantedOS := runtime.GOOS
-    if sys != nil && sys.OSChoice != "" {
-      wantedOS = sys.OSChoice
-    }
-    if wantedOS != c.OS {
-      logrus.Infof("Image operating system mismatch: image uses %q, expecting %q", c.OS, wantedOS)
-    }
-
-    wantedArch := runtime.GOARCH
-    if sys != nil && sys.ArchitectureChoice != "" {
-      wantedArch = sys.ArchitectureChoice
-    }
-    if wantedArch != c.Architecture {
-      logrus.Infof("Image architecture mismatch: image uses %q, expecting %q", c.Architecture, wantedArch)
+    wantedPlatforms, err := platform.WantedPlatforms(sys)
+    if err != nil {
+      return errors.Wrapf(err, "error getting current platform information %#v", sys)
+    }
+
+    options := newOrderedSet()
+    match := false
+    for _, wantedPlatform := range wantedPlatforms {
+      // Waiting for https://github.com/opencontainers/image-spec/pull/777 :
+      // This currently can't use image.MatchesPlatform because we don't know what to use
+      // for image.Variant.
+      if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture {
+        match = true
+        break
+      }
+      options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture))
+    }
+    if !match {
+      logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q",
+        c.OS, c.Architecture, strings.Join(options.list, ", "))
     }
   }
   return nil
@@ -833,21 +845,24 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
     }
   }
 
-  func() { // A scope for defer
+  if err := func() error { // A scope for defer
     progressPool, progressCleanup := ic.c.newProgressPool(ctx)
     defer progressCleanup()
     for i, srcLayer := range srcInfos {
       err = copySemaphore.Acquire(ctx, 1)
       if err != nil {
-        logrus.Debug("Can't acquire semaphoer", err)
+        return errors.Wrapf(err, "Can't acquire semaphore")
       }
       go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool)
     }
 
     // Wait for all layers to be copied
     copyGroup.Wait()
-  }()
+    return nil
+  }(); err != nil {
+    return err
+  }
 
   destInfos := make([]types.BlobInfo, numLayers)
   diffIDs := make([]digest.Digest, numLayers)
@@ -1006,7 +1021,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
       return destInfo, nil
     }()
     if err != nil {
-      return nil
+      return err
     }
     if destInfo.Digest != srcInfo.Digest {
       return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)

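The copy.go changes above all feed into the public copy.Image entry point; purely as an illustration (not part of this commit), a caller of the bumped library might exercise the updated manifest-list path roughly as sketched below. The registry names and the accept-anything policy are placeholders.

```go
package main

import (
    "context"

    "github.com/containers/image/v5/copy"
    "github.com/containers/image/v5/signature"
    "github.com/containers/image/v5/transports/alltransports"
)

// copyList copies a multi-arch image, letting the library pick (and, with
// v5.3.0, retry) a manifest list format the destination registry accepts.
func copyList(ctx context.Context) error {
    srcRef, err := alltransports.ParseImageName("docker://quay.io/example/app:latest")
    if err != nil {
        return err
    }
    destRef, err := alltransports.ParseImageName("docker://registry.example.com/app:latest")
    if err != nil {
        return err
    }
    // Placeholder policy that accepts anything; a real caller would load the system policy.
    policyContext, err := signature.NewPolicyContext(&signature.Policy{
        Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
    })
    if err != nil {
        return err
    }
    defer policyContext.Destroy()

    _, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
        ImageListSelection: copy.CopyAllImages, // copy every instance in the manifest list
    })
    return err
}
```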

@@ -15,7 +15,7 @@ import (
 // Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
 var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
 
-// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once.
+// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
 type orderedSet struct {
   list []string
   included map[string]struct{}
@@ -125,8 +125,10 @@ func isMultiImage(ctx context.Context, img types.UnparsedImage) (bool, error) {
 // determineListConversion takes the current MIME type of a list of manifests,
 // the list of MIME types supported for a given destination, and a possible
 // forced value, and returns the MIME type to which we should convert the list
-// of manifests, whether we are converting to it or using it unmodified.
-func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, error) {
+// of manifests (regardless of whether we are converting to it or using it
+// unmodified) and a slice of other list types which might be supported by the
+// destination.
+func (c *copier) determineListConversion(currentListMIMEType string, destSupportedMIMETypes []string, forcedListMIMEType string) (string, []string, error) {
   // If there's no list of supported types, then anything we support is expected to be supported.
   if len(destSupportedMIMETypes) == 0 {
     destSupportedMIMETypes = manifest.SupportedListMIMETypes
@@ -136,6 +138,7 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
     destSupportedMIMETypes = []string{forcedListMIMEType}
   }
   var selectedType string
+  var otherSupportedTypes []string
   for i := range destSupportedMIMETypes {
     // The second priority is the first member of the list of acceptable types that is a list,
     // but keep going in case current type occurs later in the list.
@@ -148,15 +151,21 @@ func (c *copier) determineListConversion(currentListMIMEType string, destSupport
       selectedType = destSupportedMIMETypes[i]
     }
   }
+  // Pick out the other list types that we support.
+  for i := range destSupportedMIMETypes {
+    if selectedType != destSupportedMIMETypes[i] && manifest.MIMETypeIsMultiImage(destSupportedMIMETypes[i]) {
+      otherSupportedTypes = append(otherSupportedTypes, destSupportedMIMETypes[i])
+    }
+  }
   logrus.Debugf("Manifest list has MIME type %s, ordered candidate list [%s]", currentListMIMEType, strings.Join(destSupportedMIMETypes, ", "))
   if selectedType == "" {
-    return "", errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
+    return "", nil, errors.Errorf("destination does not support any supported manifest list types (%v)", manifest.SupportedListMIMETypes)
   }
   if selectedType != currentListMIMEType {
-    logrus.Debugf("... will convert to %s", selectedType)
+    logrus.Debugf("... will convert to %s first, and then try %v", selectedType, otherSupportedTypes)
   } else {
-    logrus.Debugf("... will use the original manifest list type")
+    logrus.Debugf("... will use the original manifest list type, and then try %v", otherSupportedTypes)
   }
   // Done.
-  return selectedType, nil
+  return selectedType, otherSupportedTypes, nil
 }


@@ -41,10 +41,10 @@ func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
 
 // archiveReference is an ImageReference for Docker images.
 type archiveReference struct {
-  // only used for destinations
+  path string
+  // only used for destinations,
   // archiveReference.destinationRef is optional and can be nil for destinations as well.
   destinationRef reference.NamedTagged
-  path string
 }
 
 // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
@@ -64,11 +64,6 @@ func ParseReference(refString string) (types.ImageReference, error) {
       return nil, errors.Wrapf(err, "docker-archive parsing reference")
     }
     ref = reference.TagNameOnly(ref)
-    if _, isDigest := ref.(reference.Canonical); isDigest {
-      return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
-    }
-
     refTagged, isTagged := ref.(reference.NamedTagged)
     if !isTagged {
       // Really shouldn't be hit...
@@ -77,9 +72,20 @@ func ParseReference(refString string) (types.ImageReference, error) {
     destinationRef = refTagged
   }
 
+  return NewReference(path, destinationRef)
+}
+
+// NewReference rethrns a Docker archive reference for a path and an optional destination reference.
+func NewReference(path string, destinationRef reference.NamedTagged) (types.ImageReference, error) {
+  if strings.Contains(path, ":") {
+    return nil, errors.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
+  }
+  if _, isDigest := destinationRef.(reference.Canonical); isDigest {
+    return nil, errors.Errorf("docker-archive doesn't support digest references: %s", destinationRef.String())
+  }
   return archiveReference{
-    destinationRef: destinationRef,
     path: path,
+    destinationRef: destinationRef,
   }, nil
 }

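The newly exported NewReference above can be called directly by API users; as a hedged sketch (the helper name and tar path are made up for illustration), building a docker-archive destination reference might look like this:

```go
package main

import (
    "fmt"

    "github.com/containers/image/v5/docker/archive"
    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/types"
)

// newArchiveDest wraps archive.NewReference: it writes to tarPath and records
// name (normalized and defaulted to :latest) as the stored image's tag.
func newArchiveDest(tarPath, name string) (types.ImageReference, error) {
    named, err := reference.ParseNormalizedNamed(name)
    if err != nil {
        return nil, err
    }
    tagged, ok := reference.TagNameOnly(named).(reference.NamedTagged)
    if !ok {
        return nil, fmt.Errorf("%q could not be resolved to a tagged name", name)
    }
    return archive.NewReference(tarPath, tagged)
}
```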

@@ -1,11 +1,13 @@
 package docker
 
 import (
+  "bytes"
   "context"
   "crypto/tls"
   "encoding/json"
   "fmt"
   "io"
+  "io/ioutil"
   "net/http"
   "net/url"
   "os"
@@ -21,6 +23,7 @@ import (
   "github.com/containers/image/v5/pkg/sysregistriesv2"
   "github.com/containers/image/v5/pkg/tlsclientconfig"
   "github.com/containers/image/v5/types"
+  "github.com/containers/storage/pkg/homedir"
   clientLib "github.com/docker/distribution/registry/client"
   "github.com/docker/go-connections/tlsconfig"
   digest "github.com/opencontainers/go-digest"
@@ -51,7 +54,18 @@ const (
   backoffMaxDelay = 60 * time.Second
 )
 
-var systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"}
+type certPath struct {
+  path     string
+  absolute bool
+}
+
+var (
+  homeCertDir     = filepath.FromSlash(".config/containers/certs.d")
+  perHostCertDirs = []certPath{
+    {path: "/etc/containers/certs.d", absolute: true},
+    {path: "/etc/docker/certs.d", absolute: true},
+  }
+)
 
 // extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
 // signature represents a Docker image signature.
@@ -85,8 +99,8 @@ type dockerClient struct {
   // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime.
   tlsClientConfig *tls.Config
   // The following members are not set by newDockerClient and must be set by callers if needed.
-  username      string
-  password      string
+  auth          types.DockerAuthConfig
+  registryToken string
   signatureBase signatureStorageBase
   scope         authScope
@@ -166,11 +180,12 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
     hostCertDir     string
     fullCertDirPath string
   )
-  for _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {
-    if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
-      hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
+
+  for _, perHostCertDir := range append([]certPath{{path: filepath.Join(homedir.Get(), homeCertDir), absolute: false}}, perHostCertDirs...) {
+    if sys != nil && sys.RootForImplicitAbsolutePaths != "" && perHostCertDir.absolute {
+      hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, perHostCertDir.path)
     } else {
-      hostCertDir = systemPerHostCertDirPath
+      hostCertDir = perHostCertDir.path
     }
 
     fullCertDirPath = filepath.Join(hostCertDir, hostPort)
@@ -196,10 +211,11 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
 // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
 func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
   registry := reference.Domain(ref.ref)
-  username, password, err := config.GetAuthentication(sys, registry)
+  auth, err := config.GetCredentials(sys, registry)
   if err != nil {
     return nil, errors.Wrapf(err, "error getting username and password")
   }
+
   sigBase, err := configuredSignatureStorageBase(sys, ref, write)
   if err != nil {
     return nil, err
@@ -209,8 +225,10 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
   if err != nil {
     return nil, err
   }
-  client.username = username
-  client.password = password
+  client.auth = auth
+  if sys != nil {
+    client.registryToken = sys.DockerBearerRegistryToken
+  }
   client.signatureBase = sigBase
   client.scope.actions = actions
   client.scope.remoteName = reference.Path(ref.ref)
@@ -252,7 +270,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
   }
   if reg != nil {
     if reg.Blocked {
-      return nil, fmt.Errorf("registry %s is blocked in %s", reg.Prefix, sysregistriesv2.ConfigPath(sys))
+      return nil, fmt.Errorf("registry %s is blocked in %s or %s", reg.Prefix, sysregistriesv2.ConfigPath(sys), sysregistriesv2.ConfigDirPath(sys))
     }
     skipVerify = reg.Insecure
   }
@@ -272,8 +290,10 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
   if err != nil {
     return errors.Wrapf(err, "error creating new docker client")
   }
-  client.username = username
-  client.password = password
+  client.auth = types.DockerAuthConfig{
+    Username: username,
+    Password: password,
+  }
 
   resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
   if err != nil {
@@ -315,7 +335,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
   v1Res := &V1Results{}
 
   // Get credentials from authfile for the underlying hostname
-  username, password, err := config.GetAuthentication(sys, registry)
+  auth, err := config.GetCredentials(sys, registry)
   if err != nil {
     return nil, errors.Wrapf(err, "error getting username and password")
   }
@@ -333,8 +353,10 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
   if err != nil {
     return nil, errors.Wrapf(err, "error creating new docker client")
   }
-  client.username = username
-  client.password = password
+  client.auth = auth
+  if sys != nil {
+    client.registryToken = sys.DockerBearerRegistryToken
+  }
 
   // Only try the v1 search endpoint if the search query is not empty. If it is
   // empty skip to the v2 endpoint.
@@ -515,30 +537,43 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
     schemeNames = append(schemeNames, challenge.Scheme)
     switch challenge.Scheme {
     case "basic":
-      req.SetBasicAuth(c.username, c.password)
+      req.SetBasicAuth(c.auth.Username, c.auth.Password)
       return nil
     case "bearer":
-      cacheKey := ""
-      scopes := []authScope{c.scope}
-      if extraScope != nil {
-        // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
-        cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
-        scopes = append(scopes, *extraScope)
-      }
-      var token bearerToken
-      t, inCache := c.tokenCache.Load(cacheKey)
-      if inCache {
-        token = t.(bearerToken)
-      }
-      if !inCache || time.Now().After(token.expirationTime) {
-        t, err := c.getBearerToken(req.Context(), challenge, scopes)
-        if err != nil {
-          return err
-        }
-        token = *t
-        c.tokenCache.Store(cacheKey, token)
-      }
-      req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
+      registryToken := c.registryToken
+      if registryToken == "" {
+        cacheKey := ""
+        scopes := []authScope{c.scope}
+        if extraScope != nil {
+          // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
+          cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
+          scopes = append(scopes, *extraScope)
+        }
+        var token bearerToken
+        t, inCache := c.tokenCache.Load(cacheKey)
+        if inCache {
+          token = t.(bearerToken)
+        }
+        if !inCache || time.Now().After(token.expirationTime) {
+          var (
+            t   *bearerToken
+            err error
+          )
+          if c.auth.IdentityToken != "" {
+            t, err = c.getBearerTokenOAuth2(req.Context(), challenge, scopes)
+          } else {
+            t, err = c.getBearerToken(req.Context(), challenge, scopes)
+          }
+          if err != nil {
+            return err
+          }
+          token = *t
+          c.tokenCache.Store(cacheKey, token)
+        }
+        registryToken = token.Token
+      }
+      req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken))
       return nil
     default:
       logrus.Debugf("no handler for %s authentication", challenge.Scheme)
@@ -548,48 +583,96 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
   return nil
 }
 
-func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) {
+func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge challenge,
+  scopes []authScope) (*bearerToken, error) {
   realm, ok := challenge.Parameters["realm"]
   if !ok {
     return nil, errors.Errorf("missing realm in bearer auth challenge")
   }
 
-  authReq, err := http.NewRequest("GET", realm, nil)
+  authReq, err := http.NewRequest(http.MethodPost, realm, nil)
   if err != nil {
     return nil, err
   }
+
   authReq = authReq.WithContext(ctx)
-  getParams := authReq.URL.Query()
-  if c.username != "" {
-    getParams.Add("account", c.username)
-  }
+
+  // Make the form data required against the oauth2 authentication
+  // More details here: https://docs.docker.com/registry/spec/auth/oauth/
+  params := authReq.URL.Query()
   if service, ok := challenge.Parameters["service"]; ok && service != "" {
-    getParams.Add("service", service)
+    params.Add("service", service)
   }
   for _, scope := range scopes {
     if scope.remoteName != "" && scope.actions != "" {
-      getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+      params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
     }
   }
-  authReq.URL.RawQuery = getParams.Encode()
-  if c.username != "" && c.password != "" {
-    authReq.SetBasicAuth(c.username, c.password)
-  }
+  params.Add("grant_type", "refresh_token")
+  params.Add("refresh_token", c.auth.IdentityToken)
+
+  authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
+  authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
   logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
   res, err := c.client.Do(authReq)
   if err != nil {
     return nil, err
   }
   defer res.Body.Close()
-  switch res.StatusCode {
-  case http.StatusUnauthorized:
-    err := clientLib.HandleErrorResponse(res)
-    logrus.Debugf("Server response when trying to obtain an access token: \n%q", err.Error())
-    return nil, ErrUnauthorizedForCredentials{Err: err}
-  case http.StatusOK:
-    break
-  default:
-    return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL)
+  if err := httpResponseToError(res, "Trying to obtain access token"); err != nil {
+    return nil, err
+  }
+
+  tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
+  if err != nil {
+    return nil, err
+  }
+
+  return newBearerTokenFromJSONBlob(tokenBlob)
+}
+
+func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
+  scopes []authScope) (*bearerToken, error) {
+  realm, ok := challenge.Parameters["realm"]
+  if !ok {
+    return nil, errors.Errorf("missing realm in bearer auth challenge")
+  }
+
+  authReq, err := http.NewRequest(http.MethodGet, realm, nil)
+  if err != nil {
+    return nil, err
+  }
+
+  authReq = authReq.WithContext(ctx)
+
+  params := authReq.URL.Query()
+  if c.auth.Username != "" {
+    params.Add("account", c.auth.Username)
+  }
+  if service, ok := challenge.Parameters["service"]; ok && service != "" {
+    params.Add("service", service)
+  }
+  for _, scope := range scopes {
+    if scope.remoteName != "" && scope.actions != "" {
+      params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+    }
+  }
+
+  authReq.URL.RawQuery = params.Encode()
+
+  if c.auth.Username != "" && c.auth.Password != "" {
+    authReq.SetBasicAuth(c.auth.Username, c.auth.Password)
+  }
+
+  logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
+  res, err := c.client.Do(authReq)
+  if err != nil {
+    return nil, err
+  }
+  defer res.Body.Close()
+  if err := httpResponseToError(res, "Requesting bear token"); err != nil {
+    return nil, err
   }
+
   tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
   if err != nil {

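The client changes above replace the plain username/password fields with types.DockerAuthConfig and add a pre-supplied registry token path. Purely as an illustration (not part of this commit; the registry name, the placeholder tokens, and the use of docker.GetRepositoryTags are our assumptions), a caller might exercise the new credential fields roughly like this:

```go
package main

import (
    "context"
    "fmt"

    "github.com/containers/image/v5/docker"
    "github.com/containers/image/v5/types"
)

// listTags shows the two new credential paths: a ready-made registry bearer
// token, or an OAuth2 identity (refresh) token the client exchanges itself.
func listTags(ctx context.Context) error {
    ref, err := docker.ParseReference("//registry.example.com/example/app")
    if err != nil {
        return err
    }
    sys := &types.SystemContext{
        // Use one of these in practice; both are shown only for illustration.
        DockerBearerRegistryToken: "<registry token>",
        DockerAuthConfig: &types.DockerAuthConfig{
            IdentityToken: "<identity/refresh token>",
        },
    }
    tags, err := docker.GetRepositoryTags(ctx, sys, ref)
    if err != nil {
        return err
    }
    fmt.Println(tags)
    return nil
}
```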

@@ -108,6 +108,7 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, pullSo
   if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(ref.ref) != primaryDomain {
     copy := *endpointSys
     copy.DockerAuthConfig = nil
+    copy.DockerBearerRegistryToken = ""
     endpointSys = &copy
   }


@@ -249,6 +249,9 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
   if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
     return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
   }
+  if parsedConfig.RootFS == nil {
+    return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest[0].Config)
+  }
 
   knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
   if err != nil {


@@ -56,7 +56,7 @@ func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) {
 // layers in the resulting configuration isn't guaranteed to be returned to due how
 // old image manifests work (docker v2s1 especially).
 func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
-  v2s2, err := m.convertToManifestSchema2(nil, nil)
+  v2s2, err := m.convertToManifestSchema2(ctx, types.ManifestUpdateInformation{})
   if err != nil {
     return nil, err
   }
@@ -107,6 +107,24 @@ func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp
 // This does not change the state of the original Image object.
 func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
   copy := manifestSchema1{m: manifest.Schema1Clone(m.m)}
+
+  // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn't a signature; so,
+  // handle conversions between them by doing nothing.
+  if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType {
+    converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+      imgspecv1.MediaTypeImageManifest:  copy.convertToManifestOCI1,
+      manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2,
+    })
+    if err != nil {
+      return nil, err
+    }
+
+    if converted != nil {
+      return converted, nil
+    }
+  }
+
+  // No conversion required, update manifest
   if options.LayerInfos != nil {
     if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
       return nil, err
@@ -121,36 +139,14 @@ func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.Manife
     }
   }
 
-  switch options.ManifestMIMEType {
-  case "": // No conversion, OK
-  case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
-    // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn't a signature; so,
-    // handle conversions between them by doing nothing.
-  case manifest.DockerV2Schema2MediaType:
-    m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
-    if err != nil {
-      return nil, err
-    }
-    return memoryImageFromManifest(m2), nil
-  case imgspecv1.MediaTypeImageManifest:
-    // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest
-    m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
-    if err != nil {
-      return nil, err
-    }
-    return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{
-      ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
-      InformationOnly:  options.InformationOnly,
-    })
-  default:
-    return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
-  }
-
   return memoryImageFromManifest(&copy), nil
 }
 
 // Based on github.com/docker/docker/distribution/pull_v2.go
-func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) {
+func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, updateInfo types.ManifestUpdateInformation) (types.Image, error) {
+  uploadedLayerInfos := updateInfo.LayerInfos
+  layerDiffIDs := updateInfo.LayerDiffIDs
+
   if len(m.m.ExtractedV1Compatibility) == 0 {
     // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing.
     return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
@@ -198,7 +194,21 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
     Digest: digest.FromBytes(configJSON),
   }
 
-  return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil
+  m1 := manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers)
+  return memoryImageFromManifest(m1), nil
+}
+
+func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, updateInfo types.ManifestUpdateInformation) (types.Image, error) {
+  // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest
+  m2, err := m.convertToManifestSchema2(ctx, updateInfo)
+  if err != nil {
+    return nil, err
+  }
+
+  return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{
+    ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
+    InformationOnly:  updateInfo,
+  })
 }
 
 // SupportsEncryption returns if encryption is supported for the manifest type


@@ -160,6 +160,21 @@ func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.Manife
     configBlob: m.configBlob,
     m: manifest.Schema2Clone(m.m),
   }
+
+  converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
+    manifest.DockerV2Schema1MediaType:       copy.convertToManifestSchema1,
+    manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
+    imgspecv1.MediaTypeImageManifest:        copy.convertToManifestOCI1,
+  })
+  if err != nil {
+    return nil, err
+  }
+
+  if converted != nil {
+    return converted, nil
+  }
+
+  // No conversion required, update manifest
   if options.LayerInfos != nil {
     if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
       return nil, err
@@ -167,16 +182,6 @@ func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.Manife
   }
   // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
 
-  switch options.ManifestMIMEType {
-  case "": // No conversion, OK
-  case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType:
-    return copy.convertToManifestSchema1(ctx, options.InformationOnly.Destination)
-  case imgspecv1.MediaTypeImageManifest:
-    return copy.convertToManifestOCI1(ctx)
-  default:
-    return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
-  }
-
   return memoryImageFromManifest(&copy), nil
 }
@@ -189,7 +194,7 @@ func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1
   }
 }
 
-func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Image, error) {
+func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ types.ManifestUpdateInformation) (types.Image, error) {
   configOCI, err := m.OCIConfig(ctx)
   if err != nil {
     return nil, err
@@ -227,7 +232,8 @@ func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context) (types.Imag
 }
 
 // Based on docker/distribution/manifest/schema1/config_builder.go
-func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest types.ImageDestination) (types.Image, error) {
+func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, updateInfo types.ManifestUpdateInformation) (types.Image, error) {
+  dest := updateInfo.Destination
   configBytes, err := m.ConfigBlob(ctx)
   if err != nil {
     return nil, err


@@ -8,6 +8,7 @@ import (
   "github.com/containers/image/v5/manifest"
   "github.com/containers/image/v5/types"
   imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+  "github.com/pkg/errors"
 )
 
 // genericManifest is an interface for parsing, modifying image manifests and related data.
@@ -45,6 +46,10 @@ type genericManifest interface {
   // This does not change the state of the original Image object.
   UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
   // SupportsEncryption returns if encryption is supported for the manifest type
+  //
+  // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
+  // the process of updating a manifest between different manifest types was to update then convert.
+  // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
   SupportsEncryption(ctx context.Context) bool
 }
@@ -75,3 +80,30 @@ func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo
   }
   return blobs
 }
+
+// manifestConvertFn is used to encapsulate helper manifest converstion functions
+// to perform applying of manifest update information.
+type manifestConvertFn func(context.Context, types.ManifestUpdateInformation) (types.Image, error)
+
+// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if
+// required and re-apply the options to the converted type.
+// It returns (nil, nil) if no conversion was requested.
+func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) {
+  if options.ManifestMIMEType == "" {
+    return nil, nil
+  }
+
+  converter, ok := converters[options.ManifestMIMEType]
+  if !ok {
+    return nil, errors.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType)
+  }
+
+  tmp, err := converter(ctx, options.InformationOnly)
+  if err != nil {
+    return nil, err
+  }
+
+  optionsCopy := options
+  optionsCopy.ManifestMIMEType = ""
+
+  return tmp.UpdatedImage(ctx, optionsCopy)
+}

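The conversion plumbing above is driven through types.Image.UpdatedImage; as a minimal sketch (the helper name is ours, not the library's), requesting a schema2 conversion from caller code looks roughly like this:

```go
package main

import (
    "context"

    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/types"
)

// toSchema2 returns a new types.Image whose manifest has been converted to the
// Docker schema2 MIME type; the original image is left unchanged.
func toSchema2(ctx context.Context, img types.Image) (types.Image, error) {
    return img.UpdatedImage(ctx, types.ManifestUpdateOptions{
        ManifestMIMEType: manifest.DockerV2Schema2MediaType,
    })
}
```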

@ -140,6 +140,21 @@ func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestU
configBlob: m.configBlob, configBlob: m.configBlob,
m: manifest.OCI1Clone(m.m), m: manifest.OCI1Clone(m.m),
} }
converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2,
manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
})
if err != nil {
return nil, err
}
if converted != nil {
return converted, nil
}
// No conversion required, update manifest
if options.LayerInfos != nil {
if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
return nil, err
@@ -147,24 +162,6 @@ func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestU
}
// Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
-switch options.ManifestMIMEType {
-case "": // No conversion, OK
-case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
-// We can't directly convert to V1, but we can transitively convert via a V2 image
-m2, err := copy.convertToManifestSchema2()
-if err != nil {
-return nil, err
-}
-return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{
-ManifestMIMEType: options.ManifestMIMEType,
-InformationOnly:  options.InformationOnly,
-})
-case manifest.DockerV2Schema2MediaType:
-return copy.convertToManifestSchema2()
-default:
-return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
-}
return memoryImageFromManifest(&copy), nil
}
@@ -177,7 +174,7 @@ func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema
}
}
-func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ types.ManifestUpdateInformation) (types.Image, error) {
// Create a copy of the descriptor.
config := schema2DescriptorFromOCI1Descriptor(m.m.Config)
@@ -213,6 +210,19 @@ func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
return memoryImageFromManifest(m1), nil
}
func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, updateInfo types.ManifestUpdateInformation) (types.Image, error) {
// We can't directly convert to V1, but we can transitively convert via a V2 image
m2, err := m.convertToManifestSchema2(ctx, updateInfo)
if err != nil {
return nil, err
}
return m2.UpdatedImage(ctx, types.ManifestUpdateOptions{
ManifestMIMEType: manifest.DockerV2Schema1SignedMediaType,
InformationOnly: updateInfo,
})
}
// SupportsEncryption returns if encryption is supported for the manifest type
func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
return true


@@ -0,0 +1,196 @@
package platform
// Largely based on
// https://github.com/moby/moby/blob/bc846d2e8fe5538220e0c31e9d0e8446f6fbc022/distribution/cpuinfo_unix.go
// Copyright 2012-2017 Docker, Inc.
//
// https://github.com/containerd/containerd/blob/726dcaea50883e51b2ec6db13caff0e7936b711d/platforms/cpuinfo.go
// Copyright The containerd Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import (
"bufio"
"fmt"
"os"
"runtime"
"strings"
"github.com/containers/image/v5/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// For Linux, the kernel has already detected the ABI, ISA and features,
// so we don't need to access the ARM registers to detect platform information
// ourselves. We can just parse this information from /proc/cpuinfo.
func getCPUInfo(pattern string) (info string, err error) {
if runtime.GOOS != "linux" {
return "", fmt.Errorf("getCPUInfo for OS %s not implemented", runtime.GOOS)
}
cpuinfo, err := os.Open("/proc/cpuinfo")
if err != nil {
return "", err
}
defer cpuinfo.Close()
// Parse /proc/cpuinfo line by line. For SMP SoCs, parsing
// the first core is enough.
scanner := bufio.NewScanner(cpuinfo)
for scanner.Scan() {
newline := scanner.Text()
list := strings.Split(newline, ":")
if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
return strings.TrimSpace(list[1]), nil
}
}
// Check whether the scanner encountered errors
err = scanner.Err()
if err != nil {
return "", err
}
return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern)
}
func getCPUVariantWindows() string {
// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
// runtime.GOARCH to determine the variants
var variant string
switch runtime.GOARCH {
case "arm64":
variant = "v8"
case "arm":
variant = "v7"
default:
variant = ""
}
return variant
}
func getCPUVariantArm() string {
variant, err := getCPUInfo("Cpu architecture")
if err != nil {
return ""
}
// TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286)
switch strings.ToLower(variant) {
case "8", "aarch64":
variant = "v8"
case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
variant = "v7"
case "6", "6tej":
variant = "v6"
case "5", "5t", "5te", "5tej":
variant = "v5"
case "4", "4t":
variant = "v4"
case "3":
variant = "v3"
default:
variant = ""
}
return variant
}
func getCPUVariant(os string, arch string) string {
if os == "windows" {
return getCPUVariantWindows()
}
if arch == "arm" || arch == "arm64" {
return getCPUVariantArm()
}
return ""
}
var compatibility = map[string][]string{
"arm": {"v7", "v6", "v5"},
"arm64": {"v8"},
}
// WantedPlatforms returns all compatible platforms, with the platform specifics possibly overridden by the user;
// the most compatible platform is first.
// If some option (arch, os, variant) is not present, a value from the current platform is detected.
func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) {
wantedArch := runtime.GOARCH
if ctx != nil && ctx.ArchitectureChoice != "" {
wantedArch = ctx.ArchitectureChoice
}
wantedOS := runtime.GOOS
if ctx != nil && ctx.OSChoice != "" {
wantedOS = ctx.OSChoice
}
wantedVariant := getCPUVariant(runtime.GOOS, runtime.GOARCH)
if ctx != nil && ctx.VariantChoice != "" {
wantedVariant = ctx.VariantChoice
}
var wantedPlatforms []imgspecv1.Platform
if wantedVariant != "" && compatibility[wantedArch] != nil {
wantedPlatforms = make([]imgspecv1.Platform, 0, len(compatibility[wantedArch]))
wantedIndex := -1
for i, v := range compatibility[wantedArch] {
if wantedVariant == v {
wantedIndex = i
break
}
}
// user wants a variant which we know nothing about - not even compatibility
if wantedIndex == -1 {
wantedPlatforms = []imgspecv1.Platform{
{
OS: wantedOS,
Architecture: wantedArch,
Variant: wantedVariant,
},
}
} else {
for i := wantedIndex; i < len(compatibility[wantedArch]); i++ {
v := compatibility[wantedArch][i]
wantedPlatforms = append(wantedPlatforms, imgspecv1.Platform{
OS: wantedOS,
Architecture: wantedArch,
Variant: v,
})
}
}
} else {
wantedPlatforms = []imgspecv1.Platform{
{
OS: wantedOS,
Architecture: wantedArch,
Variant: wantedVariant,
},
}
}
return wantedPlatforms, nil
}
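// MatchesPlatform reports whether the image platform descriptor satisfies the wanted platform:
// architecture and OS must be equal, and the variant must match unless the wanted variant is empty.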
func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool {
if image.Architecture != wanted.Architecture {
return false
}
if image.OS != wanted.OS {
return false
}
if wanted.Variant == "" || image.Variant == wanted.Variant {
return true
}
return false
}
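
This package is internal to c/image, so only code inside the module (such as the ChooseInstance implementations further below) can import it. A rough sketch of how it is used there, with illustrative linux/arm/v7 values:

package main

import (
	"fmt"

	platform "github.com/containers/image/v5/internal/pkg/platform"
	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Pretend the user asked for a specific ARM variant; all values here are
	// illustrative assumptions.
	sys := &types.SystemContext{
		OSChoice:           "linux",
		ArchitectureChoice: "arm",
		VariantChoice:      "v7",
	}

	wanted, err := platform.WantedPlatforms(sys)
	if err != nil {
		panic(err)
	}
	// For arm/v7 this yields v7, v6 and v5 in order of preference.
	for _, p := range wanted {
		fmt.Printf("acceptable platform: %s/%s/%s\n", p.OS, p.Architecture, p.Variant)
	}

	candidate := imgspecv1.Platform{OS: "linux", Architecture: "arm", Variant: "v6"}
	fmt.Println("candidate matches first choice:", platform.MatchesPlatform(candidate, wanted[0]))
}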


@@ -3,8 +3,8 @@ package manifest
import (
"encoding/json"
"fmt"
-"runtime"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -81,9 +81,6 @@ func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
if updates[i].MediaType == "" {
return errors.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(updates), list.Manifests[i].MediaType)
}
-if err := SupportedSchema2MediaType(updates[i].MediaType); err != nil && SupportedOCI1MediaType(updates[i].MediaType) != nil {
-return errors.Wrapf(err, "update %d of %d passed to Schema2List.UpdateInstances had an unsupported media type (was %q): %q", i+1, len(updates), list.Manifests[i].MediaType, updates[i].MediaType)
-}
list.Manifests[i].MediaType = updates[i].MediaType
}
return nil
@@ -92,21 +89,25 @@ func (list *Schema2List) UpdateInstances(updates []ListUpdate) error {
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
// of the image which is appropriate for the current environment.
func (list *Schema2List) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
-wantedArch := runtime.GOARCH
-if ctx != nil && ctx.ArchitectureChoice != "" {
-wantedArch = ctx.ArchitectureChoice
-}
-wantedOS := runtime.GOOS
-if ctx != nil && ctx.OSChoice != "" {
-wantedOS = ctx.OSChoice
-}
-for _, d := range list.Manifests {
-if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS {
-return d.Digest, nil
-}
-}
-return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS)
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", errors.Wrapf(err, "error getting platform information %#v", ctx)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range list.Manifests {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS:           d.Platform.OS,
OSVersion:    d.Platform.OSVersion,
OSFeatures:   dupStringSlice(d.Platform.OSFeatures),
Variant:      d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
return "", fmt.Errorf("no image found in manifest list for architecture %s, variant %s, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the list in a blob format.


@@ -32,7 +32,14 @@ type OCI1 struct {
imgspecv1.Manifest
}
-// SupportedOCI1MediaType checks if the specified string is a supported OCI1 media type.
// SupportedOCI1MediaType checks if the specified string is a supported OCI1
// media type.
//
// Deprecated: blindly rejecting unknown MIME types when the consumer does not
// need to process the input just reduces interoperability (and violates the
// standard) with no benefit. Also, this function does not check that the
// media type is appropriate for any specific purpose, so it's not all that
// useful for validation anyway.
func SupportedOCI1MediaType(m string) error {
switch m {
case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
@@ -48,15 +55,6 @@ func OCI1FromManifest(manifest []byte) (*OCI1, error) {
if err := json.Unmarshal(manifest, &oci1); err != nil {
return nil, err
}
-// Check manifest's and layers' media types.
-if err := SupportedOCI1MediaType(oci1.Config.MediaType); err != nil {
-return nil, err
-}
-for _, layer := range oci1.Layers {
-if err := SupportedOCI1MediaType(layer.MediaType); err != nil {
-return nil, err
-}
-}
return &oci1, nil
}
@@ -128,11 +126,6 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
for i, info := range layerInfos {
mimeType := original[i].MediaType
-// First make sure we support the media type of the original layer.
-if err := SupportedOCI1MediaType(original[i].MediaType); err != nil {
-return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType)
-}
if info.CryptoOperation == types.Decrypt {
decMimeType, err := getDecryptedMediaType(mimeType)
if err != nil {


@@ -5,6 +5,7 @@ import (
"fmt"
"runtime"
platform "github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
@@ -64,9 +65,6 @@ func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
if updates[i].MediaType == "" {
return errors.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(updates), index.Manifests[i].MediaType)
}
-if err := SupportedOCI1MediaType(updates[i].MediaType); err != nil && SupportedSchema2MediaType(updates[i].MediaType) != nil && updates[i].MediaType != imgspecv1.MediaTypeImageIndex {
-return errors.Wrapf(err, "update %d of %d passed to OCI1Index.UpdateInstances had an unsupported media type (was %q): %q", i+1, len(updates), index.Manifests[i].MediaType, updates[i].MediaType)
-}
index.Manifests[i].MediaType = updates[i].MediaType
}
return nil
@@ -75,26 +73,31 @@ func (index *OCI1Index) UpdateInstances(updates []ListUpdate) error {
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
// of the image which is appropriate for the current environment.
func (index *OCI1Index) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
-wantedArch := runtime.GOARCH
-if ctx != nil && ctx.ArchitectureChoice != "" {
-wantedArch = ctx.ArchitectureChoice
-}
-wantedOS := runtime.GOOS
-if ctx != nil && ctx.OSChoice != "" {
-wantedOS = ctx.OSChoice
-}
-for _, d := range index.Manifests {
-if d.Platform != nil && d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS {
-return d.Digest, nil
-}
-}
-return "", fmt.Errorf("no image found in image index for architecture %s, OS %s", wantedArch, wantedOS)
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", errors.Wrapf(err, "error getting platform information %#v", ctx)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range index.Manifests {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS:           d.Platform.OS,
OSVersion:    d.Platform.OSVersion,
OSFeatures:   dupStringSlice(d.Platform.OSFeatures),
Variant:      d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
}
for _, d := range index.Manifests {
if d.Platform == nil {
return d.Digest, nil
}
}
return "", fmt.Errorf("no image found in image index for architecture %s, variant %s, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
// Serialize returns the index in a blob format.


@@ -18,7 +18,8 @@ import (
)
type dockerAuthConfig struct {
Auth string `json:"auth,omitempty"`
IdentityToken string `json:"identitytoken,omitempty"`
}
type dockerConfigFile struct {
@@ -72,20 +73,23 @@ func SetAuthentication(sys *types.SystemContext, registry, username, password st
})
}
-// GetAuthentication returns the registry credentials stored in
-// either auth.json file or .docker/config.json
-// If an entry is not found empty strings are returned for the username and password
-func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
// GetCredentials returns the registry credentials stored in either auth.json
// file or .docker/config.json, including support for OAuth2 and IdentityToken.
// If an entry is not found, an empty struct is returned.
func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) {
if sys != nil && sys.DockerAuthConfig != nil {
logrus.Debug("Returning credentials from DockerAuthConfig")
-return sys.DockerAuthConfig.Username, sys.DockerAuthConfig.Password, nil
return *sys.DockerAuthConfig, nil
}
if enableKeyring {
username, password, err := getAuthFromKernelKeyring(registry)
if err == nil {
logrus.Debug("returning credentials from kernel keyring")
-return username, password, nil
return types.DockerAuthConfig{
Username: username,
Password: password,
}, nil
}
}
@@ -104,18 +108,39 @@ func GetAuthentication(sys *types.SystemContext, registry string) (string, strin
authPath{path: filepath.Join(homedir.Get(), dockerLegacyHomePath), legacyFormat: true})
for _, path := range paths {
-username, password, err := findAuthentication(registry, path.path, path.legacyFormat)
authConfig, err := findAuthentication(registry, path.path, path.legacyFormat)
if err != nil {
logrus.Debugf("Credentials not found")
-return "", "", err
return types.DockerAuthConfig{}, err
}
-if username != "" && password != "" {
if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" {
logrus.Debugf("Returning credentials from %s", path.path)
-return username, password, nil
return authConfig, nil
}
}
logrus.Debugf("Credentials not found")
-return "", "", nil
return types.DockerAuthConfig{}, nil
}
// GetAuthentication returns the registry credentials stored in
// either auth.json file or .docker/config.json
// If an entry is not found empty strings are returned for the username and password
//
// Deprecated: This API only supports a username and password. For OAuth2
// support in docker registry authentication, use the new GetCredentials API
// instead. This function is kept only to maintain backward compatibility.
func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) {
auth, err := GetCredentials(sys, registry)
if err != nil {
return "", "", err
}
if auth.IdentityToken != "" {
return "", "", errors.Wrap(ErrNotSupported, "non-empty identity token found and this API doesn't support it")
}
return auth.Username, auth.Password, nil
}
// RemoveAuthentication deletes the credentials stored in auth.json
@@ -294,20 +319,28 @@ func deleteAuthFromCredHelper(credHelper, registry string) error {
}
// findAuthentication looks for auth of registry in path
-func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) {
func findAuthentication(registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) {
auths, err := readJSONFile(path, legacyFormat)
if err != nil {
-return "", "", errors.Wrapf(err, "error reading JSON file %q", path)
return types.DockerAuthConfig{}, errors.Wrapf(err, "error reading JSON file %q", path)
}
// First try cred helpers. They should always be normalized.
if ch, exists := auths.CredHelpers[registry]; exists {
-return getAuthFromCredHelper(ch, registry)
username, password, err := getAuthFromCredHelper(ch, registry)
if err != nil {
return types.DockerAuthConfig{}, err
}
return types.DockerAuthConfig{
Username: username,
Password: password,
}, nil
}
// I'm feeling lucky
if val, exists := auths.AuthConfigs[registry]; exists {
-return decodeDockerAuth(val.Auth)
return decodeDockerAuth(val)
}
// bad luck; let's normalize the entries first
@@ -316,25 +349,35 @@ func findAuthentication(registry, path string, legacyFormat bool) (string, strin
for k, v := range auths.AuthConfigs {
normalizedAuths[normalizeRegistry(k)] = v
}
if val, exists := normalizedAuths[registry]; exists {
-return decodeDockerAuth(val.Auth)
return decodeDockerAuth(val)
}
-return "", "", nil
return types.DockerAuthConfig{}, nil
}
-func decodeDockerAuth(s string) (string, string, error) {
-decoded, err := base64.StdEncoding.DecodeString(s)
// decodeDockerAuth decodes the username and password, which is
// encoded in base64.
func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
if err != nil {
-return "", "", err
return types.DockerAuthConfig{}, err
}
parts := strings.SplitN(string(decoded), ":", 2)
if len(parts) != 2 {
// if it's invalid just skip, as docker does
-return "", "", nil
return types.DockerAuthConfig{}, nil
}
user := parts[0]
password := strings.Trim(parts[1], "\x00")
-return user, password, nil
return types.DockerAuthConfig{
Username:      user,
Password:      password,
IdentityToken: conf.IdentityToken,
}, nil
}
// convertToHostname converts a registry url which has http|https prepended
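
Callers that previously used GetAuthentication can switch to GetCredentials to also receive OAuth2 identity tokens. A hedged usage sketch; the registry name is a placeholder:

package main

import (
	"fmt"

	dockerconfig "github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{} // or set AuthFilePath explicitly

	// "registry.example.com" is a placeholder registry name.
	auth, err := dockerconfig.GetCredentials(sys, "registry.example.com")
	if err != nil {
		panic(err)
	}
	switch {
	case auth.IdentityToken != "":
		// OAuth2: the identity (refresh) token is exchanged for a bearer token.
		fmt.Println("have an identity token")
	case auth.Username != "":
		fmt.Println("have username/password credentials for", auth.Username)
	default:
		fmt.Println("no credentials found")
	}

	// The old API still works, but it errors out when only an identity token is stored.
	user, pass, err := dockerconfig.GetAuthentication(sys, "registry.example.com")
	_, _, _ = user, pass, err
}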


@@ -2,16 +2,17 @@ package sysregistriesv2
import (
"fmt"
-"io/ioutil"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"sync"
"github.com/BurntSushi/toml"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -26,6 +27,16 @@ var systemRegistriesConfPath = builtinRegistriesConfPath
// DO NOT change this, instead see systemRegistriesConfPath above.
const builtinRegistriesConfPath = "/etc/containers/registries.conf"
// systemRegistriesConfDirPath is the path to the system-wide registry
// configuration directory and is used to add/subtract potential registries for
// obtaining images. You can override this at build time with
// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfDirPath=$your_path'
var systemRegistriesConfDirPath = builtinRegistriesConfDirPath
// builtinRegistriesConfDirPath is the path to the registry configuration directory.
// DO NOT change this, instead see systemRegistriesConfDirPath above.
const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d"
// Endpoint describes a remote location of a registry.
type Endpoint struct {
// The endpoint's remote location.
@@ -35,6 +46,12 @@ type Endpoint struct {
Insecure bool `toml:"insecure,omitempty"`
}
// userRegistriesFile is the path to the per user registry configuration file.
var userRegistriesFile = filepath.FromSlash(".config/containers/registries.conf")
// userRegistriesDir is the path to the per user registry configuration directory.
var userRegistriesDir = filepath.FromSlash(".config/containers/registries.conf.d")
// rewriteReference will substitute the provided reference `prefix` to the
// endpoints `location` from the `ref` and creates a new named reference from it.
// The function errors if the newly created reference is not parsable.
@@ -49,7 +66,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (referen
if err != nil {
return nil, errors.Wrapf(err, "error rewriting reference")
}
logrus.Debugf("reference rewritten from '%v' to '%v'", refString, newParsedRef.String())
return newParsedRef, nil
}
@@ -302,29 +319,83 @@ func (config *V2RegistriesConf) postProcess() error {
config.UnqualifiedSearchRegistries[i] = registry
}
// Registries are ordered and the first longest prefix always wins,
// rendering later items with the same prefix non-existent. We cannot error
// out anymore as this might break existing users, so let's just ignore them
// to guarantee that the same prefix exists only once.
knownPrefixes := make(map[string]bool)
uniqueRegistries := []Registry{}
for i := range config.Registries {
// TODO: should we warn if we see the same prefix being used multiple times?
if _, exists := knownPrefixes[config.Registries[i].Prefix]; !exists {
knownPrefixes[config.Registries[i].Prefix] = true
uniqueRegistries = append(uniqueRegistries, config.Registries[i])
}
}
config.Registries = uniqueRegistries
return nil
}
// ConfigPath returns the path to the system-wide registry configuration file.
func ConfigPath(ctx *types.SystemContext) string {
-confPath := systemRegistriesConfPath
-if ctx != nil {
-if ctx.SystemRegistriesConfPath != "" {
-confPath = ctx.SystemRegistriesConfPath
-} else if ctx.RootForImplicitAbsolutePaths != "" {
-confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
-}
-}
-return confPath
if ctx != nil && ctx.SystemRegistriesConfPath != "" {
return ctx.SystemRegistriesConfPath
}
userRegistriesFilePath := filepath.Join(homedir.Get(), userRegistriesFile)
if _, err := os.Stat(userRegistriesFilePath); err == nil {
return userRegistriesFilePath
}
if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
}
return systemRegistriesConfPath
}
// ConfigDirPath returns the path to the system-wide directory for drop-in
// registry configuration files.
func ConfigDirPath(ctx *types.SystemContext) string {
if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
return ctx.SystemRegistriesConfDirPath
}
userRegistriesDirPath := filepath.Join(homedir.Get(), userRegistriesDir)
if _, err := os.Stat(userRegistriesDirPath); err == nil {
return userRegistriesDirPath
}
if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
}
return systemRegistriesConfDirPath
}
// configWrapper is used to store the paths from ConfigPath and ConfigDirPath
// and acts as a key to the internal cache.
type configWrapper struct {
configPath string
configDirPath string
}
// newConfigWrapper returns a configWrapper for the specified SystemContext.
func newConfigWrapper(ctx *types.SystemContext) configWrapper {
return configWrapper{
configPath: ConfigPath(ctx),
configDirPath: ConfigDirPath(ctx),
}
}
// configMutex is used to synchronize concurrent accesses to configCache.
var configMutex = sync.Mutex{}
// configCache caches already loaded configs with config paths as keys and is
-// used to avoid redudantly parsing configs. Concurrent accesses to the cache
// used to avoid redundantly parsing configs. Concurrent accesses to the cache
// are synchronized via configMutex.
-var configCache = make(map[string]*V2RegistriesConf)
var configCache = make(map[configWrapper]*V2RegistriesConf)
// InvalidateCache invalidates the registry cache. This function is meant to be
// used for long-running processes that need to reload potential changes made to
@@ -332,66 +403,108 @@ var configCache = make(map[string]*V2RegistriesConf)
func InvalidateCache() {
configMutex.Lock()
defer configMutex.Unlock()
-configCache = make(map[string]*V2RegistriesConf)
configCache = make(map[configWrapper]*V2RegistriesConf)
}
// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
func getConfig(ctx *types.SystemContext) (*V2RegistriesConf, error) {
-configPath := ConfigPath(ctx)
wrapper := newConfigWrapper(ctx)
configMutex.Lock()
-// if the config has already been loaded, return the cached registries
-if config, inCache := configCache[configPath]; inCache {
if config, inCache := configCache[wrapper]; inCache {
configMutex.Unlock()
return config, nil
}
configMutex.Unlock()
-return TryUpdatingCache(ctx)
return tryUpdatingCache(ctx, wrapper)
}
// dropInConfigs returns a slice of drop-in-configs from the registries.conf.d
// directory.
func dropInConfigs(wrapper configWrapper) ([]string, error) {
var configs []string
err := filepath.Walk(wrapper.configDirPath,
// WalkFunc to read additional configs
func(path string, info os.FileInfo, err error) error {
switch {
case err != nil:
// return error (could be a permission problem)
return err
case info == nil:
// this should only happen when err != nil but let's be sure
return nil
case info.IsDir():
if path != wrapper.configDirPath {
// make sure to not recurse into sub-directories
return filepath.SkipDir
}
// ignore directories
return nil
default:
// only add *.conf files
if strings.HasSuffix(path, ".conf") {
configs = append(configs, path)
}
return nil
}
},
)
if err != nil && !os.IsNotExist(err) {
// Ignore IsNotExist errors: most systems won't have a registries.conf.d
// directory.
return nil, errors.Wrapf(err, "error reading registries.conf.d")
}
return configs, nil
}
// TryUpdatingCache loads the configuration from the provided `SystemContext`
// without using the internal cache. On success, the loaded configuration will
// be added into the internal registry cache.
func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) {
-configPath := ConfigPath(ctx)
return tryUpdatingCache(ctx, newConfigWrapper(ctx))
}
// tryUpdatingCache implements TryUpdatingCache with an additional configWrapper
// argument to avoid redundantly calculating the config paths.
func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*V2RegistriesConf, error) {
configMutex.Lock()
defer configMutex.Unlock()
// load the config
-config, err := loadRegistryConf(configPath)
-if err != nil {
-// Return an empty []Registry if we use the default config,
-// which implies that the config path of the SystemContext
-// isn't set. Note: if ctx.SystemRegistriesConfPath points to
-// the default config, we will still return an error.
config := &tomlConfig{}
if err := config.loadConfig(wrapper.configPath, false); err != nil {
// Continue with an empty []Registry if we use the default config, which
// implies that the config path of the SystemContext isn't set.
//
// Note: if ctx.SystemRegistriesConfPath points to the default config,
// we will still return an error.
if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
-return &V2RegistriesConf{Registries: []Registry{}}, nil
config = &tomlConfig{}
config.V2RegistriesConf = V2RegistriesConf{Registries: []Registry{}}
} else {
return nil, errors.Wrapf(err, "error loading registries configuration %q", wrapper.configPath)
}
-return nil, err
}
// Load the configs from the conf directory path.
dinConfigs, err := dropInConfigs(wrapper)
if err != nil {
return nil, err
}
for _, path := range dinConfigs {
// Enforce v2 format for drop-in-configs.
if err := config.loadConfig(path, true); err != nil {
return nil, errors.Wrapf(err, "error loading drop-in registries configuration %q", path)
}
}
v2Config := &config.V2RegistriesConf
-// backwards compatibility for v1 configs
-if config.V1RegistriesConf.Nonempty() {
-if config.V2RegistriesConf.Nonempty() {
-return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
-}
-v2, err := config.V1RegistriesConf.ConvertToV2()
-if err != nil {
-return nil, err
-}
-v2Config = v2
-}
-if err := v2Config.postProcess(); err != nil {
-return nil, err
-}
// populate the cache
-configCache[configPath] = v2Config
configCache[wrapper] = v2Config
return v2Config, nil
}
@@ -470,16 +583,72 @@ func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
return nil, nil
}
-// Loads the registry configuration file from the filesystem and then unmarshals
-// it. Returns the unmarshalled object.
-func loadRegistryConf(configPath string) (*tomlConfig, error) {
-config := &tomlConfig{}
-configBytes, err := ioutil.ReadFile(configPath)
-if err != nil {
-return nil, err
-}
-err = toml.Unmarshal(configBytes, &config)
-return config, err
// loadConfig loads and unmarshals the configuration at the specified path. Note
// that v1 configs are translated into v2 and are cleared. Use forceV2 if the
// config must be in the v2 format.
//
// Note that specified fields in path will replace already set fields in the
// tomlConfig. Only the [[registry]] tables are merged by prefix.
func (c *tomlConfig) loadConfig(path string, forceV2 bool) error {
logrus.Debugf("Loading registries configuration %q", path)
// Save the registries before decoding the file where they could be lost.
// We merge them later again.
registryMap := make(map[string]Registry)
for i := range c.Registries {
registryMap[c.Registries[i].Prefix] = c.Registries[i]
}
// Load the tomlConfig. Note that `DecodeFile` will overwrite set fields.
c.Registries = nil // important to clear the memory to prevent us from overlapping fields
_, err := toml.DecodeFile(path, c)
if err != nil {
return err
}
if c.V1RegistriesConf.Nonempty() {
// Enforce the v2 format if requested.
if forceV2 {
return &InvalidRegistries{s: "registry must be in v2 format but is in v1"}
}
// Convert a v1 config into a v2 config.
if c.V2RegistriesConf.Nonempty() {
return &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
}
v2, err := c.V1RegistriesConf.ConvertToV2()
if err != nil {
return err
}
c.V1RegistriesConf = V1RegistriesConf{}
c.V2RegistriesConf = *v2
}
// Post process registries, set the correct prefixes, sanity checks, etc.
if err := c.postProcess(); err != nil {
return err
}
// Merge the freshly loaded registries.
for i := range c.Registries {
registryMap[c.Registries[i].Prefix] = c.Registries[i]
}
// Go maps have a non-deterministic order when iterating the keys, so
// we dump them in a slice and sort it to enforce some order in the
// Registries slice. Some consumers of c/image (e.g., CRI-O) log the
// configuration, where a non-deterministic order could easily cause
// confusion.
prefixes := []string{}
for prefix := range registryMap {
prefixes = append(prefixes, prefix)
}
sort.Strings(prefixes)
c.Registries = []Registry{}
for _, prefix := range prefixes {
c.Registries = append(c.Registries, registryMap[prefix])
}
return nil
}
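
In practice the drop-in support means a caller only has to point the SystemContext at a directory of *.conf files (or rely on the defaults) and read the merged result. A sketch, with illustrative paths:

package main

import (
	"fmt"

	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
)

func main() {
	// Paths are illustrative; by default the system-wide
	// /etc/containers/registries.conf and /etc/containers/registries.conf.d
	// (or their per-user equivalents) are used.
	sys := &types.SystemContext{
		SystemRegistriesConfPath:    "/tmp/registries.conf",
		SystemRegistriesConfDirPath: "/tmp/registries.conf.d",
	}

	registries, err := sysregistriesv2.GetRegistries(sys)
	if err != nil {
		panic(err)
	}
	for _, reg := range registries {
		// Drop-in *.conf files are loaded after the main file; [[registry]]
		// tables are merged by prefix and the result is sorted by prefix.
		fmt.Printf("prefix=%q location=%q\n", reg.Prefix, reg.Location)
	}
}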


@@ -30,6 +30,14 @@ func newReference(transport storageTransport, named reference.Named, id string)
if named == nil && id == "" {
return nil, ErrInvalidReference
}
if named != nil && reference.IsNameOnly(named) {
return nil, errors.Wrapf(ErrInvalidReference, "reference %s has neither a tag nor a digest", named.String())
}
if id != "" {
if err := validateImageID(id); err != nil {
return nil, errors.Wrapf(ErrInvalidReference, "invalid ID value %q: %v", id, err)
}
}
// We take a copy of the transport, which contains a pointer to the
// store that it used for resolving this reference, so that the
// transport that we'll return from Transport() won't be affected by


@@ -43,6 +43,8 @@ type StoreTransport interface {
types.ImageTransport
// SetStore sets the default store for this transport.
SetStore(storage.Store)
// GetStoreIfSet returns the default store for this transport, or nil if not set/determined yet.
GetStoreIfSet() storage.Store
// GetImage retrieves the image from the transport's store that's named
// by the reference.
GetImage(types.ImageReference) (*storage.Image, error)
@@ -52,6 +54,9 @@ type StoreTransport interface {
// ParseStoreReference parses a reference, overriding any store
// specification that it may contain.
ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
// NewStoreReference creates a reference for (named@ID) in store.
// either of name or ID can be unset; named must not be a reference.IsNameOnly.
NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error)
// SetDefaultUIDMap sets the default UID map to use when opening stores.
SetDefaultUIDMap(idmap []idtools.IDMap)
// SetDefaultGIDMap sets the default GID map to use when opening stores.
@@ -82,6 +87,11 @@ func (s *storageTransport) SetStore(store storage.Store) {
s.store = store
}
// GetStoreIfSet returns the default store for this transport, as set using SetStore() or initialized by default, or nil if not set/determined yet.
func (s *storageTransport) GetStoreIfSet() storage.Store {
return s.store
}
// SetDefaultUIDMap sets the default UID map to use when opening stores.
func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) {
s.defaultUIDMap = idmap
@@ -129,7 +139,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
// If it looks like a digest, leave it alone for now.
if _, err := digest.Parse(possibleID); err != nil {
// Otherwise…
-if idSum, err := digest.Parse("sha256:" + possibleID); err == nil && idSum.Validate() == nil {
if err := validateImageID(possibleID); err == nil {
id = possibleID // … it is a full ID
} else if img, err := store.Image(possibleID); err == nil && img != nil && len(possibleID) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, possibleID) {
// … it is a truncated version of the ID of an image that's present in local storage,
@@ -167,7 +177,7 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
named = reference.TagNameOnly(named)
}
-result, err := newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id)
result, err := s.NewStoreReference(store, named, id)
if err != nil {
return nil, err
}
@@ -175,6 +185,12 @@ func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (
return result, nil
}
// NewStoreReference creates a reference for (named@ID) in store.
// either of name or ID can be unset; named must not be a reference.IsNameOnly.
func (s *storageTransport) NewStoreReference(store storage.Store, named reference.Named, id string) (*storageReference, error) {
return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, named, id)
}
func (s *storageTransport) GetStore() (storage.Store, error) {
// Return the transport's previously-set store. If we don't have one
// of those, initialize one now.
@@ -342,7 +358,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
switch len(fields) {
case 1: // name only
case 2: // name:tag@ID or name[:tag]@digest
-if _, idErr := digest.Parse("sha256:" + fields[1]); idErr != nil {
if idErr := validateImageID(fields[1]); idErr != nil {
if _, digestErr := digest.Parse(fields[1]); digestErr != nil {
return fmt.Errorf("%v is neither a valid digest(%s) nor a valid ID(%s)", fields[1], digestErr.Error(), idErr.Error())
}
@@ -351,7 +367,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
if _, err := digest.Parse(fields[1]); err != nil {
return err
}
-if _, err := digest.Parse("sha256:" + fields[2]); err != nil {
if err := validateImageID(fields[2]); err != nil {
return err
}
default: // Coverage: This should never happen
@@ -363,3 +379,9 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
// are few semantically invalid strings.
return nil
}
// validateImageID returns nil if id is a valid (full) image ID, or an error
func validateImageID(id string) error {
_, err := digest.Parse("sha256:" + id)
return err
}
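
A sketch of the new NewStoreReference entry point, assuming a containers/storage Store has already been opened elsewhere (for example with storage.GetStore); the busybox name is only an example:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
	istorage "github.com/containers/image/v5/storage"
	"github.com/containers/storage"
)

// buildReference shows how a caller can now construct a containers-storage
// reference directly instead of going through string parsing.
func buildReference(store storage.Store) (string, error) {
	named, err := reference.ParseNormalizedNamed("docker.io/library/busybox:latest")
	if err != nil {
		return "", err
	}
	ref, err := istorage.Transport.NewStoreReference(store, named, "")
	if err != nil {
		return "", err
	}
	return ref.StringWithinTransport(), nil
}

func main() {
	fmt.Println("see buildReference; opening a store requires a configured storage root")
}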


@@ -22,7 +22,6 @@ type ConfigUpdater interface {
}
type tarballReference struct {
-transport types.ImageTransport
config imgspecv1.Image
annotations map[string]string
filenames []string
@@ -43,7 +42,7 @@ func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[
}
func (r *tarballReference) Transport() types.ImageTransport {
-return r.transport
return Transport
}
func (r *tarballReference) StringWithinTransport() string {


@@ -48,12 +48,21 @@ func (t *tarballTransport) ParseReference(reference string) (types.ImageReferenc
}
f.Close()
}
-ref := &tarballReference{
-transport: t,
-filenames: filenames,
-stdin:     stdin,
-}
-return ref, nil
return NewReference(filenames, stdin)
}
// NewReference creates a new "tarball:" reference for the listed fileNames.
// If any of the fileNames is "-", the contents of stdin are used instead.
func NewReference(fileNames []string, stdin []byte) (types.ImageReference, error) {
for _, path := range fileNames {
if strings.Contains(path, separator) {
return nil, fmt.Errorf("Invalid path %q: paths including the separator %q are not supported", path, separator)
}
}
return &tarballReference{
filenames: fileNames,
stdin:     stdin,
}, nil
}
func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error {
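
The exported NewReference makes it possible to build a tarball: reference without going through string parsing. A small sketch with an assumed tarball path:

package main

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/tarball"
	"github.com/containers/image/v5/types"
)

func main() {
	ctx := context.Background()
	sys := &types.SystemContext{}

	// "/tmp/rootfs.tar" is an illustrative path to a layer tarball.
	ref, err := tarball.NewReference([]string{"/tmp/rootfs.tar"}, nil)
	if err != nil {
		panic(err)
	}
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		panic(err)
	}
	defer src.Close()
	fmt.Println("tarball reference:", ref.StringWithinTransport())
}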


@@ -399,6 +399,10 @@ type Image interface {
// This does not change the state of the original Image object.
UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
// SupportsEncryption returns an indicator that the image supports encryption
//
// Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
// the process of updating a manifest between different manifest types was to update then convert.
// This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
SupportsEncryption(ctx context.Context) bool
// Size returns an approximation of the amount of disk space which is consumed by the image in its current
// location. If the size is not known, -1 will be returned.
@@ -450,6 +454,11 @@ type ImageInspectInfo struct {
type DockerAuthConfig struct {
Username string
Password string
// IdentityToken can be used as a refresh_token in place of username and
// password to obtain the bearer/access token in the oauth2 flow. If the
// identity token is set, the password should not be set.
// Ref: https://docs.docker.com/registry/spec/auth/oauth/
IdentityToken string
}
// OptionalBool is a boolean with an additional undefined value, which is meant
@@ -497,6 +506,8 @@ type SystemContext struct {
RegistriesDirPath string
// Path to the system-wide registries configuration file
SystemRegistriesConfPath string
// Path to the system-wide registries configuration directory
SystemRegistriesConfDirPath string
// If not "", overrides the default path for the authentication file, but only new format files
AuthFilePath string
// if not "", overrides the default path for the authentication file, but with the legacy format;
@@ -510,6 +521,8 @@ type SystemContext struct {
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
OSChoice string
// If not "", overrides the use of detected ARM platform variant when choosing an image or verifying variant match.
VariantChoice string
// If not "", overrides the system's default directory containing a blob info cache. // If not "", overrides the system's default directory containing a blob info cache.
BlobInfoCacheDir string BlobInfoCacheDir string
// Additional tags when creating or copying a docker-archive. // Additional tags when creating or copying a docker-archive.
@ -540,7 +553,10 @@ type SystemContext struct {
// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
DockerInsecureSkipTLSVerify OptionalBool DockerInsecureSkipTLSVerify OptionalBool
// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
// Ignored if DockerBearerRegistryToken is non-empty.
DockerAuthConfig *DockerAuthConfig DockerAuthConfig *DockerAuthConfig
// if not "", the library uses this registry token to authenticate to the registry
DockerBearerRegistryToken string
// if not "", an User-Agent header is added to each request when contacting a registry. // if not "", an User-Agent header is added to each request when contacting a registry.
DockerRegistryUserAgent string DockerRegistryUserAgent string
// if true, a V1 ping attempt isn't done to give users a better error. Default is false. // if true, a V1 ping attempt isn't done to give users a better error. Default is false.
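
The new SystemContext fields slot in next to the existing ones. A sketch with placeholder values showing where they are set:

package main

import (
	"github.com/containers/image/v5/types"
)

func main() {
	// All values below are illustrative; they only show where the fields
	// added in 5.3.0 plug in.
	sys := &types.SystemContext{
		// Prefer a linux/arm/v7 image when resolving manifest lists.
		OSChoice:           "linux",
		ArchitectureChoice: "arm",
		VariantChoice:      "v7",

		// Drop-in registry configuration directory.
		SystemRegistriesConfDirPath: "/etc/containers/registries.conf.d",

		// A pre-fetched bearer token; when set, DockerAuthConfig is ignored.
		DockerBearerRegistryToken: "<token>",
		// Alternatively, credentials carrying an OAuth2 identity token.
		DockerAuthConfig: &types.DockerAuthConfig{
			IdentityToken: "<refresh-token>",
		},
	}
	_ = sys
}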


@@ -6,9 +6,9 @@ const (
// VersionMajor is for API-incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
-VersionMinor = 2
VersionMinor = 3
// VersionPatch is for backwards-compatible bug fixes
-VersionPatch = 1
VersionPatch = 0
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""


@@ -1,4 +1,4 @@
-Copyright (c) 2014-2016 Ulrich Kunitz
Copyright (c) 2014-2020 Ulrich Kunitz
All rights reserved.
Redistribution and use in source and binary forms, with or without


@@ -1,5 +1,9 @@
# TODO list
## Release v0.5.x
1. Support check flag in gxz command.
## Release v0.6
1. Review encoder and check for lzma improvements under xz.
@@ -86,6 +90,11 @@
## Log
### 2020-02-24
Release v0.5.7 supports the check-ID None and fixes
[issue #27](https://github.com/ulikunitz/xz/issues/27).
### 2019-02-20
Release v0.5.6 supports the go.mod file.

@@ -1,4 +1,4 @@
-// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Copyright 2014-2019 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

@@ -1,4 +1,4 @@
-// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Copyright 2014-2019 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

@@ -1,4 +1,4 @@
-// Copyright 2014-2017 Ulrich Kunitz. All rights reserved.
// Copyright 2014-2019 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -46,7 +46,8 @@ const HeaderLen = 12
// Constants for the checksum methods supported by xz.
const (
-CRC32 byte = 0x1
None byte = 0x0
CRC32      = 0x1
CRC64      = 0x4
SHA256     = 0xa
)
@@ -58,7 +59,7 @@ var errInvalidFlags = errors.New("xz: invalid flags")
// invalid.
func verifyFlags(flags byte) error {
switch flags {
-case CRC32, CRC64, SHA256:
case None, CRC32, CRC64, SHA256:
return nil
default:
return errInvalidFlags
@@ -67,6 +68,7 @@ func verifyFlags(flags byte) error {
// flagstrings maps flag values to strings.
var flagstrings = map[byte]string{
None:   "None",
CRC32:  "CRC-32",
CRC64:  "CRC-64",
SHA256: "SHA-256",
@@ -85,6 +87,8 @@ func flagString(flags byte) string {
// hash method encoded in flags.
func newHashFunc(flags byte) (newHash func() hash.Hash, err error) {
switch flags {
case None:
newHash = newNoneHash
case CRC32:
newHash = newCRC32
case CRC64:
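
The None check-ID mainly affects decoding: streams written without a block checksum (such as the fox-check-none.xz test file added below) can now be read. A basic round-trip with this library follows; configuring a writer to emit the None check is not shown because that knob is not visible in this diff:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/ulikunitz/xz"
)

func main() {
	var buf bytes.Buffer

	w, err := xz.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("The quick brown fox jumps over the lazy dog.")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// The reader also accepts streams whose block check is "None" (0x00).
	r, err := xz.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}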

vendor/github.com/ulikunitz/xz/fox-check-none.xz (new binary test file; contents not shown)

@@ -1 +1,3 @@
module github.com/ulikunitz/xz
go 1.12

(Identical copyright header updates, 2014-2017 to 2014-2019, in the remaining source files of the ulikunitz/xz package are not shown.)

23
vendor/github.com/ulikunitz/xz/none-check.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
// Copyright 2014-2019 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package xz
import "hash"
type noneHash struct{}
func (h noneHash) Write(p []byte) (n int, err error) { return len(p), nil }
func (h noneHash) Sum(b []byte) []byte { return b }
func (h noneHash) Reset() {}
func (h noneHash) Size() int { return 0 }
func (h noneHash) BlockSize() int { return 0 }
func newNoneHash() hash.Hash {
return &noneHash{}
}
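
The type itself is unexported, so a compile-time check that it keeps satisfying hash.Hash would also sit inside package xz; a one-line sketch under that assumption:

package xz

import "hash"

// Compile-time assertion that the no-op checksum implements hash.Hash.
var _ hash.Hash = noneHash{}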

View File

@ -1,4 +1,4 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. // Copyright 2014-2019 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
@ -283,7 +283,11 @@ func (c *ReaderConfig) newBlockReader(xz io.Reader, h *blockHeader,
if err != nil { if err != nil {
return nil, err return nil, err
} }
br.r = io.TeeReader(fr, br.hash) if br.hash.Size() != 0 {
br.r = io.TeeReader(fr, br.hash)
} else {
br.r = fr
}
return br, nil return br, nil
} }
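
Because the reader now bypasses the TeeReader whenever the configured hash has zero size, a stream written with the None method decodes like any other. A minimal sketch, assuming the package's exported NewReader and using the fox-check-none.xz test file added above:

package main

import (
	"io"
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	f, err := os.Open("fox-check-none.xz") // blocks carry no checksum
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r, err := xz.NewReader(f) // the None flag is now accepted during header verification
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(os.Stdout, r); err != nil {
		panic(err)
	}
}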

View File

@ -1,4 +1,4 @@
// Copyright 2014-2017 Ulrich Kunitz. All rights reserved. // Copyright 2014-2019 Ulrich Kunitz. All rights reserved.
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
@ -18,8 +18,10 @@ type WriterConfig struct {
DictCap int DictCap int
BufSize int BufSize int
BlockSize int64 BlockSize int64
// checksum method: CRC32, CRC64 or SHA256 // checksum method: CRC32, CRC64 or SHA256 (default: CRC64)
CheckSum byte CheckSum byte
// Forces NoChecksum (default: false)
NoCheckSum bool
// match algorithm // match algorithm
Matcher lzma.MatchAlgorithm Matcher lzma.MatchAlgorithm
} }
@ -41,6 +43,9 @@ func (c *WriterConfig) fill() {
if c.CheckSum == 0 { if c.CheckSum == 0 {
c.CheckSum = CRC64 c.CheckSum = CRC64
} }
if c.NoCheckSum {
c.CheckSum = None
}
} }
// Verify checks the configuration for errors. Zero values will be // Verify checks the configuration for errors. Zero values will be
@ -284,7 +289,11 @@ func (c *WriterConfig) newBlockWriter(xz io.Writer, hash hash.Hash) (bw *blockWr
if err != nil { if err != nil {
return nil, err return nil, err
} }
bw.mw = io.MultiWriter(bw.w, bw.hash) if bw.hash.Size() != 0 {
bw.mw = io.MultiWriter(bw.w, bw.hash)
} else {
bw.mw = bw.w
}
return bw, nil return bw, nil
} }
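
On the writing side, the new NoCheckSum field forces the None method regardless of what CheckSum is set to. A minimal sketch, assuming WriterConfig.NewWriter keeps its existing signature:

package main

import (
	"os"

	"github.com/ulikunitz/xz"
)

func main() {
	cfg := xz.WriterConfig{NoCheckSum: true} // fill() replaces CheckSum with None
	w, err := cfg.NewWriter(os.Stdout)
	if err != nil {
		panic(err)
	}
	defer w.Close()

	if _, err := w.Write([]byte("The quick brown fox jumps over the lazy dog.\n")); err != nil {
		panic(err)
	}
}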

View File

@ -29,11 +29,11 @@ func (f FillerFunc) Fill(w io.Writer, width int, stat *decor.Statistics) {
f(w, width, stat) f(w, width, stat)
} }
// Wrapper interface. // WrapFiller interface.
// If you're implementing custom Filler by wrapping a built-in one, // If you're implementing custom Filler by wrapping a built-in one,
// it is necessary to implement this interface to retain functionality // it is necessary to implement this interface to retain functionality
// of built-in Filler. // of built-in Filler.
type Wrapper interface { type WrapFiller interface {
Base() Filler Base() Filler
} }
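
For downstream code, the practical effect of the rename is that custom filler wrappers now implement WrapFiller instead of Wrapper. A minimal sketch, assuming mpb/v4's Filler interface as shown at the top of this file; myFiller and its pass-through Fill are illustrative, not part of the library:

package main

import (
	"io"

	"github.com/vbauerster/mpb/v4"
	"github.com/vbauerster/mpb/v4/decor"
)

type myFiller struct {
	base mpb.Filler
}

func (f *myFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
	f.base.Fill(w, width, stat) // a real wrapper would decorate the output here
}

// Base satisfies mpb.WrapFiller (formerly mpb.Wrapper), so mpb can still
// unwrap to the built-in filler.
func (f *myFiller) Base() mpb.Filler { return f.base }

func main() {
	base := mpb.NewBarFiller(mpb.DefaultBarStyle, false)
	_ = &myFiller{base: base} // would be passed to (*mpb.Progress).Add
}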

View File

@ -18,13 +18,14 @@ const (
rRefill rRefill
) )
// DefaultBarStyle is applied when bar constructed with *Progress.AddBar method. // DefaultBarStyle is a string containing 7 runes.
// Each rune is a building block of a progress bar.
// //
// '1th rune' stands for left boundary rune // '1st rune' stands for left boundary rune
// //
// '2th rune' stands for fill rune // '2nd rune' stands for fill rune
// //
// '3th rune' stands for tip rune // '3rd rune' stands for tip rune
// //
// '4th rune' stands for empty rune // '4th rune' stands for empty rune
// //
@ -44,16 +45,16 @@ type barFiller struct {
flush func(w io.Writer, bb [][]byte) flush func(w io.Writer, bb [][]byte)
} }
// NewBarFiller constucts mpb.Filler, to be used with *Progress.Add method. // NewBarFiller constucts mpb.Filler, to be used with *Progress.Add(...) *Bar method.
func NewBarFiller(style string, reverse bool) Filler { func NewBarFiller(style string, reverse bool) Filler {
if style == "" { if style == "" {
style = DefaultBarStyle style = DefaultBarStyle
} }
bf := &barFiller{ bf := &barFiller{
format: make([][]byte, utf8.RuneCountInString(style)), format: make([][]byte, utf8.RuneCountInString(style)),
reverse: reverse,
} }
bf.SetStyle(style) bf.SetStyle(style)
bf.SetReverse(reverse)
return bf return bf
} }
@ -66,28 +67,16 @@ func (s *barFiller) SetStyle(style string) {
src = append(src, []byte(string(r))) src = append(src, []byte(string(r)))
} }
copy(s.format, src) copy(s.format, src)
if s.reverse { s.SetReverse(s.reverse)
s.tip = s.format[rRevTip]
} else {
s.tip = s.format[rTip]
}
} }
func (s *barFiller) SetReverse(reverse bool) { func (s *barFiller) SetReverse(reverse bool) {
if reverse { if reverse {
s.tip = s.format[rRevTip] s.tip = s.format[rRevTip]
s.flush = func(w io.Writer, bb [][]byte) { s.flush = reverseFlush
for i := len(bb) - 1; i >= 0; i-- {
w.Write(bb[i])
}
}
} else { } else {
s.tip = s.format[rTip] s.tip = s.format[rTip]
s.flush = func(w io.Writer, bb [][]byte) { s.flush = normalFlush
for i := 0; i < len(bb); i++ {
w.Write(bb[i])
}
}
} }
s.reverse = reverse s.reverse = reverse
} }
@ -135,3 +124,15 @@ func (s *barFiller) Fill(w io.Writer, width int, stat *decor.Statistics) {
s.flush(w, bb) s.flush(w, bb)
} }
func normalFlush(w io.Writer, bb [][]byte) {
for i := 0; i < len(bb); i++ {
w.Write(bb[i])
}
}
func reverseFlush(w io.Writer, bb [][]byte) {
for i := len(bb) - 1; i >= 0; i-- {
w.Write(bb[i])
}
}

View File

@ -199,8 +199,8 @@ func MakeFillerTypeSpecificBarOption(
} }
} }
// BarOptOnCond returns option when condition evaluates to true. // BarOptOn returns option when condition evaluates to true.
func BarOptOnCond(option BarOption, condition func() bool) BarOption { func BarOptOn(option BarOption, condition func() bool) BarOption {
if condition() { if condition() {
return option return option
} }
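
Call sites only need the rename from BarOptOnCond to BarOptOn; the argument order is unchanged (the matching ContainerOptOnCond to ContainerOptOn rename appears further down). A minimal sketch of conditional decoration, where quiet is an illustrative flag rather than anything from the library:

package main

import (
	"github.com/vbauerster/mpb/v4"
	"github.com/vbauerster/mpb/v4/decor"
)

func main() {
	quiet := false // illustrative flag
	p := mpb.New()
	bar := p.AddBar(100,
		mpb.BarOptOn( // formerly mpb.BarOptOnCond
			mpb.AppendDecorators(decor.Percentage()),
			func() bool { return !quiet },
		),
	)
	for i := 0; i < 100; i++ {
		bar.Increment()
	}
	p.Wait()
}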

21
vendor/github.com/vbauerster/mpb/v4/decor/any.go generated vendored Normal file
View File

@ -0,0 +1,21 @@
package decor
// Any decorator displays text, that can be changed during decorator's
// lifetime via provided func call back.
//
// `f` call back which provides string to display
//
// `wcc` optional WC config
//
func Any(f func(*Statistics) string, wcc ...WC) Decorator {
return &any{initWC(wcc...), f}
}
type any struct {
WC
f func(*Statistics) string
}
func (d *any) Decor(s *Statistics) string {
return d.FormatMsg(d.f(s))
}
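
Any is the primitive the refactored decorators below are rebuilt on, and it is also the simplest way to write a custom dynamic decorator. A minimal sketch (the label text is illustrative):

package main

import (
	"fmt"

	"github.com/vbauerster/mpb/v4"
	"github.com/vbauerster/mpb/v4/decor"
)

func main() {
	p := mpb.New()
	bar := p.AddBar(3,
		mpb.PrependDecorators(
			decor.Any(func(s *decor.Statistics) string {
				return fmt.Sprintf("%d of %d files", s.Current, s.Total)
			}),
		),
	)
	for i := 0; i < 3; i++ {
		bar.Increment()
	}
	p.Wait()
}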

View File

@ -43,24 +43,7 @@ func CountersKiloByte(pairFmt string, wcc ...WC) Decorator {
// pairFmt="% d / % d" output: "1 MB / 12 MB" // pairFmt="% d / % d" output: "1 MB / 12 MB"
// //
func Counters(unit int, pairFmt string, wcc ...WC) Decorator { func Counters(unit int, pairFmt string, wcc ...WC) Decorator {
var wc WC return Any(chooseSizeProducer(unit, pairFmt), wcc...)
for _, widthConf := range wcc {
wc = widthConf
}
d := &countersDecorator{
WC: wc.Init(),
producer: chooseSizeProducer(unit, pairFmt),
}
return d
}
type countersDecorator struct {
WC
producer func(*Statistics) string
}
func (d *countersDecorator) Decor(st *Statistics) string {
return d.FormatMsg(d.producer(st))
} }
func chooseSizeProducer(unit int, format string) func(*Statistics) string { func chooseSizeProducer(unit int, format string) func(*Statistics) string {
@ -69,16 +52,16 @@ func chooseSizeProducer(unit int, format string) func(*Statistics) string {
} }
switch unit { switch unit {
case UnitKiB: case UnitKiB:
return func(st *Statistics) string { return func(s *Statistics) string {
return fmt.Sprintf(format, SizeB1024(st.Current), SizeB1024(st.Total)) return fmt.Sprintf(format, SizeB1024(s.Current), SizeB1024(s.Total))
} }
case UnitKB: case UnitKB:
return func(st *Statistics) string { return func(s *Statistics) string {
return fmt.Sprintf(format, SizeB1000(st.Current), SizeB1000(st.Total)) return fmt.Sprintf(format, SizeB1000(s.Current), SizeB1000(s.Total))
} }
default: default:
return func(st *Statistics) string { return func(s *Statistics) string {
return fmt.Sprintf(format, st.Current, st.Total) return fmt.Sprintf(format, s.Current, s.Total)
} }
} }
} }

View File

@ -176,3 +176,11 @@ func (wc *WC) GetConf() WC {
func (wc *WC) SetConf(conf WC) { func (wc *WC) SetConf(conf WC) {
*wc = conf.Init() *wc = conf.Init()
} }
func initWC(wcc ...WC) WC {
var wc WC
for _, nwc := range wcc {
wc = nwc
}
return wc.Init()
}

View File

@ -9,6 +9,7 @@ import (
// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
// //
// `wcc` optional WC config // `wcc` optional WC config
//
func Elapsed(style TimeStyle, wcc ...WC) Decorator { func Elapsed(style TimeStyle, wcc ...WC) Decorator {
return NewElapsed(style, time.Now(), wcc...) return NewElapsed(style, time.Now(), wcc...)
} }
@ -20,29 +21,15 @@ func Elapsed(style TimeStyle, wcc ...WC) Decorator {
// `startTime` start time // `startTime` start time
// //
// `wcc` optional WC config // `wcc` optional WC config
//
func NewElapsed(style TimeStyle, startTime time.Time, wcc ...WC) Decorator { func NewElapsed(style TimeStyle, startTime time.Time, wcc ...WC) Decorator {
var wc WC var msg string
for _, widthConf := range wcc { producer := chooseTimeProducer(style)
wc = widthConf f := func(s *Statistics) string {
if !s.Completed {
msg = producer(time.Since(startTime))
}
return msg
} }
d := &elapsedDecorator{ return Any(f, wcc...)
WC: wc.Init(),
startTime: startTime,
producer: chooseTimeProducer(style),
}
return d
}
type elapsedDecorator struct {
WC
startTime time.Time
producer func(time.Duration) string
msg string
}
func (d *elapsedDecorator) Decor(st *Statistics) string {
if !st.Completed {
d.msg = d.producer(time.Since(d.startTime))
}
return d.FormatMsg(d.msg)
} }

View File

@ -33,7 +33,7 @@ func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator {
} else { } else {
average = ewma.NewMovingAverage(age) average = ewma.NewMovingAverage(age)
} }
return MovingAverageETA(style, average, nil, wcc...) return MovingAverageETA(style, NewThreadSafeMovingAverage(average), nil, wcc...)
} }
// MovingAverageETA decorator relies on MovingAverage implementation to calculate its average. // MovingAverageETA decorator relies on MovingAverage implementation to calculate its average.
@ -45,13 +45,10 @@ func EwmaETA(style TimeStyle, age float64, wcc ...WC) Decorator {
// `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] // `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer]
// //
// `wcc` optional WC config // `wcc` optional WC config
//
func MovingAverageETA(style TimeStyle, average MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator { func MovingAverageETA(style TimeStyle, average MovingAverage, normalizer TimeNormalizer, wcc ...WC) Decorator {
var wc WC
for _, widthConf := range wcc {
wc = widthConf
}
d := &movingAverageETA{ d := &movingAverageETA{
WC: wc.Init(), WC: initWC(wcc...),
average: average, average: average,
normalizer: normalizer, normalizer: normalizer,
producer: chooseTimeProducer(style), producer: chooseTimeProducer(style),
@ -66,9 +63,9 @@ type movingAverageETA struct {
producer func(time.Duration) string producer func(time.Duration) string
} }
func (d *movingAverageETA) Decor(st *Statistics) string { func (d *movingAverageETA) Decor(s *Statistics) string {
v := math.Round(d.average.Value()) v := math.Round(d.average.Value())
remaining := time.Duration((st.Total - st.Current) * int64(v)) remaining := time.Duration((s.Total - s.Current) * int64(v))
if d.normalizer != nil { if d.normalizer != nil {
remaining = d.normalizer.Normalize(remaining) remaining = d.normalizer.Normalize(remaining)
} }
@ -92,6 +89,7 @@ func (d *movingAverageETA) NextAmount(n int64, wdd ...time.Duration) {
// `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS] // `style` one of [ET_STYLE_GO|ET_STYLE_HHMMSS|ET_STYLE_HHMM|ET_STYLE_MMSS]
// //
// `wcc` optional WC config // `wcc` optional WC config
//
func AverageETA(style TimeStyle, wcc ...WC) Decorator { func AverageETA(style TimeStyle, wcc ...WC) Decorator {
return NewAverageETA(style, time.Now(), nil, wcc...) return NewAverageETA(style, time.Now(), nil, wcc...)
} }
@ -105,13 +103,10 @@ func AverageETA(style TimeStyle, wcc ...WC) Decorator {
// `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer] // `normalizer` available implementations are [FixedIntervalTimeNormalizer|MaxTolerateTimeNormalizer]
// //
// `wcc` optional WC config // `wcc` optional WC config
//
func NewAverageETA(style TimeStyle, startTime time.Time, normalizer TimeNormalizer, wcc ...WC) Decorator { func NewAverageETA(style TimeStyle, startTime time.Time, normalizer TimeNormalizer, wcc ...WC) Decorator {
var wc WC
for _, widthConf := range wcc {
wc = widthConf
}
d := &averageETA{ d := &averageETA{
WC: wc.Init(), WC: initWC(wcc...),
startTime: startTime, startTime: startTime,
normalizer: normalizer, normalizer: normalizer,
producer: chooseTimeProducer(style), producer: chooseTimeProducer(style),
@ -126,12 +121,12 @@ type averageETA struct {
producer func(time.Duration) string producer func(time.Duration) string
} }
func (d *averageETA) Decor(st *Statistics) string { func (d *averageETA) Decor(s *Statistics) string {
var remaining time.Duration var remaining time.Duration
if st.Current != 0 { if s.Current != 0 {
durPerItem := float64(time.Since(d.startTime)) / float64(st.Current) durPerItem := float64(time.Since(d.startTime)) / float64(s.Current)
durPerItem = math.Round(durPerItem) durPerItem = math.Round(durPerItem)
remaining = time.Duration((st.Total - st.Current) * int64(durPerItem)) remaining = time.Duration((s.Total - s.Current) * int64(durPerItem))
if d.normalizer != nil { if d.normalizer != nil {
remaining = d.normalizer.Normalize(remaining) remaining = d.normalizer.Normalize(remaining)
} }

View File

@ -64,8 +64,8 @@ func (d *mergeDecorator) Base() Decorator {
return d.Decorator return d.Decorator
} }
func (d *mergeDecorator) Decor(st *Statistics) string { func (d *mergeDecorator) Decor(s *Statistics) string {
msg := d.Decorator.Decor(st) msg := d.Decorator.Decor(s)
msgLen := utf8.RuneCountInString(msg) msgLen := utf8.RuneCountInString(msg)
if (d.wc.C & DextraSpace) != 0 { if (d.wc.C & DextraSpace) != 0 {
msgLen++ msgLen++
@ -101,6 +101,6 @@ type placeHolderDecorator struct {
WC WC
} }
func (d *placeHolderDecorator) Decor(_ *Statistics) string { func (d *placeHolderDecorator) Decor(*Statistics) string {
return "" return ""
} }

View File

@ -2,6 +2,7 @@ package decor
import ( import (
"sort" "sort"
"sync"
"github.com/VividCortex/ewma" "github.com/VividCortex/ewma"
) )
@ -11,6 +12,38 @@ import (
// or exponentially decaying. // or exponentially decaying.
type MovingAverage = ewma.MovingAverage type MovingAverage = ewma.MovingAverage
type threadSafeMovingAverage struct {
ewma.MovingAverage
mu sync.Mutex
}
func (s *threadSafeMovingAverage) Add(value float64) {
s.mu.Lock()
s.MovingAverage.Add(value)
s.mu.Unlock()
}
func (s *threadSafeMovingAverage) Value() float64 {
s.mu.Lock()
defer s.mu.Unlock()
return s.MovingAverage.Value()
}
func (s *threadSafeMovingAverage) Set(value float64) {
s.mu.Lock()
s.MovingAverage.Set(value)
s.mu.Unlock()
}
// NewThreadSafeMovingAverage converts provided ewma.MovingAverage
// into thread safe ewma.MovingAverage.
func NewThreadSafeMovingAverage(average ewma.MovingAverage) ewma.MovingAverage {
if tsma, ok := average.(*threadSafeMovingAverage); ok {
return tsma
}
return &threadSafeMovingAverage{MovingAverage: average}
}
type medianWindow [3]float64 type medianWindow [3]float64
func (s *medianWindow) Len() int { return len(s) } func (s *medianWindow) Len() int { return len(s) }
@ -36,5 +69,5 @@ func (s *medianWindow) Set(value float64) {
// NewMedian is fixed last 3 samples median MovingAverage. // NewMedian is fixed last 3 samples median MovingAverage.
func NewMedian() MovingAverage { func NewMedian() MovingAverage {
return new(medianWindow) return NewThreadSafeMovingAverage(new(medianWindow))
} }
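
Callers that hand their own ewma implementation to the moving-average decorators can now reuse the same locking wrapper the package applies internally. A minimal sketch, assuming the decorator constructors shown in this diff; real code would also feed per-iteration durations (for example via bar.IncrBy(n, elapsed)) so the average receives samples:

package main

import (
	"github.com/VividCortex/ewma"
	"github.com/vbauerster/mpb/v4"
	"github.com/vbauerster/mpb/v4/decor"
)

func main() {
	average := decor.NewThreadSafeMovingAverage(ewma.NewMovingAverage(30))
	p := mpb.New()
	bar := p.AddBar(100,
		mpb.AppendDecorators(decor.MovingAverageETA(decor.ET_STYLE_GO, average, nil)),
	)
	for i := 0; i < 100; i++ {
		bar.Increment()
	}
	p.Wait()
}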

View File

@ -1,27 +1,12 @@
package decor package decor
// Name returns name decorator. // Name decorator displays text that is set once and can't be changed
// during decorator's lifetime.
// //
// `name` string to display // `str` string to display
// //
// `wcc` optional WC config // `wcc` optional WC config
func Name(name string, wcc ...WC) Decorator { //
var wc WC func Name(str string, wcc ...WC) Decorator {
for _, widthConf := range wcc { return Any(func(*Statistics) string { return str }, wcc...)
wc = widthConf
}
d := &nameDecorator{
WC: wc.Init(),
msg: name,
}
return d
}
type nameDecorator struct {
WC
msg string
}
func (d *nameDecorator) Decor(st *Statistics) string {
return d.FormatMsg(d.msg)
} }

View File

@ -6,6 +6,7 @@ package decor
// `decorator` Decorator to wrap // `decorator` Decorator to wrap
// //
// `message` message to display on complete event // `message` message to display on complete event
//
func OnComplete(decorator Decorator, message string) Decorator { func OnComplete(decorator Decorator, message string) Decorator {
d := &onCompleteWrapper{ d := &onCompleteWrapper{
Decorator: decorator, Decorator: decorator,
@ -23,12 +24,12 @@ type onCompleteWrapper struct {
msg string msg string
} }
func (d *onCompleteWrapper) Decor(st *Statistics) string { func (d *onCompleteWrapper) Decor(s *Statistics) string {
if st.Completed { if s.Completed {
wc := d.GetConf() wc := d.GetConf()
return wc.FormatMsg(d.msg) return wc.FormatMsg(d.msg)
} }
return d.Decorator.Decor(st) return d.Decorator.Decor(s)
} }
func (d *onCompleteWrapper) Base() Decorator { func (d *onCompleteWrapper) Base() Decorator {

View File

@ -37,36 +37,22 @@ func Percentage(wcc ...WC) Decorator {
return NewPercentage("% d", wcc...) return NewPercentage("% d", wcc...)
} }
// NewPercentage percentage decorator with custom fmt string. // NewPercentage percentage decorator with custom format string.
// //
// fmt examples: // format examples:
// //
// fmt="%.1f" output: "1.0%" // format="%.1f" output: "1.0%"
// fmt="% .1f" output: "1.0 %" // format="% .1f" output: "1.0 %"
// fmt="%d" output: "1%" // format="%d" output: "1%"
// fmt="% d" output: "1 %" // format="% d" output: "1 %"
// //
func NewPercentage(fmt string, wcc ...WC) Decorator { func NewPercentage(format string, wcc ...WC) Decorator {
var wc WC if format == "" {
for _, widthConf := range wcc { format = "% d"
wc = widthConf
} }
if fmt == "" { f := func(s *Statistics) string {
fmt = "% d" p := internal.Percentage(s.Total, s.Current, 100)
return fmt.Sprintf(format, percentageType(p))
} }
d := &percentageDecorator{ return Any(f, wcc...)
WC: wc.Init(),
fmt: fmt,
}
return d
}
type percentageDecorator struct {
WC
fmt string
}
func (d *percentageDecorator) Decor(st *Statistics) string {
p := internal.Percentage(st.Total, st.Current, 100)
return d.FormatMsg(fmt.Sprintf(d.fmt, percentageType(p)))
} }
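
The parameter rename from fmt to format is cosmetic and the accepted format strings are unchanged. A minimal usage sketch:

package main

import (
	"github.com/vbauerster/mpb/v4"
	"github.com/vbauerster/mpb/v4/decor"
)

func main() {
	p := mpb.New()
	bar := p.AddBar(100,
		mpb.AppendDecorators(decor.NewPercentage("% .1f")), // renders e.g. "42.0 %"
	)
	for i := 0; i < 100; i++ {
		bar.Increment()
	}
	p.Wait()
}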

View File

@ -9,12 +9,20 @@ import (
"github.com/VividCortex/ewma" "github.com/VividCortex/ewma"
) )
// SpeedFormatter is wrapper for SizeB1024 and SizeB1000 to format value as speed/s. // FmtAsSpeed adds "/s" to the end of the input formatter. To be
type SpeedFormatter struct { // used with SizeB1000 or SizeB1024 types, for example:
//
// fmt.Printf("%.1f", FmtAsSpeed(SizeB1024(2048)))
//
func FmtAsSpeed(input fmt.Formatter) fmt.Formatter {
return &speedFormatter{input}
}
type speedFormatter struct {
fmt.Formatter fmt.Formatter
} }
func (self *SpeedFormatter) Format(st fmt.State, verb rune) { func (self *speedFormatter) Format(st fmt.State, verb rune) {
self.Formatter.Format(st, verb) self.Formatter.Format(st, verb)
io.WriteString(st, "/s") io.WriteString(st, "/s")
} }
@ -30,7 +38,7 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator {
} else { } else {
average = ewma.NewMovingAverage(age) average = ewma.NewMovingAverage(age)
} }
return MovingAverageSpeed(unit, format, average, wcc...) return MovingAverageSpeed(unit, format, NewThreadSafeMovingAverage(average), wcc...)
} }
// MovingAverageSpeed decorator relies on MovingAverage implementation // MovingAverageSpeed decorator relies on MovingAverage implementation
@ -52,15 +60,11 @@ func EwmaSpeed(unit int, format string, age float64, wcc ...WC) Decorator {
// unit=UnitKB, format="% .1f" output: "1.0 MB/s" // unit=UnitKB, format="% .1f" output: "1.0 MB/s"
// //
func MovingAverageSpeed(unit int, format string, average MovingAverage, wcc ...WC) Decorator { func MovingAverageSpeed(unit int, format string, average MovingAverage, wcc ...WC) Decorator {
var wc WC
for _, widthConf := range wcc {
wc = widthConf
}
if format == "" { if format == "" {
format = "%.0f" format = "%.0f"
} }
d := &movingAverageSpeed{ d := &movingAverageSpeed{
WC: wc.Init(), WC: initWC(wcc...),
average: average, average: average,
producer: chooseSpeedProducer(unit, format), producer: chooseSpeedProducer(unit, format),
} }
@ -74,8 +78,8 @@ type movingAverageSpeed struct {
msg string msg string
} }
func (d *movingAverageSpeed) Decor(st *Statistics) string { func (d *movingAverageSpeed) Decor(s *Statistics) string {
if !st.Completed { if !s.Completed {
var speed float64 var speed float64
if v := d.average.Value(); v > 0 { if v := d.average.Value(); v > 0 {
speed = 1 / v speed = 1 / v
@ -122,15 +126,11 @@ func AverageSpeed(unit int, format string, wcc ...WC) Decorator {
// unit=UnitKB, format="% .1f" output: "1.0 MB/s" // unit=UnitKB, format="% .1f" output: "1.0 MB/s"
// //
func NewAverageSpeed(unit int, format string, startTime time.Time, wcc ...WC) Decorator { func NewAverageSpeed(unit int, format string, startTime time.Time, wcc ...WC) Decorator {
var wc WC
for _, widthConf := range wcc {
wc = widthConf
}
if format == "" { if format == "" {
format = "%.0f" format = "%.0f"
} }
d := &averageSpeed{ d := &averageSpeed{
WC: wc.Init(), WC: initWC(wcc...),
startTime: startTime, startTime: startTime,
producer: chooseSpeedProducer(unit, format), producer: chooseSpeedProducer(unit, format),
} }
@ -144,9 +144,9 @@ type averageSpeed struct {
msg string msg string
} }
func (d *averageSpeed) Decor(st *Statistics) string { func (d *averageSpeed) Decor(s *Statistics) string {
if !st.Completed { if !s.Completed {
speed := float64(st.Current) / float64(time.Since(d.startTime)) speed := float64(s.Current) / float64(time.Since(d.startTime))
d.msg = d.producer(speed * 1e9) d.msg = d.producer(speed * 1e9)
} }
@ -161,11 +161,11 @@ func chooseSpeedProducer(unit int, format string) func(float64) string {
switch unit { switch unit {
case UnitKiB: case UnitKiB:
return func(speed float64) string { return func(speed float64) string {
return fmt.Sprintf(format, &SpeedFormatter{SizeB1024(math.Round(speed))}) return fmt.Sprintf(format, FmtAsSpeed(SizeB1024(math.Round(speed))))
} }
case UnitKB: case UnitKB:
return func(speed float64) string { return func(speed float64) string {
return fmt.Sprintf(format, &SpeedFormatter{SizeB1000(math.Round(speed))}) return fmt.Sprintf(format, FmtAsSpeed(SizeB1000(math.Round(speed))))
} }
default: default:
return func(speed float64) string { return func(speed float64) string {

View File

@ -8,28 +8,14 @@ var defaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "
// //
// `wcc` optional WC config // `wcc` optional WC config
func Spinner(frames []string, wcc ...WC) Decorator { func Spinner(frames []string, wcc ...WC) Decorator {
var wc WC
for _, widthConf := range wcc {
wc = widthConf
}
if len(frames) == 0 { if len(frames) == 0 {
frames = defaultSpinnerStyle frames = defaultSpinnerStyle
} }
d := &spinnerDecorator{ var count uint
WC: wc.Init(), f := func(s *Statistics) string {
frames: frames, frame := frames[count%uint(len(frames))]
count++
return frame
} }
return d return Any(f, wcc...)
}
type spinnerDecorator struct {
WC
frames []string
count uint
}
func (d *spinnerDecorator) Decor(st *Statistics) string {
frame := d.frames[d.count%uint(len(d.frames))]
d.count++
return d.FormatMsg(frame)
} }

View File

@ -3,8 +3,8 @@ module github.com/vbauerster/mpb/v4
require ( require (
github.com/VividCortex/ewma v1.1.1 github.com/VividCortex/ewma v1.1.1
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6
golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 // indirect golang.org/x/sys v0.0.0-20200217220822-9197077df867 // indirect
) )
go 1.13 go 1.13

View File

@ -3,11 +3,11 @@ github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmx
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4= golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6 h1:Sy5bstxEqwwbYs6n0/pBuxKENqOeZUgD45Gp3Q3pqLg=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200214034016-1d94cc7ab1c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 h1:dHtDnRWQtSx0Hjq9kvKFpBh9uPPKfQN70NZZmvssGwk= golang.org/x/sys v0.0.0-20200217220822-9197077df867 h1:JoRuNIf+rpHl+VhScRQQvzbHed86tKkqwPMV34T8myw=
golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View File

@ -96,8 +96,8 @@ func PopCompletedMode() ContainerOption {
} }
} }
// ContainerOptOnCond returns option when condition evaluates to true. // ContainerOptOn returns option when condition evaluates to true.
func ContainerOptOnCond(option ContainerOption, condition func() bool) ContainerOption { func ContainerOptOn(option ContainerOption, condition func() bool) ContainerOption {
if condition() { if condition() {
return option return option
} }

View File

@ -4,6 +4,7 @@ import (
"bytes" "bytes"
"container/heap" "container/heap"
"context" "context"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"log" "log"
@ -97,18 +98,19 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
return p return p
} }
// AddBar creates a new progress bar and adds to the container. // AddBar creates a new progress bar and adds it to the rendering queue.
func (p *Progress) AddBar(total int64, options ...BarOption) *Bar { func (p *Progress) AddBar(total int64, options ...BarOption) *Bar {
return p.Add(total, NewBarFiller(DefaultBarStyle, false), options...) return p.Add(total, NewBarFiller(DefaultBarStyle, false), options...)
} }
// AddSpinner creates a new spinner bar and adds to the container. // AddSpinner creates a new spinner bar and adds it to the rendering queue.
func (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar { func (p *Progress) AddSpinner(total int64, alignment SpinnerAlignment, options ...BarOption) *Bar {
return p.Add(total, NewSpinnerFiller(DefaultSpinnerStyle, alignment), options...) return p.Add(total, NewSpinnerFiller(DefaultSpinnerStyle, alignment), options...)
} }
// Add creates a bar which renders itself by provided filler. // Add creates a bar which renders itself by provided filler.
// Set total to 0, if you plan to update it later. // Set total to 0, if you plan to update it later.
// Panics if *Progress instance is done, i.e. called after *Progress.Wait().
func (p *Progress) Add(total int64, filler Filler, options ...BarOption) *Bar { func (p *Progress) Add(total int64, filler Filler, options ...BarOption) *Bar {
if filler == nil { if filler == nil {
filler = NewBarFiller(DefaultBarStyle, false) filler = NewBarFiller(DefaultBarStyle, false)
@ -134,7 +136,7 @@ func (p *Progress) Add(total int64, filler Filler, options ...BarOption) *Bar {
return bar return bar
case <-p.done: case <-p.done:
p.bwg.Done() p.bwg.Done()
return nil panic(fmt.Sprintf("%T instance can't be reused after it's done!", p))
} }
} }
@ -387,7 +389,7 @@ func syncWidth(matrix map[int][]chan int) {
} }
func extractBaseFiller(f Filler) Filler { func extractBaseFiller(f Filler) Filler {
if f, ok := f.(Wrapper); ok { if f, ok := f.(WrapFiller); ok {
return extractBaseFiller(f.Base()) return extractBaseFiller(f.Base())
} }
return f return f
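
The behavioural change worth flagging for callers of this vendored copy: Add (and therefore AddBar and AddSpinner) no longer returns a nil bar once the container is done, it panics. A minimal sketch of the pattern that must now be avoided:

package main

import "github.com/vbauerster/mpb/v4"

func main() {
	p := mpb.New()
	bar := p.AddBar(1)
	bar.Increment()
	p.Wait()

	// Before this update the next call returned nil; now it panics with a
	// message like "*mpb.Progress instance can't be reused after it's done!".
	// p.AddBar(1)
}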

View File

@ -18,7 +18,7 @@ const (
SpinnerOnRight SpinnerOnRight
) )
// DefaultSpinnerStyle is applied when bar constructed with *Progress.AddSpinner method. // DefaultSpinnerStyle is a slice of strings, which makes a spinner.
var DefaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"} var DefaultSpinnerStyle = []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
type spinnerFiller struct { type spinnerFiller struct {
@ -27,7 +27,7 @@ type spinnerFiller struct {
alignment SpinnerAlignment alignment SpinnerAlignment
} }
// NewSpinnerFiller constucts mpb.Filler, to be used with *Progress.Add method. // NewSpinnerFiller constucts mpb.Filler, to be used with *Progress.Add(...) *Bar method.
func NewSpinnerFiller(style []string, alignment SpinnerAlignment) Filler { func NewSpinnerFiller(style []string, alignment SpinnerAlignment) Filler {
if len(style) == 0 { if len(style) == 0 {
style = DefaultSpinnerStyle style = DefaultSpinnerStyle

View File

@ -62,10 +62,11 @@ var armorEndOfLine = []byte("-----")
// lineReader wraps a line based reader. It watches for the end of an armor // lineReader wraps a line based reader. It watches for the end of an armor
// block and records the expected CRC value. // block and records the expected CRC value.
type lineReader struct { type lineReader struct {
in *bufio.Reader in *bufio.Reader
buf []byte buf []byte
eof bool eof bool
crc uint32 crc uint32
crcSet bool
} }
func (l *lineReader) Read(p []byte) (n int, err error) { func (l *lineReader) Read(p []byte) (n int, err error) {
@ -87,6 +88,11 @@ func (l *lineReader) Read(p []byte) (n int, err error) {
return 0, ArmorCorrupt return 0, ArmorCorrupt
} }
if bytes.HasPrefix(line, armorEnd) {
l.eof = true
return 0, io.EOF
}
if len(line) == 5 && line[0] == '=' { if len(line) == 5 && line[0] == '=' {
// This is the checksum line // This is the checksum line
var expectedBytes [3]byte var expectedBytes [3]byte
@ -108,6 +114,7 @@ func (l *lineReader) Read(p []byte) (n int, err error) {
} }
l.eof = true l.eof = true
l.crcSet = true
return 0, io.EOF return 0, io.EOF
} }
@ -141,10 +148,8 @@ func (r *openpgpReader) Read(p []byte) (n int, err error) {
n, err = r.b64Reader.Read(p) n, err = r.b64Reader.Read(p)
r.currentCRC = crc24(r.currentCRC, p[:n]) r.currentCRC = crc24(r.currentCRC, p[:n])
if err == io.EOF { if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) {
if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { return 0, ArmorCorrupt
return 0, ArmorCorrupt
}
} }
return return

View File

@ -7,6 +7,7 @@ package terminal
import ( import (
"bytes" "bytes"
"io" "io"
"runtime"
"strconv" "strconv"
"sync" "sync"
"unicode/utf8" "unicode/utf8"
@ -939,6 +940,8 @@ func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
// readPasswordLine reads from reader until it finds \n or io.EOF. // readPasswordLine reads from reader until it finds \n or io.EOF.
// The slice returned does not include the \n. // The slice returned does not include the \n.
// readPasswordLine also ignores any \r it finds. // readPasswordLine also ignores any \r it finds.
// Windows uses \r as end of line. So, on Windows, readPasswordLine
// reads until it finds \r and ignores any \n it finds during processing.
func readPasswordLine(reader io.Reader) ([]byte, error) { func readPasswordLine(reader io.Reader) ([]byte, error) {
var buf [1]byte var buf [1]byte
var ret []byte var ret []byte
@ -947,10 +950,20 @@ func readPasswordLine(reader io.Reader) ([]byte, error) {
n, err := reader.Read(buf[:]) n, err := reader.Read(buf[:])
if n > 0 { if n > 0 {
switch buf[0] { switch buf[0] {
case '\b':
if len(ret) > 0 {
ret = ret[:len(ret)-1]
}
case '\n': case '\n':
return ret, nil if runtime.GOOS != "windows" {
return ret, nil
}
// otherwise ignore \n
case '\r': case '\r':
// remove \r from passwords on Windows if runtime.GOOS == "windows" {
return ret, nil
}
// otherwise ignore \r
default: default:
ret = append(ret, buf[0]) ret = append(ret, buf[0])
} }

View File

@ -85,8 +85,8 @@ func ReadPassword(fd int) ([]byte, error) {
} }
old := st old := st
st &^= (windows.ENABLE_ECHO_INPUT) st &^= (windows.ENABLE_ECHO_INPUT | windows.ENABLE_LINE_INPUT)
st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) st |= (windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_PROCESSED_INPUT)
if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil { if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil {
return nil, err return nil, err
} }
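
The exported entry point is unchanged; the hunks above only make backspace editing and the \r / \n line endings behave consistently on Windows. A minimal sketch of the usual call, assuming this is the vendored golang.org/x/crypto/ssh/terminal package:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fmt.Fprint(os.Stderr, "Password: ")
	pw, err := terminal.ReadPassword(int(os.Stdin.Fd()))
	fmt.Fprintln(os.Stderr)
	if err != nil {
		panic(err)
	}
	fmt.Fprintf(os.Stderr, "read %d bytes\n", len(pw))
}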

View File

@ -23,10 +23,6 @@ TEXT ·SyscallNoError(SB),NOSPLIT,$0-48
MOV a1+8(FP), A0 MOV a1+8(FP), A0
MOV a2+16(FP), A1 MOV a2+16(FP), A1
MOV a3+24(FP), A2 MOV a3+24(FP), A2
MOV $0, A3
MOV $0, A4
MOV $0, A5
MOV $0, A6
MOV trap+0(FP), A7 // syscall entry MOV trap+0(FP), A7 // syscall entry
ECALL ECALL
MOV A0, r1+32(FP) // r1 MOV A0, r1+32(FP) // r1
@ -44,9 +40,6 @@ TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-48
MOV a1+8(FP), A0 MOV a1+8(FP), A0
MOV a2+16(FP), A1 MOV a2+16(FP), A1
MOV a3+24(FP), A2 MOV a3+24(FP), A2
MOV ZERO, A3
MOV ZERO, A4
MOV ZERO, A5
MOV trap+0(FP), A7 // syscall entry MOV trap+0(FP), A7 // syscall entry
ECALL ECALL
MOV A0, r1+32(FP) MOV A0, r1+32(FP)

View File

@ -9,12 +9,11 @@ package unix
import "unsafe" import "unsafe"
// fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux // fcntl64Syscall is usually SYS_FCNTL, but is overridden on 32-bit Linux
// systems by flock_linux_32bit.go to be SYS_FCNTL64. // systems by fcntl_linux_32bit.go to be SYS_FCNTL64.
var fcntl64Syscall uintptr = SYS_FCNTL var fcntl64Syscall uintptr = SYS_FCNTL
// FcntlInt performs a fcntl syscall on fd with the provided command and argument. func fcntl(fd int, cmd, arg int) (int, error) {
func FcntlInt(fd uintptr, cmd, arg int) (int, error) { valptr, _, errno := Syscall(fcntl64Syscall, uintptr(fd), uintptr(cmd), uintptr(arg))
valptr, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg))
var err error var err error
if errno != 0 { if errno != 0 {
err = errno err = errno
@ -22,6 +21,11 @@ func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
return int(valptr), err return int(valptr), err
} }
// FcntlInt performs a fcntl syscall on fd with the provided command and argument.
func FcntlInt(fd uintptr, cmd, arg int) (int, error) {
return fcntl(int(fd), cmd, arg)
}
// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command.
func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error {
_, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk))) _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk)))
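
The exported FcntlInt and FcntlFlock signatures do not change; the refactor only introduces an int-based helper for internal callers. A minimal sketch of the exported call on a Unix build:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	flags, err := unix.FcntlInt(os.Stdin.Fd(), unix.F_GETFL, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("stdin flags: %#x\n", flags)
}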

View File

@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system # Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run) # Files generated through docker (use $cmd so you can Ctl-C the build or run)
$cmd docker build --tag generate:$GOOS $GOOS $cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(dirname "$(readlink -f "$0")"):/build generate:$GOOS $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS
exit exit
fi fi

View File

@ -186,6 +186,7 @@ struct ltchars {
#include <sys/select.h> #include <sys/select.h>
#include <sys/signalfd.h> #include <sys/signalfd.h>
#include <sys/socket.h> #include <sys/socket.h>
#include <sys/uio.h>
#include <sys/xattr.h> #include <sys/xattr.h>
#include <linux/bpf.h> #include <linux/bpf.h>
#include <linux/can.h> #include <linux/can.h>
@ -485,7 +486,7 @@ ccflags="$@"
$2 ~ /^TCSET/ || $2 ~ /^TCSET/ ||
$2 ~ /^TC(FLSH|SBRKP?|XONC)$/ || $2 ~ /^TC(FLSH|SBRKP?|XONC)$/ ||
$2 !~ "RTF_BITS" && $2 !~ "RTF_BITS" &&
$2 ~ /^(IFF|IFT|NET_RT|RTM|RTF|RTV|RTA|RTAX)_/ || $2 ~ /^(IFF|IFT|NET_RT|RTM(GRP)?|RTF|RTV|RTA|RTAX)_/ ||
$2 ~ /^BIOC/ || $2 ~ /^BIOC/ ||
$2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ || $2 ~ /^RUSAGE_(SELF|CHILDREN|THREAD)/ ||
$2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ ||
@ -526,6 +527,7 @@ ccflags="$@"
$2 ~ /^WDIOC_/ || $2 ~ /^WDIOC_/ ||
$2 ~ /^NFN/ || $2 ~ /^NFN/ ||
$2 ~ /^XDP_/ || $2 ~ /^XDP_/ ||
$2 ~ /^RWF_/ ||
$2 ~ /^(HDIO|WIN|SMART)_/ || $2 ~ /^(HDIO|WIN|SMART)_/ ||
$2 ~ /^CRYPTO_/ || $2 ~ /^CRYPTO_/ ||
$2 ~ /^TIPC_/ || $2 ~ /^TIPC_/ ||

View File

@ -510,6 +510,23 @@ func SysctlRaw(name string, args ...int) ([]byte, error) {
return buf[:n], nil return buf[:n], nil
} }
func SysctlClockinfo(name string) (*Clockinfo, error) {
mib, err := sysctlmib(name)
if err != nil {
return nil, err
}
n := uintptr(SizeofClockinfo)
var ci Clockinfo
if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil {
return nil, err
}
if n != SizeofClockinfo {
return nil, EIO
}
return &ci, nil
}
//sys utimes(path string, timeval *[2]Timeval) (err error) //sys utimes(path string, timeval *[2]Timeval) (err error)
func Utimes(path string, tv []Timeval) error { func Utimes(path string, tv []Timeval) error {
@ -577,8 +594,6 @@ func Futimes(fd int, tv []Timeval) error {
return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0]))) return futimes(fd, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
} }
//sys fcntl(fd int, cmd int, arg int) (val int, err error)
//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error) //sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
func Poll(fds []PollFd, timeout int) (n int, err error) { func Poll(fds []PollFd, timeout int) (n int, err error) {
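
The hunks in this and the next file move SysctlClockinfo between syscall files without changing its signature, so callers on the platforms that provide Clockinfo use it the same way. A minimal sketch, assuming a BSD or darwin build of golang.org/x/sys/unix:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	ci, err := unix.SysctlClockinfo("kern.clockrate") // standard clockinfo sysctl on the BSDs
	if err != nil {
		panic(err)
	}
	fmt.Printf("hz=%d tick=%d\n", ci.Hz, ci.Tick)
}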

View File

@ -155,23 +155,6 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (
//sys getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) //sys getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error)
func SysctlClockinfo(name string) (*Clockinfo, error) {
mib, err := sysctlmib(name)
if err != nil {
return nil, err
}
n := uintptr(SizeofClockinfo)
var ci Clockinfo
if err := sysctl(mib, (*byte)(unsafe.Pointer(&ci)), &n, nil, 0); err != nil {
return nil, err
}
if n != SizeofClockinfo {
return nil, EIO
}
return &ci, nil
}
//sysnb pipe() (r int, w int, err error) //sysnb pipe() (r int, w int, err error)
func Pipe(p []int) (err error) { func Pipe(p []int) (err error) {
@ -333,6 +316,8 @@ func utimensat(dirfd int, path string, times *[2]Timespec, flags int) error {
* Wrapped * Wrapped
*/ */
//sys fcntl(fd int, cmd int, arg int) (val int, err error)
//sys kill(pid int, signum int, posix int) (err error) //sys kill(pid int, signum int, posix int) (err error)
func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) }

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
// +build darwin,386,!go1.12 // +build darwin,arm,!go1.12
package unix package unix

View File

@ -529,12 +529,6 @@ func PtraceGetRegs(pid int, regsout *Reg) (err error) {
return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) return ptrace(PTRACE_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0)
} }
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint(countin)}
err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}
func PtraceLwpEvents(pid int, enable int) (err error) { func PtraceLwpEvents(pid int, enable int) (err error) {
return ptrace(PTRACE_LWPEVENTS, pid, 0, enable) return ptrace(PTRACE_LWPEVENTS, pid, 0, enable)
} }

View File

@ -54,3 +54,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
} }
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)}
err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

View File

@ -54,3 +54,9 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
} }
func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno)
func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) {
ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint64(countin)}
err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0)
return int(ioDesc.Len), err
}

Some files were not shown because too many files have changed in this diff.