mirror of https://github.com/containers/skopeo.git
synced 2025-04-27 19:05:32 +00:00

commit a7297d4db7 (parent 7cbb8ad3ba)

vendor github.com/containers/image/v5@v5.2.0

See release notes: https://github.com/containers/image/releases/tag/v5.2.0

Signed-off-by: Valentin Rothberg <rothberg@redhat.com>

This commit is contained in:
go.mod (6 changes)

@@ -5,9 +5,9 @@ go 1.12
 require (
 	github.com/containers/buildah v1.13.1 // indirect
 	github.com/containers/common v0.0.7
-	github.com/containers/image/v5 v5.1.0
+	github.com/containers/image/v5 v5.2.0
 	github.com/containers/ocicrypt v0.0.0-20190930154801-b87a4a69c741
-	github.com/containers/storage v1.15.5
+	github.com/containers/storage v1.15.8
 	github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23
 	github.com/dsnet/compress v0.0.1 // indirect
 	github.com/go-check/check v0.0.0-20180628173108-788fd7840127
@@ -16,7 +16,7 @@ require (
 	github.com/opencontainers/image-tools v0.0.0-20170926011501-6d941547fa1d
 	github.com/opencontainers/runtime-spec v1.0.0 // indirect
 	github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible // indirect
-	github.com/pkg/errors v0.9.0
+	github.com/pkg/errors v0.9.1
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
 	github.com/sirupsen/logrus v1.4.2
 	github.com/stretchr/testify v1.4.0
go.sum (19 changes)

@@ -25,6 +25,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdko
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
 github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
@@ -69,6 +71,8 @@ github.com/containers/image/v5 v5.0.1-0.20191126085826-502848a1358b h1:xUXa/0+KW
 github.com/containers/image/v5 v5.0.1-0.20191126085826-502848a1358b/go.mod h1:NNGElTgKPvARdKeiJIE/IF+ddvHmNwaLPBupsoZI8eI=
 github.com/containers/image/v5 v5.1.0 h1:5FjAvPJniamuNNIQHkh4PnsL+n+xzs6Aonzaz5dqTEo=
 github.com/containers/image/v5 v5.1.0/go.mod h1:BKlMD34WxRo1ruGHHEOrPQP0Qci7SWoPwU6fS7arsCU=
+github.com/containers/image/v5 v5.2.0 h1:DowY5OII5x9Pb6Pt76vnHU79BgG4/jdwhZjeAj2R+t8=
+github.com/containers/image/v5 v5.2.0/go.mod h1:IAub4gDGvXoxaIAdNy4e3FbVTDPVNMv9F0UfVVFbYCU=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
 github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v0.0.0-20190930154801-b87a4a69c741 h1:8tQkOcednLJtUcZgK7sPglscXtxvMOnFOa6wd09VWLM=
@@ -87,6 +91,8 @@ github.com/containers/storage v1.15.3 h1:+lFSQZnnKUFyUEtguIgdoQLJfWSuYz+j/wg5GxL
 github.com/containers/storage v1.15.3/go.mod h1:v0lq/3f+cXH3Y/HiDaFYRR0zilwDve7I4W7U5xQxvF8=
 github.com/containers/storage v1.15.5 h1:dBZx9yRFHod9c8FVaXlVtRqr2cmlAhpl+9rt87cE7J4=
 github.com/containers/storage v1.15.5/go.mod h1:v0lq/3f+cXH3Y/HiDaFYRR0zilwDve7I4W7U5xQxvF8=
+github.com/containers/storage v1.15.8 h1:ef7OfUMTpyq0PIVAhV7qfufEI92gAldk25nItrip+6Q=
+github.com/containers/storage v1.15.8/go.mod h1:zhvjIIl/fR6wt/lgqQAC+xanHQ+8gUQ0GBVeXYN81qI=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -229,6 +235,8 @@ github.com/klauspost/compress v1.9.3 h1:hkFELABwacUEgBfiguNeQydKv3M9pawBq8o24Ypw
 github.com/klauspost/compress v1.9.3/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.9.4 h1:xhvAeUPQ2drNUhKtrGdTGNvV9nNafHMUkRyLkzxJoB4=
 github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
+github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@@ -256,6 +264,8 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
 github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI=
 github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.9 h1:eaB5JspOwiKKcHdqcjbfe5lA9cNn/4NRRtddXJCimqk=
+github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
@@ -272,6 +282,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
 github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c h1:xa+eQWKuJ9MbB9FBL/eoNvDFvveAkz2LQoz8PzX7Q/4=
 github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c/go.mod h1:GhAqVMEWnTcW2dxoD/SO3n2enrgWl3y6Dnx4m59GvcA=
+github.com/mtrmac/gpgme v0.1.1 h1:a5ISnvahzTzBH0m/klhehN68N+9+/jLwhpPFtH3oPAQ=
+github.com/mtrmac/gpgme v0.1.1/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI=
 github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
@@ -312,6 +324,8 @@ github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pK
 github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
 github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=
 github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
+github.com/opencontainers/selinux v1.3.1 h1:dn2Rc3wTEvTB6iVqoFrKKeMb0uZ38ZheeyMu2h5C1TI=
+github.com/opencontainers/selinux v1.3.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
 github.com/openshift/api v0.0.0-20200106203948-7ab22a2c8316/go.mod h1:dv+J0b/HWai0QnMVb37/H0v36klkLBi2TNpPeWDxX10=
 github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible h1:s55wx8JIG/CKnewev892HifTBrtKzMdvgB3rm4rxC2s=
 github.com/openshift/api v3.9.1-0.20190810003144-27fb16909b15+incompatible/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY=
@@ -325,6 +339,8 @@ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.0 h1:J8lpUdobwIeCI7OiSxHqEwJUKvJwicL5+3v1oe2Yb4k=
 github.com/pkg/errors v0.9.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -396,6 +412,8 @@ github.com/vbauerster/mpb v3.4.0+incompatible h1:mfiiYw87ARaeRW6x5gWwYRUawxaW1tL
 github.com/vbauerster/mpb v3.4.0+incompatible/go.mod h1:zAHG26FUhVKETRu+MWqYXcI70POlC6N8up9p1dID7SU=
 github.com/vbauerster/mpb/v4 v4.11.1 h1:ZOYQSVHgmeanXsbyC44aDg76tBGCS/54Rk8VkL8dJGA=
 github.com/vbauerster/mpb/v4 v4.11.1/go.mod h1:vMLa1J/ZKC83G2lB/52XpqT+ZZtFG4aZOdKhmpRL1uM=
+github.com/vbauerster/mpb/v4 v4.11.2 h1:ynkUoKzi65DZ1UsQPx7sgi/KN6G9f7br+Us2nKm35AM=
+github.com/vbauerster/mpb/v4 v4.11.2/go.mod h1:jIuIRCltGJUnm6DCyPVkwjlLUk4nHTH+m4eD14CdFF0=
 github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -476,6 +494,7 @@ golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo=
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 h1:/J2nHFg1MTqaRLFO7M+J78ASNsJoz3r0cvHBPQ77fsE=
 golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
vendor/github.com/acarl005/stripansi/LICENSE (21 additions; generated, vendored; new file)

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Andrew Carlson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
vendor/github.com/acarl005/stripansi/README.md (30 additions; generated, vendored; new file)

@@ -0,0 +1,30 @@
+Strip ANSI
+==========
+
+This Go package removes ANSI escape codes from strings.
+
+Ideally, we would prevent these from appearing in any text we want to process.
+However, sometimes this can't be helped, and we need to be able to deal with that noise.
+This will use a regexp to remove those unwanted escape codes.
+
+
+## Install
+
+```sh
+$ go get -u github.com/acarl005/stripansi
+```
+
+## Usage
+
+```go
+import (
+	"fmt"
+	"github.com/acarl005/stripansi"
+)
+
+func main() {
+	msg := "\x1b[38;5;140m foo\x1b[0m bar"
+	cleanMsg := stripansi.Strip(msg)
+	fmt.Println(cleanMsg) // " foo bar"
+}
+```
vendor/github.com/acarl005/stripansi/stripansi.go (13 additions; generated, vendored; new file)

@@ -0,0 +1,13 @@
+package stripansi
+
+import (
+	"regexp"
+)
+
+const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"
+
+var re = regexp.MustCompile(ansi)
+
+func Strip(str string) string {
+	return re.ReplaceAllString(str, "")
+}
vendor/github.com/containers/image/v5/copy/copy.go (4 changes; generated, vendored)

@@ -709,7 +709,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
 		wantedOS = sys.OSChoice
 	}
 	if wantedOS != c.OS {
-		return fmt.Errorf("Image operating system mismatch: image uses %q, expecting %q", c.OS, wantedOS)
+		logrus.Infof("Image operating system mismatch: image uses %q, expecting %q", c.OS, wantedOS)
 	}

 	wantedArch := runtime.GOARCH
@@ -717,7 +717,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
 		wantedArch = sys.ArchitectureChoice
 	}
 	if wantedArch != c.Architecture {
-		return fmt.Errorf("Image architecture mismatch: image uses %q, expecting %q", c.Architecture, wantedArch)
+		logrus.Infof("Image architecture mismatch: image uses %q, expecting %q", c.Architecture, wantedArch)
 	}
 }
 return nil
vendor/github.com/containers/image/v5/directory/directory_dest.go (21 changes; generated, vendored)

@@ -6,6 +6,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"runtime"

 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
@@ -142,8 +143,11 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 		return types.BlobInfo{}, err
 	}
 	succeeded := false
+	explicitClosed := false
 	defer func() {
-		blobFile.Close()
+		if !explicitClosed {
+			blobFile.Close()
+		}
 		if !succeeded {
 			os.Remove(blobFile.Name())
 		}
@@ -164,10 +168,21 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
 	if err := blobFile.Sync(); err != nil {
 		return types.BlobInfo{}, err
 	}
-	if err := blobFile.Chmod(0644); err != nil {
-		return types.BlobInfo{}, err
+
+	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
+	// On Windows, the “permissions of newly created files” argument to syscall.Open is
+	// ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod,
+	// always fails on Windows.
+	if runtime.GOOS != "windows" {
+		if err := blobFile.Chmod(0644); err != nil {
+			return types.BlobInfo{}, err
+		}
 	}
+
 	blobPath := d.ref.layerPath(computedDigest)
+	// need to explicitly close the file, since a rename won't otherwise work on Windows
+	blobFile.Close()
+	explicitClosed = true
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
 		return types.BlobInfo{}, err
 	}
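Two portability details drive the PutBlob hunks above: Fchmod always fails on Windows, and os.Rename fails there while the source file is still open. A minimal, self-contained sketch of the same pattern (the writeBlob helper and its names are hypothetical, not part of containers/image):

```go
package main

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"
)

// writeBlob demonstrates the pattern from the diff above: stream into a temp
// file, chmod only where supported, close the file explicitly before renaming
// (required on Windows), and clean up the temp file on any failure.
func writeBlob(dir, name string, src io.Reader) error {
	tmp, err := ioutil.TempFile(dir, "blob-")
	if err != nil {
		return err
	}
	succeeded := false
	explicitClosed := false
	defer func() {
		if !explicitClosed {
			tmp.Close()
		}
		if !succeeded {
			os.Remove(tmp.Name())
		}
	}()
	if _, err := io.Copy(tmp, src); err != nil {
		return err
	}
	if runtime.GOOS != "windows" { // Fchmod always fails on Windows
		if err := tmp.Chmod(0644); err != nil {
			return err
		}
	}
	tmp.Close() // a rename won't work on Windows while the file is open
	explicitClosed = true
	if err := os.Rename(tmp.Name(), filepath.Join(dir, name)); err != nil {
		return err
	}
	succeeded = true
	return nil
}

func main() {
	if err := writeBlob(os.TempDir(), "example.bin", strings.NewReader("data")); err != nil {
		panic(err)
	}
}
```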
vendor/github.com/containers/image/v5/docker/docker_client.go (128 changes; generated, vendored)

@@ -6,7 +6,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -17,6 +16,7 @@ import (
 	"time"

 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
 	"github.com/containers/image/v5/pkg/docker/config"
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/pkg/tlsclientconfig"
@@ -45,6 +45,10 @@ const (

 	extensionSignatureSchemaVersion = 2        // extensionSignature.Version
 	extensionSignatureTypeAtomic    = "atomic" // extensionSignature.Type
+
+	backoffNumIterations = 5
+	backoffInitialDelay  = 2 * time.Second
+	backoffMaxDelay      = 60 * time.Second
 )

 var systemPerHostCertDirPaths = [2]string{"/etc/containers/certs.d", "/etc/docker/certs.d"}
@@ -277,7 +281,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
 	}
 	defer resp.Body.Close()

-	return httpResponseToError(resp)
+	return httpResponseToError(resp, "")
 }

 // SearchResult holds the information of each matching image
@@ -351,7 +355,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
 	} else {
 		defer resp.Body.Close()
 		if resp.StatusCode != http.StatusOK {
-			logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp))
+			logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp, ""))
 		} else {
 			if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
 				return nil, err
@@ -368,7 +372,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
 	} else {
 		defer resp.Body.Close()
 		if resp.StatusCode != http.StatusOK {
-			logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp))
+			logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, httpResponseToError(resp, ""))
 		} else {
 			if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
 				return nil, err
@@ -400,74 +404,64 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea
 	return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
 }

+// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it,
+// silently falling back to fallbackDelay if the header is missing or invalid.
+func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration {
+	after := res.Header.Get("Retry-After")
+	if after == "" {
+		return fallbackDelay
+	}
+	logrus.Debugf("Detected 'Retry-After' header %q", after)
+	// First, check if we have a numerical value.
+	if num, err := strconv.ParseInt(after, 10, 64); err == nil {
+		return time.Duration(num) * time.Second
+	}
+	// Second, check if we have an HTTP date.
+	// If the delta between the date and now is positive, use it.
+	// Otherwise, fall back to using the default exponential back off.
+	if t, err := http.ParseTime(after); err == nil {
+		delta := time.Until(t)
+		if delta > 0 {
+			return delta
+		}
+		logrus.Debugf("Retry-After date in the past, ignoring it")
+		return fallbackDelay
+	}
+	// If the header contents are bogus, fall back to using the default exponential back off.
+	logrus.Debugf("Invalid Retry-After format, ignoring it")
+	return fallbackDelay
+}
+
 // makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
 // streamLen, if not -1, specifies the length of the data expected on stream.
 // makeRequest should generally be preferred.
-// In case of an http 429 status code in the response, it performs an exponential back off starting at 2 seconds for at most 5 iterations.
-// If the `Retry-After` header is set in the response, the specified value or date is
+// If the stream is non-nil, no back off will be performed.
+// In case of an HTTP 429 status code in the response, it may automatically retry a few times.
 // TODO(runcom): too many arguments here, use a struct
 func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
-	var (
-		res   *http.Response
-		err   error
-		delay int64
-	)
-	delay = 2
-	const numIterations = 5
-	const maxDelay = 60
-
-	// math.Min() only supports float64, so have an anonymous func to avoid
-	// casting.
-	min := func(a int64, b int64) int64 {
-		if a < b {
-			return a
-		}
-		return b
-	}
-
-	nextDelay := func(r *http.Response, delay int64) int64 {
-		after := res.Header.Get("Retry-After")
-		if after == "" {
-			return min(delay, maxDelay)
-		}
-		logrus.Debugf("detected 'Retry-After' header %q", after)
-		// First check if we have a numerical value.
-		if num, err := strconv.ParseInt(after, 10, 64); err == nil {
-			return min(num, maxDelay)
-		}
-		// Secondly check if we have an http date.
-		// If the delta between the date and now is positive, use it.
-		// Otherwise, fall back to using the default exponential back off.
-		if t, err := http.ParseTime(after); err == nil {
-			delta := int64(time.Until(t).Seconds())
-			if delta > 0 {
-				return min(delta, maxDelay)
-			}
-			logrus.Debugf("negative date: falling back to using %d seconds", delay)
-			return min(delay, maxDelay)
-		}
-		// If the header contains bogus, fall back to using the default
-		// exponential back off.
-		logrus.Debugf("invalid format: falling back to using %d seconds", delay)
-		return min(delay, maxDelay)
-	}
-
-	for i := 0; i < numIterations; i++ {
-		res, err = c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
-		if stream == nil && res != nil && res.StatusCode == http.StatusTooManyRequests {
-			if i < numIterations-1 {
-				logrus.Errorf("HEADER %v", res.Header)
-				delay = nextDelay(res, delay) // compute next delay - does NOT exceed maxDelay
-				logrus.Debugf("too many request to %s: sleeping for %d seconds before next attempt", url, delay)
-				time.Sleep(time.Duration(delay) * time.Second)
-				delay = delay * 2 // exponential back off
-			}
-			continue
-		}
-		break
-	}
-	return res, err
+	delay := backoffInitialDelay
+	attempts := 0
+	for {
+		res, err := c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
+		attempts++
+		if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
+			stream != nil || // We can't retry with a body (which is not restartable in the general case)
+			attempts == backoffNumIterations {
+			return res, err
+		}
+		delay = parseRetryAfter(res, delay)
+		if delay > backoffMaxDelay {
+			delay = backoffMaxDelay
+		}
+		logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url, delay.Seconds())
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		case <-time.After(delay):
+			// Nothing
+		}
+		delay = delay * 2 // exponential back off
+	}
 }

 // makeRequestToResolvedURLOnce creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
@@ -597,7 +591,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
 	default:
 		return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL)
 	}
-	tokenBlob, err := ioutil.ReadAll(res.Body)
+	tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
 	if err != nil {
 		return nil, err
 	}
@@ -627,7 +621,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
 	defer resp.Body.Close()
 	logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
 	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
-		return httpResponseToError(resp)
+		return httpResponseToError(resp, "")
 	}
 	c.challenges = parseAuthHeader(resp.Header)
 	c.scheme = scheme
@@ -690,7 +684,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
 		return nil, errors.Wrapf(clientLib.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
 	}

-	body, err := ioutil.ReadAll(res.Body)
+	body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
 	if err != nil {
 		return nil, err
 	}
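The rewritten back-off above retries HTTP 429 responses, honoring Retry-After in either of the two forms RFC 7231 allows: delay-seconds or an HTTP-date. As a standalone illustration of that header handling, here is a minimal sketch using only the standard library (not the vendored function itself):

```go
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryAfter mirrors the idea of parseRetryAfter above: prefer the server's
// Retry-After header (either seconds or an HTTP date); otherwise keep the
// caller's fallback delay for exponential back off.
func retryAfter(res *http.Response, fallback time.Duration) time.Duration {
	after := res.Header.Get("Retry-After")
	if after == "" {
		return fallback
	}
	// Numeric form: "Retry-After: 120" means 120 seconds.
	if num, err := strconv.ParseInt(after, 10, 64); err == nil {
		return time.Duration(num) * time.Second
	}
	// Date form: only use it if it points into the future.
	if t, err := http.ParseTime(after); err == nil {
		if d := time.Until(t); d > 0 {
			return d
		}
	}
	return fallback // missing, past, or malformed header
}

func main() {
	res := &http.Response{Header: http.Header{"Retry-After": []string{"3"}}}
	fmt.Println(retryAfter(res, 2*time.Second)) // 3s
}
```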
vendor/github.com/containers/image/v5/docker/docker_image.go (2 changes; generated, vendored)

@@ -70,7 +70,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
 		return nil, err
 	}
 	defer res.Body.Close()
-	if err := httpResponseToError(res); err != nil {
+	if err := httpResponseToError(res, "Error fetching tags list"); err != nil {
 		return nil, err
 	}

vendor/github.com/containers/image/v5/docker/docker_image_dest.go (11 changes; generated, vendored)

@@ -15,6 +15,7 @@ import (
 	"strings"

 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/blobinfocache/none"
 	"github.com/containers/image/v5/types"
@@ -58,14 +59,16 @@ func (d *dockerImageDestination) Close() error {
 }

 func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
-	return []string{
+	mimeTypes := []string{
 		imgspecv1.MediaTypeImageManifest,
 		manifest.DockerV2Schema2MediaType,
 		imgspecv1.MediaTypeImageIndex,
 		manifest.DockerV2ListMediaType,
-		manifest.DockerV2Schema1SignedMediaType,
-		manifest.DockerV2Schema1MediaType,
 	}
+	if d.c.sys == nil || !d.c.sys.DockerDisableDestSchema1MIMETypes {
+		mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType)
+	}
+	return mimeTypes
 }

 // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
@@ -620,7 +623,7 @@ sigExists:
 	}
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusCreated {
-		body, err := ioutil.ReadAll(res.Body)
+		body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxErrorBodySize)
 		if err == nil {
 			logrus.Debugf("Error body %s", string(body))
 		}
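The SupportedManifestMIMETypes rewrite above is driven by the new SystemContext field added in types.go further down: callers can now keep the legacy schema1 media types out of the advertised list entirely. A hedged usage sketch (the field comes from this diff; the surrounding program is illustrative only):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
)

func main() {
	// With this flag set, dockerImageDestination.SupportedManifestMIMETypes
	// will omit DockerV2Schema1SignedMediaType and DockerV2Schema1MediaType,
	// so copies to this destination never fall back to legacy schema1.
	sys := &types.SystemContext{
		DockerDisableDestSchema1MIMETypes: true,
	}
	fmt.Println(sys.DockerDisableDestSchema1MIMETypes)
}
```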
vendor/github.com/containers/image/v5/docker/docker_image_src.go (111 changes; generated, vendored)

@@ -10,8 +10,10 @@ import (
 	"net/url"
 	"os"
 	"strconv"
+	"strings"

 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/types"
@@ -53,43 +55,77 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 // non-mirror original location last; this both transparently handles the case
 // of no mirrors configured, and ensures we return the error encountered when
 // acessing the upstream location if all endpoints fail.
-	manifestLoadErr := errors.New("Internal error: newImageSource returned without trying any endpoint")
 	pullSources, err := registry.PullSourcesFromReference(ref.ref)
 	if err != nil {
 		return nil, err
 	}
-	for _, pullSource := range pullSources {
-		logrus.Debugf("Trying to pull %q", pullSource.Reference)
-		dockerRef, err := newReference(pullSource.Reference)
-		if err != nil {
-			return nil, err
-		}
-
-		endpointSys := sys
-		// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
-		if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(dockerRef.ref) != primaryDomain {
-			copy := *endpointSys
-			copy.DockerAuthConfig = nil
-			endpointSys = &copy
-		}
-
-		client, err := newDockerClientFromRef(endpointSys, dockerRef, false, "pull")
-		if err != nil {
-			return nil, err
-		}
-		client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
-
-		testImageSource := &dockerImageSource{
-			ref: dockerRef,
-			c:   client,
-		}
-
-		manifestLoadErr = testImageSource.ensureManifestIsLoaded(ctx)
-		if manifestLoadErr == nil {
-			return testImageSource, nil
-		}
-	}
-	return nil, manifestLoadErr
+	type attempt struct {
+		ref reference.Named
+		err error
+	}
+	attempts := []attempt{}
+	for _, pullSource := range pullSources {
+		logrus.Debugf("Trying to access %q", pullSource.Reference)
+		s, err := newImageSourceAttempt(ctx, sys, pullSource, primaryDomain)
+		if err == nil {
+			return s, nil
+		}
+		logrus.Debugf("Accessing %q failed: %v", pullSource.Reference, err)
+		attempts = append(attempts, attempt{
+			ref: pullSource.Reference,
+			err: err,
+		})
+	}
+	switch len(attempts) {
+	case 0:
+		return nil, errors.New("Internal error: newImageSource returned without trying any endpoint")
+	case 1:
+		return nil, attempts[0].err // If no mirrors are used, perfectly preserve the error type and add no noise.
+	default:
+		// Don’t just build a string, try to preserve the typed error.
+		primary := &attempts[len(attempts)-1]
+		extras := []string{}
+		for i := 0; i < len(attempts)-1; i++ {
+			// This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use.
+			// The paired [] at least have some chance of being unambiguous.
+			extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
+		}
+		return nil, errors.Wrapf(primary.err, "(Mirrors also failed: %s): %s", strings.Join(extras, "\n"), primary.ref.String())
+	}
+}
+
+// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource.
+// Given a pullSource and primaryDomain, return a dockerImageSource if it is reachable.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, pullSource sysregistriesv2.PullSource, primaryDomain string) (*dockerImageSource, error) {
+	ref, err := newReference(pullSource.Reference)
+	if err != nil {
+		return nil, err
+	}
+
+	endpointSys := sys
+	// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
+	if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(ref.ref) != primaryDomain {
+		copy := *endpointSys
+		copy.DockerAuthConfig = nil
+		endpointSys = &copy
+	}
+
+	client, err := newDockerClientFromRef(endpointSys, ref, false, "pull")
+	if err != nil {
+		return nil, err
+	}
+	client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
+
+	s := &dockerImageSource{
+		ref: ref,
+		c:   client,
+	}
+
+	if err := s.ensureManifestIsLoaded(ctx); err != nil {
+		return nil, err
+	}
+	return s, nil
+}

 // Reference returns the reference used to set up this source, _as specified by the user_
@@ -156,7 +192,8 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
 	if res.StatusCode != http.StatusOK {
 		return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name())
 	}
-	manblob, err := ioutil.ReadAll(res.Body)
+
+	manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
 	if err != nil {
 		return nil, "", err
 	}
@@ -239,7 +276,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 	if err != nil {
 		return nil, 0, err
 	}
-	if err := httpResponseToError(res); err != nil {
+	if err := httpResponseToError(res, "Error fetching blob"); err != nil {
 		return nil, 0, err
 	}
 	cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
@@ -342,7 +379,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
 	} else if res.StatusCode != http.StatusOK {
 		return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode))
 	}
-	sig, err := ioutil.ReadAll(res.Body)
+	sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
 	if err != nil {
 		return nil, false, err
 	}
@@ -401,7 +438,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
 		return err
 	}
 	defer get.Body.Close()
-	manifestBody, err := ioutil.ReadAll(get.Body)
+	manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize)
 	if err != nil {
 		return err
 	}
@@ -424,7 +461,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
 	}
 	defer delete.Body.Close()

-	body, err := ioutil.ReadAll(delete.Body)
+	body, err := iolimits.ReadAtMost(delete.Body, iolimits.MaxErrorBodySize)
 	if err != nil {
 		return err
 	}
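The mirror-fallback rewrite above collects one error per endpoint and keeps the primary endpoint's typed error as the cause, folding the mirror failures into the message. A self-contained sketch of that aggregation idea (the names here are hypothetical; only the errors.Wrapf pattern is from the diff):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

type attempt struct {
	ref string
	err error
}

// combine keeps the last (primary) attempt's error as the typed cause and
// folds earlier mirror failures into the message, as in newImageSource above.
func combine(attempts []attempt) error {
	switch len(attempts) {
	case 0:
		return nil
	case 1:
		return attempts[0].err // no mirrors: preserve the error untouched
	default:
		primary := attempts[len(attempts)-1]
		extras := ""
		for _, a := range attempts[:len(attempts)-1] {
			extras += fmt.Sprintf("[%s: %v]", a.ref, a.err)
		}
		return errors.Wrapf(primary.err, "(Mirrors also failed: %s): %s", extras, primary.ref)
	}
}

func main() {
	err := combine([]attempt{
		{ref: "mirror.example/repo", err: errors.New("connection refused")},
		{ref: "registry.example/repo", err: errors.New("manifest unknown")},
	})
	fmt.Println(err)
}
```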
vendor/github.com/containers/image/v5/docker/errors.go (11 changes; generated, vendored)

@@ -14,7 +14,7 @@ var (
 	// docker V1 registry.
 	ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
 	// ErrTooManyRequests is returned when the status code returned is 429
-	ErrTooManyRequests = errors.New("too many request to registry")
+	ErrTooManyRequests = errors.New("too many requests to registry")
 )

 // ErrUnauthorizedForCredentials is returned when the status code returned is 401
@@ -26,9 +26,9 @@ func (e ErrUnauthorizedForCredentials) Error() string {
 	return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error())
 }

-// httpResponseToError translates the https.Response into an error. It returns
+// httpResponseToError translates the https.Response into an error, possibly prefixing it with the supplied context. It returns
 // nil if the response is not considered an error.
-func httpResponseToError(res *http.Response) error {
+func httpResponseToError(res *http.Response, context string) error {
 	switch res.StatusCode {
 	case http.StatusOK:
 		return nil
@@ -38,6 +38,9 @@ func httpResponseToError(res *http.Response) error {
 		err := client.HandleErrorResponse(res)
 		return ErrUnauthorizedForCredentials{Err: err}
 	default:
-		return perrors.Errorf("invalid status code from registry %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
+		if context != "" {
+			context = context + ": "
+		}
+		return perrors.Errorf("%sinvalid status code from registry %d (%s)", context, res.StatusCode, http.StatusText(res.StatusCode))
 	}
 }
vendor/github.com/containers/image/v5/docker/tarfile/dest.go (3 changes; generated, vendored)

@@ -13,6 +13,7 @@ import (
 	"time"

 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
 	"github.com/containers/image/v5/internal/tmpdir"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
@@ -143,7 +144,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 	}

 	if isConfig {
-		buf, err := ioutil.ReadAll(stream)
+		buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
 		if err != nil {
 			return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
 		}
vendor/github.com/containers/image/v5/docker/tarfile/src.go (9 changes; generated, vendored)

@@ -11,6 +11,7 @@ import (
 	"path"
 	"sync"

+	"github.com/containers/image/v5/internal/iolimits"
 	"github.com/containers/image/v5/internal/tmpdir"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/compression"
@@ -203,13 +204,13 @@ func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Heade
 }

 // readTarComponent returns full contents of componentPath.
-func (s *Source) readTarComponent(path string) ([]byte, error) {
+func (s *Source) readTarComponent(path string, limit int) ([]byte, error) {
 	file, err := s.openTarComponent(path)
 	if err != nil {
 		return nil, errors.Wrapf(err, "Error loading tar component %s", path)
 	}
 	defer file.Close()
-	bytes, err := ioutil.ReadAll(file)
+	bytes, err := iolimits.ReadAtMost(file, limit)
 	if err != nil {
 		return nil, err
 	}
@@ -240,7 +241,7 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
 	}

 	// Read and parse config.
-	configBytes, err := s.readTarComponent(tarManifest[0].Config)
+	configBytes, err := s.readTarComponent(tarManifest[0].Config, iolimits.MaxConfigBodySize)
 	if err != nil {
 		return err
 	}
@@ -266,7 +267,7 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
 // loadTarManifest loads and decodes the manifest.json.
 func (s *Source) loadTarManifest() ([]ManifestItem, error) {
 	// FIXME? Do we need to deal with the legacy format?
-	bytes, err := s.readTarComponent(manifestFileName)
+	bytes, err := s.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
 	if err != nil {
 		return nil, err
 	}
vendor/github.com/containers/image/v5/image/docker_schema2.go (4 changes; generated, vendored)

@@ -7,10 +7,10 @@ import (
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"strings"

 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/blobinfocache/none"
 	"github.com/containers/image/v5/types"
@@ -102,7 +102,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
 		return nil, err
 	}
 	defer stream.Close()
-	blob, err := ioutil.ReadAll(stream)
+	blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
 	if err != nil {
 		return nil, err
 	}
vendor/github.com/containers/image/v5/image/oci.go (4 changes; generated, vendored)

@@ -4,9 +4,9 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"

 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/blobinfocache/none"
 	"github.com/containers/image/v5/types"
@@ -67,7 +67,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
 		return nil, err
 	}
 	defer stream.Close()
-	blob, err := ioutil.ReadAll(stream)
+	blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
 	if err != nil {
 		return nil, err
 	}
vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go (60 additions; generated, vendored; new file)

@@ -0,0 +1,60 @@
+package iolimits
+
+import (
+	"io"
+	"io/ioutil"
+
+	"github.com/pkg/errors"
+)
+
+// All constants below are intended to be used as limits for `ReadAtMost`. The
+// immediate use-case for limiting the size of in-memory copied data is to
+// protect against OOM DOS attacks as described in CVE-2020-1702. Instead of
+// copying data until running out of memory, we error out after hitting the
+// specified limit.
+const (
+	// megaByte denotes one megabyte and is intended to be used as a limit in
+	// `ReadAtMost`.
+	megaByte = 1 << 20
+	// MaxManifestBodySize is the maximum allowed size of a manifest. The limit
+	// of 4 MB aligns with the one of a Docker registry:
+	// https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30
+	MaxManifestBodySize = 4 * megaByte
+	// MaxAuthTokenBodySize is the maximum allowed size of an auth token.
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxAuthTokenBodySize = megaByte
+	// MaxSignatureListBodySize is the maximum allowed size of a signature list.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxSignatureListBodySize = 4 * megaByte
+	// MaxSignatureBodySize is the maximum allowed size of a signature.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxSignatureBodySize = 4 * megaByte
+	// MaxErrorBodySize is the maximum allowed size of an error-response body.
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxErrorBodySize = megaByte
+	// MaxConfigBodySize is the maximum allowed size of a config blob.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxConfigBodySize = 4 * megaByte
+	// MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body.
+	// The limit of 4 MB is considered to be greatly sufficient.
+	MaxOpenShiftStatusBody = 4 * megaByte
+	// MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images)
+	// The limit of 1 MB is considered to be greatly sufficient.
+	MaxTarFileManifestSize = megaByte
+)
+
+// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded.
+func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
+	limitedReader := io.LimitReader(reader, int64(limit+1))
+
+	res, err := ioutil.ReadAll(limitedReader)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(res) > limit {
+		return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit)
+	}
+
+	return res, nil
+}
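The limit+1 trick in ReadAtMost is what distinguishes "input longer than the limit" from "input exactly at the limit": reading one extra byte through io.LimitReader makes overflow observable. A standalone sketch of the same technique using only the standard library:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// readAtMost reads up to limit bytes and errors out on longer input,
// mirroring iolimits.ReadAtMost above.
func readAtMost(r io.Reader, limit int) ([]byte, error) {
	// Read limit+1 bytes: getting more than limit bytes proves the input
	// was too large, while exactly limit bytes is still acceptable.
	res, err := ioutil.ReadAll(io.LimitReader(r, int64(limit+1)))
	if err != nil {
		return nil, err
	}
	if len(res) > limit {
		return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit)
	}
	return res, nil
}

func main() {
	_, err := readAtMost(strings.NewReader("0123456789"), 4)
	fmt.Println(err) // exceeded maximum allowed size of 4 bytes
}
```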
vendor/github.com/containers/image/v5/openshift/openshift.go (4 changes; generated, vendored)

@@ -7,13 +7,13 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"strings"

 	"github.com/containers/image/v5/docker"
 	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/internal/iolimits"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
 	"github.com/containers/image/v5/version"
@@ -102,7 +102,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
 		return nil, err
 	}
 	defer res.Body.Close()
-	body, err := ioutil.ReadAll(res.Body)
+	body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxOpenShiftStatusBody)
 	if err != nil {
 		return nil, err
 	}
vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go (4 changes; generated, vendored)

@@ -139,7 +139,7 @@ func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte,
 }

 // Verify parses unverifiedSignature and returns the content and the signer's identity
-func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
 	signedBuffer := bytes.Buffer{}
 	signedData, err := gpgme.NewDataWriter(&signedBuffer)
 	if err != nil {
@@ -170,6 +170,6 @@ func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []by
 // WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
 // is NOT the same as a "key identity" used in other calls ot this interface, and
 // the values may have no recognizable relationship if the public key is not available.
-func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+func (m *gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
 	return gpgUntrustedSignatureContents(untrustedSignature)
 }
vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go (2 changes; generated, vendored)

@@ -154,6 +154,6 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
 // WARNING: The short key identifier (which correponds to "Key ID" for OpenPGP keys)
 // is NOT the same as a "key identity" used in other calls ot this interface, and
 // the values may have no recognizable relationship if the public key is not available.
-func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+func (m *openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
 	return gpgUntrustedSignatureContents(untrustedSignature)
 }
vendor/github.com/containers/image/v5/types/types.go (2 changes; generated, vendored)

@@ -547,6 +547,8 @@ type SystemContext struct {
 	// Note that this field is used mainly to integrate containers/image into projectatomic/docker
 	// in order to not break any existing docker's integration tests.
 	DockerDisableV1Ping bool
+	// If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
+	DockerDisableDestSchema1MIMETypes bool
 	// Directory to use for OSTree temporary files
 	OSTreeTmpDirPath string

vendor/github.com/containers/image/v5/version/version.go (2 changes; generated, vendored)

@@ -6,7 +6,7 @@ const (
 	// VersionMajor is for an API incompatible changes
 	VersionMajor = 5
 	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 1
+	VersionMinor = 2
 	// VersionPatch is for backwards-compatible bug fixes
 	VersionPatch = 0

vendor/github.com/containers/storage/.golangci.yml (66 changes; generated, vendored)

@@ -3,37 +3,35 @@ run:
   concurrency: 6
   deadline: 5m
 linters:
-  disable-all: true
-  enable:
-    - bodyclose
-    - depguard
-    - gofmt
-    - interfacer
-    - typecheck
-    # - deadcode
-    # - dupl
-    # - errcheck
-    # - gochecknoglobals
-    # - gochecknoinits
-    # - goconst
-    # - gocritic
-    # - gocyclo
-    # - goimports
-    # - golint
-    # - gosec
-    # - gosimple
-    # - govet
-    # - ineffassign
-    # - lll
-    # - maligned
-    # - misspell
-    # - nakedret
-    # - prealloc
-    # - scopelint
-    # - staticcheck
-    # - structcheck
-    # - stylecheck
-    # - unconvert
-    # - unparam
-    # - unused
-    # - varcheck
+  enable-all: true
+  disable:
+    - dogsled
+    - dupl
+    - errcheck
+    - funlen
+    - gochecknoglobals
+    - gochecknoinits
+    - gocognit
+    - gocritic
+    - gocyclo
+    - godox
+    - gomnd
+    - gosec
+    - gosimple
+    - govet
+    - ineffassign
+    - lll
+    - maligned
+    - misspell
+    - nakedret
+    - prealloc
+    - scopelint
+    - staticcheck
+    - structcheck
+    - stylecheck
+    - unconvert
+    - unparam
+    - unused
+    - varcheck
+    - whitespace
+    - wsl
vendor/github.com/containers/storage/VERSION (2 changes; generated, vendored)

@@ -1 +1 @@
-1.15.5
+1.15.8
vendor/github.com/containers/storage/drivers/aufs/aufs.go (2 changes; generated, vendored)

@@ -35,7 +35,7 @@ import (
 	"sync"
 	"time"

-	"github.com/containers/storage/drivers"
+	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/containers/storage/pkg/directory"
vendor/github.com/containers/storage/drivers/btrfs/btrfs.go (9 changes; generated, vendored)

@@ -26,7 +26,7 @@ import (
 	"sync"
 	"unsafe"

-	"github.com/containers/storage/drivers"
+	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"
@@ -627,7 +627,12 @@ func (d *Driver) Remove(id string) error {
 	d.updateQuotaStatus()

 	if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil {
-		return err
+		if d.quotaEnabled {
+			return err
+		}
+		// If quota is not enabled, fallback to rmdir syscall to delete subvolumes.
+		// This would allow unprivileged user to delete their owned subvolumes
+		// in kernel >= 4.18 without user_subvol_rm_alowed mount option.
 	}
 	if err := system.EnsureRemoveAll(dir); err != nil {
 		return err
vendor/github.com/containers/storage/drivers/chown_unix.go (6 changes; generated, vendored)

@@ -25,14 +25,14 @@ func platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.
 			UID: uid,
 			GID: gid,
 		}
-		mappedUid, mappedGid, err := toContainer.ToContainer(pair)
+		mappedUID, mappedGID, err := toContainer.ToContainer(pair)
 		if err != nil {
 			if (uid != 0) || (gid != 0) {
 				return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err)
 			}
-			mappedUid, mappedGid = uid, gid
+			mappedUID, mappedGID = uid, gid
 		}
-		uid, gid = mappedUid, mappedGid
+		uid, gid = mappedUID, mappedGID
 	}
 	if toHost != nil {
 		pair := idtools.IDPair{
vendor/github.com/containers/storage/drivers/devmapper/deviceset.go (43 changes; generated, vendored)

@@ -18,7 +18,7 @@ import (
 	"sync"
 	"time"

-	"github.com/containers/storage/drivers"
+	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/devicemapper"
 	"github.com/containers/storage/pkg/dmesg"
 	"github.com/containers/storage/pkg/idtools"
@@ -49,8 +53,13 @@ var (
 	lvmSetupConfigForce bool
 )

-const deviceSetMetaFile string = "deviceset-metadata"
-const transactionMetaFile string = "transaction-metadata"
+const (
+	deviceSetMetaFile   = "deviceset-metadata"
+	transactionMetaFile = "transaction-metadata"
+	xfs                 = "xfs"
+	ext4                = "ext4"
+	base                = "base"
+)

 type transaction struct {
 	OpenTransactionID uint64 `json:"open_transaction_id"`
@@ -199,7 +204,7 @@ func getDevName(name string) string {
 func (info *devInfo) Name() string {
 	hash := info.Hash
 	if hash == "" {
-		hash = "base"
+		hash = base
 	}
 	return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash)
 }
@@ -219,7 +224,7 @@ func (devices *DeviceSet) metadataDir() string {
 func (devices *DeviceSet) metadataFile(info *devInfo) string {
 	file := info.Hash
 	if file == "" {
-		file = "base"
+		file = base
 	}
 	return path.Join(devices.metadataDir(), file)
 }
@@ -440,7 +445,7 @@ func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo)
 	logrus.Debugf("devmapper: Loading data for file %s", path)

 	hash := finfo.Name()
-	if hash == "base" {
+	if hash == base {
 		hash = ""
 	}

@@ -542,7 +547,7 @@ func xfsSupported() error {
 	}

 	// Check if kernel supports xfs filesystem or not.
-	exec.Command("modprobe", "xfs").Run()
+	exec.Command("modprobe", xfs).Run()

 	f, err := os.Open("/proc/filesystems")
 	if err != nil {
@@ -567,16 +572,16 @@ func xfsSupported() error {
 func determineDefaultFS() string {
 	err := xfsSupported()
 	if err == nil {
-		return "xfs"
+		return xfs
 	}

-	logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err)
-	return "ext4"
+	logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to %s filesystem", ext4, err)
+	return ext4
 }

 // mkfsOptions tries to figure out whether some additional mkfs options are required
 func mkfsOptions(fs string) []string {
-	if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) {
+	if fs == xfs && !kernel.CheckKernelVersion(3, 16, 0) {
 		// For kernels earlier than 3.16 (and newer xfsutils),
 		// some xfs features need to be explicitly disabled.
 		return []string{"-m", "crc=0,finobt=0"}
@@ -609,9 +614,9 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
 	}()

 	switch devices.filesystem {
-	case "xfs":
+	case xfs:
 		err = exec.Command("mkfs.xfs", args...).Run()
-	case "ext4":
+	case ext4:
 		err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run()
 		if err != nil {
 			err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run()
@@ -1197,7 +1202,7 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
 	}

 	options := ""
-	if devices.BaseDeviceFilesystem == "xfs" {
+	if devices.BaseDeviceFilesystem == xfs {
 		// XFS needs nouuid or it can't mount filesystems with the same fs
 		options = joinMountOptions(options, "nouuid")
 	}
@@ -1210,11 +1215,11 @@ func (devices *DeviceSet) growFS(info *devInfo) error {
 	defer unix.Unmount(fsMountPoint, unix.MNT_DETACH)

 	switch devices.BaseDeviceFilesystem {
-	case "ext4":
+	case ext4:
 		if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil {
 			return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
 		}
-	case "xfs":
+	case xfs:
 		if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil {
 			return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out))
 		}
@@ -2391,7 +2396,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.Mo

 	options := ""

-	if fstype == "xfs" {
+	if fstype == xfs {
 		// XFS needs nouuid or it can't mount filesystems with the same fs
 		options = joinMountOptions(options, "nouuid")
 	}
@@ -2412,7 +2417,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.Mo
 		return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
 	}

-	if fstype == "xfs" && devices.xfsNospaceRetries != "" {
+	if fstype == xfs && devices.xfsNospaceRetries != "" {
 		if err := devices.xfsSetNospaceRetries(info); err != nil {
 			unix.Unmount(path, unix.MNT_DETACH)
 			devices.deactivateDevice(info)
@@ -2693,7 +2698,7 @@ func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps [
 		}
 		devices.metaDataLoopbackSize = size
 	case "dm.fs":
-		if val != "ext4" && val != "xfs" {
+		if val != ext4 && val != xfs {
 			return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val)
 		}
 		devices.filesystem = val
2 vendor/github.com/containers/storage/drivers/devmapper/driver.go generated vendored
@@ -9,7 +9,7 @@ import (
 	"path"
 	"strconv"
 
-	"github.com/containers/storage/drivers"
+	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/devicemapper"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/locker"
4 vendor/github.com/containers/storage/drivers/driver.go generated vendored
@@ -49,8 +49,8 @@ type MountOpts struct {
 	// Mount label is the MAC Labels to assign to mount point (SELINUX)
 	MountLabel string
 	// UidMaps & GidMaps are the User Namespace mappings to be assigned to content in the mount point
-	UidMaps []idtools.IDMap
-	GidMaps []idtools.IDMap
+	UidMaps []idtools.IDMap // nolint: golint
+	GidMaps []idtools.IDMap // nolint: golint
 	Options []string
 }
18 vendor/github.com/containers/storage/drivers/overlay/overlay.go generated vendored
@@ -142,8 +142,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 	if opts.mountProgram == "" {
 		switch fsMagic {
 		case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
-			logrus.Errorf("'overlay' is not supported over %s", backingFs)
-			return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs)
+			return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s, a mount_program is required", backingFs)
 		}
 	}
 
@@ -402,9 +401,8 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
 		if err == nil {
 			logrus.Debugf("overlay test mount with multiple lowers succeeded")
 			return supportsDType, nil
-		} else {
-			logrus.Debugf("overlay test mount with multiple lowers failed %v", err)
 		}
+		logrus.Debugf("overlay test mount with multiple lowers failed %v", err)
 	}
 	flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir)
 	if len(flags) < unix.Getpagesize() {
@@ -412,9 +410,8 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
 		if err == nil {
 			logrus.Errorf("overlay test mount with multiple lowers failed, but succeeded with a single lower")
 			return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay")
-		} else {
-			logrus.Debugf("overlay test mount with a single lower failed %v", err)
 		}
+		logrus.Debugf("overlay test mount with a single lower failed %v", err)
 	}
 	logrus.Errorf("'overlay' is not supported over %s at %q", backingFs, home)
 	return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home)
@@ -811,15 +808,6 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		return "", err
 	}
-	readWrite := true
-
-	// fuse-overlayfs doesn't support working without an upperdir.
-	if d.options.mountProgram == "" {
-		for _, o := range options.Options {
-			if o == "ro" {
-				readWrite = false
-				break
-			}
-		}
-	}
 
 	lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
 	if err != nil && !os.IsNotExist(err) {
2 vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go generated vendored
@@ -5,7 +5,7 @@ package overlayutils
 import (
 	"fmt"
 
-	"github.com/containers/storage/drivers"
+	graphdriver "github.com/containers/storage/drivers"
 	"github.com/pkg/errors"
 )
2 vendor/github.com/containers/storage/drivers/vfs/driver.go generated vendored
@@ -8,7 +8,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/containers/storage/drivers"
+	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/parsers"
2 vendor/github.com/containers/storage/drivers/zfs/zfs.go generated vendored
@@ -12,7 +12,7 @@ import (
 	"sync"
 	"time"
 
-	"github.com/containers/storage/drivers"
+	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"
2 vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go generated vendored
@@ -1,7 +1,7 @@
 package zfs
 
 import (
-	"github.com/containers/storage/drivers"
+	graphdriver "github.com/containers/storage/drivers"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
10 vendor/github.com/containers/storage/go.mod generated vendored
@@ -7,15 +7,15 @@ require (
 	github.com/Microsoft/hcsshim v0.8.7
 	github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect
 	github.com/docker/go-units v0.4.0
-	github.com/klauspost/compress v1.9.4
+	github.com/klauspost/compress v1.9.8
 	github.com/klauspost/cpuid v1.2.1 // indirect
 	github.com/klauspost/pgzip v1.2.1
-	github.com/mattn/go-shellwords v1.0.6
+	github.com/mattn/go-shellwords v1.0.9
 	github.com/mistifyio/go-zfs v2.1.1+incompatible
 	github.com/opencontainers/go-digest v1.0.0-rc1
 	github.com/opencontainers/runc v1.0.0-rc9
-	github.com/opencontainers/selinux v1.3.0
-	github.com/pkg/errors v0.8.1
+	github.com/opencontainers/selinux v1.3.1
+	github.com/pkg/errors v0.9.1
 	github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
 	github.com/sirupsen/logrus v1.4.2
 	github.com/spf13/pflag v1.0.3 // indirect
@@ -24,7 +24,7 @@ require (
 	github.com/tchap/go-patricia v2.3.0+incompatible
 	github.com/vbatts/tar-split v0.11.1
 	golang.org/x/net v0.0.0-20190628185345-da137c7871d7
-	golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3
+	golang.org/x/sys v0.0.0-20191115151921-52ab43148777
 	gotest.tools v2.2.0+incompatible
 )
18 vendor/github.com/containers/storage/go.sum generated vendored
@@ -73,6 +73,12 @@ github.com/klauspost/compress v1.9.3 h1:hkFELABwacUEgBfiguNeQydKv3M9pawBq8o24Ypw
 github.com/klauspost/compress v1.9.3/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.9.4 h1:xhvAeUPQ2drNUhKtrGdTGNvV9nNafHMUkRyLkzxJoB4=
 github.com/klauspost/compress v1.9.4/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.5 h1:U+CaK85mrNNb4k8BNOfgJtJ/gr6kswUCFj6miSzVC6M=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug26aA=
+github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA=
+github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
 github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@@ -83,6 +89,10 @@ github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9w
 github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI=
 github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.7 h1:KqhVjVZomx2puPACkj9vrGFqnp42Htvo9SEAWePHKOs=
+github.com/mattn/go-shellwords v1.0.7/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.9 h1:eaB5JspOwiKKcHdqcjbfe5lA9cNn/4NRRtddXJCimqk=
+github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
 github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
 github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
@@ -105,9 +115,15 @@ github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4Ft
 github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
 github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=
 github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
+github.com/opencontainers/selinux v1.3.1 h1:dn2Rc3wTEvTB6iVqoFrKKeMb0uZ38ZheeyMu2h5C1TI=
+github.com/opencontainers/selinux v1.3.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.0 h1:J8lpUdobwIeCI7OiSxHqEwJUKvJwicL5+3v1oe2Yb4k=
+github.com/pkg/errors v0.9.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE=
@@ -184,6 +200,8 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1
 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339 h1:zSqWKgm/o7HAnlAzBQ+aetp9fpuyytsXnKA8eiLHYQM=
 golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777 h1:wejkGHRTr38uaKRqECZlsCsJ1/TGxIyFbH32x5zUdu4=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2 h1:/J2nHFg1MTqaRLFO7M+J78ASNsJoz3r0cvHBPQ77fsE=
 golang.org/x/sys v0.0.0-20191127021746-63cb32ae39b2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
22 vendor/github.com/containers/storage/images.go generated vendored
@@ -214,17 +214,17 @@ func bigDataNameIsManifest(name string) bool {
 
 // recomputeDigests takes a fixed digest and a name-to-digest map and builds a
 // list of the unique values that would identify the image.
-func (image *Image) recomputeDigests() error {
-	validDigests := make([]digest.Digest, 0, len(image.BigDataDigests)+1)
+func (i *Image) recomputeDigests() error {
+	validDigests := make([]digest.Digest, 0, len(i.BigDataDigests)+1)
 	digests := make(map[digest.Digest]struct{})
-	if image.Digest != "" {
-		if err := image.Digest.Validate(); err != nil {
-			return errors.Wrapf(err, "error validating image digest %q", string(image.Digest))
+	if i.Digest != "" {
+		if err := i.Digest.Validate(); err != nil {
+			return errors.Wrapf(err, "error validating image digest %q", string(i.Digest))
 		}
-		digests[image.Digest] = struct{}{}
-		validDigests = append(validDigests, image.Digest)
+		digests[i.Digest] = struct{}{}
+		validDigests = append(validDigests, i.Digest)
 	}
-	for name, digest := range image.BigDataDigests {
+	for name, digest := range i.BigDataDigests {
 		if !bigDataNameIsManifest(name) {
 			continue
 		}
@@ -237,10 +237,10 @@ func (image *Image) recomputeDigests() error {
 			validDigests = append(validDigests, digest)
 		}
 	}
-	if image.Digest == "" && len(validDigests) > 0 {
-		image.Digest = validDigests[0]
+	if i.Digest == "" && len(validDigests) > 0 {
+		i.Digest = validDigests[0]
 	}
-	image.Digests = validDigests
+	i.Digests = validDigests
 	return nil
 }
47 vendor/github.com/containers/storage/layers.go generated vendored
@@ -239,6 +239,10 @@ type LayerStore interface {
 	// ApplyDiff reads a tarstream which was created by a previous call to Diff and
 	// applies its changes to a specified layer.
 	ApplyDiff(to string, diff io.Reader) (int64, error)
+
+	// LoadLocked wraps Load in a locked state. This means it loads the store
+	// and cleans-up invalid layers if needed.
+	LoadLocked() error
 }
 
 type layerStore struct {
@@ -346,6 +350,7 @@ func (r *layerStore) Load() error {
 	r.byname = names
 	r.bycompressedsum = compressedsums
 	r.byuncompressedsum = uncompressedsums
+
 	// Load and merge information about which layers are mounted, and where.
 	if r.IsReadWrite() {
 		r.mountsLockfile.RLock()
@@ -353,22 +358,23 @@ func (r *layerStore) Load() error {
 		if err = r.loadMounts(); err != nil {
 			return err
 		}
-	}
-	// Last step: if we're writable, try to remove anything that a previous
-	// user of this storage area marked for deletion but didn't manage to
-	// actually delete.
-	if r.IsReadWrite() && r.Locked() {
-		for _, layer := range r.layers {
-			if layer.Flags == nil {
-				layer.Flags = make(map[string]interface{})
-			}
-			if cleanup, ok := layer.Flags[incompleteFlag]; ok {
-				if b, ok := cleanup.(bool); ok && b {
-					err = r.deleteInternal(layer.ID)
-					if err != nil {
-						break
+
+		// Last step: as we’re writable, try to remove anything that a previous
+		// user of this storage area marked for deletion but didn't manage to
+		// actually delete.
+		if r.Locked() {
+			for _, layer := range r.layers {
+				if layer.Flags == nil {
+					layer.Flags = make(map[string]interface{})
+				}
+				if cleanup, ok := layer.Flags[incompleteFlag]; ok {
+					if b, ok := cleanup.(bool); ok && b {
+						err = r.deleteInternal(layer.ID)
+						if err != nil {
+							break
+						}
+						shouldSave = true
 					}
-					shouldSave = true
 				}
 			}
 		}
@@ -376,9 +382,16 @@ func (r *layerStore) Load() error {
 			return r.saveLayers()
 		}
 	}
+
 	return err
 }
 
+func (r *layerStore) LoadLocked() error {
+	r.lockfile.Lock()
+	defer r.lockfile.Unlock()
+	return r.Load()
+}
+
 func (r *layerStore) loadMounts() error {
 	mounts := make(map[string]*Layer)
 	mpath := r.mountspath()
@@ -487,8 +500,6 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
 	if err != nil {
 		return nil, err
 	}
-	lockfile.Lock()
-	defer lockfile.Unlock()
 	mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock"))
 	if err != nil {
 		return nil, err
@@ -516,8 +527,6 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL
 	if err != nil {
 		return nil, err
 	}
-	lockfile.RLock()
-	defer lockfile.Unlock()
 	rlstore := layerStore{
 		lockfile:       lockfile,
 		mountsLockfile: nil,
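Note: the new LoadLocked method replaces the constructor-time locking removed from newLayerStore/newROLayerStore: callers that need a freshly reconciled view now take the lock themselves around Load. A minimal sketch of that lock-wrapping pattern, with a hypothetical store type standing in for layerStore:

package main

import (
	"fmt"
	"sync"
)

// store is a hypothetical stand-in for layerStore: state guarded by a lock,
// plus a load that must only run while the lock is held.
type store struct {
	mu     sync.Mutex
	layers []string
}

// load re-reads state; callers must hold mu (mirrors layerStore.Load).
func (s *store) load() error {
	s.layers = []string{"base", "app"} // pretend we re-read from disk
	return nil
}

// LoadLocked wraps load in lock/unlock, as the diff above does.
func (s *store) LoadLocked() error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.load()
}

func main() {
	var s store
	if err := s.LoadLocked(); err != nil {
		panic(err)
	}
	fmt.Println(s.layers)
}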
2156 vendor/github.com/containers/storage/layers_ffjson.go generated vendored
File diff suppressed because it is too large
18 vendor/github.com/containers/storage/pkg/archive/archive.go generated vendored
@@ -68,6 +68,12 @@ type (
 	}
 )
 
+const (
+	tarExt  = "tar"
+	solaris = "solaris"
+	windows = "windows"
+)
+
 // Archiver allows the reuse of most utility functions of this package with a
 // pluggable Untar function. To facilitate the passing of specific id mappings
 // for untar, an archiver can be created with maps which will then be passed to
@@ -325,15 +331,15 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi
 func (compression *Compression) Extension() string {
 	switch *compression {
 	case Uncompressed:
-		return "tar"
+		return tarExt
 	case Bzip2:
-		return "tar.bz2"
+		return tarExt + ".bz2"
 	case Gzip:
-		return "tar.gz"
+		return tarExt + ".gz"
 	case Xz:
-		return "tar.xz"
+		return tarExt + ".xz"
 	case Zstd:
-		return "tar.zst"
+		return tarExt + ".zst"
 	}
 	return ""
 }
@@ -670,7 +676,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 	}
 
 	// Lchown is not supported on Windows.
-	if Lchown && runtime.GOOS != "windows" {
+	if Lchown && runtime.GOOS != windows {
 		if chownOpts == nil {
 			chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
 		}
10 vendor/github.com/containers/storage/pkg/archive/changes_unix.go generated vendored
@@ -13,17 +13,17 @@ import (
 
 func statDifferent(oldStat *system.StatT, oldInfo *FileInfo, newStat *system.StatT, newInfo *FileInfo) bool {
 	// Don't look at size for dirs, its not a good measure of change
-	oldUid, oldGid := oldStat.UID(), oldStat.GID()
+	oldUID, oldGID := oldStat.UID(), oldStat.GID()
 	uid, gid := newStat.UID(), newStat.GID()
 	if cuid, cgid, err := newInfo.idMappings.ToContainer(idtools.IDPair{UID: int(uid), GID: int(gid)}); err == nil {
 		uid = uint32(cuid)
 		gid = uint32(cgid)
-		if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUid), GID: int(oldGid)}); err == nil {
-			oldUid = uint32(oldcuid)
-			oldGid = uint32(oldcgid)
+		if oldcuid, oldcgid, err := oldInfo.idMappings.ToContainer(idtools.IDPair{UID: int(oldUID), GID: int(oldGID)}); err == nil {
+			oldUID = uint32(oldcuid)
+			oldGID = uint32(oldcgid)
 		}
 	}
-	ownerChanged := uid != oldUid || gid != oldGid
+	ownerChanged := uid != oldUID || gid != oldGID
 	if oldStat.Mode() != newStat.Mode() ||
 		ownerChanged ||
 		oldStat.Rdev() != newStat.Rdev() ||
2 vendor/github.com/containers/storage/pkg/archive/diff.go generated vendored
@@ -68,7 +68,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 		// specific or Linux-specific, this warning should be changed to an error
 		// to cater for the situation where someone does manage to upload a Linux
 		// image but have it tagged as Windows inadvertently.
-		if runtime.GOOS == "windows" {
+		if runtime.GOOS == windows {
 			if strings.Contains(hdr.Name, ":") {
 				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
 				continue
2 vendor/github.com/containers/storage/pkg/config/config.go generated vendored
@@ -236,7 +236,7 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
 			doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries))
 		}
 
-	case "overlay":
+	case "overlay", "overlay2":
 		if options.Overlay.IgnoreChownErrors != "" {
 			doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors))
 		} else if options.IgnoreChownErrors != "" {
13 vendor/github.com/containers/storage/pkg/fileutils/fileutils.go generated vendored
@@ -226,8 +226,9 @@ func (p *Pattern) compile() error {
 
 	sl := string(os.PathSeparator)
 	escSL := sl
-	if sl == `\` {
-		escSL += `\`
+	const bs = `\`
+	if sl == bs {
+		escSL += bs
 	}
 
 	for scan.Peek() != scanner.EOF {
@@ -262,11 +263,11 @@ func (p *Pattern) compile() error {
 		} else if ch == '.' || ch == '$' {
 			// Escape some regexp special chars that have no meaning
 			// in golang's filepath.Match
-			regStr += `\` + string(ch)
+			regStr += bs + string(ch)
 		} else if ch == '\\' {
 			// escape next char. Note that a trailing \ in the pattern
 			// will be left alone (but need to escape it)
-			if sl == `\` {
+			if sl == bs {
 				// On windows map "\" to "\\", meaning an escaped backslash,
 				// and then just continue because filepath.Match on
 				// Windows doesn't allow escaping at all
@@ -274,9 +275,9 @@ func (p *Pattern) compile() error {
 				continue
 			}
 			if scan.Peek() != scanner.EOF {
-				regStr += `\` + string(scan.Next())
+				regStr += bs + string(scan.Next())
 			} else {
-				regStr += `\`
+				regStr += bs
 			}
 		} else {
 			regStr += string(ch)
10 vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go generated vendored
@@ -77,14 +77,14 @@ func createLockerForPath(path string, ro bool) (Locker, error) {
 
 // lock locks the lockfile via FCTNL(2) based on the specified type and
 // command.
-func (l *lockfile) lock(l_type int16, recursive bool) {
+func (l *lockfile) lock(lType int16, recursive bool) {
 	lk := unix.Flock_t{
-		Type:   l_type,
+		Type:   lType,
 		Whence: int16(os.SEEK_SET),
 		Start:  0,
 		Len:    0,
 	}
-	switch l_type {
+	switch lType {
 	case unix.F_RDLCK:
 		l.rwMutex.RLock()
 	case unix.F_WRLCK:
@@ -96,7 +96,7 @@ func (l *lockfile) lock(l_type int16, recursive bool) {
 			l.rwMutex.Lock()
 		}
 	default:
-		panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
+		panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType))
 	}
 	l.stateMutex.Lock()
 	defer l.stateMutex.Unlock()
@@ -116,7 +116,7 @@ func (l *lockfile) lock(l_type int16, recursive bool) {
 			time.Sleep(10 * time.Millisecond)
 		}
 	}
-	l.locktype = l_type
+	l.locktype = lType
 	l.locked = true
 	l.recursive = recursive
 	l.counter++
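Note: the lock function above pairs an in-process rwMutex with an fcntl(2) record lock, so both goroutines and separate processes serialize on the same lock file. A minimal runnable sketch of the fcntl half, using golang.org/x/sys/unix directly (the path below is illustrative):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.OpenFile("/tmp/demo.lock", os.O_CREATE|os.O_RDWR, 0o644)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// A write (exclusive) record lock over the whole file, like the
	// unix.Flock_t the diff above builds; F_RDLCK would be the shared variant.
	lk := unix.Flock_t{
		Type:   unix.F_WRLCK,
		Whence: int16(os.SEEK_SET),
		Start:  0,
		Len:    0, // 0 means "to EOF", i.e. the whole file
	}
	// F_SETLKW blocks until the lock is granted; F_SETLK would fail instead.
	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &lk); err != nil {
		panic(err)
	}
	fmt.Println("lock acquired; released on close/exit")
}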
2 vendor/github.com/containers/storage/pkg/mount/flags_linux.go generated vendored
@@ -82,6 +82,4 @@ const (
 	// it possible for the kernel to default to relatime or noatime but still
 	// allow userspace to override it.
 	STRICTATIME = unix.MS_STRICTATIME
-
-	mntDetach = unix.MNT_DETACH
 )
4 vendor/github.com/containers/storage/pkg/mount/mounter_linux.go generated vendored
@@ -13,6 +13,8 @@ const (
 
 	// broflags is the combination of bind and read only
 	broflags = unix.MS_BIND | unix.MS_RDONLY
+
+	none = "none"
 )
 
 // isremount returns true if either device name or flags identify a remount request, false otherwise.
@@ -20,7 +22,7 @@ func isremount(device string, flags uintptr) bool {
 	switch {
 	// We treat device "" and "none" as a remount request to provide compatibility with
 	// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
-	case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
+	case flags&unix.MS_REMOUNT != 0, device == "", device == none:
 		return true
 	default:
 		return false
10 vendor/github.com/containers/storage/store.go generated vendored
@@ -2783,18 +2783,24 @@ func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
 }
 
 func (s *store) Layers() ([]Layer, error) {
-	var layers []Layer
 	lstore, err := s.LayerStore()
 	if err != nil {
 		return nil, err
 	}
+	if err := lstore.LoadLocked(); err != nil {
+		return nil, err
+	}
+	layers, err := lstore.Layers()
+	if err != nil {
+		return nil, err
+	}
 
 	lstores, err := s.ROLayerStores()
 	if err != nil {
 		return nil, err
 	}
 
-	for _, s := range append([]ROLayerStore{lstore}, lstores...) {
+	for _, s := range lstores {
 		store := s
 		store.RLock()
 		defer store.Unlock()
32 vendor/github.com/containers/storage/utils.go generated vendored
@@ -69,8 +69,8 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri
 }
 
 // GetRootlessRuntimeDir returns the runtime directory when running as non root
-func GetRootlessRuntimeDir(rootlessUid int) (string, error) {
-	path, err := getRootlessRuntimeDir(rootlessUid)
+func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
+	path, err := getRootlessRuntimeDir(rootlessUID)
 	if err != nil {
 		return "", err
 	}
@@ -81,18 +81,18 @@ func GetRootlessRuntimeDir(rootlessUid int) (string, error) {
 	return path, nil
 }
 
-func getRootlessRuntimeDir(rootlessUid int) (string, error) {
+func getRootlessRuntimeDir(rootlessUID int) (string, error) {
 	runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
 
 	if runtimeDir != "" {
 		return runtimeDir, nil
 	}
-	tmpDir := fmt.Sprintf("/run/user/%d", rootlessUid)
+	tmpDir := fmt.Sprintf("/run/user/%d", rootlessUID)
 	st, err := system.Stat(tmpDir)
 	if err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0700 == 0700 && st.Mode()&0066 == 0000 {
 		return tmpDir, nil
 	}
-	tmpDir = fmt.Sprintf("%s/%d", os.TempDir(), rootlessUid)
+	tmpDir = fmt.Sprintf("%s/%d", os.TempDir(), rootlessUID)
 	if err := os.MkdirAll(tmpDir, 0700); err != nil {
 		logrus.Errorf("failed to create %s: %v", tmpDir, err)
 	} else {
@@ -111,8 +111,8 @@ func getRootlessRuntimeDir(rootlessUid int) (string, error) {
 
 // getRootlessDirInfo returns the parent path of where the storage for containers and
 // volumes will be in rootless mode
-func getRootlessDirInfo(rootlessUid int) (string, string, error) {
-	rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUid)
+func getRootlessDirInfo(rootlessUID int) (string, string, error) {
+	rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID)
 	if err != nil {
 		return "", "", err
 	}
@@ -135,10 +135,10 @@ func getRootlessDirInfo(rootlessUid int) (string, string, error) {
 }
 
 // getRootlessStorageOpts returns the storage opts for containers running as non root
-func getRootlessStorageOpts(rootlessUid int) (StoreOptions, error) {
+func getRootlessStorageOpts(rootlessUID int) (StoreOptions, error) {
 	var opts StoreOptions
 
-	dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUid)
+	dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID)
 	if err != nil {
 		return opts, err
 	}
@@ -153,10 +153,6 @@ func getRootlessStorageOpts(rootlessUid int) (StoreOptions, error) {
 	return opts, nil
 }
 
-type tomlOptionsConfig struct {
-	MountProgram string `toml:"mount_program"`
-}
-
 func getTomlStorage(storeOptions *StoreOptions) *tomlConfig {
 	config := new(tomlConfig)
 
@@ -189,21 +185,21 @@ func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {
 }
 
 // DefaultStoreOptions returns the default storage ops for containers
-func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {
+func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
 	var (
 		defaultRootlessRunRoot   string
 		defaultRootlessGraphRoot string
 		err                      error
 	)
 	storageOpts := defaultStoreOptions
-	if rootless && rootlessUid != 0 {
-		storageOpts, err = getRootlessStorageOpts(rootlessUid)
+	if rootless && rootlessUID != 0 {
+		storageOpts, err = getRootlessStorageOpts(rootlessUID)
 		if err != nil {
 			return storageOpts, err
 		}
 	}
 
-	storageConf, err := DefaultConfigFile(rootless && rootlessUid != 0)
+	storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0)
 	if err != nil {
 		return storageOpts, err
 	}
@@ -218,7 +214,7 @@ func DefaultStoreOptions(rootless bool, rootlessUid int) (StoreOptions, error) {
 		ReloadConfigurationFile(storageConf, &storageOpts)
 	}
 
-	if rootless && rootlessUid != 0 {
+	if rootless && rootlessUID != 0 {
 		if err == nil {
 			// If the file did not specify a graphroot or runroot,
 			// set sane defaults so we don't try and use root-owned
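Note: getRootlessRuntimeDir above resolves a per-user runtime directory in three steps: honor XDG_RUNTIME_DIR, then try /run/user/<uid> if it is owned by the caller with safe permissions, then fall back to a directory under the system temp dir. A stand-alone sketch of that resolution order, with simplified permission checks (not the vendored implementation):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// runtimeDirFor mirrors the fallback order in the diff above, simplified:
// XDG_RUNTIME_DIR > /run/user/<uid> (if owned by uid, mode 0700) > $TMPDIR/<uid>.
func runtimeDirFor(uid int) (string, error) {
	if dir := os.Getenv("XDG_RUNTIME_DIR"); dir != "" {
		return dir, nil
	}
	runDir := fmt.Sprintf("/run/user/%d", uid)
	if st, err := os.Stat(runDir); err == nil {
		if sys, ok := st.Sys().(*syscall.Stat_t); ok &&
			int(sys.Uid) == uid && st.Mode().Perm() == 0o700 {
			return runDir, nil
		}
	}
	tmpDir := fmt.Sprintf("%s/%d", os.TempDir(), uid)
	if err := os.MkdirAll(tmpDir, 0o700); err != nil {
		return "", err
	}
	return tmpDir, nil
}

func main() {
	dir, err := runtimeDirFor(os.Getuid())
	if err != nil {
		panic(err)
	}
	fmt.Println(dir)
}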
6 vendor/github.com/klauspost/compress/flate/deflate.go generated vendored
@@ -644,7 +644,7 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
 		d.fill = (*compressor).fillBlock
 		d.step = (*compressor).store
 	case level == ConstantCompression:
-		d.w.logReusePenalty = uint(4)
+		d.w.logNewTablePenalty = 4
 		d.window = make([]byte, maxStoreBlockSize)
 		d.fill = (*compressor).fillBlock
 		d.step = (*compressor).storeHuff
@@ -652,13 +652,13 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
 		level = 5
 		fallthrough
 	case level >= 1 && level <= 6:
-		d.w.logReusePenalty = uint(level + 1)
+		d.w.logNewTablePenalty = 6
 		d.fast = newFastEnc(level)
 		d.window = make([]byte, maxStoreBlockSize)
 		d.fill = (*compressor).fillBlock
 		d.step = (*compressor).storeFast
 	case 7 <= level && level <= 9:
-		d.w.logReusePenalty = uint(level)
+		d.w.logNewTablePenalty = 10
 		d.state = &advancedState{}
 		d.compressionLevel = levels[level]
 		d.initDeflate()
52 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go generated vendored
@@ -93,12 +93,12 @@ type huffmanBitWriter struct {
 	err error
 	lastHeader int
 	// Set between 0 (reused block can be up to 2x the size)
-	logReusePenalty uint
-	lastHuffMan     bool
-	bytes           [256]byte
-	literalFreq     [lengthCodesStart + 32]uint16
-	offsetFreq      [32]uint16
-	codegenFreq     [codegenCodeCount]uint16
+	logNewTablePenalty uint
+	lastHuffMan        bool
+	bytes              [256]byte
+	literalFreq        [lengthCodesStart + 32]uint16
+	offsetFreq         [32]uint16
+	codegenFreq        [codegenCodeCount]uint16
 
 	// codegen must have an extra space for the final symbol.
 	codegen [literalCount + offsetCodeCount + 1]uint8
@@ -119,7 +119,7 @@ type huffmanBitWriter struct {
 // If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
 //
 // An incoming block estimates the output size of a new table using a 'fresh' by calculating the
-// optimal size and adding a penalty in 'logReusePenalty'.
+// optimal size and adding a penalty in 'logNewTablePenalty'.
 // A Huffman table is not optimal, which is why we add a penalty, and generating a new table
 // is slower both for compression and decompression.
 
@@ -349,6 +349,13 @@ func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
 		int(w.codegenFreq[18])*7, numCodegens
 }
 
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
+	size = litEnc.bitLength(w.literalFreq[:]) +
+		offEnc.bitLength(w.offsetFreq[:])
+	return size
+}
+
 // dynamicSize returns the size of dynamically encoded data in bits.
 func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
 	header, numCodegens := w.headerSize()
@@ -451,12 +458,12 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n
 
 	i := 0
 	for {
-		var codeWord int = int(w.codegen[i])
+		var codeWord = uint32(w.codegen[i])
 		i++
 		if codeWord == badCode {
 			break
 		}
-		w.writeCode(w.codegenEncoding.codes[uint32(codeWord)])
+		w.writeCode(w.codegenEncoding.codes[codeWord])
 
 		switch codeWord {
 		case 16:
@@ -602,14 +609,14 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
 	var size int
 	// Check if we should reuse.
 	if w.lastHeader > 0 {
-		// Estimate size for using a new table
+		// Estimate size for using a new table.
+		// Use the previous header size as the best estimate.
 		newSize := w.lastHeader + tokens.EstimatedBits()
+		newSize += newSize >> w.logNewTablePenalty
 
 		// The estimated size is calculated as an optimal table.
 		// We add a penalty to make it more realistic and re-use a bit more.
-		newSize += newSize >> (w.logReusePenalty & 31)
-		extra := w.extraBitSize()
-		reuseSize, _ := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extra)
+		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + w.extraBitSize()
 
 		// Check if a new table is better.
 		if newSize < reuseSize {
@@ -801,21 +808,30 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	}
 
 	// Add everything as literals
-	estBits := histogramSize(input, w.literalFreq[:], !eof && !sync) + 15
+	// We have to estimate the header size.
+	// Assume header is around 70 bytes:
+	// https://stackoverflow.com/a/25454430
+	const guessHeaderSizeBits = 70 * 8
+	estBits, estExtra := histogramSize(input, w.literalFreq[:], !eof && !sync)
+	estBits += w.lastHeader + 15
+	if w.lastHeader == 0 {
+		estBits += guessHeaderSizeBits
+	}
+	estBits += estBits >> w.logNewTablePenalty
 
 	// Store bytes, if we don't get a reasonable improvement.
 	ssize, storable := w.storedSize(input)
-	if storable && ssize < (estBits+estBits>>4) {
+	if storable && ssize < estBits {
 		w.writeStoredHeader(len(input), eof)
 		w.writeBytes(input)
 		return
 	}
 
 	if w.lastHeader > 0 {
-		size, _ := w.dynamicSize(w.literalEncoding, huffOffset, w.lastHeader)
-		estBits += estBits >> (w.logReusePenalty)
+		reuseSize := w.literalEncoding.bitLength(w.literalFreq[:256])
+		estBits += estExtra
 
-		if estBits < size {
+		if estBits < reuseSize {
 			// We owe an EOB
 			w.writeCode(w.literalEncoding.codes[endBlockMarker])
 			w.lastHeader = 0
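Note: the reuse decision above compares the cost of emitting a fresh Huffman table against reusing the previous one, after inflating the fresh estimate by a right-shift penalty (a cheap "multiply by 1 + 2^-k"). A small self-contained illustration of that comparison (the bit counts below are made-up inputs, not derived from a real stream):

package main

import "fmt"

// shouldWriteNewTable mirrors the shift-based penalty in the diff above:
// the optimal-size estimate for a new table is inflated by newSize>>penalty
// before being compared against the cost of reusing the previous table.
// A larger shift means a smaller penalty, so switching tables happens sooner.
func shouldWriteNewTable(optimalNewBits, reuseBits int, logNewTablePenalty uint) bool {
	newSize := optimalNewBits + optimalNewBits>>logNewTablePenalty
	return newSize < reuseBits
}

func main() {
	// Hypothetical numbers: a new table would be optimal at 9000 bits,
	// reusing the old one costs 9500 bits.
	fmt.Println(shouldWriteNewTable(9000, 9500, 6)) // 9000+140=9140 < 9500 -> true
	fmt.Println(shouldWriteNewTable(9000, 9500, 4)) // 9000+562=9562 >= 9500 -> false
}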
76 vendor/github.com/klauspost/compress/flate/huffman_code.go generated vendored
@@ -7,7 +7,6 @@ package flate
 import (
 	"math"
 	"math/bits"
-	"sort"
 )
 
@@ -25,8 +24,6 @@ type huffmanEncoder struct {
 	codes     []hcode
 	freqcache []literalNode
 	bitCount  [17]int32
-	lns       byLiteral // stored to avoid repeated allocation in generate
-	lfs       byFreq    // stored to avoid repeated allocation in generate
 }
 
 type literalNode struct {
@@ -270,7 +267,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
 	// assigned in literal order (not frequency order).
 	chunk := list[len(list)-int(bits):]
 
-	h.lns.sort(chunk)
+	sortByLiteral(chunk)
 	for _, node := range chunk {
 		h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
 		code++
@@ -315,7 +312,7 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
 		}
 		return
 	}
-	h.lfs.sort(list)
+	sortByFreq(list)
 
 	// Get the number of literals for each bit count
 	bitCount := h.bitCounts(list, maxBits)
@@ -323,59 +320,44 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
 	h.assignEncodingAndSize(bitCount, list)
 }
 
-type byLiteral []literalNode
-
-func (s *byLiteral) sort(a []literalNode) {
-	*s = byLiteral(a)
-	sort.Sort(s)
-}
-
-func (s byLiteral) Len() int { return len(s) }
-
-func (s byLiteral) Less(i, j int) bool {
-	return s[i].literal < s[j].literal
-}
-
-func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
-type byFreq []literalNode
-
-func (s *byFreq) sort(a []literalNode) {
-	*s = byFreq(a)
-	sort.Sort(s)
-}
-
-func (s byFreq) Len() int { return len(s) }
-
-func (s byFreq) Less(i, j int) bool {
-	if s[i].freq == s[j].freq {
-		return s[i].literal < s[j].literal
+func atLeastOne(v float32) float32 {
+	if v < 1 {
+		return 1
 	}
-	return s[i].freq < s[j].freq
+	return v
 }
 
-func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-
 // histogramSize accumulates a histogram of b in h.
 // An estimated size in bits is returned.
 // Unassigned values are assigned '1' in the histogram.
 // len(h) must be >= 256, and h's elements must be all zeroes.
-func histogramSize(b []byte, h []uint16, fill bool) int {
+func histogramSize(b []byte, h []uint16, fill bool) (int, int) {
 	h = h[:256]
 	for _, t := range b {
 		h[t]++
 	}
-	invTotal := 1.0 / float64(len(b))
-	shannon := 0.0
-	single := math.Ceil(-math.Log2(invTotal))
-	for i, v := range h[:] {
-		if v > 0 {
-			n := float64(v)
-			shannon += math.Ceil(-math.Log2(n*invTotal) * n)
-		} else if fill {
-			shannon += single
-			h[i] = 1
+	invTotal := 1.0 / float32(len(b))
+	shannon := float32(0.0)
+	var extra float32
+	if fill {
+		oneBits := atLeastOne(-mFastLog2(invTotal))
+		for i, v := range h[:] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+			} else {
+				h[i] = 1
+				extra += oneBits
+			}
+		}
+	} else {
+		for _, v := range h[:] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+			}
 		}
 	}
-	return int(shannon + 0.99)
+
+	return int(shannon + 0.99), int(extra + 0.99)
 }
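Note: histogramSize above is a Shannon-entropy lower bound on the encoded size: each byte value occurring n times out of N contributes n*(-log2(n/N)) bits, clamped to at least one bit per symbol. A compact stand-alone version using the standard library's math.Log2 (the vendored code swaps this for mFastLog2, a float32 fast approximation, for speed):

package main

import (
	"fmt"
	"math"
)

// entropyBits estimates the optimal encoded size of b in bits, the way
// histogramSize does, but with math.Log2 instead of the fast approximation.
func entropyBits(b []byte) int {
	var hist [256]int
	for _, t := range b {
		hist[t]++
	}
	invTotal := 1.0 / float64(len(b))
	bits := 0.0
	for _, n := range hist {
		if n > 0 {
			cost := -math.Log2(float64(n) * invTotal)
			if cost < 1 { // a symbol can never code in less than 1 bit
				cost = 1
			}
			bits += cost * float64(n)
		}
	}
	return int(bits + 0.99)
}

func main() {
	fmt.Println(entropyBits([]byte("aaaaaaaab")))          // skewed: few bits per byte
	fmt.Println(entropyBits([]byte("abcdefghijklmnopqr"))) // uniform: ~4.2 bits per byte
}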
178 vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go generated vendored Normal file
@@ -0,0 +1,178 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// Sort sorts data.
+// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
+// data.Less and data.Swap. The sort is not guaranteed to be stable.
+func sortByFreq(data []literalNode) {
+	n := len(data)
+	quickSortByFreq(data, 0, n, maxDepth(n))
+}
+
+func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
+	for b-a > 12 { // Use ShellSort for slices <= 12 elements
+		if maxDepth == 0 {
+			heapSort(data, a, b)
+			return
+		}
+		maxDepth--
+		mlo, mhi := doPivotByFreq(data, a, b)
+		// Avoiding recursion on the larger subproblem guarantees
+		// a stack depth of at most lg(b-a).
+		if mlo-a < b-mhi {
+			quickSortByFreq(data, a, mlo, maxDepth)
+			a = mhi // i.e., quickSortByFreq(data, mhi, b)
+		} else {
+			quickSortByFreq(data, mhi, b, maxDepth)
+			b = mlo // i.e., quickSortByFreq(data, a, mlo)
+		}
+	}
+	if b-a > 1 {
+		// Do ShellSort pass with gap 6
+		// It could be written in this simplified form cause b-a <= 12
+		for i := a + 6; i < b; i++ {
+			if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
+				data[i], data[i-6] = data[i-6], data[i]
+			}
+		}
+		insertionSortByFreq(data, a, b)
+	}
+}
+
+// siftDownByFreq implements the heap property on data[lo, hi).
+// first is an offset into the array where the root of the heap lies.
+func siftDownByFreq(data []literalNode, lo, hi, first int) {
+	root := lo
+	for {
+		child := 2*root + 1
+		if child >= hi {
+			break
+		}
+		if child+1 < hi && (data[first+child].freq == data[first+child+1].freq && data[first+child].literal < data[first+child+1].literal || data[first+child].freq < data[first+child+1].freq) {
+			child++
+		}
+		if data[first+root].freq == data[first+child].freq && data[first+root].literal > data[first+child].literal || data[first+root].freq > data[first+child].freq {
+			return
+		}
+		data[first+root], data[first+child] = data[first+child], data[first+root]
+		root = child
+	}
+}
+func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
+	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+	if hi-lo > 40 {
+		// Tukey's ``Ninther,'' median of three medians of three.
+		s := (hi - lo) / 8
+		medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
+		medianOfThreeSortByFreq(data, m, m-s, m+s)
+		medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
+	}
+	medianOfThreeSortByFreq(data, lo, m, hi-1)
+
+	// Invariants are:
+	//	data[lo] = pivot (set up by ChoosePivot)
+	//	data[lo < i < a] < pivot
+	//	data[a <= i < b] <= pivot
+	//	data[b <= i < c] unexamined
+	//	data[c <= i < hi-1] > pivot
+	//	data[hi-1] >= pivot
+	pivot := lo
+	a, c := lo+1, hi-1
+
+	for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
+	}
+	b := a
+	for {
+		for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
+		}
+		for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
+		}
+		if b >= c {
+			break
+		}
+		// data[b] > pivot; data[c-1] <= pivot
+		data[b], data[c-1] = data[c-1], data[b]
+		b++
+		c--
+	}
+	// If hi-c<3 then there are duplicates (by property of median of nine).
+	// Let's be a bit more conservative, and set border to 5.
+	protect := hi-c < 5
+	if !protect && hi-c < (hi-lo)/4 {
+		// Lets test some points for equality to pivot
+		dups := 0
+		if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
+			data[c], data[hi-1] = data[hi-1], data[c]
+			c++
+			dups++
+		}
+		if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
+			b--
+			dups++
+		}
+		// m-lo = (hi-lo)/2 > 6
+		// b-lo > (hi-lo)*3/4-1 > 8
+		// ==> m < b ==> data[m] <= pivot
+		if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
+			data[m], data[b-1] = data[b-1], data[m]
+			b--
+			dups++
+		}
+		// if at least 2 points are equal to pivot, assume skewed distribution
+		protect = dups > 1
+	}
+	if protect {
+		// Protect against a lot of duplicates
+		// Add invariant:
+		//	data[a <= i < b] unexamined
+		//	data[b <= i < c] = pivot
+		for {
+			for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
+			}
+			for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
+			}
+			if a >= b {
+				break
+			}
+			// data[a] == pivot; data[b-1] < pivot
+			data[a], data[b-1] = data[b-1], data[a]
+			a++
+			b--
+		}
+	}
+	// Swap pivot into middle
+	data[pivot], data[b-1] = data[b-1], data[pivot]
+	return b - 1, c
+}
+
+// Insertion sort
+func insertionSortByFreq(data []literalNode, a, b int) {
+	for i := a + 1; i < b; i++ {
+		for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
+			data[j], data[j-1] = data[j-1], data[j]
+		}
+	}
+}
+
+// quickSortByFreq, loosely following Bentley and McIlroy,
+// ``Engineering a Sort Function,'' SP&E November 1993.
+
+// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
+	// sort 3 elements
+	if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+		data[m1], data[m0] = data[m0], data[m1]
+	}
+	// data[m0] <= data[m1]
+	if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
+		data[m2], data[m1] = data[m1], data[m2]
+		// data[m0] <= data[m2] && data[m1] < data[m2]
+		if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+			data[m1], data[m0] = data[m0], data[m1]
+		}
+	}
+	// now data[m0] <= data[m1] <= data[m2]
+}
201
vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
generated
vendored
Normal file
201
vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flate
|
||||
|
||||
// Sort sorts data.
|
||||
// It makes one call to data.Len to determine n, and O(n*log(n)) calls to
|
||||
// data.Less and data.Swap. The sort is not guaranteed to be stable.
|
||||
func sortByLiteral(data []literalNode) {
|
||||
n := len(data)
|
||||
quickSort(data, 0, n, maxDepth(n))
|
||||
}
|
||||
|
||||
func quickSort(data []literalNode, a, b, maxDepth int) {
|
||||
for b-a > 12 { // Use ShellSort for slices <= 12 elements
|
||||
if maxDepth == 0 {
|
||||
heapSort(data, a, b)
|
||||
return
|
||||
}
|
||||
maxDepth--
|
||||
mlo, mhi := doPivot(data, a, b)
|
||||
// Avoiding recursion on the larger subproblem guarantees
|
||||
// a stack depth of at most lg(b-a).
|
||||
if mlo-a < b-mhi {
|
||||
quickSort(data, a, mlo, maxDepth)
|
||||
a = mhi // i.e., quickSort(data, mhi, b)
|
||||
} else {
|
||||
quickSort(data, mhi, b, maxDepth)
|
||||
b = mlo // i.e., quickSort(data, a, mlo)
|
||||
}
|
||||
}
|
||||
if b-a > 1 {
|
||||
// Do ShellSort pass with gap 6
|
||||
// It could be written in this simplified form cause b-a <= 12
|
||||
for i := a + 6; i < b; i++ {
|
||||
if data[i].literal < data[i-6].literal {
|
||||
data[i], data[i-6] = data[i-6], data[i]
|
||||
}
|
||||
}
|
||||
insertionSort(data, a, b)
|
||||
}
|
||||
}
|
||||
func heapSort(data []literalNode, a, b int) {
|
||||
first := a
|
||||
lo := 0
|
||||
hi := b - a
|
||||
|
||||
// Build heap with greatest element at top.
|
||||
for i := (hi - 1) / 2; i >= 0; i-- {
|
||||
siftDown(data, i, hi, first)
|
||||
}
|
||||
|
||||
// Pop elements, largest first, into end of data.
|
||||
for i := hi - 1; i >= 0; i-- {
|
||||
data[first], data[first+i] = data[first+i], data[first]
|
||||
siftDown(data, lo, i, first)
|
||||
}
|
||||
}
|
||||
|
||||
// siftDown implements the heap property on data[lo, hi).
|
||||
// first is an offset into the array where the root of the heap lies.
|
||||
func siftDown(data []literalNode, lo, hi, first int) {
|
||||
root := lo
|
||||
for {
|
||||
child := 2*root + 1
|
||||
if child >= hi {
|
||||
break
|
||||
}
|
||||
if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
|
||||
child++
|
||||
}
|
||||
if data[first+root].literal > data[first+child].literal {
|
||||
return
|
||||
}
|
||||
data[first+root], data[first+child] = data[first+child], data[first+root]
|
||||
root = child
|
||||
}
|
||||
}
|
||||
func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
|
||||
m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
|
||||
if hi-lo > 40 {
|
||||
// Tukey's ``Ninther,'' median of three medians of three.
|
||||
s := (hi - lo) / 8
|
||||
medianOfThree(data, lo, lo+s, lo+2*s)
|
||||
medianOfThree(data, m, m-s, m+s)
|
||||
medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
|
||||
}
|
||||
medianOfThree(data, lo, m, hi-1)
|
||||
|
||||
// Invariants are:
|
||||
// data[lo] = pivot (set up by ChoosePivot)
|
||||
// data[lo < i < a] < pivot
|
||||
// data[a <= i < b] <= pivot
|
||||
// data[b <= i < c] unexamined
|
||||
// data[c <= i < hi-1] > pivot
|
||||
// data[hi-1] >= pivot
|
||||
pivot := lo
|
||||
a, c := lo+1, hi-1
|
||||
|
||||
for ; a < c && data[a].literal < data[pivot].literal; a++ {
|
||||
}
|
||||
b := a
|
||||
for {
|
||||
for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
|
||||
}
|
||||
for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
|
||||
}
|
||||
if b >= c {
|
||||
break
|
||||
}
|
||||
// data[b] > pivot; data[c-1] <= pivot
|
||||
data[b], data[c-1] = data[c-1], data[b]
|
||||
b++
|
||||
c--
|
||||
}
|
||||
// If hi-c<3 then there are duplicates (by property of median of nine).
|
||||
// Let's be a bit more conservative, and set border to 5.
|
||||
protect := hi-c < 5
|
||||
if !protect && hi-c < (hi-lo)/4 {
|
||||
// Lets test some points for equality to pivot
|
||||
dups := 0
|
||||
if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
|
||||
data[c], data[hi-1] = data[hi-1], data[c]
|
||||
c++
|
||||
dups++
|
||||
}
|
||||
if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
|
||||
b--
|
||||
dups++
|
||||
}
|
||||
// m-lo = (hi-lo)/2 > 6
|
||||
// b-lo > (hi-lo)*3/4-1 > 8
|
||||
// ==> m < b ==> data[m] <= pivot
|
||||
if data[m].literal > data[pivot].literal { // data[m] = pivot
|
||||
data[m], data[b-1] = data[b-1], data[m]
|
||||
b--
|
||||
dups++
|
||||
}
|
||||
// if at least 2 points are equal to pivot, assume skewed distribution
|
||||
protect = dups > 1
|
||||
}
|
||||
if protect {
|
||||
// Protect against a lot of duplicates
|
||||
// Add invariant:
|
||||
// data[a <= i < b] unexamined
|
||||
// data[b <= i < c] = pivot
|
||||
for {
|
||||
for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
|
||||
}
|
||||
for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
|
||||
}
|
||||
if a >= b {
|
||||
break
|
||||
}
|
||||
// data[a] == pivot; data[b-1] < pivot
|
||||
data[a], data[b-1] = data[b-1], data[a]
|
||||
a++
|
||||
b--
|
||||
}
|
||||
}
|
||||
// Swap pivot into middle
|
||||
data[pivot], data[b-1] = data[b-1], data[pivot]
|
||||
return b - 1, c
|
||||
}
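The ninther above samples nine elements in three groups of three to pick a robust pivot for large ranges. A minimal standalone sketch of the same selection on a plain int slice (the ninther/med3 helpers are ours, for illustration only):

func ninther(data []int, lo, hi int) int {
    m := int(uint(lo+hi) >> 1)
    s := (hi - lo) / 8
    // med3 returns the index holding the median of the three values.
    med3 := func(a, b, c int) int {
        if data[a] > data[b] {
            a, b = b, a
        }
        if data[b] > data[c] {
            b = c
            if data[a] > data[b] {
                b = a
            }
        }
        return b
    }
    return med3(med3(lo, lo+s, lo+2*s), med3(m-s, m, m+s), med3(hi-1-2*s, hi-1-s, hi-1))
}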
|
||||
|
||||
// Insertion sort
|
||||
func insertionSort(data []literalNode, a, b int) {
|
||||
for i := a + 1; i < b; i++ {
|
||||
for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
|
||||
data[j], data[j-1] = data[j-1], data[j]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// maxDepth returns a threshold at which quicksort should switch
|
||||
// to heapsort. It returns 2*ceil(lg(n+1)).
|
||||
func maxDepth(n int) int {
|
||||
var depth int
|
||||
for i := n; i > 0; i >>= 1 {
|
||||
depth++
|
||||
}
|
||||
return depth * 2
|
||||
}
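The loop counts the bits of n, i.e. floor(log2(n))+1 = ceil(log2(n+1)) for n > 0, so the doubling yields the documented 2*ceil(lg(n+1)). A throwaway check (our own snippet, assumes fmt is imported):

for _, n := range []int{1, 2, 7, 8, 1 << 20} {
    fmt.Println(n, maxDepth(n)) // 1→2, 2→4, 7→6, 8→8, 1<<20→42
}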
|
||||
|
||||
// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
|
||||
func medianOfThree(data []literalNode, m1, m0, m2 int) {
|
||||
// sort 3 elements
|
||||
if data[m1].literal < data[m0].literal {
|
||||
data[m1], data[m0] = data[m0], data[m1]
|
||||
}
|
||||
// data[m0] <= data[m1]
|
||||
if data[m2].literal < data[m1].literal {
|
||||
data[m2], data[m1] = data[m1], data[m2]
|
||||
// data[m0] <= data[m2] && data[m1] < data[m2]
|
||||
if data[m1].literal < data[m0].literal {
|
||||
data[m1], data[m0] = data[m0], data[m1]
|
||||
}
|
||||
}
|
||||
// now data[m0] <= data[m1] <= data[m2]
|
||||
}
|
42
vendor/github.com/klauspost/compress/flate/token.go
generated
vendored
@ -184,9 +184,7 @@ func (t *tokens) indexTokens(in []token) {
|
||||
t.Reset()
|
||||
for _, tok := range in {
|
||||
if tok < matchType {
|
||||
t.tokens[t.n] = tok
|
||||
t.litHist[tok]++
|
||||
t.n++
|
||||
t.AddLiteral(tok.literal())
|
||||
continue
|
||||
}
|
||||
t.AddMatch(uint32(tok.length()), tok.offset())
|
||||
@ -211,43 +209,53 @@ func (t *tokens) AddLiteral(lit byte) {
|
||||
t.nLits++
|
||||
}
|
||||
|
||||
// from https://stackoverflow.com/a/28730362
|
||||
func mFastLog2(val float32) float32 {
|
||||
ux := int32(math.Float32bits(val))
|
||||
log2 := (float32)(((ux >> 23) & 255) - 128)
|
||||
ux &= -0x7f800001
|
||||
ux += 127 << 23
|
||||
uval := math.Float32frombits(uint32(ux))
|
||||
log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
|
||||
return log2
|
||||
}
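mFastLog2 extracts the exponent from the float bit pattern and corrects the mantissa with a small polynomial; it is only an approximation, which is acceptable because callers use it for a size estimate, not for encoding. One way to eyeball the error (our own snippet, assumes fmt and math are imported):

for _, v := range []float32{0.001, 0.5, 3, 1000} {
    fmt.Printf("%g: fast=%.4f exact=%.4f\n", v, mFastLog2(v), math.Log2(float64(v)))
}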
|
||||
|
||||
// EstimatedBits will return a minimum size estimated by an *optimal*
// compression of the block.
|
||||
func (t *tokens) EstimatedBits() int {
|
||||
shannon := float64(0)
|
||||
shannon := float32(0)
|
||||
bits := int(0)
|
||||
nMatches := 0
|
||||
if t.nLits > 0 {
|
||||
invTotal := 1.0 / float64(t.nLits)
|
||||
invTotal := 1.0 / float32(t.nLits)
|
||||
for _, v := range t.litHist[:] {
|
||||
if v > 0 {
|
||||
n := float64(v)
|
||||
shannon += math.Ceil(-math.Log2(n*invTotal) * n)
|
||||
n := float32(v)
|
||||
shannon += -mFastLog2(n*invTotal) * n
|
||||
}
|
||||
}
|
||||
// Just add 15 for EOB
|
||||
shannon += 15
|
||||
for _, v := range t.extraHist[1 : literalCount-256] {
|
||||
for i, v := range t.extraHist[1 : literalCount-256] {
|
||||
if v > 0 {
|
||||
n := float64(v)
|
||||
shannon += math.Ceil(-math.Log2(n*invTotal) * n)
|
||||
bits += int(lengthExtraBits[v&31]) * int(v)
|
||||
n := float32(v)
|
||||
shannon += -mFastLog2(n*invTotal) * n
|
||||
bits += int(lengthExtraBits[i&31]) * int(v)
|
||||
nMatches += int(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
if nMatches > 0 {
|
||||
invTotal := 1.0 / float64(nMatches)
|
||||
for _, v := range t.offHist[:offsetCodeCount] {
|
||||
invTotal := 1.0 / float32(nMatches)
|
||||
for i, v := range t.offHist[:offsetCodeCount] {
|
||||
if v > 0 {
|
||||
n := float64(v)
|
||||
shannon += math.Ceil(-math.Log2(n*invTotal) * n)
|
||||
bits += int(offsetExtraBits[v&31]) * int(n)
|
||||
n := float32(v)
|
||||
shannon += -mFastLog2(n*invTotal) * n
|
||||
bits += int(offsetExtraBits[i&31]) * int(v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return int(shannon) + bits
|
||||
}
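The sum computed here is the Shannon limit of the histograms: an ideal entropy coder spends about -log2(count/total) bits per occurrence of each symbol. Dropping math.Ceil and moving to float32 with the fast log trades a little precision for speed. The underlying formula as a self-contained sketch (exact float64 version, assumes math is imported):

// shannonBits returns the information content of a histogram in bits,
// i.e. the size an ideal entropy coder could approach.
func shannonBits(hist []int) float64 {
    total := 0
    for _, v := range hist {
        total += v
    }
    bits := 0.0
    for _, v := range hist {
        if v > 0 {
            p := float64(v) / float64(total)
            bits -= math.Log2(p) * float64(v)
        }
    }
    return bits
}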
|
||||
|
||||
|
13
vendor/github.com/klauspost/compress/huff0/bitwriter.go
generated
vendored
@ -38,7 +38,7 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
|
||||
b.nBits += bits
|
||||
}
|
||||
|
||||
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
|
||||
// encSymbol will add up to 16 bits. value may not contain more set bits than indicated.
|
||||
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
|
||||
func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
|
||||
enc := ct[symbol]
|
||||
@ -46,6 +46,17 @@ func (b *bitWriter) encSymbol(ct cTable, symbol byte) {
|
||||
b.nBits += enc.nBits
|
||||
}
|
||||
|
||||
// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated.
|
||||
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
|
||||
func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
|
||||
encA := ct[av]
|
||||
encB := ct[bv]
|
||||
sh := b.nBits & 63
|
||||
combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63))
|
||||
b.bitContainer |= combined << sh
|
||||
b.nBits += encA.nBits + encB.nBits
|
||||
}
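Since every code in cTable is at most tableLogMax bits, two codes always fit together in the 64-bit container, so the pair is committed with a single shift/OR instead of two separate writes. Schematically (values invented for illustration):

// codeA = 0b101 (3 bits), codeB = 0b01 (2 bits)
// combined = codeA | codeB<<3 = 0b01101 -> written once as 5 bits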
|
||||
|
||||
// addBits16ZeroNC will add up to 16 bits.
|
||||
// It will not check if there is space for them,
|
||||
// so the caller must ensure that it has flushed recently.
|
||||
|
70
vendor/github.com/klauspost/compress/huff0/compress.go
generated
vendored
@ -80,9 +80,12 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
|
||||
|
||||
if s.Reuse == ReusePolicyPrefer && canReuse {
|
||||
keepTable := s.cTable
|
||||
keepTL := s.actualTableLog
|
||||
s.cTable = s.prevTable
|
||||
s.actualTableLog = s.prevTableLog
|
||||
s.Out, err = compressor(in)
|
||||
s.cTable = keepTable
|
||||
s.actualTableLog = keepTL
|
||||
if err == nil && len(s.Out) < wantSize {
|
||||
s.OutData = s.Out
|
||||
return s.Out, true, nil
|
||||
@ -92,7 +95,6 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
|
||||
}
|
||||
|
||||
// Calculate new table.
|
||||
s.optimalTableLog()
|
||||
err = s.buildCTable()
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
@ -109,9 +111,15 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
|
||||
if oldSize <= hSize+newSize || hSize+12 >= wantSize {
|
||||
// Retain cTable even if we re-use.
|
||||
keepTable := s.cTable
|
||||
keepTL := s.actualTableLog
|
||||
|
||||
s.cTable = s.prevTable
|
||||
s.actualTableLog = s.prevTableLog
|
||||
s.Out, err = compressor(in)
|
||||
|
||||
// Restore ctable.
|
||||
s.cTable = keepTable
|
||||
s.actualTableLog = keepTL
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
@ -142,7 +150,7 @@ func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)
|
||||
return nil, false, ErrIncompressible
|
||||
}
|
||||
// Move current table into previous.
|
||||
s.prevTable, s.cTable = s.cTable, s.prevTable[:0]
|
||||
s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0]
|
||||
s.OutData = s.Out[len(s.OutTable):]
|
||||
return s.Out, false, nil
|
||||
}
|
||||
@ -163,28 +171,23 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
|
||||
for i := len(src) & 3; i > 0; i-- {
|
||||
bw.encSymbol(cTable, src[n+i-1])
|
||||
}
|
||||
n -= 4
|
||||
if s.actualTableLog <= 8 {
|
||||
n -= 4
|
||||
for ; n >= 0; n -= 4 {
|
||||
tmp := src[n : n+4]
|
||||
// tmp should be len 4
|
||||
bw.flush32()
|
||||
bw.encSymbol(cTable, tmp[3])
|
||||
bw.encSymbol(cTable, tmp[2])
|
||||
bw.encSymbol(cTable, tmp[1])
|
||||
bw.encSymbol(cTable, tmp[0])
|
||||
bw.encTwoSymbols(cTable, tmp[3], tmp[2])
|
||||
bw.encTwoSymbols(cTable, tmp[1], tmp[0])
|
||||
}
|
||||
} else {
|
||||
n -= 4
|
||||
for ; n >= 0; n -= 4 {
|
||||
tmp := src[n : n+4]
|
||||
// tmp should be len 4
|
||||
bw.flush32()
|
||||
bw.encSymbol(cTable, tmp[3])
|
||||
bw.encSymbol(cTable, tmp[2])
|
||||
bw.encTwoSymbols(cTable, tmp[3], tmp[2])
|
||||
bw.flush32()
|
||||
bw.encSymbol(cTable, tmp[1])
|
||||
bw.encSymbol(cTable, tmp[0])
|
||||
bw.encTwoSymbols(cTable, tmp[1], tmp[0])
|
||||
}
|
||||
}
|
||||
err := bw.close()
|
||||
@ -322,9 +325,26 @@ func (s *Scratch) canUseTable(c cTable) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Scratch) validateTable(c cTable) bool {
|
||||
if len(c) < int(s.symbolLen) {
|
||||
return false
|
||||
}
|
||||
for i, v := range s.count[:s.symbolLen] {
|
||||
if v != 0 {
|
||||
if c[i].nBits == 0 {
|
||||
return false
|
||||
}
|
||||
if c[i].nBits > s.actualTableLog {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// minTableLog provides the minimum logSize to safely represent a distribution.
|
||||
func (s *Scratch) minTableLog() uint8 {
|
||||
minBitsSrc := highBit32(uint32(s.br.remain()-1)) + 1
|
||||
minBitsSrc := highBit32(uint32(s.br.remain())) + 1
|
||||
minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
|
||||
if minBitsSrc < minBitsSymbols {
|
||||
return uint8(minBitsSrc)
|
||||
@ -336,7 +356,7 @@ func (s *Scratch) minTableLog() uint8 {
|
||||
func (s *Scratch) optimalTableLog() {
|
||||
tableLog := s.TableLog
|
||||
minBits := s.minTableLog()
|
||||
maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 2
|
||||
maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
|
||||
if maxBitsSrc < tableLog {
|
||||
// Accuracy can be reduced
|
||||
tableLog = maxBitsSrc
|
||||
@ -363,6 +383,7 @@ type cTableEntry struct {
|
||||
const huffNodesMask = huffNodesLen - 1
|
||||
|
||||
func (s *Scratch) buildCTable() error {
|
||||
s.optimalTableLog()
|
||||
s.huffSort()
|
||||
if cap(s.cTable) < maxSymbolValue+1 {
|
||||
s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1)
|
||||
@ -439,7 +460,7 @@ func (s *Scratch) buildCTable() error {
|
||||
return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax)
|
||||
}
|
||||
var nbPerRank [tableLogMax + 1]uint16
|
||||
var valPerRank [tableLogMax + 1]uint16
|
||||
var valPerRank [16]uint16
|
||||
for _, v := range huffNode[:nonNullRank+1] {
|
||||
nbPerRank[v.nbBits]++
|
||||
}
|
||||
@ -455,16 +476,17 @@ func (s *Scratch) buildCTable() error {
|
||||
}
|
||||
|
||||
// push nbBits per symbol, symbol order
|
||||
// TODO: changed `s.symbolLen` -> `nonNullRank+1` (micro-opt)
|
||||
for _, v := range huffNode[:nonNullRank+1] {
|
||||
s.cTable[v.symbol].nBits = v.nbBits
|
||||
}
|
||||
|
||||
// assign value within rank, symbol order
|
||||
for n, val := range s.cTable[:s.symbolLen] {
|
||||
v := valPerRank[val.nBits]
|
||||
s.cTable[n].val = v
|
||||
valPerRank[val.nBits] = v + 1
|
||||
t := s.cTable[:s.symbolLen]
|
||||
for n, val := range t {
|
||||
nbits := val.nBits & 15
|
||||
v := valPerRank[nbits]
|
||||
t[n].val = v
|
||||
valPerRank[nbits] = v + 1
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -488,10 +510,12 @@ func (s *Scratch) huffSort() {
|
||||
r := highBit32(v+1) & 31
|
||||
rank[r].base++
|
||||
}
|
||||
for n := 30; n > 0; n-- {
|
||||
// maxBitLength is log2(BlockSizeMax) + 1
|
||||
const maxBitLength = 18 + 1
|
||||
for n := maxBitLength; n > 0; n-- {
|
||||
rank[n-1].base += rank[n].base
|
||||
}
|
||||
for n := range rank[:] {
|
||||
for n := range rank[:maxBitLength] {
|
||||
rank[n].current = rank[n].base
|
||||
}
|
||||
for n, c := range s.count[:s.symbolLen] {
|
||||
@ -510,7 +534,7 @@ func (s *Scratch) huffSort() {
|
||||
}
|
||||
|
||||
func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
|
||||
maxNbBits := s.TableLog
|
||||
maxNbBits := s.actualTableLog
|
||||
huffNode := s.nodes[1 : huffNodesLen+1]
|
||||
//huffNode = huffNode[: huffNodesLen]
|
||||
|
||||
|
7
vendor/github.com/klauspost/compress/huff0/huff0.go
generated
vendored
@ -83,7 +83,7 @@ type Scratch struct {
|
||||
MaxSymbolValue uint8
|
||||
|
||||
// TableLog will attempt to override the tablelog for the next block.
|
||||
// Must be <= 11.
|
||||
// Must be <= 11 and >= 5.
|
||||
TableLog uint8
|
||||
|
||||
// Reuse will specify the reuse policy
|
||||
@ -105,6 +105,7 @@ type Scratch struct {
|
||||
maxCount int // count of the most probable symbol
|
||||
clearCount bool // clear count
|
||||
actualTableLog uint8 // Selected tablelog.
|
||||
prevTableLog uint8 // Tablelog for previous table
|
||||
prevTable cTable // Table used for previous compression.
|
||||
cTable cTable // compression table
|
||||
dt dTable // decompression table
|
||||
@ -127,8 +128,8 @@ func (s *Scratch) prepare(in []byte) (*Scratch, error) {
|
||||
if s.TableLog == 0 {
|
||||
s.TableLog = tableLogDefault
|
||||
}
|
||||
if s.TableLog > tableLogMax {
|
||||
return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, tableLogMax)
|
||||
if s.TableLog > tableLogMax || s.TableLog < minTablelog {
|
||||
return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax)
|
||||
}
|
||||
if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax {
|
||||
s.MaxDecodedSize = BlockSizeMax
|
||||
|
4
vendor/github.com/klauspost/compress/zstd/README.md
generated
vendored
@ -36,7 +36,7 @@ so as always, testing is recommended.
|
||||
For now, a high speed (fastest) and a medium-fast (default) compressor have been implemented.
|
||||
|
||||
The "Fastest" compression ratio is roughly equivalent to zstd level 1.
|
||||
The "Default" compression ration is roughly equivalent to zstd level 3 (default).
|
||||
The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
|
||||
|
||||
In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode.
|
||||
The compression ratio compared to stdlib is around level 3, but usually 3x as fast.
|
||||
@ -390,4 +390,4 @@ For sending files for reproducing errors use a service like [goobox](https://goo
|
||||
|
||||
For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan).
|
||||
|
||||
This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
|
||||
This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare.
|
||||
|
33
vendor/github.com/klauspost/compress/zstd/blockenc.go
generated
vendored
@ -299,6 +299,20 @@ func (b *blockEnc) encodeRaw(a []byte) {
|
||||
}
|
||||
}
|
||||
|
||||
// encodeRawTo can be used to append a raw representation of the supplied bytes to dst.
|
||||
func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {
|
||||
var bh blockHeader
|
||||
bh.setLast(b.last)
|
||||
bh.setSize(uint32(len(src)))
|
||||
bh.setType(blockTypeRaw)
|
||||
dst = bh.appendTo(dst)
|
||||
dst = append(dst, src...)
|
||||
if debug {
|
||||
println("Adding RAW block, length", len(src))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// encodeLits can be used if the block is only litLen.
|
||||
func (b *blockEnc) encodeLits(raw bool) error {
|
||||
var bh blockHeader
|
||||
@ -324,18 +338,10 @@ func (b *blockEnc) encodeLits(raw bool) error {
|
||||
if len(b.literals) >= 1024 {
|
||||
// Use 4 Streams.
|
||||
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
|
||||
if len(out) > len(b.literals)-len(b.literals)>>4 {
|
||||
// Bail out if compression is too little.
|
||||
err = huff0.ErrIncompressible
|
||||
}
|
||||
} else if len(b.literals) > 32 {
|
||||
// Use 1 stream
|
||||
single = true
|
||||
out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
|
||||
if len(out) > len(b.literals)-len(b.literals)>>4 {
|
||||
// Bail out if compression is too little.
|
||||
err = huff0.ErrIncompressible
|
||||
}
|
||||
} else {
|
||||
err = huff0.ErrIncompressible
|
||||
}
|
||||
@ -437,7 +443,7 @@ func fuzzFseEncoder(data []byte) int {
|
||||
return 1
|
||||
}
|
||||
|
||||
// encode will encode the block and put the output in b.output.
|
||||
// encode will encode the block and append the output to b.output.
|
||||
func (b *blockEnc) encode(raw bool) error {
|
||||
if len(b.sequences) == 0 {
|
||||
return b.encodeLits(raw)
|
||||
@ -451,6 +457,8 @@ func (b *blockEnc) encode(raw bool) error {
|
||||
var lh literalsHeader
|
||||
bh.setLast(b.last)
|
||||
bh.setType(blockTypeCompressed)
|
||||
// Store offset of the block header. Needed when we know the size.
|
||||
bhOffset := len(b.output)
|
||||
b.output = bh.appendTo(b.output)
|
||||
|
||||
var (
|
||||
@ -468,6 +476,7 @@ func (b *blockEnc) encode(raw bool) error {
|
||||
} else {
|
||||
err = huff0.ErrIncompressible
|
||||
}
|
||||
|
||||
switch err {
|
||||
case huff0.ErrIncompressible:
|
||||
lh.setType(literalsBlockRaw)
|
||||
@ -735,18 +744,18 @@ func (b *blockEnc) encode(raw bool) error {
|
||||
}
|
||||
b.output = wr.out
|
||||
|
||||
if len(b.output)-3 >= b.size {
|
||||
if len(b.output)-3-bhOffset >= b.size {
|
||||
// Maybe even add a bigger margin.
|
||||
b.litEnc.Reuse = huff0.ReusePolicyNone
|
||||
return errIncompressible
|
||||
}
|
||||
|
||||
// Size is output minus block header.
|
||||
bh.setSize(uint32(len(b.output)) - 3)
|
||||
bh.setSize(uint32(len(b.output)-bhOffset) - 3)
|
||||
if debug {
|
||||
println("Rewriting block header", bh)
|
||||
}
|
||||
_ = bh.appendTo(b.output[:0])
|
||||
_ = bh.appendTo(b.output[bhOffset:bhOffset])
|
||||
b.coders.setPrev(llEnc, mlEnc, ofEnc)
|
||||
return nil
|
||||
}
|
||||
|
29
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
@ -388,6 +388,35 @@ func (d *Decoder) Close() {
|
||||
d.current.err = ErrDecoderClosed
|
||||
}
|
||||
|
||||
// IOReadCloser returns the decoder as an io.ReadCloser for convenience.
|
||||
// Any changes to the decoder will be reflected, so the returned ReadCloser
|
||||
// can be reused along with the decoder.
|
||||
// io.WriterTo is also supported by the returned ReadCloser.
|
||||
func (d *Decoder) IOReadCloser() io.ReadCloser {
|
||||
return closeWrapper{d: d}
|
||||
}
|
||||
|
||||
// closeWrapper wraps a function call as a closer.
|
||||
type closeWrapper struct {
|
||||
d *Decoder
|
||||
}
|
||||
|
||||
// WriteTo forwards WriteTo calls to the decoder.
|
||||
func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) {
|
||||
return c.d.WriteTo(w)
|
||||
}
|
||||
|
||||
// Read forwards read calls to the decoder.
|
||||
func (c closeWrapper) Read(p []byte) (n int, err error) {
|
||||
return c.d.Read(p)
|
||||
}
|
||||
|
||||
// Close closes the decoder.
|
||||
func (c closeWrapper) Close() error {
|
||||
c.d.Close()
|
||||
return nil
|
||||
}
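Typical use of the new wrapper, sketched against this package's public API (error handling abbreviated):

d, err := zstd.NewReader(compressed) // compressed is any io.Reader
if err != nil {
    return err
}
rc := d.IOReadCloser() // io.ReadCloser; also implements io.WriterTo
defer rc.Close()       // closes the underlying Decoder
_, err = io.Copy(dst, rc) // io.Copy picks up the WriteTo fast path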
|
||||
|
||||
type decodeOutput struct {
|
||||
d *blockDec
|
||||
b []byte
|
||||
|
313
vendor/github.com/klauspost/compress/zstd/enc_dfast.go
generated
vendored
@ -411,3 +411,316 @@ encodeLoop:
|
||||
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeNoHist will encode a block with no history and no following blocks.
|
||||
// Most notable difference is that src will not be copied for history and
|
||||
// we do not need to check for max match length.
|
||||
func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
|
||||
const (
|
||||
// Input margin is the number of bytes we read (8)
|
||||
// and the maximum we will read ahead (2)
|
||||
inputMargin = 8 + 2
|
||||
minNonLiteralBlockSize = 16
|
||||
)
|
||||
|
||||
// Protect against e.cur wraparound.
|
||||
if e.cur > (1<<30)+e.maxMatchOff {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
for i := range e.longTable[:] {
|
||||
e.longTable[i] = tableEntry{}
|
||||
}
|
||||
e.cur = e.maxMatchOff
|
||||
}
|
||||
|
||||
s := int32(0)
|
||||
blk.size = len(src)
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
blk.extraLits = len(src)
|
||||
blk.literals = blk.literals[:len(src)]
|
||||
copy(blk.literals, src)
|
||||
return
|
||||
}
|
||||
|
||||
// Override src
|
||||
sLimit := int32(len(src)) - inputMargin
|
||||
// stepSize is the number of bytes to skip on every main loop iteration.
|
||||
// It should be >= 1.
|
||||
stepSize := int32(e.o.targetLength)
|
||||
if stepSize == 0 {
|
||||
stepSize++
|
||||
}
|
||||
|
||||
const kSearchStrength = 8
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := s
|
||||
cv := load6432(src, s)
|
||||
|
||||
// Relative offsets
|
||||
offset1 := int32(blk.recentOffsets[0])
|
||||
offset2 := int32(blk.recentOffsets[1])
|
||||
|
||||
addLiterals := func(s *seq, until int32) {
|
||||
if until == nextEmit {
|
||||
return
|
||||
}
|
||||
blk.literals = append(blk.literals, src[nextEmit:until]...)
|
||||
s.litLen = uint32(until - nextEmit)
|
||||
}
|
||||
if debug {
|
||||
println("recent offsets:", blk.recentOffsets)
|
||||
}
|
||||
|
||||
encodeLoop:
|
||||
for {
|
||||
var t int32
|
||||
for {
|
||||
|
||||
nextHashS := hash5(cv, dFastShortTableBits)
|
||||
nextHashL := hash8(cv, dFastLongTableBits)
|
||||
candidateL := e.longTable[nextHashL]
|
||||
candidateS := e.table[nextHashS]
|
||||
|
||||
const repOff = 1
|
||||
repIndex := s - offset1 + repOff
|
||||
entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
|
||||
e.longTable[nextHashL] = entry
|
||||
e.table[nextHashS] = entry
|
||||
|
||||
if len(blk.sequences) > 2 {
|
||||
if load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
|
||||
// Consider history as well.
|
||||
var seq seq
|
||||
//length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
|
||||
length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:]))
|
||||
|
||||
seq.matchLen = uint32(length - zstdMinMatch)
|
||||
|
||||
// We might be able to match backwards.
|
||||
// Extend as long as we can.
|
||||
start := s + repOff
|
||||
// We end the search early, so we don't risk 0 literals
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
|
||||
repIndex--
|
||||
start--
|
||||
seq.matchLen++
|
||||
}
|
||||
addLiterals(&seq, start)
|
||||
|
||||
// rep 0
|
||||
seq.offset = 1
|
||||
if debugSequences {
|
||||
println("repeat sequence", seq, "next s:", s)
|
||||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
s += length + repOff
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
if debug {
|
||||
println("repeat ended", s, length)
|
||||
|
||||
}
|
||||
break encodeLoop
|
||||
}
|
||||
cv = load6432(src, s)
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Find the offsets of our two matches.
|
||||
coffsetL := s - (candidateL.offset - e.cur)
|
||||
coffsetS := s - (candidateS.offset - e.cur)
|
||||
|
||||
// Check if we have a long match.
|
||||
if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
|
||||
// Found a long match, likely at least 8 bytes.
|
||||
// Reference encoder checks all 8 bytes, we only check 4,
|
||||
// but the likelihood of both the first 4 bytes and the hash matching should be enough.
|
||||
t = candidateL.offset - e.cur
|
||||
if debug && s <= t {
|
||||
panic("s <= t")
|
||||
}
|
||||
if debug && s-t > e.maxMatchOff {
|
||||
panic("s - t >e.maxMatchOff")
|
||||
}
|
||||
if debugMatches {
|
||||
println("long match")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// Check if we have a short match.
|
||||
if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val {
|
||||
// found a regular match
|
||||
// See if we can find a long match at s+1
|
||||
const checkAt = 1
|
||||
cv := load6432(src, s+checkAt)
|
||||
nextHashL = hash8(cv, dFastLongTableBits)
|
||||
candidateL = e.longTable[nextHashL]
|
||||
coffsetL = s - (candidateL.offset - e.cur) + checkAt
|
||||
|
||||
// We can store it, since we have at least a 4 byte match.
|
||||
e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)}
|
||||
if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val {
|
||||
// Found a long match, likely at least 8 bytes.
|
||||
// Reference encoder checks all 8 bytes, we only check 4,
|
||||
// but the likelihood of both the first 4 bytes and the hash matching should be enough.
|
||||
t = candidateL.offset - e.cur
|
||||
s += checkAt
|
||||
if debugMatches {
|
||||
println("long match (after short)")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
t = candidateS.offset - e.cur
|
||||
if debug && s <= t {
|
||||
panic("s <= t")
|
||||
}
|
||||
if debug && s-t > e.maxMatchOff {
|
||||
panic("s - t >e.maxMatchOff")
|
||||
}
|
||||
if debug && t < 0 {
|
||||
panic("t<0")
|
||||
}
|
||||
if debugMatches {
|
||||
println("short match")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
// No match found, move forward in input.
|
||||
s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
|
||||
if s >= sLimit {
|
||||
break encodeLoop
|
||||
}
|
||||
cv = load6432(src, s)
|
||||
}
|
||||
|
||||
// A 4-byte match has been found. Update recent offsets.
|
||||
// We'll later see if more than 4 bytes.
|
||||
offset2 = offset1
|
||||
offset1 = s - t
|
||||
|
||||
if debug && s <= t {
|
||||
panic("s <= t")
|
||||
}
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
//l := e.matchlen(s+4, t+4, src) + 4
|
||||
l := int32(matchLen(src[s+4:], src[t+4:])) + 4
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
|
||||
// Write our sequence
|
||||
var seq seq
|
||||
seq.litLen = uint32(s - nextEmit)
|
||||
seq.matchLen = uint32(l - zstdMinMatch)
|
||||
if seq.litLen > 0 {
|
||||
blk.literals = append(blk.literals, src[nextEmit:s]...)
|
||||
}
|
||||
seq.offset = uint32(s-t) + 3
|
||||
s += l
|
||||
if debugSequences {
|
||||
println("sequence", seq, "next s:", s)
|
||||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
break encodeLoop
|
||||
}
|
||||
|
||||
// Index match start+1 (long) and start+2 (short)
|
||||
index0 := s - l + 1
|
||||
// Index match end-2 (long) and end-1 (short)
|
||||
index1 := s - 2
|
||||
|
||||
cv0 := load6432(src, index0)
|
||||
cv1 := load6432(src, index1)
|
||||
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)}
|
||||
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)}
|
||||
e.longTable[hash8(cv0, dFastLongTableBits)] = te0
|
||||
e.longTable[hash8(cv1, dFastLongTableBits)] = te1
|
||||
cv0 >>= 8
|
||||
cv1 >>= 8
|
||||
te0.offset++
|
||||
te1.offset++
|
||||
te0.val = uint32(cv0)
|
||||
te1.val = uint32(cv1)
|
||||
e.table[hash5(cv0, dFastShortTableBits)] = te0
|
||||
e.table[hash5(cv1, dFastShortTableBits)] = te1
|
||||
|
||||
cv = load6432(src, s)
|
||||
|
||||
if len(blk.sequences) <= 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check offset 2
|
||||
for {
|
||||
o2 := s - offset2
|
||||
if load3232(src, o2) != uint32(cv) {
|
||||
// Do regular search
|
||||
break
|
||||
}
|
||||
|
||||
// Store this, since we have it.
|
||||
nextHashS := hash5(cv1>>8, dFastShortTableBits)
|
||||
nextHashL := hash8(cv, dFastLongTableBits)
|
||||
|
||||
// We have at least 4 byte match.
|
||||
// No need to check backwards. We come straight from a match
|
||||
//l := 4 + e.matchlen(s+4, o2+4, src)
|
||||
l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
|
||||
|
||||
entry := tableEntry{offset: s + e.cur, val: uint32(cv)}
|
||||
e.longTable[nextHashL] = entry
|
||||
e.table[nextHashS] = entry
|
||||
seq.matchLen = uint32(l) - zstdMinMatch
|
||||
seq.litLen = 0
|
||||
|
||||
// Since litlen is always 0, this is offset 1.
|
||||
seq.offset = 1
|
||||
s += l
|
||||
nextEmit = s
|
||||
if debugSequences {
|
||||
println("sequence", seq, "next s:", s)
|
||||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
|
||||
// Swap offset 1 and 2.
|
||||
offset1, offset2 = offset2, offset1
|
||||
if s >= sLimit {
|
||||
// Finished
|
||||
break encodeLoop
|
||||
}
|
||||
cv = load6432(src, s)
|
||||
}
|
||||
}
|
||||
|
||||
if int(nextEmit) < len(src) {
|
||||
blk.literals = append(blk.literals, src[nextEmit:]...)
|
||||
blk.extraLits = len(src) - int(nextEmit)
|
||||
}
|
||||
if debug {
|
||||
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
|
||||
}
|
||||
|
||||
}
|
||||
|
245
vendor/github.com/klauspost/compress/zstd/enc_fast.go
generated
vendored
@ -329,6 +329,246 @@ encodeLoop:
|
||||
}
|
||||
}
|
||||
|
||||
// EncodeNoHist will encode a block with no history and no following blocks.
|
||||
// Most notable difference is that src will not be copied for history and
|
||||
// we do not need to check for max match length.
|
||||
func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
|
||||
const (
|
||||
inputMargin = 8
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
)
|
||||
if debug {
|
||||
if len(src) > maxBlockSize {
|
||||
panic("src too big")
|
||||
}
|
||||
}
|
||||
// Protect against e.cur wraparound.
|
||||
if e.cur > (1<<30)+e.maxMatchOff {
|
||||
for i := range e.table[:] {
|
||||
e.table[i] = tableEntry{}
|
||||
}
|
||||
e.cur = e.maxMatchOff
|
||||
}
|
||||
|
||||
s := int32(0)
|
||||
blk.size = len(src)
|
||||
if len(src) < minNonLiteralBlockSize {
|
||||
blk.extraLits = len(src)
|
||||
blk.literals = blk.literals[:len(src)]
|
||||
copy(blk.literals, src)
|
||||
return
|
||||
}
|
||||
|
||||
sLimit := int32(len(src)) - inputMargin
|
||||
// stepSize is the number of bytes to skip on every main loop iteration.
|
||||
// It should be >= 2.
|
||||
const stepSize = 2
|
||||
|
||||
// TEMPLATE
|
||||
const hashLog = tableBits
|
||||
// seems global, but would be nice to tweak.
|
||||
const kSearchStrength = 8
|
||||
|
||||
// nextEmit is where in src the next emitLiteral should start from.
|
||||
nextEmit := s
|
||||
cv := load6432(src, s)
|
||||
|
||||
// Relative offsets
|
||||
offset1 := int32(blk.recentOffsets[0])
|
||||
offset2 := int32(blk.recentOffsets[1])
|
||||
|
||||
addLiterals := func(s *seq, until int32) {
|
||||
if until == nextEmit {
|
||||
return
|
||||
}
|
||||
blk.literals = append(blk.literals, src[nextEmit:until]...)
|
||||
s.litLen = uint32(until - nextEmit)
|
||||
}
|
||||
if debug {
|
||||
println("recent offsets:", blk.recentOffsets)
|
||||
}
|
||||
|
||||
encodeLoop:
|
||||
for {
|
||||
// t will contain the match offset when we find one.
|
||||
// When exiting the search loop, we have already checked 4 bytes.
|
||||
var t int32
|
||||
|
||||
// We will not use repeat offsets across blocks.
|
||||
// By not using them for the first 3 matches, we never reference offsets from a previous block.
|
||||
|
||||
for {
|
||||
nextHash := hash6(cv, hashLog)
|
||||
nextHash2 := hash6(cv>>8, hashLog)
|
||||
candidate := e.table[nextHash]
|
||||
candidate2 := e.table[nextHash2]
|
||||
repIndex := s - offset1 + 2
|
||||
|
||||
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
|
||||
e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
|
||||
|
||||
if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
|
||||
// Consider history as well.
|
||||
var seq seq
|
||||
// length := 4 + e.matchlen(s+6, repIndex+4, src)
length := 4 + int32(matchLen(src[s+6:], src[repIndex+4:]))
|
||||
|
||||
seq.matchLen = uint32(length - zstdMinMatch)
|
||||
|
||||
// We might be able to match backwards.
|
||||
// Extend as long as we can.
|
||||
start := s + 2
|
||||
// We end the search early, so we don't risk 0 literals
|
||||
// and have to do special offset treatment.
|
||||
startLimit := nextEmit + 1
|
||||
|
||||
sMin := s - e.maxMatchOff
|
||||
if sMin < 0 {
|
||||
sMin = 0
|
||||
}
|
||||
for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
|
||||
repIndex--
|
||||
start--
|
||||
seq.matchLen++
|
||||
}
|
||||
addLiterals(&seq, start)
|
||||
|
||||
// rep 0
|
||||
seq.offset = 1
|
||||
if debugSequences {
|
||||
println("repeat sequence", seq, "next s:", s)
|
||||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
s += length + 2
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
if debug {
|
||||
println("repeat ended", s, lenght)
|
||||
|
||||
}
|
||||
break encodeLoop
|
||||
}
|
||||
cv = load6432(src, s)
|
||||
continue
|
||||
}
|
||||
coffset0 := s - (candidate.offset - e.cur)
|
||||
coffset1 := s - (candidate2.offset - e.cur) + 1
|
||||
if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val {
|
||||
// found a regular match
|
||||
t = candidate.offset - e.cur
|
||||
if debug && s <= t {
|
||||
panic("s <= t")
|
||||
}
|
||||
if debug && s-t > e.maxMatchOff {
|
||||
panic("s - t >e.maxMatchOff")
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val {
|
||||
// found a regular match
|
||||
t = candidate2.offset - e.cur
|
||||
s++
|
||||
if debug && s <= t {
|
||||
panic("s <= t")
|
||||
}
|
||||
if debug && s-t > e.maxMatchOff {
|
||||
panic("s - t >e.maxMatchOff")
|
||||
}
|
||||
if debug && t < 0 {
|
||||
panic("t<0")
|
||||
}
|
||||
break
|
||||
}
|
||||
s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1))
|
||||
if s >= sLimit {
|
||||
break encodeLoop
|
||||
}
|
||||
cv = load6432(src, s)
|
||||
}
|
||||
// A 4-byte match has been found. We'll later see if more than 4 bytes.
|
||||
offset2 = offset1
|
||||
offset1 = s - t
|
||||
|
||||
if debug && s <= t {
|
||||
panic("s <= t")
|
||||
}
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
//l := e.matchlenNoHist(s+4, t+4, src) + 4
|
||||
l := int32(matchLen(src[s+4:], src[t+4:])) + 4
|
||||
|
||||
// Extend backwards
|
||||
tMin := s - e.maxMatchOff
|
||||
if tMin < 0 {
|
||||
tMin = 0
|
||||
}
|
||||
for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
|
||||
s--
|
||||
t--
|
||||
l++
|
||||
}
|
||||
|
||||
// Write our sequence.
|
||||
var seq seq
|
||||
seq.litLen = uint32(s - nextEmit)
|
||||
seq.matchLen = uint32(l - zstdMinMatch)
|
||||
if seq.litLen > 0 {
|
||||
blk.literals = append(blk.literals, src[nextEmit:s]...)
|
||||
}
|
||||
// Don't use repeat offsets
|
||||
seq.offset = uint32(s-t) + 3
|
||||
s += l
|
||||
if debugSequences {
|
||||
println("sequence", seq, "next s:", s)
|
||||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
nextEmit = s
|
||||
if s >= sLimit {
|
||||
break encodeLoop
|
||||
}
|
||||
cv = load6432(src, s)
|
||||
|
||||
// Check offset 2
|
||||
if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) {
|
||||
// We have at least 4 byte match.
|
||||
// No need to check backwards. We come straight from a match
|
||||
//l := 4 + e.matchlenNoHist(s+4, o2+4, src)
|
||||
l := 4 + int32(matchLen(src[s+4:], src[o2+4:]))
|
||||
|
||||
// Store this, since we have it.
|
||||
nextHash := hash6(cv, hashLog)
|
||||
e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
|
||||
seq.matchLen = uint32(l) - zstdMinMatch
|
||||
seq.litLen = 0
|
||||
// Since litlen is always 0, this is offset 1.
|
||||
seq.offset = 1
|
||||
s += l
|
||||
nextEmit = s
|
||||
if debugSequences {
|
||||
println("sequence", seq, "next s:", s)
|
||||
}
|
||||
blk.sequences = append(blk.sequences, seq)
|
||||
|
||||
// Swap offset 1 and 2.
|
||||
offset1, offset2 = offset2, offset1
|
||||
if s >= sLimit {
|
||||
break encodeLoop
|
||||
}
|
||||
// Prepare next loop.
|
||||
cv = load6432(src, s)
|
||||
}
|
||||
}
|
||||
|
||||
if int(nextEmit) < len(src) {
|
||||
blk.literals = append(blk.literals, src[nextEmit:]...)
|
||||
blk.extraLits = len(src) - int(nextEmit)
|
||||
}
|
||||
if debug {
|
||||
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *fastEncoder) addBlock(src []byte) int32 {
|
||||
// check if we have space already
|
||||
if len(e.hist)+len(src) > cap(e.hist) {
|
||||
@ -362,6 +602,11 @@ func (e *fastEncoder) UseBlock(enc *blockEnc) {
|
||||
e.blk = enc
|
||||
}
|
||||
|
||||
func (e *fastEncoder) matchlenNoHist(s, t int32, src []byte) int32 {
|
||||
// Extend the match to be as long as possible.
|
||||
return int32(matchLen(src[s:], src[t:]))
|
||||
}
|
||||
|
||||
func (e *fastEncoder) matchlen(s, t int32, src []byte) int32 {
|
||||
if debug {
|
||||
if s < 0 {
|
||||
|
71
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
@ -29,6 +29,7 @@ type Encoder struct {
|
||||
|
||||
type encoder interface {
|
||||
Encode(blk *blockEnc, src []byte)
|
||||
EncodeNoHist(blk *blockEnc, src []byte)
|
||||
Block() *blockEnc
|
||||
CRC() *xxhash.Digest
|
||||
AppendCRC([]byte) []byte
|
||||
@ -433,7 +434,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
||||
}()
|
||||
enc.Reset()
|
||||
blk := enc.Block()
|
||||
single := len(src) > 1<<20
|
||||
// Use single segments when above minimum window and below 1MB.
|
||||
single := len(src) < 1<<20 && len(src) > MinWindowSize
|
||||
if e.o.single != nil {
|
||||
single = *e.o.single
|
||||
}
|
||||
@ -454,25 +456,22 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
for len(src) > 0 {
|
||||
todo := src
|
||||
if len(todo) > e.o.blockSize {
|
||||
todo = todo[:e.o.blockSize]
|
||||
}
|
||||
src = src[len(todo):]
|
||||
if len(src) <= e.o.blockSize && len(src) <= maxBlockSize {
|
||||
// Slightly faster with no history and everything in one block.
|
||||
if e.o.crc {
|
||||
_, _ = enc.CRC().Write(todo)
|
||||
_, _ = enc.CRC().Write(src)
|
||||
}
|
||||
blk.reset(nil)
|
||||
blk.pushOffsets()
|
||||
enc.Encode(blk, todo)
|
||||
if len(src) == 0 {
|
||||
blk.last = true
|
||||
}
|
||||
err := errIncompressible
|
||||
blk.last = true
|
||||
enc.EncodeNoHist(blk, src)
|
||||
|
||||
// If we got the exact same number of literals as input,
|
||||
// assume the literals cannot be compressed.
|
||||
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
|
||||
err := errIncompressible
|
||||
oldout := blk.output
|
||||
if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
|
||||
// Output directly to dst
|
||||
blk.output = dst
|
||||
err = blk.encode(e.o.noEntropy)
|
||||
}
|
||||
|
||||
@ -481,13 +480,49 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
||||
if debug {
|
||||
println("Storing incompressible block as raw")
|
||||
}
|
||||
blk.encodeRaw(todo)
|
||||
blk.popOffsets()
|
||||
dst = blk.encodeRawTo(dst, src)
|
||||
case nil:
|
||||
dst = blk.output
|
||||
default:
|
||||
panic(err)
|
||||
}
|
||||
dst = append(dst, blk.output...)
|
||||
blk.output = oldout
|
||||
} else {
|
||||
for len(src) > 0 {
|
||||
todo := src
|
||||
if len(todo) > e.o.blockSize {
|
||||
todo = todo[:e.o.blockSize]
|
||||
}
|
||||
src = src[len(todo):]
|
||||
if e.o.crc {
|
||||
_, _ = enc.CRC().Write(todo)
|
||||
}
|
||||
blk.reset(nil)
|
||||
blk.pushOffsets()
|
||||
enc.Encode(blk, todo)
|
||||
if len(src) == 0 {
|
||||
blk.last = true
|
||||
}
|
||||
err := errIncompressible
|
||||
// If we got the exact same number of literals as input,
|
||||
// assume the literals cannot be compressed.
|
||||
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
|
||||
err = blk.encode(e.o.noEntropy)
|
||||
}
|
||||
|
||||
switch err {
|
||||
case errIncompressible:
|
||||
if debug {
|
||||
println("Storing incompressible block as raw")
|
||||
}
|
||||
dst = blk.encodeRawTo(dst, todo)
|
||||
blk.popOffsets()
|
||||
case nil:
|
||||
dst = append(dst, blk.output...)
|
||||
default:
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
if e.o.crc {
|
||||
dst = enc.AppendCRC(dst)
|
||||
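From the caller's side the new single-segment path is transparent; EncodeAll is used as before. A short usage sketch against the package's public API (buffer reuse is optional):

enc, err := zstd.NewWriter(nil) // nil writer: EncodeAll only, no streaming
if err != nil {
    return err
}
defer enc.Close()
dst := enc.EncodeAll(src, make([]byte, 0, len(src))) // output is appended to dst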
|
1
vendor/github.com/mattn/go-shellwords/.travis.yml
generated
vendored
@ -11,4 +11,3 @@ script:
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
||||
|
||||
|
1
vendor/github.com/mattn/go-shellwords/README.md
generated
vendored
@ -2,6 +2,7 @@
|
||||
|
||||
[](https://codecov.io/gh/mattn/go-shellwords)
|
||||
[](https://travis-ci.org/mattn/go-shellwords)
|
||||
[](http://godoc.org/github.com/mattn/go-shellwords)
|
||||
|
||||
Parse line as shell words.
|
||||
|
||||
|
2
vendor/github.com/mattn/go-shellwords/go.mod
generated
vendored
@ -1 +1,3 @@
|
||||
module github.com/mattn/go-shellwords
|
||||
|
||||
go 1.13
|
||||
|
30
vendor/github.com/mattn/go-shellwords/shellwords.go
generated
vendored
@ -88,9 +88,17 @@ loop:
|
||||
backtick += string(r)
|
||||
} else if got {
|
||||
if p.ParseEnv {
|
||||
buf = replaceEnv(p.Getenv, buf)
|
||||
parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir}
|
||||
strs, err := parser.Parse(replaceEnv(p.Getenv, buf))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, str := range strs {
|
||||
args = append(args, str)
|
||||
}
|
||||
} else {
|
||||
args = append(args, buf)
|
||||
}
|
||||
args = append(args, buf)
|
||||
buf = ""
|
||||
got = false
|
||||
}
|
||||
@ -144,11 +152,17 @@ loop:
|
||||
}
|
||||
case '"':
|
||||
if !singleQuoted && !dollarQuote {
|
||||
if doubleQuoted {
|
||||
got = true
|
||||
}
|
||||
doubleQuoted = !doubleQuoted
|
||||
continue
|
||||
}
|
||||
case '\'':
|
||||
if !doubleQuoted && !dollarQuote {
|
||||
if singleQuoted {
|
||||
got = true
|
||||
}
|
||||
singleQuoted = !singleQuoted
|
||||
continue
|
||||
}
|
||||
@ -174,9 +188,17 @@ loop:
|
||||
|
||||
if got {
|
||||
if p.ParseEnv {
|
||||
buf = replaceEnv(p.Getenv, buf)
|
||||
parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir}
|
||||
strs, err := parser.Parse(replaceEnv(p.Getenv, buf))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, str := range strs {
|
||||
args = append(args, str)
|
||||
}
|
||||
} else {
|
||||
args = append(args, buf)
|
||||
}
|
||||
args = append(args, buf)
|
||||
}
|
||||
|
||||
if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote {
|
||||
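The effect of the ParseEnv change above: substituted environment values are now re-parsed as shell words instead of being kept as a single argument. Observable behavior, sketched with the package's public Parser (the Getenv stub is ours):

p := shellwords.NewParser()
p.ParseEnv = true
p.Getenv = func(k string) string { return "a b" } // stub lookup for the sketch
args, _ := p.Parse("echo $FOO")
// before this change: ["echo", "a b"]
// after this change:  ["echo", "a", "b"]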
|
40
vendor/github.com/mtrmac/gpgme/.appveyor.yml
generated
vendored
Normal file
@ -0,0 +1,40 @@
|
||||
---
|
||||
version: 0.{build}
|
||||
platform: x86
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\proglottis\gpgme
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
GOROOT: C:\go-x86
|
||||
CGO_LDFLAGS: -LC:\gpg\lib
|
||||
CGO_CFLAGS: -IC:\gpg\include
|
||||
GPG_DIR: C:\gpg
|
||||
|
||||
install:
|
||||
- nuget install 7ZipCLI -ExcludeVersion
|
||||
- set PATH=%appveyor_build_folder%\7ZipCLI\tools;%PATH%
|
||||
- appveyor DownloadFile https://www.gnupg.org/ftp/gcrypt/binary/gnupg-w32-2.1.20_20170403.exe -FileName gnupg-w32-2.1.20_20170403.exe
|
||||
- 7z x -o%GPG_DIR% gnupg-w32-2.1.20_20170403.exe
|
||||
- copy "%GPG_DIR%\lib\libgpg-error.imp" "%GPG_DIR%\lib\libgpg-error.a"
|
||||
- copy "%GPG_DIR%\lib\libassuan.imp" "%GPG_DIR%\lib\libassuan.a"
|
||||
- copy "%GPG_DIR%\lib\libgpgme.imp" "%GPG_DIR%\lib\libgpgme.a"
|
||||
- set PATH=%GOPATH%\bin;%GOROOT%\bin;%GPG_DIR%\bin;C:\MinGW\bin;%PATH%
|
||||
- C:\cygwin\bin\sed -i 's/"GPG_AGENT_INFO"/"GPG_AGENT_INFO="/;s/C.unsetenv(v)/C.putenv(v)/' %APPVEYOR_BUILD_FOLDER%\gpgme.go
|
||||
|
||||
test_script:
|
||||
- go test -v github.com/proglottis/gpgme
|
||||
|
||||
|
||||
build_script:
|
||||
- go build -o example_decrypt.exe -i %APPVEYOR_BUILD_FOLDER%\examples\decrypt.go
|
||||
- go build -o example_encrypt.exe -i %APPVEYOR_BUILD_FOLDER%\examples\encrypt.go
|
||||
|
||||
artifacts:
|
||||
- path: example_decrypt.exe
|
||||
name: decrypt example binary
|
||||
- path: example_encrypt.exe
|
||||
name: encrypt example binary
|
32
vendor/github.com/mtrmac/gpgme/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,32 @@
|
||||
---
|
||||
language: go
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
- windows
|
||||
dist: xenial
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.11
|
||||
- 1.12
|
||||
- 1.13
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- libgpgme11-dev
|
||||
homebrew:
|
||||
packages:
|
||||
- gnupg
|
||||
- gnupg@1.4
|
||||
- gpgme
|
||||
update: true
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- os: windows
|
||||
|
||||
before_install:
|
||||
- if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install msys2; fi
|
||||
- if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then choco install gpg4win; fi
|
18
vendor/github.com/mtrmac/gpgme/data.go
generated
vendored
@ -50,25 +50,25 @@ func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
|
||||
}
|
||||
|
||||
//export gogpgme_seekfunc
|
||||
func gogpgme_seekfunc(handle unsafe.Pointer, offset C.off_t, whence C.int) C.off_t {
|
||||
func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) C.gpgme_off_t {
|
||||
d := callbackLookup(uintptr(handle)).(*Data)
|
||||
n, err := d.s.Seek(int64(offset), int(whence))
|
||||
if err != nil {
|
||||
C.gpgme_err_set_errno(C.EIO)
|
||||
return -1
|
||||
}
|
||||
return C.off_t(n)
|
||||
return C.gpgme_off_t(n)
|
||||
}
|
||||
|
||||
// The Data buffer used to communicate with GPGME
|
||||
type Data struct {
|
||||
dh C.gpgme_data_t
|
||||
dh C.gpgme_data_t // WARNING: Call runtime.KeepAlive(d) after ANY passing of d.dh to C
|
||||
buf []byte
|
||||
cbs C.struct_gpgme_data_cbs
|
||||
r io.Reader
|
||||
w io.Writer
|
||||
s io.Seeker
|
||||
cbc uintptr
|
||||
cbc uintptr // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh)
|
||||
}
|
||||
|
||||
func newData() *Data {
|
||||
@ -154,12 +154,14 @@ func (d *Data) Close() error {
|
||||
callbackDelete(d.cbc)
|
||||
}
|
||||
_, err := C.gpgme_data_release(d.dh)
|
||||
runtime.KeepAlive(d)
|
||||
d.dh = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *Data) Write(p []byte) (int, error) {
|
||||
n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
|
||||
runtime.KeepAlive(d)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -171,6 +173,7 @@ func (d *Data) Write(p []byte) (int, error) {
|
||||
|
||||
func (d *Data) Read(p []byte) (int, error) {
|
||||
n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
|
||||
runtime.KeepAlive(d)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@ -181,11 +184,14 @@ func (d *Data) Read(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
func (d *Data) Seek(offset int64, whence int) (int64, error) {
|
||||
n, err := C.gpgme_data_seek(d.dh, C.off_t(offset), C.int(whence))
|
||||
n, err := C.gogpgme_data_seek(d.dh, C.gpgme_off_t(offset), C.int(whence))
|
||||
runtime.KeepAlive(d)
|
||||
return int64(n), err
|
||||
}
|
||||
|
||||
// Name returns the associated filename if any
|
||||
func (d *Data) Name() string {
|
||||
return C.GoString(C.gpgme_data_get_file_name(d.dh))
|
||||
res := C.GoString(C.gpgme_data_get_file_name(d.dh))
|
||||
runtime.KeepAlive(d)
|
||||
return res
|
||||
}
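The pattern added throughout data.go: every cgo call that goes through d.dh (and hence through the callback handle referencing d) is immediately followed by runtime.KeepAlive(d), so the collector cannot free d while C code still dereferences it. The generic shape (C.some_call is a placeholder, not a real symbol):

ret, err := C.some_call(obj.handle) // C reads through memory owned by obj
runtime.KeepAlive(obj)              // obj must not be collected before the call returns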
|
||||
|
3
vendor/github.com/mtrmac/gpgme/go.mod
generated
vendored
Normal file
@ -0,0 +1,3 @@
|
||||
module github.com/mtrmac/gpgme
|
||||
|
||||
go 1.11
|
22
vendor/github.com/mtrmac/gpgme/go_gpgme.c
generated
vendored
@ -8,6 +8,28 @@ void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintpt
|
||||
gpgme_set_passphrase_cb(ctx, cb, (void *)handle);
|
||||
}
|
||||
|
||||
gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) {
|
||||
return gpgme_data_seek(dh, offset, whence);
|
||||
}
|
||||
|
||||
gpgme_error_t gogpgme_op_assuan_transact_ext(
|
||||
gpgme_ctx_t ctx,
|
||||
char* cmd,
|
||||
uintptr_t data_h,
|
||||
uintptr_t inquiry_h,
|
||||
uintptr_t status_h,
|
||||
gpgme_error_t *operr
|
||||
){
|
||||
return gpgme_op_assuan_transact_ext(
|
||||
ctx,
|
||||
cmd,
|
||||
(gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, (void *)data_h,
|
||||
(gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, (void *)inquiry_h,
|
||||
(gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, (void *)status_h,
|
||||
operr
|
||||
);
|
||||
}
|
||||
|
||||
unsigned int key_revoked(gpgme_key_t k) {
|
||||
return k->revoked;
|
||||
}
|
||||
|
7
vendor/github.com/mtrmac/gpgme/go_gpgme.h
generated
vendored
@ -12,6 +12,13 @@ extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence);
|
||||
extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd);
|
||||
extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle);
|
||||
extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle);
|
||||
extern gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence);
|
||||
|
||||
extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, uintptr_t data_h, uintptr_t inquiry_h , uintptr_t status_h, gpgme_error_t *operr);
|
||||
|
||||
extern gpgme_error_t gogpgme_assuan_data_callback(void *opaque, void* data, size_t datalen );
|
||||
extern gpgme_error_t gogpgme_assuan_inquiry_callback(void *opaque, char* name, char* args);
|
||||
extern gpgme_error_t gogpgme_assuan_status_callback(void *opaque, char* status, char* args);
|
||||
|
||||
extern unsigned int key_revoked(gpgme_key_t k);
|
||||
extern unsigned int key_expired(gpgme_key_t k);
|
||||
|
346
vendor/github.com/mtrmac/gpgme/gpgme.go
generated
vendored
@ -7,7 +7,6 @@ package gpgme
|
||||
// #include <gpgme.h>
|
||||
// #include "go_gpgme.h"
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
@ -48,9 +47,8 @@ const (
|
||||
ProtocolAssuan Protocol = C.GPGME_PROTOCOL_ASSUAN
|
||||
ProtocolG13 Protocol = C.GPGME_PROTOCOL_G13
|
||||
ProtocolUIServer Protocol = C.GPGME_PROTOCOL_UISERVER
|
||||
// ProtocolSpawn Protocol = C.GPGME_PROTOCOL_SPAWN // Unavailable in 1.4.3
|
||||
ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT
|
||||
ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN
|
||||
ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT
|
||||
ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN
|
||||
)
|
||||
|
||||
type PinEntryMode int
|
||||
@ -70,7 +68,6 @@ const (
|
||||
EncryptNoEncryptTo EncryptFlag = C.GPGME_ENCRYPT_NO_ENCRYPT_TO
|
||||
EncryptPrepare EncryptFlag = C.GPGME_ENCRYPT_PREPARE
|
||||
EncryptExceptSign EncryptFlag = C.GPGME_ENCRYPT_EXPECT_SIGN
|
||||
// EncryptNoCompress EncryptFlag = C.GPGME_ENCRYPT_NO_COMPRESS // Unavailable in 1.4.3
|
||||
)
|
||||
|
||||
type HashAlgo int
|
||||
@ -84,7 +81,6 @@ const (
|
||||
KeyListModeExtern KeyListMode = C.GPGME_KEYLIST_MODE_EXTERN
|
||||
KeyListModeSigs KeyListMode = C.GPGME_KEYLIST_MODE_SIGS
|
||||
KeyListModeSigNotations KeyListMode = C.GPGME_KEYLIST_MODE_SIG_NOTATIONS
|
||||
// KeyListModeWithSecret KeyListMode = C.GPGME_KEYLIST_MODE_WITH_SECRET // Unavailable in 1.4.3
|
||||
KeyListModeEphemeral KeyListMode = C.GPGME_KEYLIST_MODE_EPHEMERAL
|
||||
KeyListModeModeValidate KeyListMode = C.GPGME_KEYLIST_MODE_VALIDATE
|
||||
)
|
||||
@ -168,39 +164,60 @@ func EngineCheckVersion(p Protocol) error {
|
||||
}
|
||||
|
||||
type EngineInfo struct {
|
||||
info C.gpgme_engine_info_t
|
||||
next *EngineInfo
|
||||
protocol Protocol
|
||||
fileName string
|
||||
homeDir string
|
||||
version string
|
||||
requiredVersion string
|
||||
}
|
||||
|
||||
func copyEngineInfo(info C.gpgme_engine_info_t) *EngineInfo {
|
||||
res := &EngineInfo{
|
||||
next: nil,
|
||||
protocol: Protocol(info.protocol),
|
||||
fileName: C.GoString(info.file_name),
|
||||
homeDir: C.GoString(info.home_dir),
|
||||
version: C.GoString(info.version),
|
||||
requiredVersion: C.GoString(info.req_version),
|
||||
}
|
||||
if info.next != nil {
|
||||
res.next = copyEngineInfo(info.next)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (e *EngineInfo) Next() *EngineInfo {
|
||||
if e.info.next == nil {
|
||||
return nil
|
||||
}
|
||||
return &EngineInfo{info: e.info.next}
|
||||
return e.next
|
||||
}
|
||||
|
||||
func (e *EngineInfo) Protocol() Protocol {
|
||||
return Protocol(e.info.protocol)
|
||||
return e.protocol
|
||||
}
|
||||
|
||||
func (e *EngineInfo) FileName() string {
|
||||
return C.GoString(e.info.file_name)
|
||||
return e.fileName
|
||||
}
|
||||
|
||||
func (e *EngineInfo) Version() string {
|
||||
return C.GoString(e.info.version)
|
||||
return e.version
|
||||
}
|
||||
|
||||
func (e *EngineInfo) RequiredVersion() string {
|
||||
return C.GoString(e.info.req_version)
|
||||
return e.requiredVersion
|
||||
}
|
||||
|
||||
func (e *EngineInfo) HomeDir() string {
|
||||
return C.GoString(e.info.home_dir)
|
||||
return e.homeDir
|
||||
}
|
||||
|
||||
func GetEngineInfo() (*EngineInfo, error) {
|
||||
info := &EngineInfo{}
|
||||
return info, handleError(C.gpgme_get_engine_info(&info.info))
|
||||
var cInfo C.gpgme_engine_info_t
|
||||
err := handleError(C.gpgme_get_engine_info(&cInfo))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return copyEngineInfo(cInfo), nil // It is up to the caller not to invalidate cInfo concurrently until this is done.
|
||||
}
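Because copyEngineInfo snapshots the whole C linked list into Go-owned strings, the result stays valid after GPGME mutates or frees the underlying structures. Callers can iterate it as plain Go data (assumes fmt is imported):

info, err := gpgme.GetEngineInfo()
if err != nil {
    return err
}
for e := info; e != nil; e = e.Next() {
    fmt.Println(e.Protocol(), e.FileName(), e.Version())
}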
|
||||
|
||||
func SetEngineInfo(proto Protocol, fileName, homeDir string) error {
|
||||
@@ -261,9 +278,9 @@ type Context struct {
 	KeyError error
 
 	callback Callback
-	cbc      uintptr
+	cbc      uintptr // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx)
 
-	ctx C.gpgme_ctx_t
+	ctx C.gpgme_ctx_t // WARNING: Call runtime.KeepAlive(c) after ANY passing of c.ctx to C
 }
 
 func New() (*Context, error) {
@@ -281,49 +298,68 @@ func (c *Context) Release() {
 		callbackDelete(c.cbc)
 	}
 	C.gpgme_release(c.ctx)
+	runtime.KeepAlive(c)
 	c.ctx = nil
 }
 
 func (c *Context) SetArmor(yes bool) {
 	C.gpgme_set_armor(c.ctx, cbool(yes))
+	runtime.KeepAlive(c)
 }
 
 func (c *Context) Armor() bool {
-	return C.gpgme_get_armor(c.ctx) != 0
+	res := C.gpgme_get_armor(c.ctx) != 0
+	runtime.KeepAlive(c)
+	return res
 }
 
 func (c *Context) SetTextMode(yes bool) {
 	C.gpgme_set_textmode(c.ctx, cbool(yes))
+	runtime.KeepAlive(c)
 }
 
 func (c *Context) TextMode() bool {
-	return C.gpgme_get_textmode(c.ctx) != 0
+	res := C.gpgme_get_textmode(c.ctx) != 0
+	runtime.KeepAlive(c)
+	return res
 }
 
 func (c *Context) SetProtocol(p Protocol) error {
-	return handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p)))
+	err := handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p)))
+	runtime.KeepAlive(c)
+	return err
 }
 
 func (c *Context) Protocol() Protocol {
-	return Protocol(C.gpgme_get_protocol(c.ctx))
+	res := Protocol(C.gpgme_get_protocol(c.ctx))
+	runtime.KeepAlive(c)
+	return res
 }
 
 func (c *Context) SetKeyListMode(m KeyListMode) error {
-	return handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m)))
+	err := handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m)))
+	runtime.KeepAlive(c)
+	return err
 }
 
 func (c *Context) KeyListMode() KeyListMode {
-	return KeyListMode(C.gpgme_get_keylist_mode(c.ctx))
+	res := KeyListMode(C.gpgme_get_keylist_mode(c.ctx))
+	runtime.KeepAlive(c)
+	return res
 }
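Every accessor above now ends with runtime.KeepAlive(c). The reason: an expression like c.ctx is evaluated before the cgo call runs, and after that evaluation c itself is dead as far as the garbage collector is concerned, so an object whose finalizer frees the C handle could be collected while the C call is still using it. A runnable sketch of the hazard with illustrative names (this is not the gpgme API):

package main

import (
	"fmt"
	"runtime"
)

type cHandle struct{ ptr uintptr } // stands in for C-allocated state

type Context struct{ h *cHandle }

func cCall(p uintptr) { fmt.Println("C call using", p) } // stands in for a cgo call

func (c *Context) Armor() bool {
	p := c.h.ptr // last use of c: from here on, c is collectable...
	cCall(p)
	runtime.KeepAlive(c) // ...unless we pin it until the call has returned
	return true
}

func main() {
	c := &Context{h: &cHandle{ptr: 42}}
	runtime.SetFinalizer(c, func(c *Context) { fmt.Println("finalizer: freeing", c.h.ptr) })
	fmt.Println(c.Armor())
}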
 // Unavailable in 1.3.2:
 // func (c *Context) SetPinEntryMode(m PinEntryMode) error {
-// 	return handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
+// 	err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
+// 	runtime.KeepAlive(c)
+// 	return err
 // }
 
 // Unavailable in 1.3.2:
 // func (c *Context) PinEntryMode() PinEntryMode {
-// 	return PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
+// 	res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
+// 	runtime.KeepAlive(c)
+// 	return res
 // }
 
 func (c *Context) SetCallback(callback Callback) error {
@@ -340,11 +376,17 @@ func (c *Context) SetCallback(callback Callback) error {
 		c.cbc = 0
 		_, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0)
 	}
+	runtime.KeepAlive(c)
 	return err
 }
 
 func (c *Context) EngineInfo() *EngineInfo {
-	return &EngineInfo{info: C.gpgme_ctx_get_engine_info(c.ctx)}
+	cInfo := C.gpgme_ctx_get_engine_info(c.ctx)
+	runtime.KeepAlive(c)
+	// NOTE: c must be live as long as we are accessing cInfo.
+	res := copyEngineInfo(cInfo)
+	runtime.KeepAlive(c) // for accesses to cInfo
+	return res
 }
 
 func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error {
@@ -357,19 +399,23 @@ func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error
 		chome = C.CString(homeDir)
 		defer C.free(unsafe.Pointer(chome))
 	}
-	return handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome))
+	err := handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome))
+	runtime.KeepAlive(c)
+	return err
 }
 
 func (c *Context) KeyListStart(pattern string, secretOnly bool) error {
 	cpattern := C.CString(pattern)
 	defer C.free(unsafe.Pointer(cpattern))
-	err := C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly))
-	return handleError(err)
+	err := handleError(C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly)))
+	runtime.KeepAlive(c)
+	return err
 }
 
 func (c *Context) KeyListNext() bool {
 	c.Key = newKey()
 	err := handleError(C.gpgme_op_keylist_next(c.ctx, &c.Key.k))
+	runtime.KeepAlive(c) // implies runtime.KeepAlive(c.Key)
 	if err != nil {
 		if e, ok := err.(Error); ok && e.Code() == ErrorEOF {
 			c.KeyError = nil
@@ -383,7 +429,9 @@ func (c *Context) KeyListNext() bool {
 }
 
 func (c *Context) KeyListEnd() error {
-	return handleError(C.gpgme_op_keylist_end(c.ctx))
+	err := handleError(C.gpgme_op_keylist_end(c.ctx))
+	runtime.KeepAlive(c)
+	return err
 }
 
 func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) {
@@ -391,7 +439,11 @@ func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) {
 	cfpr := C.CString(fingerprint)
 	defer C.free(unsafe.Pointer(cfpr))
 	err := handleError(C.gpgme_get_key(c.ctx, cfpr, &key.k, cbool(secret)))
-	if e, ok := err.(Error); key.k == nil && ok && e.Code() == ErrorEOF {
+	runtime.KeepAlive(c)
+	runtime.KeepAlive(key)
+	keyKIsNil := key.k == nil
+	runtime.KeepAlive(key)
+	if e, ok := err.(Error); keyKIsNil && ok && e.Code() == ErrorEOF {
 		return nil, fmt.Errorf("key %q not found", fingerprint)
 	}
 	if err != nil {
@@ -401,11 +453,19 @@ func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) {
 }
 
 func (c *Context) Decrypt(ciphertext, plaintext *Data) error {
-	return handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh))
+	err := handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh))
+	runtime.KeepAlive(c)
+	runtime.KeepAlive(ciphertext)
+	runtime.KeepAlive(plaintext)
+	return err
 }
 
 func (c *Context) DecryptVerify(ciphertext, plaintext *Data) error {
-	return handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh))
+	err := handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh))
+	runtime.KeepAlive(c)
+	runtime.KeepAlive(ciphertext)
+	runtime.KeepAlive(plaintext)
+	return err
 }
 
 type Signature struct {
@@ -432,10 +492,20 @@ func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, err
 		plainPtr = plain.dh
 	}
 	err := handleError(C.gpgme_op_verify(c.ctx, sig.dh, signedTextPtr, plainPtr))
+	runtime.KeepAlive(c)
+	runtime.KeepAlive(sig)
+	if signedText != nil {
+		runtime.KeepAlive(signedText)
+	}
+	if plain != nil {
+		runtime.KeepAlive(plain)
+	}
 	if err != nil {
 		return "", nil, err
 	}
 	res := C.gpgme_op_verify_result(c.ctx)
+	runtime.KeepAlive(c)
+	// NOTE: c must be live as long as we are accessing res.
 	sigs := []Signature{}
 	for s := res.signatures; s != nil; s = s.next {
 		sig := Signature{
@@ -455,7 +525,9 @@ func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, err
 		}
 		sigs = append(sigs, sig)
 	}
-	return C.GoString(res.file_name), sigs, nil
+	fileName := C.GoString(res.file_name)
+	runtime.KeepAlive(c) // for all accesses to res above
+	return fileName, sigs, nil
 }
 
 func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphertext *Data) error {
@@ -467,18 +539,116 @@ func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphe
 		*ptr = recipients[i].k
 	}
 	err := C.gpgme_op_encrypt(c.ctx, (*C.gpgme_key_t)(recp), C.gpgme_encrypt_flags_t(flags), plaintext.dh, ciphertext.dh)
+	runtime.KeepAlive(c)
+	runtime.KeepAlive(recipients)
+	runtime.KeepAlive(plaintext)
+	runtime.KeepAlive(ciphertext)
 	return handleError(err)
 }
 
 func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error {
 	C.gpgme_signers_clear(c.ctx)
+	runtime.KeepAlive(c)
 	for _, k := range signers {
-		if err := handleError(C.gpgme_signers_add(c.ctx, k.k)); err != nil {
+		err := handleError(C.gpgme_signers_add(c.ctx, k.k))
+		runtime.KeepAlive(c)
+		runtime.KeepAlive(k)
+		if err != nil {
 			C.gpgme_signers_clear(c.ctx)
+			runtime.KeepAlive(c)
 			return err
 		}
 	}
-	return handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode)))
+	err := handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode)))
+	runtime.KeepAlive(c)
+	runtime.KeepAlive(plain)
+	runtime.KeepAlive(sig)
+	return err
 }
 
+type AssuanDataCallback func(data []byte) error
+type AssuanInquireCallback func(name, args string) error
+type AssuanStatusCallback func(status, args string) error
+
+// AssuanSend sends a raw Assuan command to gpg-agent
+func (c *Context) AssuanSend(
+	cmd string,
+	data AssuanDataCallback,
+	inquiry AssuanInquireCallback,
+	status AssuanStatusCallback,
+) error {
+	var operr C.gpgme_error_t
+
+	dataPtr := callbackAdd(&data)
+	inquiryPtr := callbackAdd(&inquiry)
+	statusPtr := callbackAdd(&status)
+	cmdCStr := C.CString(cmd)
+	defer C.free(unsafe.Pointer(cmdCStr))
+	err := C.gogpgme_op_assuan_transact_ext(
+		c.ctx,
+		cmdCStr,
+		C.uintptr_t(dataPtr),
+		C.uintptr_t(inquiryPtr),
+		C.uintptr_t(statusPtr),
+		&operr,
+	)
+	runtime.KeepAlive(c)
+
+	if handleError(operr) != nil {
+		return handleError(operr)
+	}
+	return handleError(err)
+}
+
+//export gogpgme_assuan_data_callback
+func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, datalen C.size_t) C.gpgme_error_t {
+	c := callbackLookup(uintptr(handle)).(*AssuanDataCallback)
+	if *c == nil {
+		return 0
+	}
+	(*c)(C.GoBytes(data, C.int(datalen)))
+	return 0
+}
+
+//export gogpgme_assuan_inquiry_callback
+func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs *C.char) C.gpgme_error_t {
+	name := C.GoString(cName)
+	args := C.GoString(cArgs)
+	c := callbackLookup(uintptr(handle)).(*AssuanInquireCallback)
+	if *c == nil {
+		return 0
+	}
+	(*c)(name, args)
+	return 0
+}
+
+//export gogpgme_assuan_status_callback
+func gogpgme_assuan_status_callback(handle unsafe.Pointer, cStatus *C.char, cArgs *C.char) C.gpgme_error_t {
+	status := C.GoString(cStatus)
+	args := C.GoString(cArgs)
+	c := callbackLookup(uintptr(handle)).(*AssuanStatusCallback)
+	if *c == nil {
+		return 0
+	}
+	(*c)(status, args)
+	return 0
+}
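AssuanSend passes Go callbacks through C as opaque uintptr handles (callbackAdd / callbackLookup) because the cgo pointer-passing rules forbid handing C a Go pointer to memory that itself contains Go pointers. A simplified, assumed version of such a handle registry (the package's real implementation lives in a file not shown in this diff):

package main

import (
	"fmt"
	"sync"
)

var (
	cbMu    sync.Mutex
	cbStore = map[uintptr]interface{}{}
	cbNext  uintptr = 1
)

// callbackAdd registers v and returns a key that is safe to hand to C.
func callbackAdd(v interface{}) uintptr {
	cbMu.Lock()
	defer cbMu.Unlock()
	k := cbNext
	cbNext++
	cbStore[k] = v
	return k
}

// callbackLookup resolves a key coming back from C (e.g. in an //export hook).
func callbackLookup(k uintptr) interface{} {
	cbMu.Lock()
	defer cbMu.Unlock()
	return cbStore[k]
}

func main() {
	type DataCallback func([]byte) error
	cb := DataCallback(func(b []byte) error { fmt.Printf("data: %s\n", b); return nil })
	h := callbackAdd(&cb)
	// ...h crosses into C and comes back through an exported callback:
	(*callbackLookup(h).(*DataCallback))([]byte("OK"))
}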
+// ExportModeFlags defines how keys are exported from Export
+type ExportModeFlags uint
+
+const (
+	ExportModeExtern  ExportModeFlags = C.GPGME_EXPORT_MODE_EXTERN
+	ExportModeMinimal ExportModeFlags = C.GPGME_EXPORT_MODE_MINIMAL
+)
+
+func (c *Context) Export(pattern string, mode ExportModeFlags, data *Data) error {
+	pat := C.CString(pattern)
+	defer C.free(unsafe.Pointer(pat))
+	err := handleError(C.gpgme_op_export(c.ctx, pat, C.gpgme_export_mode_t(mode), data.dh))
+	runtime.KeepAlive(c)
+	runtime.KeepAlive(data)
+	return err
+}
+
 // ImportStatusFlags describes the type of ImportStatus.Status. The C API in gpgme.h simply uses "unsigned".
@@ -517,10 +687,14 @@ type ImportResult struct {
 
 func (c *Context) Import(keyData *Data) (*ImportResult, error) {
 	err := handleError(C.gpgme_op_import(c.ctx, keyData.dh))
+	runtime.KeepAlive(c)
+	runtime.KeepAlive(keyData)
 	if err != nil {
 		return nil, err
 	}
 	res := C.gpgme_op_import_result(c.ctx)
+	runtime.KeepAlive(c)
+	// NOTE: c must be live as long as we are accessing res.
 	imports := []ImportStatus{}
 	for s := res.imports; s != nil; s = s.next {
 		imports = append(imports, ImportStatus{
@@ -529,7 +703,7 @@ func (c *Context) Import(keyData *Data) (*ImportResult, error) {
 			Status:      ImportStatusFlags(s.status),
 		})
 	}
-	return &ImportResult{
+	importResult := &ImportResult{
 		Considered: int(res.considered),
 		NoUserID:   int(res.no_user_id),
 		Imported:   int(res.imported),
@@ -544,11 +718,13 @@ func (c *Context) Import(keyData *Data) (*ImportResult, error) {
 		SecretUnchanged: int(res.secret_unchanged),
 		NotImported:     int(res.not_imported),
 		Imports:         imports,
-	}, nil
+	}
+	runtime.KeepAlive(c) // for all accesses to res above
+	return importResult, nil
 }
 
 type Key struct {
-	k C.gpgme_key_t
+	k C.gpgme_key_t // WARNING: Call Runtime.KeepAlive(k) after ANY passing of k.k to C
 }
 
 func newKey() *Key {
@@ -559,85 +735,122 @@ func newKey() *Key {
 
 func (k *Key) Release() {
 	C.gpgme_key_release(k.k)
+	runtime.KeepAlive(k)
 	k.k = nil
 }
 
 func (k *Key) Revoked() bool {
-	return C.key_revoked(k.k) != 0
+	res := C.key_revoked(k.k) != 0
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) Expired() bool {
-	return C.key_expired(k.k) != 0
+	res := C.key_expired(k.k) != 0
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) Disabled() bool {
-	return C.key_disabled(k.k) != 0
+	res := C.key_disabled(k.k) != 0
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) Invalid() bool {
-	return C.key_invalid(k.k) != 0
+	res := C.key_invalid(k.k) != 0
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) CanEncrypt() bool {
-	return C.key_can_encrypt(k.k) != 0
+	res := C.key_can_encrypt(k.k) != 0
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) CanSign() bool {
-	return C.key_can_sign(k.k) != 0
+	res := C.key_can_sign(k.k) != 0
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) CanCertify() bool {
-	return C.key_can_certify(k.k) != 0
+	res := C.key_can_certify(k.k) != 0
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) Secret() bool {
-	return C.key_secret(k.k) != 0
+	res := C.key_secret(k.k) != 0
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) CanAuthenticate() bool {
-	return C.key_can_authenticate(k.k) != 0
+	res := C.key_can_authenticate(k.k) != 0
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) IsQualified() bool {
-	return C.key_is_qualified(k.k) != 0
+	res := C.key_is_qualified(k.k) != 0
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) Protocol() Protocol {
-	return Protocol(k.k.protocol)
+	res := Protocol(k.k.protocol)
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) IssuerSerial() string {
-	return C.GoString(k.k.issuer_serial)
+	res := C.GoString(k.k.issuer_serial)
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) IssuerName() string {
-	return C.GoString(k.k.issuer_name)
+	res := C.GoString(k.k.issuer_name)
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) ChainID() string {
-	return C.GoString(k.k.chain_id)
+	res := C.GoString(k.k.chain_id)
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) OwnerTrust() Validity {
-	return Validity(k.k.owner_trust)
+	res := Validity(k.k.owner_trust)
+	runtime.KeepAlive(k)
+	return res
 }
 
 func (k *Key) SubKeys() *SubKey {
-	if k.k.subkeys == nil {
+	subKeys := k.k.subkeys
+	runtime.KeepAlive(k)
+	if subKeys == nil {
 		return nil
 	}
-	return &SubKey{k: k.k.subkeys, parent: k}
+	return &SubKey{k: subKeys, parent: k} // The parent: k reference ensures subKeys remains valid
 }
 
 func (k *Key) UserIDs() *UserID {
-	if k.k.uids == nil {
+	uids := k.k.uids
+	runtime.KeepAlive(k)
+	if uids == nil {
 		return nil
 	}
-	return &UserID{u: k.k.uids, parent: k}
+	return &UserID{u: uids, parent: k} // The parent: k reference ensures uids remains valid
 }
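SubKeys and UserIDs above copy the C pointer out, call KeepAlive, and then store parent: k in the returned wrapper; as the comments note, that reference is what keeps the C-owned lists valid while a SubKey or UserID is still reachable. An illustrative-only sketch of the liveness-by-reference trick (the finalizer and types here are hypothetical; gpgme's Key is released explicitly, not by a finalizer):

package main

import (
	"fmt"
	"runtime"
)

type key struct{ names []string } // names stands in for C-owned subkey data

type subKey struct {
	name   string
	parent *key // while this subKey is reachable, so is its parent
}

func (k *key) firstSubKey() *subKey {
	return &subKey{name: k.names[0], parent: k}
}

func main() {
	k := &key{names: []string{"primary"}}
	runtime.SetFinalizer(k, func(*key) { fmt.Println("key released") })
	s := k.firstSubKey()
	runtime.GC() // k stays pinned through s.parent; the finalizer cannot run yet
	fmt.Println(s.name)
}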
 func (k *Key) KeyListMode() KeyListMode {
-	return KeyListMode(k.k.keylist_mode)
+	res := KeyListMode(k.k.keylist_mode)
+	runtime.KeepAlive(k)
+	return res
 }
 
 type SubKey struct {
@@ -737,12 +950,3 @@ func (u *UserID) Comment() string {
 func (u *UserID) Email() string {
 	return C.GoString(u.u.email)
 }
-
-// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG.
-// os.Unsetenv should be enough, but that only calls the underlying C library (which gpgme uses) if cgo is involved
-// - and cgo can't be used in tests. So, provide this helper for test initialization.
-func unsetenvGPGAgentInfo() {
-	v := C.CString("GPG_AGENT_INFO")
-	defer C.free(unsafe.Pointer(v))
-	C.unsetenv(v)
-}
18 vendor/github.com/mtrmac/gpgme/unset_agent_info.go generated vendored Normal file
@@ -0,0 +1,18 @@
+// +build !windows
+
+package gpgme
+
+// #include <stdlib.h>
+import "C"
+import (
+	"unsafe"
+)
+
+// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG.
+// os.Unsetenv should be enough, but that only calls the underlying C library (which gpgme uses) if cgo is involved
+// - and cgo can't be used in tests. So, provide this helper for test initialization.
+func unsetenvGPGAgentInfo() {
+	v := C.CString("GPG_AGENT_INFO")
+	defer C.free(unsafe.Pointer(v))
+	C.unsetenv(v)
+}
14 vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go generated vendored Normal file
@@ -0,0 +1,14 @@
+package gpgme
+
+// #include <stdlib.h>
+import "C"
+import (
+	"unsafe"
+)
+
+// unsetenv is not available in mingw
+func unsetenvGPGAgentInfo() {
+	v := C.CString("GPG_AGENT_INFO=")
+	defer C.free(unsafe.Pointer(v))
+	C.putenv(v)
+}
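The Windows variant works around mingw's missing unsetenv by calling putenv("GPG_AGENT_INFO="): on the Microsoft C runtime, assigning an empty value removes the variable from the C environment. A unix-only, cgo-based sketch of the removal these helpers perform (DEMO_VAR is illustrative):

package main

// #include <stdlib.h>
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	name := C.CString("DEMO_VAR")
	defer C.free(unsafe.Pointer(name))
	val := C.CString("1")
	defer C.free(unsafe.Pointer(val))

	C.setenv(name, val, 1)             // make the variable visible to C code
	fmt.Println(C.getenv(name) != nil) // true

	C.unsetenv(name)                   // the POSIX removal used in unset_agent_info.go
	fmt.Println(C.getenv(name) == nil) // true: gone from the C environment
}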
33 vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go generated vendored
@@ -7,7 +7,6 @@ import (
 	"bytes"
 	"crypto/rand"
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -18,6 +17,8 @@ import (
 	"strings"
 	"sync"
 	"syscall"
+
+	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
 )
 
@@ -253,6 +254,12 @@ func getSELinuxPolicyRoot() string {
 	return filepath.Join(selinuxDir, readConfig(selinuxTypeTag))
 }
 
+func isProcHandle(fh *os.File) (bool, error) {
+	var buf unix.Statfs_t
+	err := unix.Fstatfs(int(fh.Fd()), &buf)
+	return buf.Type == unix.PROC_SUPER_MAGIC, err
+}
+
 func readCon(fpath string) (string, error) {
 	if fpath == "" {
 		return "", ErrEmptyPath
@@ -264,6 +271,12 @@ func readCon(fpath string) (string, error) {
 	}
 	defer in.Close()
 
+	if ok, err := isProcHandle(in); err != nil {
+		return "", err
+	} else if !ok {
+		return "", fmt.Errorf("%s not on procfs", fpath)
+	}
+
 	var retval string
 	if _, err := fmt.Fscanf(in, "%s", &retval); err != nil {
 		return "", err
@@ -276,7 +289,10 @@ func SetFileLabel(fpath string, label string) error {
 	if fpath == "" {
 		return ErrEmptyPath
 	}
-	return lsetxattr(fpath, xattrNameSelinux, []byte(label), 0)
+	if err := lsetxattr(fpath, xattrNameSelinux, []byte(label), 0); err != nil {
+		return errors.Wrapf(err, "failed to set file label on %s", fpath)
+	}
+	return nil
 }
 
 // FileLabel returns the SELinux label for this path or returns an error.
@@ -346,12 +362,21 @@ func writeCon(fpath string, val string) error {
 	}
 	defer out.Close()
 
+	if ok, err := isProcHandle(out); err != nil {
+		return err
+	} else if !ok {
+		return fmt.Errorf("%s not on procfs", fpath)
+	}
+
 	if val != "" {
 		_, err = out.Write([]byte(val))
 	} else {
 		_, err = out.Write(nil)
 	}
-	return err
+	if err != nil {
+		return errors.Wrapf(err, "failed to set %s on procfs", fpath)
+	}
+	return nil
 }
 
 /*
@@ -394,7 +419,7 @@ func SetExecLabel(label string) error {
 }
 
 /*
-SetTaskLabel sets the SELinux label for the current thread, or an error.
+SetTaskLabel sets the SELinux label for the current thread, or an error.
 This requires the dyntransition permission.
 */
 func SetTaskLabel(label string) error {
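The new isProcHandle check defends readCon and writeCon against a crafted mount over /proc/self/attr: fstatfs on the already-open descriptor proves the file really lives on procfs before any label is read or written. The same check, extracted into a runnable Linux-only sketch (assumes golang.org/x/sys/unix is available):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func isProcHandle(fh *os.File) (bool, error) {
	var buf unix.Statfs_t
	err := unix.Fstatfs(int(fh.Fd()), &buf)
	// PROC_SUPER_MAGIC identifies procfs; anything bind-mounted over the
	// path would report a different filesystem magic number.
	return buf.Type == unix.PROC_SUPER_MAGIC, err
}

func main() {
	fh, err := os.Open("/proc/self/status")
	if err != nil {
		panic(err)
	}
	defer fh.Close()
	fmt.Println(isProcHandle(fh)) // true <nil> on a normal Linux system
}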
29 vendor/github.com/pkg/errors/cause.go generated vendored
@@ -1,29 +0,0 @@
-// +build !go1.13
-
-package errors
-
-// Cause recursively unwraps an error chain and returns the underlying cause of
-// the error, if possible. An error value has a cause if it implements the
-// following interface:
-//
-//     type causer interface {
-//            Cause() error
-//     }
-//
-// If the error does not implement Cause, the original error will
-// be returned. If the error is nil, nil will be returned without further
-// investigation.
-func Cause(err error) error {
-	type causer interface {
-		Cause() error
-	}
-
-	for err != nil {
-		cause, ok := err.(causer)
-		if !ok {
-			break
-		}
-		err = cause.Cause()
-	}
-	return err
-}
26 vendor/github.com/pkg/errors/errors.go generated vendored
@@ -260,3 +260,29 @@ func (w *withMessage) Format(s fmt.State, verb rune) {
 		io.WriteString(s, w.Error())
 	}
 }
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+//     type causer interface {
+//            Cause() error
+//     }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+	type causer interface {
+		Cause() error
+	}
+
+	for err != nil {
+		cause, ok := err.(causer)
+		if !ok {
+			break
+		}
+		err = cause.Cause()
+	}
+	return err
+}
33 vendor/github.com/pkg/errors/go113.go generated vendored
@@ -36,36 +36,3 @@ func As(err error, target interface{}) bool { return stderrors.As(err, target) }
 func Unwrap(err error) error {
 	return stderrors.Unwrap(err)
 }
-
-// Cause recursively unwraps an error chain and returns the underlying cause of
-// the error, if possible. There are two ways that an error value may provide a
-// cause. First, the error may implement the following interface:
-//
-//     type causer interface {
-//            Cause() error
-//     }
-//
-// Second, the error may return a non-nil value when passed as an argument to
-// the Unwrap function. This makes Cause forwards-compatible with Go 1.13 error
-// chains.
-//
-// If an error value satisfies both methods of unwrapping, Cause will use the
-// causer interface.
-//
-// If the error is nil, nil will be returned without further investigation.
-func Cause(err error) error {
-	type causer interface {
-		Cause() error
-	}
-
-	for err != nil {
-		if cause, ok := err.(causer); ok {
-			err = cause.Cause()
-		} else if unwrapped := Unwrap(err); unwrapped != nil {
-			err = unwrapped
-		} else {
-			break
-		}
-	}
-	return err
-}
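With v0.9.1 the Unwrap-aware Cause from go113.go is removed and the classic causer-only Cause in errors.go is restored, so Cause no longer follows chains built purely with Go 1.13's %w verb. A runnable illustration of the difference (assumes github.com/pkg/errors v0.9.1 on the module path):

package main

import (
	"errors"
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

func main() {
	base := errors.New("base")

	viaCauser := pkgerrors.Wrap(base, "context") // implements Cause() error
	viaW := fmt.Errorf("context: %w", base)      // implements Unwrap() error only

	fmt.Println(pkgerrors.Cause(viaCauser) == base) // true: the causer chain is followed
	fmt.Println(pkgerrors.Cause(viaW) == base)      // false: %w-only wrapping is not
}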
7 vendor/github.com/vbauerster/mpb/v4/bar_option.go generated vendored
@@ -10,11 +10,10 @@ import (
 // BarOption is a function option which changes the default behavior of a bar.
 type BarOption func(*bState)
 
-type mergeWrapper interface {
-	MergeUnwrap() []decor.Decorator
-}
-
 func (s *bState) addDecorators(dest *[]decor.Decorator, decorators ...decor.Decorator) {
+	type mergeWrapper interface {
+		MergeUnwrap() []decor.Decorator
+	}
 	for _, decorator := range decorators {
 		if mw, ok := decorator.(mergeWrapper); ok {
 			*dest = append(*dest, mw.MergeUnwrap()...)
27 vendor/github.com/vbauerster/mpb/v4/decor/decorator.go generated vendored
@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"time"
 	"unicode/utf8"
+
+	"github.com/acarl005/stripansi"
 )
 
 const (
@@ -117,25 +119,29 @@ var (
 // W represents width and C represents bit set of width related config.
 // A decorator should embed WC, to enable width synchronization.
 type WC struct {
-	W            int
-	C            int
-	dynFormat    string
-	staticFormat string
-	wsync        chan int
+	W         int
+	C         int
+	dynFormat string
+	wsync     chan int
 }
 
 // FormatMsg formats final message according to WC.W and WC.C.
 // Should be called by any Decorator implementation.
 func (wc *WC) FormatMsg(msg string) string {
+	var format string
+	runeCount := utf8.RuneCountInString(stripansi.Strip(msg))
+	ansiCount := utf8.RuneCountInString(msg) - runeCount
 	if (wc.C & DSyncWidth) != 0 {
-		wc.wsync <- utf8.RuneCountInString(msg)
-		max := <-wc.wsync
 		if (wc.C & DextraSpace) != 0 {
-			max++
+			runeCount++
 		}
-		return fmt.Sprintf(fmt.Sprintf(wc.dynFormat, max), msg)
+		wc.wsync <- runeCount
+		max := <-wc.wsync
+		format = fmt.Sprintf(wc.dynFormat, ansiCount+max)
+	} else {
+		format = fmt.Sprintf(wc.dynFormat, ansiCount+wc.W)
 	}
-	return fmt.Sprintf(wc.staticFormat, msg)
+	return fmt.Sprintf(format, msg)
 }
 
 // Init initializes width related config.
@@ -145,7 +151,6 @@ func (wc *WC) Init() WC {
 		wc.dynFormat += "-"
 	}
 	wc.dynFormat += "%ds"
-	wc.staticFormat = fmt.Sprintf(wc.dynFormat, wc.W)
 	if (wc.C & DSyncWidth) != 0 {
 		// it's deliberate choice to override wsync on each Init() call,
 		// this way globals like WCSyncSpace can be reused
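FormatMsg now measures the visible width of a message with stripansi.Strip and widens the printf padding by the number of escape-sequence runes, so colored decorator output is padded to its visible length rather than its raw length. A small demonstration of the arithmetic (assumes github.com/acarl005/stripansi):

package main

import (
	"fmt"
	"unicode/utf8"

	"github.com/acarl005/stripansi"
)

func main() {
	colored := "\x1b[31mETA 9s\x1b[0m" // "ETA 9s" wrapped in red escape codes

	raw := utf8.RuneCountInString(colored)                      // 15, escapes included
	visible := utf8.RuneCountInString(stripansi.Strip(colored)) // 6, what the user sees
	ansi := raw - visible                                       // 9 runes of overhead

	// Pad to 10 visible columns by widening the format width by the ANSI
	// overhead, the same ansiCount+max trick as in WC.FormatMsg above.
	fmt.Printf("[%*s]\n", 10+ansi, colored)
}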
49 vendor/github.com/vbauerster/mpb/v4/decor/merge.go generated vendored
@@ -2,6 +2,7 @@ package decor
 
 import (
 	"fmt"
+	"strings"
 	"unicode/utf8"
 )
 
@@ -28,10 +29,7 @@ func Merge(decorator Decorator, placeholders ...WC) Decorator {
 		if (wc.C & DSyncWidth) == 0 {
 			return decorator
 		}
-		md.placeHolders[i] = &placeHolderDecorator{
-			WC:  wc.Init(),
-			wch: make(chan int),
-		}
+		md.placeHolders[i] = &placeHolderDecorator{wc.Init()}
 	}
 	return md
 }
@@ -69,29 +67,40 @@ func (d *mergeDecorator) Base() Decorator {
 func (d *mergeDecorator) Decor(st *Statistics) string {
 	msg := d.Decorator.Decor(st)
 	msgLen := utf8.RuneCountInString(msg)
-
-	var space int
-	for _, ph := range d.placeHolders {
-		space += <-ph.wch
-	}
-
-	d.wc.wsync <- msgLen - space
-
-	max := <-d.wc.wsync
 	if (d.wc.C & DextraSpace) != 0 {
-		max++
 		msgLen++
 	}
-	return fmt.Sprintf(fmt.Sprintf(d.wc.dynFormat, max+space), msg)
+
+	var total int
+	max := utf8.RuneCountInString(d.placeHolders[0].FormatMsg(""))
+	total += max
+	pw := (msgLen - max) / len(d.placeHolders)
+	rem := (msgLen - max) % len(d.placeHolders)
+
+	var diff int
+	for i := 1; i < len(d.placeHolders); i++ {
+		ph := d.placeHolders[i]
+		width := pw - diff
+		if (ph.WC.C & DextraSpace) != 0 {
+			width--
+			if width < 0 {
+				width = 0
+			}
+		}
+		max = utf8.RuneCountInString(ph.FormatMsg(strings.Repeat(" ", width)))
+		total += max
+		diff = max - pw
+	}
+
+	d.wc.wsync <- pw + rem
+	max = <-d.wc.wsync
+	return fmt.Sprintf(fmt.Sprintf(d.wc.dynFormat, max+total), msg)
 }
 
 type placeHolderDecorator struct {
 	WC
-	wch chan int
 }
 
-func (d *placeHolderDecorator) Decor(st *Statistics) string {
-	go func() {
-		d.wch <- utf8.RuneCountInString(d.FormatMsg(""))
-	}()
+func (d *placeHolderDecorator) Decor(_ *Statistics) string {
 	return ""
 }
1 vendor/github.com/vbauerster/mpb/v4/go.mod generated vendored
@@ -2,6 +2,7 @@ module github.com/vbauerster/mpb/v4
 
 require (
 	github.com/VividCortex/ewma v1.1.1
+	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
 	golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708
 	golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056 // indirect
 )

2 vendor/github.com/vbauerster/mpb/v4/go.sum generated vendored
@@ -1,5 +1,7 @@
 github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
 github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4=
 golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=

8 vendor/github.com/vbauerster/mpb/v4/proxyreader.go generated vendored
@@ -18,9 +18,7 @@ func (prox *proxyReader) Read(p []byte) (n int, err error) {
 		prox.iT = time.Now()
 	}
 	if err == io.EOF {
-		go func() {
-			prox.bar.SetTotal(0, true)
-		}()
+		go prox.bar.SetTotal(0, true)
 	}
 	return
 }
@@ -37,9 +35,7 @@ func (prox *proxyWriterTo) WriteTo(w io.Writer) (n int64, err error) {
 		prox.iT = time.Now()
 	}
 	if err == io.EOF {
-		go func() {
-			prox.bar.SetTotal(0, true)
-		}()
+		go prox.bar.SetTotal(0, true)
 	}
 	return
 }
19 vendor/modules.txt vendored
@@ -26,6 +26,8 @@ github.com/Microsoft/hcsshim/internal/wclayer
 github.com/Microsoft/hcsshim/osversion
 # github.com/VividCortex/ewma v1.1.1
 github.com/VividCortex/ewma
+# github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
+github.com/acarl005/stripansi
 # github.com/beorn7/perks v1.0.1
 github.com/beorn7/perks/quantile
 # github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f
@@ -36,7 +38,7 @@ github.com/containerd/containerd/log
 github.com/containerd/containerd/platforms
 # github.com/containers/common v0.0.7
 github.com/containers/common/pkg/unshare
-# github.com/containers/image/v5 v5.1.0
+# github.com/containers/image/v5 v5.2.0
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory
 github.com/containers/image/v5/directory/explicitfilepath
@@ -47,6 +49,7 @@ github.com/containers/image/v5/docker/policyconfiguration
 github.com/containers/image/v5/docker/reference
 github.com/containers/image/v5/docker/tarfile
 github.com/containers/image/v5/image
+github.com/containers/image/v5/internal/iolimits
 github.com/containers/image/v5/internal/pkg/keyctl
 github.com/containers/image/v5/internal/tmpdir
 github.com/containers/image/v5/manifest
@@ -87,7 +90,7 @@ github.com/containers/ocicrypt/keywrap/pgp
 github.com/containers/ocicrypt/keywrap/pkcs7
 github.com/containers/ocicrypt/spec
 github.com/containers/ocicrypt/utils
-# github.com/containers/storage v1.15.5
+# github.com/containers/storage v1.15.8
 github.com/containers/storage
 github.com/containers/storage/drivers
 github.com/containers/storage/drivers/aufs
@@ -198,7 +201,7 @@ github.com/gorilla/mux
 github.com/hashicorp/golang-lru/simplelru
 # github.com/imdario/mergo v0.3.8
 github.com/imdario/mergo
-# github.com/klauspost/compress v1.9.4
+# github.com/klauspost/compress v1.9.8
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -213,13 +216,13 @@ github.com/konsorten/go-windows-terminal-sequences
 github.com/kr/pretty
 # github.com/kr/text v0.1.0
 github.com/kr/text
-# github.com/mattn/go-shellwords v1.0.6
+# github.com/mattn/go-shellwords v1.0.9
 github.com/mattn/go-shellwords
 # github.com/matttproud/golang_protobuf_extensions v1.0.1
 github.com/matttproud/golang_protobuf_extensions/pbutil
 # github.com/mistifyio/go-zfs v2.1.1+incompatible
 github.com/mistifyio/go-zfs
-# github.com/mtrmac/gpgme v0.0.0-20170102180018-b2432428689c
+# github.com/mtrmac/gpgme v0.1.1
 github.com/mtrmac/gpgme
 # github.com/opencontainers/go-digest v1.0.0-rc1
 github.com/opencontainers/go-digest
@@ -234,13 +237,13 @@ github.com/opencontainers/runc/libcontainer/system
 github.com/opencontainers/runc/libcontainer/user
 # github.com/opencontainers/runtime-spec v1.0.0
 github.com/opencontainers/runtime-spec/specs-go
-# github.com/opencontainers/selinux v1.3.0
+# github.com/opencontainers/selinux v1.3.1
 github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
 # github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913
 github.com/ostreedev/ostree-go/pkg/glibobject
 github.com/ostreedev/ostree-go/pkg/otbuiltin
-# github.com/pkg/errors v0.9.0
+# github.com/pkg/errors v0.9.1
 github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 github.com/pmezard/go-difflib/difflib
@@ -287,7 +290,7 @@ github.com/urfave/cli
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
 github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v4 v4.11.1
+# github.com/vbauerster/mpb/v4 v4.11.2
 github.com/vbauerster/mpb/v4
 github.com/vbauerster/mpb/v4/cwriter
 github.com/vbauerster/mpb/v4/decor