Merge pull request #732 from harche/pr_branch

Image encryption/decryption support in skopeo

Commit ce6ec7720e

@@ -11,6 +11,9 @@ import (
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/transports"
	"github.com/containers/image/v5/transports/alltransports"

	encconfig "github.com/containers/ocicrypt/config"
	enchelpers "github.com/containers/ocicrypt/helpers"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/urfave/cli"
)
@@ -25,6 +28,8 @@ type copyOptions struct {
	format         optionalString  // Force conversion of the image to a specified format
	quiet          bool            // Suppress output information when copying images
	all            bool            // Copy all of the images if the source is a list
	encryptionKeys cli.StringSlice // Keys needed to encrypt the image
	decryptionKeys cli.StringSlice // Keys needed to decrypt the image
}

func copyCmd(global *globalOptions) cli.Command {
@@ -82,6 +87,16 @@ func copyCmd(global *globalOptions) cli.Command {
			Usage: "`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)",
			Value: newOptionalStringValue(&opts.format),
		},
		cli.StringSliceFlag{
			Name:  "encryption-key",
			Usage: "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)",
			Value: &opts.encryptionKeys,
		},
		cli.StringSliceFlag{
			Name:  "decryption-key",
			Usage: "*Experimental* key needed to decrypt the image",
			Value: &opts.decryptionKeys,
		},
	}, sharedFlags...), srcFlags...), destFlags...),
	}
}
@@ -156,6 +171,38 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
	if opts.all {
		imageListSelection = copy.CopyAllImages
	}

	if len(opts.encryptionKeys.Value()) > 0 && len(opts.decryptionKeys.Value()) > 0 {
		return fmt.Errorf("--encryption-key and --decryption-key cannot be specified together")
	}

	var encLayers *[]int
	var encConfig *encconfig.EncryptConfig
	var decConfig *encconfig.DecryptConfig

	if len(opts.encryptionKeys.Value()) > 0 {
		// encryption
		encLayers = &[]int{}
		encryptionKeys := opts.encryptionKeys.Value()
		ecc, err := enchelpers.CreateCryptoConfig(encryptionKeys, []string{})
		if err != nil {
			return err
		}
		cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
		encConfig = cc.EncryptConfig
	}

	if len(opts.decryptionKeys.Value()) > 0 {
		// decryption
		decryptionKeys := opts.decryptionKeys.Value()
		dcc, err := enchelpers.CreateCryptoConfig([]string{}, decryptionKeys)
		if err != nil {
			return err
		}
		cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
		decConfig = cc.DecryptConfig
	}

	_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
		RemoveSignatures:      opts.removeSignatures,
		SignBy:                opts.signByFingerprint,
@@ -164,6 +211,9 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
		DestinationCtx:        destinationCtx,
		ForceManifestMIMEType: manifestType,
		ImageListSelection:    imageListSelection,
		OciDecryptConfig:      decConfig,
		OciEncryptLayers:      encLayers,
		OciEncryptConfig:      encConfig,
	})
	return err
}
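
For readers skimming the hunk above, here is a minimal standalone sketch of the ocicrypt helper flow that run() relies on. It mirrors the calls in the diff and assumes only the containers/ocicrypt packages pinned in go.mod below; the key paths are placeholders, not real files.

```go
package main

import (
	"fmt"
	"log"

	encconfig "github.com/containers/ocicrypt/config"
	enchelpers "github.com/containers/ocicrypt/helpers"
)

func main() {
	// --encryption-key values are protocol-prefixed references; the helper
	// turns them into a CryptoConfig (the path below is a placeholder).
	ecc, err := enchelpers.CreateCryptoConfig([]string{"jwe:/path/to/public.key"}, []string{})
	if err != nil {
		log.Fatal(err)
	}
	cc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{ecc})
	fmt.Println("encrypt config built:", cc.EncryptConfig != nil)

	// --decryption-key values are plain key files and go in the second
	// argument instead, yielding a DecryptConfig.
	dcc, err := enchelpers.CreateCryptoConfig([]string{}, []string{"/path/to/private.key"})
	if err != nil {
		log.Fatal(err)
	}
	dc := encconfig.CombineCryptoConfigs([]encconfig.CryptoConfig{dcc})
	fmt.Println("decrypt config built:", dc.DecryptConfig != nil)
}
```

The resulting EncryptConfig/DecryptConfig pointers are exactly what the new copy.Options fields (OciEncryptConfig, OciDecryptConfig) expect.
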
@@ -36,6 +36,10 @@ If the authorization state is not found there, $HOME/.docker/config.json is checked

**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_

**--encryption-key** _Key_ a reference prefixed with the encryption protocol to use. The supported protocols are JWE, PGP and PKCS7. For instance, jwe:/path/to/key.pem or pgp:admin@example.com or pkcs7:/path/to/x509-file. This feature is still *experimental*.

**--decryption-key** _Key_ a reference required to perform decryption of container images. This should point to files which represent keys and/or certificates that can be used for decryption. Decryption will be tried with all keys. This feature is still *experimental*.

**--src-creds** _username[:password]_ for accessing the source registry

**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)
@@ -84,6 +88,30 @@ To copy and sign an image:
# skopeo copy --sign-by dev@example.com container-storage:example/busybox:streaming docker://example/busybox:gold
```

To encrypt an image:
```sh
skopeo copy docker://docker.io/library/nginx:latest oci:local_nginx:latest

openssl genrsa -out private.key 1024
openssl rsa -in private.key -pubout > public.key

skopeo copy --encryption-key jwe:./public.key oci:local_nginx:latest oci:try-encrypt:encrypted
```

To decrypt an image:
```sh
skopeo copy --decryption-key ./private.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
```

To copy an encrypted image without decryption:
```sh
skopeo copy oci:try-encrypt:encrypted oci:try-encrypt-copy:encrypted
```

To decrypt an image that requires more than one key:
```sh
skopeo copy --decryption-key ./private1.key --decryption-key ./private2.key --decryption-key ./private3.key oci:try-encrypt:encrypted oci:try-decrypt:decrypted
```
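
The examples above all use the JWE protocol. Per the **--encryption-key** description, PGP and PKCS7 references are also accepted; a hypothetical PGP variant (it assumes a key for admin@example.com exists in the local GPG keyring) would look like:

```sh
skopeo copy --encryption-key pgp:admin@example.com oci:local_nginx:latest oci:try-encrypt-pgp:encrypted
```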

## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)
go.mod (5 changes)

@@ -4,8 +4,9 @@ go 1.12

require (
	github.com/containers/buildah v1.11.5
	github.com/containers/image/v5 v5.0.0
	github.com/containers/storage v1.13.5
	github.com/containers/image/v5 v5.0.1-0.20191126085826-502848a1358b
	github.com/containers/ocicrypt v0.0.0-20190930154801-b87a4a69c741
	github.com/containers/storage v1.14.0
	github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23
	github.com/dsnet/compress v0.0.1 // indirect
	github.com/go-check/check v0.0.0-20180628173108-788fd7840127
go.sum (43 changes)

@@ -1,4 +1,5 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
@@ -33,11 +34,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f h1:tSNMc+rJDfmYntojat8lljbt1mgKNpTxUZJsSzJ9Y1s=
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.3.0 h1:xjvXQWABwS2uiv3TWgQt5Uth60Gu86LTGZXMJkjc7rY=
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/continuity v0.0.0-20180216233310-d8fb8589b0e8/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc h1:TP+534wVlf61smEIq1nwLLAjQVEK2EADoW3CX9AuT+8=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
@@ -46,17 +47,20 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containers/buildah v1.11.4 h1:hHh3Tclf5LL1bunlZxYJjwMi5DmFBug8Q0HpuMH8Lo0=
github.com/containers/buildah v1.11.4/go.mod h1:Igrk75FAxLnzDaHUbtpWB8pwL+Bv+cnakWMvqAXW2v8=
github.com/containers/buildah v1.11.5 h1:bVpkaVlvA7G+1mBDAcX6yf7jNZJ/ZrrAHDt4WCx2i8E=
github.com/containers/buildah v1.11.5/go.mod h1:bfNPqLO8GnI0qMPmI6MHSpQNK+a3TH9syYsRg+iqhRw=
github.com/containers/image/v5 v5.0.0 h1:arnXgbt1ucsC/ndtSpiQY87rA0UjhF+/xQnPzqdBDn4=
github.com/containers/image/v5 v5.0.0/go.mod h1:MgiLzCfIeo8lrHi+4Lb8HP+rh513sm0Mlk6RrhjFOLY=
github.com/containers/image/v5 v5.0.1-0.20191126085826-502848a1358b h1:xUXa/0+KWQY1PAGuvfqXh1U18qTRYvHzhiys/BpZG4c=
github.com/containers/image/v5 v5.0.1-0.20191126085826-502848a1358b/go.mod h1:NNGElTgKPvARdKeiJIE/IF+ddvHmNwaLPBupsoZI8eI=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v0.0.0-20190930154801-b87a4a69c741 h1:8tQkOcednLJtUcZgK7sPglscXtxvMOnFOa6wd09VWLM=
github.com/containers/ocicrypt v0.0.0-20190930154801-b87a4a69c741/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/storage v1.13.4/go.mod h1:6D8nK2sU9V7nEmAraINRs88ZEscM5C5DK+8Npp27GeA=
github.com/containers/storage v1.13.5 h1:/SUzGeOP2HDijpF7Yur21Ch6WTZC1BNeZF917CWcp5c=
github.com/containers/storage v1.13.5/go.mod h1:HELz8Sn+UVbPaUZMI8RvIG9doD4y4z6Gtg4k7xdd2ZY=
github.com/containers/storage v1.14.0 h1:LbX6WZaDmkXt4DT4xWIg3YXAWd6oA4K9Fi6/KG1xt84=
github.com/containers/storage v1.14.0/go.mod h1:qGPsti/qC1xxX+xcpHfiTMT+8ThVE2Jf83wFHHqkDAY=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -75,13 +79,13 @@ github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BU
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v0.0.0-20180522102801-da99009bbb11/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce h1:H3csZuxZESJeeEiOxq4YXPNmLFbjl7u2qVBrAAGX/sA=
github.com/docker/docker v1.4.2-0.20190927142053-ada3c14355ce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23 h1:oqgGT9O61YAYvI41EBsLePOr+LE6roB0xY4gpkZuFSE=
github.com/docker/docker v1.4.2-0.20191101170500-ac7306503d23/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.1 h1:Dq4iIfcM7cNtddhLVWe9h4QDjsi4OER3Z8voPu/I52g=
github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.0.0-20180212134524-7beb39f0b969/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
@@ -91,8 +95,10 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316 h1:moehPjPiGUaWdwgOl92xRyFHJyaqXDHcCyW9M6nmCK4=
github.com/docker/libnetwork v0.8.0-dev.2.0.20190625141545-5a177b73e316/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 h1:UhxFibDNY/bfvqU5CAUmr9zpesgbU6SWc8/B4mflAE4=
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
@@ -101,9 +107,10 @@ github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsouza/go-dockerclient v1.5.0/go.mod h1:AqZZK/zFO3phxYxlTsAaeAMSdQ9mgHuhy+bjN034Qds=
github.com/fsouza/go-dockerclient v1.6.0 h1:f7j+AX94143JL1H3TiqSMkM4EcLDI0De1qD4GGn3Hig=
github.com/fsouza/go-dockerclient v1.6.0/go.mod h1:YWwtNPuL4XTX1SKJQk86cWPmmqwx+4np9qfPbb+znGc=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v0.0.0-20161207003320-04f313413ffd/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
@@ -124,17 +131,18 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
@@ -161,6 +169,8 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111 h1:NAAiV9ass6VReWFjuxqrMIq12WKlSULI6Gs3PxQghLA=
github.com/ishidawataru/sctp v0.0.0-20180918013207-6e2cb1366111/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8=
@@ -173,8 +183,9 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.8.1 h1:oygt2ychZFHOB6M9gUgajzgKrwRgHbGC77NwA4COVgI=
github.com/klauspost/compress v1.8.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@@ -189,6 +200,8 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lumjjb/image/v5 v5.0.0-20191125184705-a298da5c535d h1:H050B1puFO2G3eZP0is6JjpH7OZf2A+2QmtqpTk4Gd0=
github.com/lumjjb/image/v5 v5.0.0-20191125184705-a298da5c535d/go.mod h1:NNGElTgKPvARdKeiJIE/IF+ddvHmNwaLPBupsoZI8eI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
@@ -218,7 +231,6 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -236,6 +248,8 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runc v1.0.0-rc8/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158 h1:/A6bAdnSZoTQmKml3MdHAnSEPnBAQeigNBl4sxnfaaQ=
github.com/opencontainers/runc v1.0.0-rc8.0.20190827142921-dd075602f158/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v0.1.2-0.20190618234442-a950415649c7/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.0 h1:O6L965K88AilqnxeYPks/75HLpp4IG+FjeSCI3cVdRg=
@@ -335,6 +349,7 @@ github.com/xeipuuv/gojsonschema v0.0.0-20190816131739-be0936907f66/go.mod h1:anY
github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg=
github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -343,6 +358,7 @@ go4.org v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ=
golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -391,6 +407,7 @@ golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fq
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0 h1:xQwXv67TxFo9nC1GJFyab5eq/5B590r6RlnL/G8Sz7w=
golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -404,8 +421,6 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -416,15 +431,19 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1,6 +1,9 @@
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/json"
	"fmt"
	"io/ioutil"
@@ -534,6 +537,108 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
	c.Assert(err, check.IsNil)
}

func (s *CopySuite) TestCopyEncryption(c *check.C) {

	originalImageDir, err := ioutil.TempDir("", "copy-1")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(originalImageDir)
	encryptedImgDir, err := ioutil.TempDir("", "copy-2")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(encryptedImgDir)
	decryptedImgDir, err := ioutil.TempDir("", "copy-3")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(decryptedImgDir)
	keysDir, err := ioutil.TempDir("", "copy-4")
	c.Assert(err, check.IsNil)
	defer os.RemoveAll(keysDir)
	undecryptedImgDir, err := ioutil.TempDir("", "copy-5")
	defer os.RemoveAll(undecryptedImgDir)

	// Create RSA key pair
	privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
	c.Assert(err, check.IsNil)
	publicKey := &privateKey.PublicKey
	privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
	publicKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey)
	c.Assert(err, check.IsNil)
	err = ioutil.WriteFile(keysDir+"/private.key", privateKeyBytes, 0644)
	c.Assert(err, check.IsNil)
	err = ioutil.WriteFile(keysDir+"/public.key", publicKeyBytes, 0644)
	c.Assert(err, check.IsNil)

	// We can either perform encryption or decryption on the image.
	// This is why the user should not be able to specify both encryption and decryption
	// during copy at the same time.
	assertSkopeoFails(c, ".*--encryption-key and --decryption-key cannot be specified together.*",
		"copy", "--encryption-key", "jwe:"+keysDir+"/public.key", "--decryption-key", keysDir+"/private.key",
		"oci:"+encryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted")
	assertSkopeoFails(c, ".*--encryption-key and --decryption-key cannot be specified together.*",
		"copy", "--decryption-key", keysDir+"/private.key", "--encryption-key", "jwe:"+keysDir+"/public.key",
		"oci:"+encryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted")

	// Copy a standard busybox image locally
	assertSkopeoSucceeds(c, "", "copy", "docker://busybox", "oci:"+originalImageDir+":latest")

	// Encrypt the image
	assertSkopeoSucceeds(c, "", "copy", "--encryption-key",
		"jwe:"+keysDir+"/public.key", "oci:"+originalImageDir+":latest", "oci:"+encryptedImgDir+":encrypted")

	// An attempt to decrypt an encrypted image without a valid private key should fail
	invalidPrivateKey, err := rsa.GenerateKey(rand.Reader, 4096)
	c.Assert(err, check.IsNil)
	invalidPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(invalidPrivateKey)
	err = ioutil.WriteFile(keysDir+"/invalid_private.key", invalidPrivateKeyBytes, 0644)
	c.Assert(err, check.IsNil)
	assertSkopeoFails(c, ".*no suitable key unwrapper found or none of the private keys could be used for decryption.*",
		"copy", "--decryption-key", keysDir+"/invalid_private.key",
		"oci:"+encryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted")

	// Copy encrypted image without decrypting it
	assertSkopeoSucceeds(c, "", "copy", "oci:"+encryptedImgDir+":encrypted", "oci:"+undecryptedImgDir+":encrypted")
	// Original busybox image has gzipped layers. But encrypted busybox layers should
	// not be of gzip type
	matchLayerBlobBinaryType(c, undecryptedImgDir+"/blobs/sha256", "application/x-gzip", false)

	// Decrypt the image
	assertSkopeoSucceeds(c, "", "copy", "--decryption-key", keysDir+"/private.key",
		"oci:"+undecryptedImgDir+":encrypted", "oci:"+decryptedImgDir+":decrypted")

	// After successful decryption we should find the gzipped layer from the
	// busybox image
	matchLayerBlobBinaryType(c, decryptedImgDir+"/blobs/sha256", "application/x-gzip", true)
}

func matchLayerBlobBinaryType(c *check.C, ociImageDirPath string, contentType string, shouldMatch bool) {
	files, err := ioutil.ReadDir(ociImageDirPath)
	c.Assert(err, check.IsNil)
	blobFound := false
	for _, f := range files {
		fileContent, err := os.Open(ociImageDirPath + "/" + f.Name())
		c.Assert(err, check.IsNil)
		layerContentType, err := getFileContentType(fileContent)
		c.Assert(err, check.IsNil)

		if layerContentType == contentType {
			blobFound = true
			break
		}
	}

	c.Assert(blobFound, check.Equals, shouldMatch)

}

func getFileContentType(out *os.File) (string, error) {
	buffer := make([]byte, 512)
	_, err := out.Read(buffer)
	if err != nil {
		return "", err
	}
	contentType := http.DetectContentType(buffer)

	return contentType, nil
}

// Check whether dir: images in dir1 and dir2 are equal, ignoring schema1 signatures.
func assertDirImagesAreEqual(c *check.C, dir1, dir2 string) {
	// The manifests may have different JWS signatures; so, compare the manifests by digests, which
vendor/github.com/containerd/containerd/log/context.go (90 lines, generated, vendored, new file)

@@ -0,0 +1,90 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package log

import (
	"context"
	"sync/atomic"

	"github.com/sirupsen/logrus"
)

var (
	// G is an alias for GetLogger.
	//
	// We may want to define this locally to a package to get package tagged log
	// messages.
	G = GetLogger

	// L is an alias for the standard logger.
	L = logrus.NewEntry(logrus.StandardLogger())
)

type (
	loggerKey struct{}
)

// TraceLevel is the log level for tracing. Trace level is lower than debug level,
// and is usually used to trace detailed behavior of the program.
const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1))

// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
// ensure the formatted time is always the same number of characters.
const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"

// ParseLevel takes a string level and returns the Logrus log level constant.
// It supports trace level.
func ParseLevel(lvl string) (logrus.Level, error) {
	if lvl == "trace" {
		return TraceLevel, nil
	}
	return logrus.ParseLevel(lvl)
}

// WithLogger returns a new context with the provided logger. Use in
// combination with logger.WithField(s) for great effect.
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
	return context.WithValue(ctx, loggerKey{}, logger)
}

// GetLogger retrieves the current logger from the context. If no logger is
// available, the default logger is returned.
func GetLogger(ctx context.Context) *logrus.Entry {
	logger := ctx.Value(loggerKey{})

	if logger == nil {
		return L
	}

	return logger.(*logrus.Entry)
}

// Trace logs a message at level Trace with the log entry passed-in.
func Trace(e *logrus.Entry, args ...interface{}) {
	level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
	if level >= TraceLevel {
		e.Debug(args...)
	}
}

// Tracef logs a message at level Trace with the log entry passed-in.
func Tracef(e *logrus.Entry, format string, args ...interface{}) {
	level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
	if level >= TraceLevel {
		e.Debugf(format, args...)
	}
}
vendor/github.com/containerd/containerd/platforms/compare.go (229 lines, generated, vendored, new file)

@@ -0,0 +1,229 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package platforms

import specs "github.com/opencontainers/image-spec/specs-go/v1"

// MatchComparer is able to match and compare platforms to
// filter and sort platforms.
type MatchComparer interface {
	Matcher

	Less(specs.Platform, specs.Platform) bool
}

// Only returns a match comparer for a single platform
// using default resolution logic for the platform.
//
// For ARMv8, will also match ARMv7, ARMv6 and ARMv5 (for 32bit runtimes)
// For ARMv7, will also match ARMv6 and ARMv5
// For ARMv6, will also match ARMv5
func Only(platform specs.Platform) MatchComparer {
	platform = Normalize(platform)
	if platform.Architecture == "arm" {
		if platform.Variant == "v8" {
			return orderedPlatformComparer{
				matchers: []Matcher{
					&matcher{
						Platform: platform,
					},
					&matcher{
						Platform: specs.Platform{
							Architecture: platform.Architecture,
							OS:           platform.OS,
							OSVersion:    platform.OSVersion,
							OSFeatures:   platform.OSFeatures,
							Variant:      "v7",
						},
					},
					&matcher{
						Platform: specs.Platform{
							Architecture: platform.Architecture,
							OS:           platform.OS,
							OSVersion:    platform.OSVersion,
							OSFeatures:   platform.OSFeatures,
							Variant:      "v6",
						},
					},
					&matcher{
						Platform: specs.Platform{
							Architecture: platform.Architecture,
							OS:           platform.OS,
							OSVersion:    platform.OSVersion,
							OSFeatures:   platform.OSFeatures,
							Variant:      "v5",
						},
					},
				},
			}
		}
		if platform.Variant == "v7" {
			return orderedPlatformComparer{
				matchers: []Matcher{
					&matcher{
						Platform: platform,
					},
					&matcher{
						Platform: specs.Platform{
							Architecture: platform.Architecture,
							OS:           platform.OS,
							OSVersion:    platform.OSVersion,
							OSFeatures:   platform.OSFeatures,
							Variant:      "v6",
						},
					},
					&matcher{
						Platform: specs.Platform{
							Architecture: platform.Architecture,
							OS:           platform.OS,
							OSVersion:    platform.OSVersion,
							OSFeatures:   platform.OSFeatures,
							Variant:      "v5",
						},
					},
				},
			}
		}
		if platform.Variant == "v6" {
			return orderedPlatformComparer{
				matchers: []Matcher{
					&matcher{
						Platform: platform,
					},
					&matcher{
						Platform: specs.Platform{
							Architecture: platform.Architecture,
							OS:           platform.OS,
							OSVersion:    platform.OSVersion,
							OSFeatures:   platform.OSFeatures,
							Variant:      "v5",
						},
					},
				},
			}
		}
	}

	return singlePlatformComparer{
		Matcher: &matcher{
			Platform: platform,
		},
	}
}

// Ordered returns a platform MatchComparer which matches any of the platforms
// but orders them in order they are provided.
func Ordered(platforms ...specs.Platform) MatchComparer {
	matchers := make([]Matcher, len(platforms))
	for i := range platforms {
		matchers[i] = NewMatcher(platforms[i])
	}
	return orderedPlatformComparer{
		matchers: matchers,
	}
}

// Any returns a platform MatchComparer which matches any of the platforms
// with no preference for ordering.
func Any(platforms ...specs.Platform) MatchComparer {
	matchers := make([]Matcher, len(platforms))
	for i := range platforms {
		matchers[i] = NewMatcher(platforms[i])
	}
	return anyPlatformComparer{
		matchers: matchers,
	}
}

// All is a platform MatchComparer which matches all platforms
// with preference for ordering.
var All MatchComparer = allPlatformComparer{}

type singlePlatformComparer struct {
	Matcher
}

func (c singlePlatformComparer) Less(p1, p2 specs.Platform) bool {
	return c.Match(p1) && !c.Match(p2)
}

type orderedPlatformComparer struct {
	matchers []Matcher
}

func (c orderedPlatformComparer) Match(platform specs.Platform) bool {
	for _, m := range c.matchers {
		if m.Match(platform) {
			return true
		}
	}
	return false
}

func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool {
	for _, m := range c.matchers {
		p1m := m.Match(p1)
		p2m := m.Match(p2)
		if p1m && !p2m {
			return true
		}
		if p1m || p2m {
			return false
		}
	}
	return false
}

type anyPlatformComparer struct {
	matchers []Matcher
}

func (c anyPlatformComparer) Match(platform specs.Platform) bool {
	for _, m := range c.matchers {
		if m.Match(platform) {
			return true
		}
	}
	return false
}

func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool {
	var p1m, p2m bool
	for _, m := range c.matchers {
		if !p1m && m.Match(p1) {
			p1m = true
		}
		if !p2m && m.Match(p2) {
			p2m = true
		}
		if p1m && p2m {
			return false
		}
	}
	// If one matches and the other does not, sort the match first
	return p1m && !p2m
}

type allPlatformComparer struct{}

func (allPlatformComparer) Match(specs.Platform) bool {
	return true
}

func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool {
	return false
}
vendor/github.com/containerd/containerd/platforms/cpuinfo.go (117 lines, generated, vendored, new file)

@@ -0,0 +1,117 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package platforms

import (
	"bufio"
	"os"
	"runtime"
	"strings"

	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/log"
	"github.com/pkg/errors"
)

// Present the ARM instruction set architecture, eg: v7, v8
var cpuVariant string

func init() {
	if isArmArch(runtime.GOARCH) {
		cpuVariant = getCPUVariant()
	} else {
		cpuVariant = ""
	}
}

// For Linux, the kernel has already detected the ABI, ISA and Features.
// So we don't need to access the ARM registers to detect platform information
// by ourselves. We can just parse these information from /proc/cpuinfo
func getCPUInfo(pattern string) (info string, err error) {
	if !isLinuxOS(runtime.GOOS) {
		return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS)
	}

	cpuinfo, err := os.Open("/proc/cpuinfo")
	if err != nil {
		return "", err
	}
	defer cpuinfo.Close()

	// Start to Parse the Cpuinfo line by line. For SMP SoC, we parse
	// the first core is enough.
	scanner := bufio.NewScanner(cpuinfo)
	for scanner.Scan() {
		newline := scanner.Text()
		list := strings.Split(newline, ":")

		if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) {
			return strings.TrimSpace(list[1]), nil
		}
	}

	// Check whether the scanner encountered errors
	err = scanner.Err()
	if err != nil {
		return "", err
	}

	return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern)
}

func getCPUVariant() string {
	if runtime.GOOS == "windows" {
		// Windows only supports v7 for ARM32 and v8 for ARM64 and so we can use
		// runtime.GOARCH to determine the variants
		var variant string
		switch runtime.GOARCH {
		case "arm64":
			variant = "v8"
		case "arm":
			variant = "v7"
		default:
			variant = "unknown"
		}

		return variant
	}

	variant, err := getCPUInfo("Cpu architecture")
	if err != nil {
		log.L.WithError(err).Error("failure getting variant")
		return ""
	}

	switch variant {
	case "8", "AArch64":
		variant = "v8"
	case "7", "7M", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
		variant = "v7"
	case "6", "6TEJ":
		variant = "v6"
	case "5", "5T", "5TE", "5TEJ":
		variant = "v5"
	case "4", "4T":
		variant = "v4"
	case "3":
		variant = "v3"
	default:
		variant = "unknown"
	}

	return variant
}
vendor/github.com/containerd/containerd/platforms/database.go (114 lines, generated, vendored, new file)

@@ -0,0 +1,114 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package platforms

import (
	"runtime"
	"strings"
)

// isLinuxOS returns true if the operating system is Linux.
//
// The OS value should be normalized before calling this function.
func isLinuxOS(os string) bool {
	return os == "linux"
}

// These function are generated from https://golang.org/src/go/build/syslist.go.
//
// We use switch statements because they are slightly faster than map lookups
// and use a little less memory.

// isKnownOS returns true if we know about the operating system.
//
// The OS value should be normalized before calling this function.
func isKnownOS(os string) bool {
	switch os {
	case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
		return true
	}
	return false
}

// isArmArch returns true if the architecture is ARM.
//
// The arch value should be normalized before being passed to this function.
func isArmArch(arch string) bool {
	switch arch {
	case "arm", "arm64":
		return true
	}
	return false
}

// isKnownArch returns true if we know about the architecture.
//
// The arch value should be normalized before being passed to this function.
func isKnownArch(arch string) bool {
	switch arch {
	case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
		return true
	}
	return false
}

func normalizeOS(os string) string {
	if os == "" {
		return runtime.GOOS
	}
	os = strings.ToLower(os)

	switch os {
	case "macos":
		os = "darwin"
	}
	return os
}

// normalizeArch normalizes the architecture.
func normalizeArch(arch, variant string) (string, string) {
	arch, variant = strings.ToLower(arch), strings.ToLower(variant)
	switch arch {
	case "i386":
		arch = "386"
		variant = ""
	case "x86_64", "x86-64":
		arch = "amd64"
		variant = ""
	case "aarch64", "arm64":
		arch = "arm64"
		switch variant {
		case "8", "v8":
			variant = ""
		}
	case "armhf":
		arch = "arm"
		variant = "v7"
	case "armel":
		arch = "arm"
		variant = "v6"
	case "arm":
		switch variant {
		case "", "7":
			variant = "v7"
		case "5", "6", "8":
			variant = "v" + variant
		}
	}

	return arch, variant
}
vendor/github.com/containerd/containerd/platforms/defaults.go (38 lines, generated, vendored, new file)

@@ -0,0 +1,38 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package platforms

import (
	"runtime"

	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// DefaultString returns the default string specifier for the platform.
func DefaultString() string {
	return Format(DefaultSpec())
}

// DefaultSpec returns the current platform's default platform specification.
func DefaultSpec() specs.Platform {
	return specs.Platform{
		OS:           runtime.GOOS,
		Architecture: runtime.GOARCH,
		// The Variant field will be empty if arch != ARM.
		Variant: cpuVariant,
	}
}
vendor/github.com/containerd/containerd/platforms/defaults_unix.go (24 lines, generated, vendored, new file)

@@ -0,0 +1,24 @@
// +build !windows

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package platforms

// Default returns the default matcher for the platform.
func Default() MatchComparer {
	return Only(DefaultSpec())
}
vendor/github.com/containerd/containerd/platforms/defaults_windows.go (31 lines, generated, vendored, new file)

@@ -0,0 +1,31 @@
// +build windows

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package platforms

import (
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// Default returns the default matcher for the platform.
func Default() MatchComparer {
	return Ordered(DefaultSpec(), specs.Platform{
		OS:           "linux",
		Architecture: "amd64",
	})
}
279
vendor/github.com/containerd/containerd/platforms/platforms.go
generated
vendored
Normal file
279
vendor/github.com/containerd/containerd/platforms/platforms.go
generated
vendored
Normal file
@ -0,0 +1,279 @@
|
||||
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package platforms provides a toolkit for normalizing, matching and
// specifying container platforms.
//
// Centered around OCI platform specifications, we define a string-based
// specifier syntax that can be used for user input. With a specifier, users
// only need to specify the parts of the platform that are relevant to their
// context, providing an operating system or architecture or both.
//
// How do I use this package?
//
// The vast majority of use cases should simply use the match function with
// user input. The first step is to parse a specifier into a matcher:
//
//   m, err := Parse("linux")
//   if err != nil { ... }
//
// Once you have a matcher, use it to match against the platform declared by a
// component, typically from an image or runtime. Since extracting an image's
// platform is a little more involved, we'll use an example against the
// platform default:
//
//   if ok := m.Match(Default()); !ok { /* doesn't match */ }
//
// This can be composed in loops for resolving runtimes or used as a filter for
// fetch and select images.
//
// More details of the specifier syntax and platform spec follow.
//
// Declaring Platform Support
//
// Components that have strict platform requirements should use the OCI
// platform specification to declare their support. Typically, this will be
// images and runtimes that should make these declarations, specifying which
// platform they support. This looks roughly as follows:
//
//   type Platform struct {
//       Architecture string
//       OS           string
//       Variant      string
//   }
//
// Most images and runtimes should at least set Architecture and OS, according
// to their GOARCH and GOOS values, respectively (follow the OCI image
// specification when in doubt). ARM should set the variant in certain
// situations, which are outlined below.
//
// Platform Specifiers
//
// While the OCI platform specifications provide a tool for components to
// specify structured information, user input typically doesn't need the full
// context and much can be inferred. To solve this problem, we introduced
// "specifiers". A specifier has the format
// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
// operating system or the architecture or both.
//
// An example of a common specifier is `linux/amd64`. If the host has a default
// runtime that matches this, the user can simply provide the component that
// matters. For example, if an image provides amd64 and arm64 support, the
// operating system, `linux`, can be inferred, so they only have to provide
// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
// where the architecture may be known but a runtime may support images from
// different operating systems.
//
// Normalization
//
// Because not all users are familiar with the way the Go runtime represents
// platforms, several normalizations have been provided to make this package
// easier to use.
//
// The following are performed for architectures:
//
//   Value    Normalized
//   aarch64  arm64
//   armhf    arm
//   armel    arm/v6
//   i386     386
//   x86_64   amd64
//   x86-64   amd64
//
// We also normalize the operating system `macos` to `darwin`.
//
// ARM Support
//
// To qualify ARM architecture, the Variant field is used to qualify the arm
// version. The most common arm version, v7, is represented without the variant
// unless it is explicitly provided. This is treated as equivalent to armhf. A
// previous architecture, armel, will be normalized to arm/v6.
//
// While these normalizations are provided, their support on arm platforms has
// not yet been fully implemented and tested.
package platforms

import (
    "regexp"
    "runtime"
    "strconv"
    "strings"

    "github.com/containerd/containerd/errdefs"
    specs "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

var (
    specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
)

// Matcher matches platforms specifications, provided by an image or runtime.
type Matcher interface {
    Match(platform specs.Platform) bool
}

// NewMatcher returns a simple matcher based on the provided platform
// specification. The returned matcher only looks for equality based on os,
// architecture and variant.
//
// One may implement their own matcher if this doesn't provide the required
// functionality.
//
// Applications should opt to use `Match` over directly parsing specifiers.
func NewMatcher(platform specs.Platform) Matcher {
    return &matcher{
        Platform: Normalize(platform),
    }
}

type matcher struct {
    specs.Platform
}

func (m *matcher) Match(platform specs.Platform) bool {
    normalized := Normalize(platform)
    return m.OS == normalized.OS &&
        m.Architecture == normalized.Architecture &&
        m.Variant == normalized.Variant
}

func (m *matcher) String() string {
    return Format(m.Platform)
}

// Parse parses the platform specifier syntax into a platform declaration.
//
// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
// The minimum required information for a platform specifier is the operating
// system or architecture. If there is only a single string (no slashes), the
// value will be matched against the known set of operating systems, then fall
// back to the known set of architectures. The missing component will be
// inferred based on the local environment.
func Parse(specifier string) (specs.Platform, error) {
    if strings.Contains(specifier, "*") {
        // TODO(stevvooe): need to work out exact wildcard handling
        return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
    }

    parts := strings.Split(specifier, "/")

    for _, part := range parts {
        if !specifierRe.MatchString(part) {
            return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
        }
    }

    var p specs.Platform
    switch len(parts) {
    case 1:
        // in this case, we will test that the value might be an OS, then look
        // it up. If it is not known, we'll treat it as an architecture. Since
        // we have very little information about the platform here, we are
        // going to be a little more strict if we don't know about the argument
        // value.
        p.OS = normalizeOS(parts[0])
        if isKnownOS(p.OS) {
            // picks a default architecture
            p.Architecture = runtime.GOARCH
            if p.Architecture == "arm" {
                // TODO(stevvooe): Resolve arm variant, if not v6 (default)
                return specs.Platform{}, errors.Wrapf(errdefs.ErrNotImplemented, "arm support not fully implemented")
            }

            return p, nil
        }

        p.Architecture, p.Variant = normalizeArch(parts[0], "")
        if p.Architecture == "arm" && p.Variant == "v7" {
            p.Variant = ""
        }
        if isKnownArch(p.Architecture) {
            p.OS = runtime.GOOS
            return p, nil
        }

        return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
    case 2:
        // In this case, we treat as a regular os/arch pair. We don't care
        // about whether or not we know of the platform.
        p.OS = normalizeOS(parts[0])
        p.Architecture, p.Variant = normalizeArch(parts[1], "")
        if p.Architecture == "arm" && p.Variant == "v7" {
            p.Variant = ""
        }

        return p, nil
    case 3:
        // we have a fully specified variant, this is rare
        p.OS = normalizeOS(parts[0])
        p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
        if p.Architecture == "arm64" && p.Variant == "" {
            p.Variant = "v8"
        }

        return p, nil
    }

    return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
}

// MustParse is like Parse but panics if the specifier cannot be parsed.
// Simplifies initialization of global variables.
func MustParse(specifier string) specs.Platform {
    p, err := Parse(specifier)
    if err != nil {
        panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
    }
    return p
}

// Format returns a string specifier from the provided platform specification.
func Format(platform specs.Platform) string {
    if platform.OS == "" {
        return "unknown"
    }

    return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
}

func joinNotEmpty(s ...string) string {
    var ss []string
    for _, s := range s {
        if s == "" {
            continue
        }

        ss = append(ss, s)
    }

    return strings.Join(ss, "/")
}

// Normalize validates and translates the platform to the canonical value.
//
// For example, if "Aarch64" is encountered, we change it to "arm64" or if
// "x86_64" is encountered, it becomes "amd64".
func Normalize(platform specs.Platform) specs.Platform {
    platform.OS = normalizeOS(platform.OS)
    platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)

    // these fields are deprecated, remove them
    platform.OSFeatures = nil
    platform.OSVersion = ""

    return platform
}
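// A minimal usage sketch for the specifier syntax documented above (an
// illustration, not part of the vendored file):
//
//   p, err := platforms.Parse("arm64") // a known architecture, so the OS is inferred from runtime.GOOS
//   if err != nil { ... }
//   fmt.Println(platforms.Format(p))                                   // e.g. "linux/arm64" on a Linux host
//   fmt.Println(platforms.Format(platforms.MustParse("linux/x86_64"))) // "linux/amd64" after normalization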
221 vendor/github.com/containers/image/v5/copy/copy.go generated vendored
@ -21,6 +21,8 @@ import (
    "github.com/containers/image/v5/signature"
    "github.com/containers/image/v5/transports"
    "github.com/containers/image/v5/types"
    "github.com/containers/ocicrypt"
    encconfig "github.com/containers/ocicrypt/config"
    digest "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
@ -39,9 +41,14 @@ type digestingReader struct {
    validationSucceeded bool
}

// maxParallelDownloads is used to limit the maximum number of parallel
// downloads. Let's follow Firefox by limiting it to 6.
var maxParallelDownloads = 6
var (
    // ErrDecryptParamsMissing is returned if decryption parameters are missing
    ErrDecryptParamsMissing = errors.New("Necessary DecryptParameters not present")

    // maxParallelDownloads is used to limit the maximum number of parallel
    // downloads. Let's follow Firefox by limiting it to 6.
    maxParallelDownloads = 6
)

// compressionBufferSize is the buffer size used to compress a blob
var compressionBufferSize = 1048576
@ -50,6 +57,7 @@ var compressionBufferSize = 1048576
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
// (neither is set if EOF is never reached).
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
    var digester digest.Digester
    if err := expectedDigest.Validate(); err != nil {
        return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
    }
@ -57,9 +65,11 @@ func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digest
    if !digestAlgorithm.Available() {
        return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
    }
    digester = digestAlgorithm.Digester()

    return &digestingReader{
        source:           source,
        digester:         digestAlgorithm.Digester(),
        digester:         digester,
        expectedDigest:   expectedDigest,
        validationFailed: false,
    }, nil
@ -99,6 +109,8 @@ type copier struct {
    copyInParallel    bool
    compressionFormat compression.Algorithm
    compressionLevel  *int
    ociDecryptConfig  *encconfig.DecryptConfig
    ociEncryptConfig  *encconfig.EncryptConfig
}

// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
@ -109,6 +121,9 @@ type imageCopier struct {
    diffIDsAreNeeded   bool
    canModifyManifest  bool
    canSubstituteBlobs bool
    ociDecryptConfig   *encconfig.DecryptConfig
    ociEncryptConfig   *encconfig.EncryptConfig
    ociEncryptLayers   *[]int
}

const (
@ -155,6 +170,20 @@ type Options struct {
    ForceManifestMIMEType string
    ImageListSelection    ImageListSelection // set to either CopySystemImage (the default), CopyAllImages, or CopySpecificImages to control which instances we copy when the source reference is a list; ignored if the source reference is not a list
    Instances             []digest.Digest    // if ImageListSelection is CopySpecificImages, copy only these instances and the list itself
    // If OciEncryptConfig is non-nil, it indicates that an image should be encrypted.
    // The encryption options are derived from the construction of the EncryptConfig object.
    // Note: During the initial encryption process of a layer, the resultant digest is not known
    // during creation, so newDigestingReader has to be set with validateDigest = false
    OciEncryptConfig *encconfig.EncryptConfig
    // OciEncryptLayers represents the list of layers to encrypt.
    // If nil, don't encrypt any layers.
    // If non-nil and len==0, denotes encrypt all layers.
    // integers in the slice represent 0-indexed layer indices, with support for negative
    // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer.
    OciEncryptLayers *[]int
    // OciDecryptConfig contains the config that can be used to decrypt an image if it is
    // encrypted, if non-nil. If nil, it does not attempt to decrypt an image.
    OciDecryptConfig *encconfig.DecryptConfig
}

// validateImageListSelection returns an error if the passed-in value is not one that we recognize as a valid ImageListSelection value
@ -493,6 +522,15 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
    return nil, "", "", errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
}

// TODO: Remove src.SupportsEncryption call and interface once copyUpdatedConfigAndManifest does not depend on source Image manifest type
// Currently, the way copyUpdatedConfigAndManifest updates the manifest is to apply updates to the source manifest and call PutManifest
// of the modified source manifest. The implication is that schemas like docker2 cannot be encrypted even though the destination
// supports encryption, because the docker2 struct does not have annotations, which are required.
// Reference to issue: https://github.com/containers/image/issues/746
if options.OciEncryptLayers != nil && !src.SupportsEncryption(ctx) {
    return nil, "", "", errors.Errorf("Encryption requested but not supported by source transport %s", src.Reference().Transport().Name())
}

// If the destination is a digested reference, make a note of that, determine what digest value we're
// expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
// one item from a manifest list that matches it, accept that as a match.
@ -524,7 +562,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
    }
}

if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil {
    return nil, "", "", err
}

@ -552,6 +590,9 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
    src: src,
    // diffIDsAreNeeded is computed later
    canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
    ociDecryptConfig:  options.OciDecryptConfig,
    ociEncryptConfig:  options.OciEncryptConfig,
    ociEncryptLayers:  options.OciEncryptLayers,
}
// Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
@ -565,15 +606,19 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
    return nil, "", "", err
}

destRequiresOciEncryption := (isEncrypted(src) && ic.ociDecryptConfig != nil) || options.OciEncryptLayers != nil

// We compute preferredManifestMIMEType only to show it in error messages.
// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType)
preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType, destRequiresOciEncryption)
if err != nil {
    return nil, "", "", err
}

// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
// If encrypted and decryption keys provided, we should try to decrypt
ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.ociDecryptConfig != nil) || ic.ociEncryptConfig != nil

if err := ic.copyLayers(ctx); err != nil {
    return nil, "", "", err
@ -651,21 +696,28 @@ func (c *copier) Printf(format string, a ...interface{}) {
    fmt.Fprintf(c.reportWriter, format, a...)
}

func checkImageDestinationForCurrentRuntimeOS(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
    if dest.MustMatchRuntimeOS() {
        wantedOS := runtime.GOOS
        if sys != nil && sys.OSChoice != "" {
            wantedOS = sys.OSChoice
        }
        c, err := src.OCIConfig(ctx)
        if err != nil {
            return errors.Wrapf(err, "Error parsing image configuration")
        }
        osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS)
        if wantedOS == "windows" && c.OS == "linux" {
            return osErr
        } else if wantedOS != "windows" && c.OS == "windows" {
            return osErr

        wantedOS := runtime.GOOS
        if sys != nil && sys.OSChoice != "" {
            wantedOS = sys.OSChoice
        }
        if wantedOS != c.OS {
            return fmt.Errorf("Image operating system mismatch: image uses %q, expecting %q", c.OS, wantedOS)
        }

        wantedArch := runtime.GOARCH
        if sys != nil && sys.ArchitectureChoice != "" {
            wantedArch = sys.ArchitectureChoice
        }
        if wantedArch != c.Architecture {
            return fmt.Errorf("Image architecture mismatch: image uses %q, expecting %q", c.Architecture, wantedArch)
        }
    }
    return nil
@ -709,6 +761,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
    return err
}
srcInfosUpdated := false
// If we only need to check authorization, no updates required.
if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
    if !ic.canModifyManifest {
        return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden")
@ -737,7 +790,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
}

data := make([]copyLayerData, numLayers)
copyLayerHelper := func(index int, srcLayer types.BlobInfo, pool *mpb.Progress) {
copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress) {
    defer copySemaphore.Release(1)
    defer copyGroup.Done()
    cld := copyLayerData{}
@ -752,18 +805,36 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
        logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
    }
} else {
    cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, pool)
    cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool)
}
data[index] = cld
}

// Create layer Encryption map
encLayerBitmap := map[int]bool{}
var encryptAll bool
if ic.ociEncryptLayers != nil {
    encryptAll = len(*ic.ociEncryptLayers) == 0
    totalLayers := len(srcInfos)
    for _, l := range *ic.ociEncryptLayers {
        // if layer is negative, it is reverse indexed.
        encLayerBitmap[(totalLayers+l)%totalLayers] = true
    }

    if encryptAll {
        for i := 0; i < len(srcInfos); i++ {
            encLayerBitmap[i] = true
        }
    }
}

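// Worked example (a sketch, not part of the commit): with 5 layers and
// *ic.ociEncryptLayers == []int{0, -1}, the modulo arithmetic above gives
//
//     (5 + 0) % 5 = 0    // the first layer
//     (5 + (-1)) % 5 = 4 // the last (top-most) layer
//
// so encLayerBitmap marks exactly layers 0 and 4 for encryption.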
func() { // A scope for defer
    progressPool, progressCleanup := ic.c.newProgressPool(ctx)
    defer progressCleanup()

    for i, srcLayer := range srcInfos {
        copySemaphore.Acquire(ctx, 1)
        go copyLayerHelper(i, srcLayer, progressPool)
        go copyLayerHelper(i, srcLayer, encLayerBitmap[i], progressPool)
    }

    // Wait for all layers to be copied
@ -919,7 +990,7 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
    progressPool, progressCleanup := c.newProgressPool(ctx)
    defer progressCleanup()
    bar := c.createProgressBar(progressPool, srcInfo, "config", "done")
    destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
    destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar)
    if err != nil {
        return types.BlobInfo{}, err
    }
@ -945,9 +1016,10 @@ type diffIDResult struct {

// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress) (types.BlobInfo, digest.Digest, error) {
    cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
    diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
    // Diffs are needed if we are encrypting an image or trying to decrypt an image
    diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.ociDecryptConfig != nil)

    // If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
    if !diffIDIsNeeded {
@ -972,7 +1044,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, po

    bar := ic.c.createProgressBar(pool, srcInfo, "blob", "done")

    blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, Annotations: srcInfo.Annotations}, diffIDIsNeeded, bar)
    blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar)
    if err != nil {
        return types.BlobInfo{}, "", err
    }
@ -1003,7 +1075,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, po
// perhaps compressing the stream if canCompress,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
    diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
    diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
    var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
    var diffIDChan chan diffIDResult

@ -1027,7 +1099,10 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
            return pipeWriter
        }
    }
    blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
    ic.c.ociDecryptConfig = ic.ociDecryptConfig
    ic.c.ociEncryptConfig = ic.ociEncryptConfig

    blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, toEncrypt, bar) // Sets err to nil on success
    return blobInfo, diffIDChan, err
    // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
@ -1064,7 +1139,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
    getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
    canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
    canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar) (types.BlobInfo, error) {
    // The copying happens through a pipeline of connected io.Readers.
    // === Input: srcStream

@ -1078,7 +1153,29 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
    if err != nil {
        return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
    }

    var destStream io.Reader = digestingReader
    var decrypted bool
    if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
        newDesc := imgspecv1.Descriptor{
            Annotations: srcInfo.Annotations,
        }

        var d digest.Digest
        destStream, d, err = ocicrypt.DecryptLayer(c.ociDecryptConfig, destStream, newDesc, false)
        if err != nil {
            return types.BlobInfo{}, errors.Wrapf(err, "Error decrypting layer %s", srcInfo.Digest)
        }

        srcInfo.Digest = d
        srcInfo.Size = -1
        for k := range srcInfo.Annotations {
            if strings.HasPrefix(k, "org.opencontainers.image.enc") {
                delete(srcInfo.Annotations, k)
            }
        }
        decrypted = true
    }

    // === Detect compression of the input stream.
    // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
@ -1101,7 +1198,12 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
    // === Deal with layer compression/decompression if necessary
    var inputInfo types.BlobInfo
    var compressionOperation types.LayerCompression
    if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
    if canModifyBlob && isOciEncrypted(srcInfo.MediaType) {
        // PreserveOriginal: compression cannot be applied to an encrypted blob unless it is decrypted first
        logrus.Debugf("Using original blob without modification for encrypted blob")
        compressionOperation = types.PreserveOriginal
        inputInfo = srcInfo
    } else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
        logrus.Debugf("Compressing blob on the fly")
        compressionOperation = types.Compress
        pipeReader, pipeWriter := io.Pipe()
@ -1152,15 +1254,51 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
        inputInfo = srcInfo
    }

    // Perform image encryption for valid mediatypes if ociEncryptConfig provided
    var (
        encrypted bool
        finalizer ocicrypt.EncryptLayerFinalizer
    )
    if toEncrypt {
        if decrypted {
            return types.BlobInfo{}, errors.New("Unable to support both decryption and encryption in the same copy")
        }

        if !isOciEncrypted(srcInfo.MediaType) && c.ociEncryptConfig != nil {
            var annotations map[string]string
            if !decrypted {
                annotations = srcInfo.Annotations
            }
            desc := imgspecv1.Descriptor{
                MediaType:   srcInfo.MediaType,
                Digest:      srcInfo.Digest,
                Size:        srcInfo.Size,
                Annotations: annotations,
            }

            s, fin, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, destStream, desc)
            if err != nil {
                return types.BlobInfo{}, errors.Wrapf(err, "Error encrypting blob %s", srcInfo.Digest)
            }

            destStream = s
            finalizer = fin
            inputInfo.Digest = ""
            inputInfo.Size = -1
            encrypted = true
        }
    }

    // === Report progress using the c.progress channel, if required.
    if c.progress != nil && c.progressInterval > 0 {
        destStream = &progressReader{
            source:   destStream,
            channel:  c.progress,
            interval: c.progressInterval,
            artifact: srcInfo,
            lastTime: time.Now(),
        }
        progressReader := newProgressReader(
            destStream,
            c.progress,
            c.progressInterval,
            srcInfo,
        )
        defer progressReader.reportDone()
        destStream = progressReader
    }

    // === Finally, send the layer stream to dest.
@ -1176,6 +1314,21 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
    if canModifyBlob && !isConfig {
        uploadedInfo.CompressionAlgorithm = &desiredCompressionFormat
    }
    if decrypted {
        uploadedInfo.CryptoOperation = types.Decrypt
    } else if encrypted {
        encryptAnnotations, err := finalizer()
        if err != nil {
            return types.BlobInfo{}, errors.Wrap(err, "Unable to finalize encryption")
        }
        uploadedInfo.CryptoOperation = types.Encrypt
        if uploadedInfo.Annotations == nil {
            uploadedInfo.Annotations = map[string]string{}
        }
        for k, v := range encryptAnnotations {
            uploadedInfo.Annotations[k] = v
        }
    }

    // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
    // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
24 vendor/github.com/containers/image/v5/copy/encrypt.go generated vendored Normal file
@ -0,0 +1,24 @@
package copy

import (
    "strings"

    "github.com/containers/image/v5/types"
)

// isOciEncrypted returns true if the mediatype is encrypted.
// This function will be moved to be part of the OCI spec when adopted.
func isOciEncrypted(mediatype string) bool {
    return strings.HasSuffix(mediatype, "+encrypted")
}

// isEncrypted checks if an image is encrypted
func isEncrypted(i types.Image) bool {
    layers := i.LayerInfos()
    for _, l := range layers {
        if isOciEncrypted(l.MediaType) {
            return true
        }
    }
    return false
}
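// A sketch of the "+encrypted" suffix convention (not part of the vendored
// file; the media type strings follow the ocicrypt spec used elsewhere in
// this commit):
//
//     isOciEncrypted("application/vnd.oci.image.layer.v1.tar+gzip+encrypted") // true
//     isOciEncrypted("application/vnd.oci.image.layer.v1.tar+gzip")           // false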
8 vendor/github.com/containers/image/v5/copy/manifest.go generated vendored
@ -42,7 +42,7 @@ func (os *orderedSet) append(s string) {
// Note that the conversion will only happen later, through ic.src.UpdatedImage
// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
// and a list of other possible alternatives, in order.
func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) {
func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupportedManifestMIMETypes []string, forceManifestMIMEType string, requiresOciEncryption bool) (string, []string, error) {
    _, srcType, err := ic.src.Manifest(ctx)
    if err != nil { // This should have been cached?!
        return "", nil, errors.Wrap(err, "Error reading manifest")
@ -57,12 +57,14 @@ func (ic *imageCopier) determineManifestConversion(ctx context.Context, destSupp
        destSupportedManifestMIMETypes = []string{forceManifestMIMEType}
    }

    if len(destSupportedManifestMIMETypes) == 0 {
    if len(destSupportedManifestMIMETypes) == 0 && (!requiresOciEncryption || manifest.MIMETypeSupportsEncryption(srcType)) {
        return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
    }
    supportedByDest := map[string]struct{}{}
    for _, t := range destSupportedManifestMIMETypes {
        supportedByDest[t] = struct{}{}
        if !requiresOciEncryption || manifest.MIMETypeSupportsEncryption(t) {
            supportedByDest[t] = struct{}{}
        }
    }

    // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
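// Filtering sketch (not part of the vendored file): with
// requiresOciEncryption == true, only MIME types for which
// manifest.MIMETypeSupportsEncryption returns true survive, steering the copy
// toward the OCI image manifest:
//
//     MIMETypeSupportsEncryption(imgspecv1.MediaTypeImageManifest) // true
//     MIMETypeSupportsEncryption(DockerV2Schema2MediaType)         // false (constant from the same manifest package)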
69 vendor/github.com/containers/image/v5/copy/progress_reader.go generated vendored
@ -9,20 +9,71 @@ import (
// progressReader is a reader that reports its progress on an interval.
type progressReader struct {
    source   io.Reader
    channel  chan types.ProgressProperties
    interval time.Duration
    artifact types.BlobInfo
    lastTime time.Time
    offset   uint64
    source       io.Reader
    channel      chan<- types.ProgressProperties
    interval     time.Duration
    artifact     types.BlobInfo
    lastUpdate   time.Time
    offset       uint64
    offsetUpdate uint64
}

// newProgressReader creates a new progress reader for:
// `source`: The source when internally reading bytes
// `channel`: The reporter channel to which the progress will be sent
// `interval`: The update interval to indicate how often the progress should update
// `artifact`: The blob metadata which is currently being processed
func newProgressReader(
    source io.Reader,
    channel chan<- types.ProgressProperties,
    interval time.Duration,
    artifact types.BlobInfo,
) *progressReader {
    // The progress reader constructor informs the progress channel
    // that a new artifact will be read
    channel <- types.ProgressProperties{
        Event:    types.ProgressEventNewArtifact,
        Artifact: artifact,
    }
    return &progressReader{
        source:       source,
        channel:      channel,
        interval:     interval,
        artifact:     artifact,
        lastUpdate:   time.Now(),
        offset:       0,
        offsetUpdate: 0,
    }
}

// reportDone indicates to the internal channel that the progress has been
// finished
func (r *progressReader) reportDone() {
    r.channel <- types.ProgressProperties{
        Event:        types.ProgressEventDone,
        Artifact:     r.artifact,
        Offset:       r.offset,
        OffsetUpdate: r.offsetUpdate,
    }
}

// Read continuously reads bytes into the progress reader and reports the
// status via the internal channel
func (r *progressReader) Read(p []byte) (int, error) {
    n, err := r.source.Read(p)
    r.offset += uint64(n)
    if time.Since(r.lastTime) > r.interval {
        r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset}
        r.lastTime = time.Now()
    r.offsetUpdate += uint64(n)

    // Fire the progress reader in the provided interval
    if time.Since(r.lastUpdate) > r.interval {
        r.channel <- types.ProgressProperties{
            Event:        types.ProgressEventRead,
            Artifact:     r.artifact,
            Offset:       r.offset,
            OffsetUpdate: r.offsetUpdate,
        }
        r.lastUpdate = time.Now()
        r.offsetUpdate = 0
    }
    return n, err
}
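// Consumer sketch (assumed setup, not part of the vendored file): the channel
// receives one ProgressEventNewArtifact from the constructor, ProgressEventRead
// at most once per interval, and one ProgressEventDone from reportDone.
//
//     ch := make(chan types.ProgressProperties, 16)
//     go func() {
//         for p := range ch {
//             fmt.Printf("%v: offset=%d (+%d)\n", p.Event, p.Offset, p.OffsetUpdate)
//         }
//     }()
//     pr := newProgressReader(srcStream, ch, 500*time.Millisecond, blobInfo)
//     defer pr.reportDone()
//     _, _ = io.Copy(ioutil.Discard, pr)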
2 vendor/github.com/containers/image/v5/directory/directory_dest.go generated vendored
@ -112,7 +112,7 @@ func (d *dirImageDestination) AcceptsForeignLayerURLs() bool {
    return false
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *dirImageDestination) MustMatchRuntimeOS() bool {
    return false
}
2 vendor/github.com/containers/image/v5/docker/archive/dest.go generated vendored
@ -36,7 +36,7 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (types.
    return nil, errors.New("docker-archive doesn't support modifying existing images")
}

tarDest := tarfile.NewDestination(fh, ref.destinationRef)
tarDest := tarfile.NewDestinationWithContext(sys, fh, ref.destinationRef)
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
    tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
}
5 vendor/github.com/containers/image/v5/docker/archive/src.go generated vendored
@ -2,6 +2,7 @@ package archive
import (
    "context"

    "github.com/containers/image/v5/docker/tarfile"
    "github.com/containers/image/v5/types"
    "github.com/sirupsen/logrus"
@ -14,11 +15,11 @@ type archiveImageSource struct {

// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx context.Context, ref archiveReference) (types.ImageSource, error) {
func newImageSource(ctx context.Context, sys *types.SystemContext, ref archiveReference) (types.ImageSource, error) {
    if ref.destinationRef != nil {
        logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
    }
    src, err := tarfile.NewSourceFromFile(ref.path)
    src, err := tarfile.NewSourceFromFileWithContext(sys, ref.path)
    if err != nil {
        return nil, err
    }
4 vendor/github.com/containers/image/v5/docker/archive/transport.go generated vendored
@ -134,7 +134,7 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string {
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
    src, err := newImageSource(ctx, ref)
    src, err := newImageSource(ctx, sys, ref)
    if err != nil {
        return nil, err
    }
@ -144,7 +144,7 @@ func (ref archiveReference) NewImage(ctx context.Context, sys *types.SystemConte
// NewImageSource returns a types.ImageSource for this reference.
// The caller must call .Close() on the returned ImageSource.
func (ref archiveReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
    return newImageSource(ctx, ref)
    return newImageSource(ctx, sys, ref)
}

// NewImageDestination returns a types.ImageDestination for this reference.
4 vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go generated vendored
@ -54,7 +54,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
    return &daemonImageDestination{
        ref:                ref,
        mustMatchRuntimeOS: mustMatchRuntimeOS,
        Destination:        tarfile.NewDestination(writer, namedTaggedRef),
        Destination:        tarfile.NewDestinationWithContext(sys, writer, namedTaggedRef),
        goroutineCancel:    goroutineCancel,
        statusChannel:      statusChannel,
        writer:             writer,
@ -90,7 +90,7 @@ func (d *daemonImageDestination) DesiredLayerCompression() types.LayerCompressio
    return types.PreserveOriginal
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *daemonImageDestination) MustMatchRuntimeOS() bool {
    return d.mustMatchRuntimeOS
}
2 vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go generated vendored
@ -40,7 +40,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
}
defer inputStream.Close()

src, err := tarfile.NewSourceFromStream(inputStream)
src, err := tarfile.NewSourceFromStreamWithSystemContext(sys, inputStream)
if err != nil {
    return nil, err
}
2 vendor/github.com/containers/image/v5/docker/docker_image_dest.go generated vendored
@ -94,7 +94,7 @@ func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
    return true
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
    return false
}
12 vendor/github.com/containers/image/v5/docker/tarfile/dest.go generated vendored
@ -29,10 +29,17 @@ type Destination struct {
    // Other state.
    blobs  map[digest.Digest]types.BlobInfo // list of already-sent blobs
    config []byte
    sysCtx *types.SystemContext
}

// NewDestination returns a tarfile.Destination for the specified io.Writer.
// Deprecated: please use NewDestinationWithContext instead
func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
    return NewDestinationWithContext(nil, dest, ref)
}

// NewDestinationWithContext returns a tarfile.Destination for the specified io.Writer.
func NewDestinationWithContext(sys *types.SystemContext, dest io.Writer, ref reference.NamedTagged) *Destination {
    repoTags := []reference.NamedTagged{}
    if ref != nil {
        repoTags = append(repoTags, ref)
@ -42,6 +49,7 @@ func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
        tar:      tar.NewWriter(dest),
        repoTags: repoTags,
        blobs:    make(map[digest.Digest]types.BlobInfo),
        sysCtx:   sys,
    }
}

@ -70,7 +78,7 @@ func (d *Destination) AcceptsForeignLayerURLs() bool {
    return false
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *Destination) MustMatchRuntimeOS() bool {
    return false
}
@ -99,7 +107,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
    // When the layer is decompressed, we also have to generate the digest on uncompressed data.
    if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
        logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
        streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob")
        streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
        if err != nil {
            return types.BlobInfo{}, err
        }
20 vendor/github.com/containers/image/v5/docker/tarfile/src.go generated vendored
@ -46,7 +46,14 @@ type layerInfo struct {
// To do for both the NewSourceFromFile and NewSourceFromStream functions

// NewSourceFromFile returns a tarfile.Source for the specified path.
// Deprecated: Please use NewSourceFromFileWithContext, which allows you to configure the temp
// directory for big files through SystemContext.BigFilesTemporaryDir
func NewSourceFromFile(path string) (*Source, error) {
    return NewSourceFromFileWithContext(nil, path)
}

// NewSourceFromFileWithContext returns a tarfile.Source for the specified path.
func NewSourceFromFileWithContext(sys *types.SystemContext, path string) (*Source, error) {
    file, err := os.Open(path)
    if err != nil {
        return nil, errors.Wrapf(err, "error opening file %q", path)
@ -65,16 +72,25 @@ func NewSourceFromFile(path string) (*Source, error) {
            tarPath: path,
        }, nil
    }
    return NewSourceFromStream(stream)
    return NewSourceFromStreamWithSystemContext(sys, stream)
}

// NewSourceFromStream returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromFile returns.
// Deprecated: Please use NewSourceFromStreamWithSystemContext, which allows you to configure the
// temp directory for big files through SystemContext.BigFilesTemporaryDir
func NewSourceFromStream(inputStream io.Reader) (*Source, error) {
    return NewSourceFromStreamWithSystemContext(nil, inputStream)
}

// NewSourceFromStreamWithSystemContext returns a tarfile.Source for the specified inputStream,
// which can be either compressed or uncompressed. The caller can close the
// inputStream immediately after NewSourceFromFile returns.
func NewSourceFromStreamWithSystemContext(sys *types.SystemContext, inputStream io.Reader) (*Source, error) {
    // FIXME: use SystemContext here.
    // Save inputStream to a temporary file
    tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tar")
    tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
    if err != nil {
        return nil, errors.Wrap(err, "error creating temporary file")
    }
5 vendor/github.com/containers/image/v5/image/docker_schema1.go generated vendored
@ -200,3 +200,8 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl

    return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil
}

// SupportsEncryption returns whether encryption is supported for the manifest type
func (m *manifestSchema1) SupportsEncryption(context.Context) bool {
    return false
}
5 vendor/github.com/containers/image/v5/image/docker_schema2.go generated vendored
@ -355,3 +355,8 @@ func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwawa
    }
    return json.Marshal(rawContents)
}

// SupportsEncryption returns whether encryption is supported for the manifest type
func (m *manifestSchema2) SupportsEncryption(context.Context) bool {
    return false
}
2 vendor/github.com/containers/image/v5/image/manifest.go generated vendored
@ -44,6 +44,8 @@ type genericManifest interface {
    // UpdatedImage returns a types.Image modified according to options.
    // This does not change the state of the original Image object.
    UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
    // SupportsEncryption returns whether encryption is supported for the manifest type
    SupportsEncryption(ctx context.Context) bool
}

// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src.
5 vendor/github.com/containers/image/v5/image/oci.go generated vendored
@ -212,3 +212,8 @@ func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
    m1 := manifestSchema2FromComponents(config, m.src, nil, layers)
    return memoryImageFromManifest(m1), nil
}

// SupportsEncryption returns whether encryption is supported for the manifest type
func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
    return true
}
7 vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go generated vendored
@ -3,6 +3,8 @@ package tmpdir
import (
    "os"
    "runtime"

    "github.com/containers/image/v5/types"
)

// unixTempDirForBigFiles is the directory path to store big files on non Windows systems.
@ -18,7 +20,10 @@ const builtinUnixTempDirForBigFiles = "/var/tmp"
// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files.
// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp
// which on systemd based systems could be the unsuitable tmpfs filesystem.
func TemporaryDirectoryForBigFiles() string {
func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string {
    if sys != nil && sys.BigFilesTemporaryDir != "" {
        return sys.BigFilesTemporaryDir
    }
    var temporaryDirectoryForBigFiles string
    if runtime.GOOS == "windows" {
        temporaryDirectoryForBigFiles = os.TempDir()
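// Override sketch (not part of the vendored file; assumes the caller builds a
// SystemContext):
//
//     sys := &types.SystemContext{BigFilesTemporaryDir: "/mnt/scratch"}
//     dir := tmpdir.TemporaryDirectoryForBigFiles(sys) // "/mnt/scratch"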
5 vendor/github.com/containers/image/v5/manifest/manifest.go generated vendored
@ -206,6 +206,11 @@ func MIMETypeIsMultiImage(mimeType string) bool {
    return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex
}

// MIMETypeSupportsEncryption returns true if the mimeType supports encryption
func MIMETypeSupportsEncryption(mimeType string) bool {
    return mimeType == imgspecv1.MediaTypeImageManifest
}

// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server,
// centralizing various workarounds.
func NormalizedMIMEType(input string) string {
63 vendor/github.com/containers/image/v5/manifest/oci.go generated vendored
@ -3,9 +3,11 @@ package manifest
import (
    "encoding/json"
    "fmt"
    "strings"

    "github.com/containers/image/v5/pkg/compression"
    "github.com/containers/image/v5/types"
    ociencspec "github.com/containers/ocicrypt/spec"
    "github.com/opencontainers/go-digest"
    "github.com/opencontainers/image-spec/specs-go"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -33,7 +35,7 @@ type OCI1 struct {
// SupportedOCI1MediaType checks if the specified string is a supported OCI1 media type.
func SupportedOCI1MediaType(m string) error {
    switch m {
    case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader:
    case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, imgspecv1.MediaTypeImageLayerZstd, imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeLayoutHeader, ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc:
        return nil
    default:
        return fmt.Errorf("unsupported OCIv1 media type: %q", m)
@ -117,7 +119,7 @@ func isOCI1Layer(mimeType string) bool {
    }
}

// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
    if len(m.Layers) != len(layerInfos) {
        return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
@ -125,11 +127,20 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
    original := m.Layers
    m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
    for i, info := range layerInfos {
        mimeType := original[i].MediaType
        // First make sure we support the media type of the original layer.
        if err := SupportedOCI1MediaType(original[i].MediaType); err != nil {
            return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer: %q", original[i].MediaType)
        }

        if info.CryptoOperation == types.Decrypt {
            decMimeType, err := getDecryptedMediaType(mimeType)
            if err != nil {
                return fmt.Errorf("error preparing updated manifest: decryption specified but original mediatype is not encrypted: %q", mimeType)
            }
            mimeType = decMimeType
        }

        // Set the correct media types based on the specified compression
        // operation, the desired compression algorithm AND the original media
        // type.
@ -142,31 +153,29 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
        switch info.CompressionOperation {
        case types.PreserveOriginal:
            // Keep the original media type.
            m.Layers[i].MediaType = original[i].MediaType
            m.Layers[i].MediaType = mimeType

        case types.Decompress:
            // Decompress the original media type and check if it was
            // a non-distributable one or not.
            mimeType := original[i].MediaType
            switch {
            case isOCI1NonDistributableLayer(mimeType):
                m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
            case isOCI1Layer(mimeType):
                m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
            default:
                return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", original[i].MediaType)
                return fmt.Errorf("Error preparing updated manifest: unsupported media type for decompression: %q", mimeType)
            }

        case types.Compress:
            if info.CompressionAlgorithm == nil {
                logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", info.Digest)
                m.Layers[i].MediaType = original[i].MediaType
                m.Layers[i].MediaType = mimeType
                break
            }
            // Compress the original media type and set the new one based on
            // that type (distributable or not) and the specified compression
            // algorithm. Throw an error if the algorithm is not supported.
            mimeType := original[i].MediaType
            switch info.CompressionAlgorithm.Name() {
            case compression.Gzip.Name():
                switch {
@ -175,7 +184,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
                case isOCI1Layer(mimeType):
                    m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerGzip
                default:
                    return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
                    return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", mimeType)
                }

            case compression.Zstd.Name():
@ -185,7 +194,7 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
                case isOCI1Layer(mimeType):
                    m.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerZstd
                default:
                    return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", original[i].MediaType)
                    return fmt.Errorf("Error preparing updated manifest: unsupported media type for compression: %q", mimeType)
                }

            default:
@ -195,6 +204,15 @@ func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
        default:
            return fmt.Errorf("Error preparing updated manifest: unknown compression operation (%d) for layer %q", info.CompressionOperation, info.Digest)
        }

        if info.CryptoOperation == types.Encrypt {
            encMediaType, err := getEncryptedMediaType(m.Layers[i].MediaType)
            if err != nil {
                return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", m.Layers[i].MediaType)
            }
            m.Layers[i].MediaType = encMediaType
        }

        m.Layers[i].Digest = info.Digest
        m.Layers[i].Size = info.Size
        m.Layers[i].Annotations = info.Annotations
@ -241,3 +259,30 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
    }
    return m.Config.Digest.Hex(), nil
}

// getEncryptedMediaType maps the mediatype to its encrypted counterpart and returns
// an error if the mediatype does not support encryption
func getEncryptedMediaType(mediatype string) (string, error) {
    for _, s := range strings.Split(mediatype, "+")[1:] {
        if s == "encrypted" {
            return "", errors.Errorf("unsupported mediatype: %v already encrypted", mediatype)
|
||||
}
|
||||
}
|
||||
unsuffixedMediatype := strings.Split(mediatype, "+")[0]
|
||||
switch unsuffixedMediatype {
|
||||
case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerNonDistributable:
|
||||
return mediatype + "+encrypted", nil
|
||||
}
|
||||
|
||||
return "", errors.Errorf("unsupported mediatype to encrypt: %v", mediatype)
|
||||
}
|
||||
|
||||
// getEncryptedMediaType will return the mediatype to its encrypted counterpart and return
|
||||
// an error if the mediatype does not support decryption
|
||||
func getDecryptedMediaType(mediatype string) (string, error) {
|
||||
if !strings.HasSuffix(mediatype, "+encrypted") {
|
||||
return "", errors.Errorf("unsupported mediatype to decrypt %v:", mediatype)
|
||||
}
|
||||
|
||||
return strings.TrimSuffix(mediatype, "+encrypted"), nil
|
||||
}
|
||||
|
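The `+encrypted` suffix handling above is plain string manipulation; a minimal standalone sketch of the same round trip (re-implemented here for illustration, not the vendored functions themselves):

```
package main

import (
	"fmt"
	"strings"
)

// encryptedMediaType mirrors getEncryptedMediaType's happy path: append the suffix.
func encryptedMediaType(mt string) string { return mt + "+encrypted" }

// decryptedMediaType mirrors getDecryptedMediaType: strip the suffix again.
func decryptedMediaType(mt string) string { return strings.TrimSuffix(mt, "+encrypted") }

func main() {
	mt := "application/vnd.oci.image.layer.v1.tar+gzip"
	enc := encryptedMediaType(mt)
	fmt.Println(enc)                           // ...tar+gzip+encrypted
	fmt.Println(decryptedMediaType(enc) == mt) // true: the mapping round-trips
}
```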
4 vendor/github.com/containers/image/v5/oci/archive/oci_dest.go generated vendored
@ -19,7 +19,7 @@ type ociArchiveImageDestination struct {

// newImageDestination returns an ImageDestination for writing to an existing directory.
func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) {
	tempDirRef, err := createOCIRef(ref.image)
	tempDirRef, err := createOCIRef(sys, ref.image)
	if err != nil {
		return nil, errors.Wrapf(err, "error creating oci reference")
	}
@ -66,7 +66,7 @@ func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool {
	return d.unpackedDest.AcceptsForeignLayerURLs()
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise
func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool {
	return d.unpackedDest.MustMatchRuntimeOS()
}
10 vendor/github.com/containers/image/v5/oci/archive/oci_src.go generated vendored
@ -20,7 +20,7 @@ type ociArchiveImageSource struct {
// newImageSource returns an ImageSource for reading from an existing directory.
// newImageSource untars the file and saves it in a temp directory
func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) {
	tempDirRef, err := createUntarTempDir(ref)
	tempDirRef, err := createUntarTempDir(sys, ref)
	if err != nil {
		return nil, errors.Wrap(err, "error creating temp directory")
	}
@ -38,12 +38,18 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiv
}

// LoadManifestDescriptor loads the manifest
// Deprecated: use LoadManifestDescriptorWithContext instead
func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
	return LoadManifestDescriptorWithContext(nil, imgRef)
}

// LoadManifestDescriptorWithContext loads the manifest
func LoadManifestDescriptorWithContext(sys *types.SystemContext, imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
	ociArchRef, ok := imgRef.(ociArchiveReference)
	if !ok {
		return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference")
	}
	tempDirRef, err := createUntarTempDir(ociArchRef)
	tempDirRef, err := createUntarTempDir(sys, ociArchRef)
	if err != nil {
		return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory")
	}
9 vendor/github.com/containers/image/v5/oci/archive/oci_transport.go generated vendored
@ -159,8 +159,9 @@ func (t *tempDirOCIRef) deleteTempDir() error {
}

// createOCIRef creates the oci reference of the image
func createOCIRef(image string) (tempDirOCIRef, error) {
	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci")
// If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
	if err != nil {
		return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory")
	}
@ -174,8 +175,8 @@ func createOCIRef(image string) (tempDirOCIRef, error) {
}

// creates the temporary directory and copies the tarred content to it
func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, error) {
	tempDirRef, err := createOCIRef(ref.image)
func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
	tempDirRef, err := createOCIRef(sys, ref.image)
	if err != nil {
		return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference")
	}
2 vendor/github.com/containers/image/v5/oci/layout/oci_dest.go generated vendored
@ -97,7 +97,7 @@ func (d *ociImageDestination) AcceptsForeignLayerURLs() bool {
	return true
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *ociImageDestination) MustMatchRuntimeOS() bool {
	return false
}
2 vendor/github.com/containers/image/v5/openshift/openshift.go generated vendored
@ -378,7 +378,7 @@ func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
	return true
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
	return false
}
2 vendor/github.com/containers/image/v5/ostree/ostree_dest.go generated vendored
@ -120,7 +120,7 @@ func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool {
	return false
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (d *ostreeImageDestination) MustMatchRuntimeOS() bool {
	return true
}
6 vendor/github.com/containers/image/v5/storage/storage_image.go generated vendored
@ -341,8 +341,8 @@ func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *

// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
// it's time to Commit() the image
func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "storage")
func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) {
	directory, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage")
	if err != nil {
		return nil, errors.Wrapf(err, "error creating a temporary directory")
	}
@ -930,7 +930,7 @@ func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
	return false
}

// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
func (s *storageImageDestination) MustMatchRuntimeOS() bool {
	return true
}
2 vendor/github.com/containers/image/v5/storage/storage_reference.go generated vendored
@ -295,5 +295,5 @@ func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemC
}

func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
	return newImageDestination(s)
	return newImageDestination(sys, s)
}
57 vendor/github.com/containers/image/v5/types/types.go generated vendored
@ -104,6 +104,19 @@ const (
	Compress
)

// LayerCrypto indicates if layers have been encrypted or decrypted or none
type LayerCrypto int

const (
	// PreserveOriginalCrypto indicates the layer must be preserved, ie
	// no encryption/decryption
	PreserveOriginalCrypto LayerCrypto = iota
	// Encrypt indicates the layer is encrypted
	Encrypt
	// Decrypt indicates the layer is decrypted
	Decrypt
)

// BlobInfo collects known information about a blob (layer/config).
// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
type BlobInfo struct {
@ -115,11 +128,18 @@ type BlobInfo struct {
	// CompressionOperation is used in Image.UpdateLayerInfos to instruct
	// whether the original layer should be preserved or (de)compressed. The
	// field defaults to preserve the original layer.
	// TODO: To remove together with CryptoOperation in re-design to move
	// the field out of BlobInfo.
	CompressionOperation LayerCompression
	// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
	// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
	// set when `CompressionOperation == Compress`.
	CompressionAlgorithm *compression.Algorithm
	// CryptoOperation is used in Image.UpdateLayerInfos to instruct
	// whether the original layer was encrypted/decrypted
	// TODO: To remove together with CompressionOperation in re-design to
	// move the field out of BlobInfo.
	CryptoOperation LayerCrypto
}

// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
@ -264,7 +284,7 @@ type ImageDestination interface {
	// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
	// uploaded to the image destination, true otherwise.
	AcceptsForeignLayerURLs() bool
	// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
	// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
	MustMatchRuntimeOS() bool
	// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
@ -378,6 +398,8 @@ type Image interface {
	// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
	// This does not change the state of the original Image object.
	UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
	// SupportsEncryption returns an indicator that the image supports encryption
	SupportsEncryption(ctx context.Context) bool
	// Size returns an approximation of the amount of disk space which is consumed by the image in its current
	// location. If the size is not known, -1 will be returned.
	Size() (int64, error)
@ -490,9 +512,10 @@ type SystemContext struct {
	OSChoice string
	// If not "", overrides the system's default directory containing a blob info cache.
	BlobInfoCacheDir string

	// Additional tags when creating or copying a docker-archive.
	DockerArchiveAdditionalTags []reference.NamedTagged
	// If not "", overrides the temporary directory to use for storing big files
	BigFilesTemporaryDir string

	// === OCI.Transport overrides ===
	// If not "", a directory containing a CA certificate (ending with ".crt"),
@ -547,9 +570,37 @@ type SystemContext struct {
	CompressionLevel *int
}

// ProgressEvent is the type of events a progress reader can produce
// Warning: new event types may be added any time.
type ProgressEvent uint

const (
	// ProgressEventNewArtifact will be fired on progress reader setup
	ProgressEventNewArtifact ProgressEvent = iota

	// ProgressEventRead indicates that the artifact download is currently in
	// progress
	ProgressEventRead

	// ProgressEventDone is fired when the data transfer has been finished for
	// the specific artifact
	ProgressEventDone
)

// ProgressProperties is used to pass information from the copy code to a monitor which
// can use the real-time information to produce output or react to changes.
type ProgressProperties struct {
	// The event indicating what happened
	Event ProgressEvent

	// The artifact which has been updated in this interval
	Artifact BlobInfo
	Offset uint64

	// The currently downloaded size in bytes
	// Increases from 0 to the final Artifact size
	Offset uint64

	// The additional offset which has been downloaded inside the last update
	// interval. Will be reset after each ProgressEventRead event.
	OffsetUpdate uint64
}
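Callers drive encryption by setting CryptoOperation on each layer's BlobInfo before requesting an updated image. A hedged sketch of what such a caller might look like (this is illustrative use of the new field, not code from the commit):

```
package example

import (
	"context"

	"github.com/containers/image/v5/types"
)

// encryptAllLayers marks every layer of img for encryption and returns the
// updated image. Sketch only: img is assumed to be a valid types.Image.
func encryptAllLayers(ctx context.Context, img types.Image) (types.Image, error) {
	infos := img.LayerInfos()
	updated := make([]types.BlobInfo, len(infos))
	for i, info := range infos {
		info.CryptoOperation = types.Encrypt // manifest gains the "+encrypted" media type
		updated[i] = info
	}
	return img.UpdatedImage(ctx, types.ManifestUpdateOptions{LayerInfos: updated})
}
```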
4 vendor/github.com/containers/image/v5/version/version.go generated vendored
@ -8,10 +8,10 @@ const (
	// VersionMinor is for functionality in a backwards-compatible manner
	VersionMinor = 0
	// VersionPatch is for backwards-compatible bug fixes
	VersionPatch = 0
	VersionPatch = 1

	// VersionDev indicates development branch. Releases will be empty string.
	VersionDev = ""
	VersionDev = "-dev"
)

// Version is the specification version that the package types support.
5 vendor/github.com/containers/ocicrypt/MAINTAINERS generated vendored Normal file
@ -0,0 +1,5 @@
# ocicrypt maintainers
#
# Github ID, Name, Email Address
lumjjb, Brandon Lum, lumjjb@gmail.com
stefanberger, Stefan Berger, stefanb@linux.ibm.com
31 vendor/github.com/containers/ocicrypt/Makefile generated vendored Normal file
@ -0,0 +1,31 @@
# Copyright The containerd Authors.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

.PHONY: check build decoder

all: build

FORCE:

check:
	golangci-lint run

build: vendor
	go build ./...

vendor:
	go mod tidy

test:
	go test ./...
32 vendor/github.com/containers/ocicrypt/README.md generated vendored Normal file
@ -0,0 +1,32 @@
# OCIcrypt Library

The `ocicrypt` library is the OCI image spec implementation of container image encryption. More details of the spec can be seen in the [OCI repository](https://github.com/opencontainers/image-spec/pull/775). The purpose of this library is to encode spec structures and consts in code, as well as provide a consistent implementation of image encryption across container runtimes and build tools.

## Usage

There are various levels of usage for this library. The main consumers would be runtime/build tools, and a more specific use would be the ability to extend cryptographic function.

### Runtime/Build tool usage

The general exposed interface a runtime/build tool would use is to perform encryption or decryption of layers:

```
package "github.com/containers/ocicrypt"
func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error)
func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error)
```

The settings/parameters to these functions can be specified via creation of an encryption config with the `github.com/containers/ocicrypt/config` package. We note that because setting of annotations and other fields of the layer descriptor is done through various means in different runtimes/build tools, it is the responsibility of the caller to still ensure that the layer descriptor follows the OCI specification (i.e. encoding, setting annotations, etc.).


### Crypto Agility and Extensibility

The implementations of both symmetric and asymmetric encryption used in this library are behind two main interfaces, which users can extend if need be. These are in the following packages:
- github.com/containers/ocicrypt/blockcipher - LayerBlockCipher interface for block ciphers
- github.com/containers/ocicrypt/keywrap - KeyWrapper interface for key wrapping

We note that adding interfaces here outside the OCI spec is risky and not recommended, unless for very specialized and confined use cases. Please open an issue or PR if there is a general use case that could be added to the OCI spec.

## Security Issues

We consider security issues related to this library critical. Please report any security related issues by emailing maintainers in the [MAINTAINERS](MAINTAINERS) file.
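To make that flow concrete, here is a minimal sketch of encrypting a single layer with a JWE recipient key, using only the APIs exported by this vendored package; the key path, layer bytes, and descriptor are placeholders:

```
package main

import (
	"bytes"
	"io/ioutil"
	"log"

	"github.com/containers/ocicrypt"
	"github.com/containers/ocicrypt/config"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	pubKeyPEM, err := ioutil.ReadFile("/path/to/key.pem") // hypothetical recipient public key
	if err != nil {
		log.Fatal(err)
	}
	cc, err := config.EncryptWithJwe([][]byte{pubKeyPEM})
	if err != nil {
		log.Fatal(err)
	}

	plainLayer := bytes.NewReader([]byte("layer tar data")) // stands in for the real layer stream
	desc := ocispec.Descriptor{}                            // the layer's descriptor in a real caller

	encReader, finalizer, err := ocicrypt.EncryptLayer(cc.EncryptConfig, plainLayer, desc)
	if err != nil {
		log.Fatal(err)
	}
	encData, err := ioutil.ReadAll(encReader) // the ciphertext must be fully consumed first
	if err != nil {
		log.Fatal(err)
	}
	annotations, err := finalizer() // then the finalizer yields the key-wrap annotations
	if err != nil {
		log.Fatal(err)
	}
	_ = encData
	_ = annotations
}
```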
160 vendor/github.com/containers/ocicrypt/blockcipher/blockcipher.go generated vendored Normal file
@ -0,0 +1,160 @@
/*
   Copyright The ocicrypt Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package blockcipher

import (
	"io"

	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// LayerCipherType is the ciphertype as specified in the layer metadata
type LayerCipherType string

// TODO: Should be obtained from OCI spec once included
const (
	AES256CTR LayerCipherType = "AES_256_CTR_HMAC_SHA256"
)

// PrivateLayerBlockCipherOptions includes the information required to encrypt/decrypt
// an image which are sensitive and should not be in plaintext
type PrivateLayerBlockCipherOptions struct {
	// SymmetricKey represents the symmetric key used for encryption/decryption
	// This field should be populated by Encrypt/Decrypt calls
	SymmetricKey []byte `json:"symkey"`

	// Digest is the digest of the original data for verification.
	// This is NOT populated by Encrypt/Decrypt calls
	Digest digest.Digest `json:"digest"`

	// CipherOptions contains the cipher metadata used for encryption/decryption
	// This field should be populated by Encrypt/Decrypt calls
	CipherOptions map[string][]byte `json:"cipheroptions"`
}

// PublicLayerBlockCipherOptions includes the information required to encrypt/decrypt
// an image which are public and can be deduplicated in plaintext across multiple
// recipients
type PublicLayerBlockCipherOptions struct {
	// CipherType denotes the cipher type according to the list of OCI supported
	// cipher types.
	CipherType LayerCipherType `json:"cipher"`

	// Hmac contains the hmac string to help verify encryption
	Hmac []byte `json:"hmac"`

	// CipherOptions contains the cipher metadata used for encryption/decryption
	// This field should be populated by Encrypt/Decrypt calls
	CipherOptions map[string][]byte `json:"cipheroptions"`
}

// LayerBlockCipherOptions contains the public and private LayerBlockCipherOptions
// required to encrypt/decrypt an image
type LayerBlockCipherOptions struct {
	Public PublicLayerBlockCipherOptions
	Private PrivateLayerBlockCipherOptions
}

// LayerBlockCipher returns a provider for encrypt/decrypt functionality
// for handling the layer data for a specific algorithm
type LayerBlockCipher interface {
	// GenerateKey creates a symmetric key
	GenerateKey() ([]byte, error)
	// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
	Encrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error)
	// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
	Decrypt(layerDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error)
}

// LayerBlockCipherHandler is the handler for encrypt/decrypt for layers
type LayerBlockCipherHandler struct {
	cipherMap map[LayerCipherType]LayerBlockCipher
}

// Finalizer is called after data blobs are written, and returns the LayerBlockCipherOptions for the encrypted blob
type Finalizer func() (LayerBlockCipherOptions, error)

// GetOpt returns the value of the cipher option and if the option exists
func (lbco LayerBlockCipherOptions) GetOpt(key string) (value []byte, ok bool) {
	if v, ok := lbco.Public.CipherOptions[key]; ok {
		return v, ok
	} else if v, ok := lbco.Private.CipherOptions[key]; ok {
		return v, ok
	} else {
		return nil, false
	}
}

func wrapFinalizerWithType(fin Finalizer, typ LayerCipherType) Finalizer {
	return func() (LayerBlockCipherOptions, error) {
		lbco, err := fin()
		if err != nil {
			return LayerBlockCipherOptions{}, err
		}
		lbco.Public.CipherType = typ
		return lbco, err
	}
}

// Encrypt is the handler for the layer encryption routine
func (h *LayerBlockCipherHandler) Encrypt(plainDataReader io.Reader, typ LayerCipherType) (io.Reader, Finalizer, error) {
	if c, ok := h.cipherMap[typ]; ok {
		sk, err := c.GenerateKey()
		if err != nil {
			return nil, nil, err
		}
		opt := LayerBlockCipherOptions{
			Private: PrivateLayerBlockCipherOptions{
				SymmetricKey: sk,
			},
		}
		encDataReader, fin, err := c.Encrypt(plainDataReader, opt)
		if err == nil {
			fin = wrapFinalizerWithType(fin, typ)
		}
		return encDataReader, fin, err
	}
	return nil, nil, errors.Errorf("unsupported cipher type: %s", typ)
}

// Decrypt is the handler for the layer decryption routine
func (h *LayerBlockCipherHandler) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
	typ := opt.Public.CipherType
	if typ == "" {
		return nil, LayerBlockCipherOptions{}, errors.New("no cipher type provided")
	}
	if c, ok := h.cipherMap[LayerCipherType(typ)]; ok {
		return c.Decrypt(encDataReader, opt)
	}
	return nil, LayerBlockCipherOptions{}, errors.Errorf("unsupported cipher type: %s", typ)
}

// NewLayerBlockCipherHandler returns a new default handler
func NewLayerBlockCipherHandler() (*LayerBlockCipherHandler, error) {
	h := LayerBlockCipherHandler{
		cipherMap: map[LayerCipherType]LayerBlockCipher{},
	}

	var err error
	h.cipherMap[AES256CTR], err = NewAESCTRLayerBlockCipher(256)
	if err != nil {
		return nil, errors.Wrap(err, "unable to set up Cipher AES-256-CTR")
	}

	return &h, nil
}
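A quick way to see the handler contract in action is a self-contained round trip: encrypt a buffer, drain the stream, finalize to capture the key, nonce, and HMAC, then decrypt with those options. A sketch using only the APIs defined above:

```
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/containers/ocicrypt/blockcipher"
)

func main() {
	h, err := blockcipher.NewLayerBlockCipherHandler()
	if err != nil {
		log.Fatal(err)
	}

	plaintext := []byte("some layer bytes")
	encReader, fin, err := h.Encrypt(bytes.NewReader(plaintext), blockcipher.AES256CTR)
	if err != nil {
		log.Fatal(err)
	}
	ciphertext, err := ioutil.ReadAll(encReader) // must drain the stream before finalizing
	if err != nil {
		log.Fatal(err)
	}
	opts, err := fin() // carries the symmetric key, nonce, and HMAC
	if err != nil {
		log.Fatal(err)
	}

	plainReader, _, err := h.Decrypt(bytes.NewReader(ciphertext), opts)
	if err != nil {
		log.Fatal(err)
	}
	roundTrip, err := ioutil.ReadAll(plainReader) // HMAC is verified as EOF is reached
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(roundTrip, plaintext)) // true
}
```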
193 vendor/github.com/containers/ocicrypt/blockcipher/blockcipher_aes_ctr.go generated vendored Normal file
@ -0,0 +1,193 @@
/*
   Copyright The ocicrypt Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package blockcipher

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"hash"
	"io"

	"github.com/containers/ocicrypt/utils"
	"github.com/pkg/errors"
)

// AESCTRLayerBlockCipher implements the AES CTR stream cipher
type AESCTRLayerBlockCipher struct {
	keylen int // in bytes
	reader io.Reader
	encrypt bool
	stream cipher.Stream
	err error
	hmac hash.Hash
	expHmac []byte
	doneEncrypting bool
}

type aesctrcryptor struct {
	bc *AESCTRLayerBlockCipher
}

// NewAESCTRLayerBlockCipher returns a new AES CTR block cipher of 256 bits
func NewAESCTRLayerBlockCipher(bits int) (LayerBlockCipher, error) {
	if bits != 256 {
		return nil, errors.New("AES CTR bit count not supported")
	}
	return &AESCTRLayerBlockCipher{keylen: bits / 8}, nil
}

func (r *aesctrcryptor) Read(p []byte) (int, error) {
	var (
		o int
	)

	if r.bc.err != nil {
		return 0, r.bc.err
	}

	o, err := utils.FillBuffer(r.bc.reader, p)
	if err != nil {
		if err == io.EOF {
			r.bc.err = err
		} else {
			return 0, err
		}
	}

	if !r.bc.encrypt {
		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
			r.bc.err = errors.Wrapf(err, "could not write to hmac")
			return 0, r.bc.err
		}

		if r.bc.err == io.EOF {
			// Before we return EOF we let the HMAC comparison
			// provide a verdict
			if !hmac.Equal(r.bc.hmac.Sum(nil), r.bc.expHmac) {
				r.bc.err = fmt.Errorf("could not properly decrypt byte stream; exp hmac: '%x', actual hmac: '%x'", r.bc.expHmac, r.bc.hmac.Sum(nil))
				return 0, r.bc.err
			}
		}
	}

	r.bc.stream.XORKeyStream(p[:o], p[:o])

	if r.bc.encrypt {
		if _, err := r.bc.hmac.Write(p[:o]); err != nil {
			r.bc.err = errors.Wrapf(err, "could not write to hmac")
			return 0, r.bc.err
		}

		if r.bc.err == io.EOF {
			// Final data encrypted; Do the 'then-MAC' part
			r.bc.doneEncrypting = true
		}
	}

	return o, r.bc.err
}

// init initializes an instance
func (bc *AESCTRLayerBlockCipher) init(encrypt bool, reader io.Reader, opts LayerBlockCipherOptions) (LayerBlockCipherOptions, error) {
	var (
		err error
	)

	key := opts.Private.SymmetricKey
	if len(key) != bc.keylen {
		return LayerBlockCipherOptions{}, fmt.Errorf("invalid key length of %d bytes; need %d bytes", len(key), bc.keylen)
	}

	nonce, ok := opts.GetOpt("nonce")
	if !ok {
		nonce = make([]byte, aes.BlockSize)
		if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
			return LayerBlockCipherOptions{}, errors.Wrap(err, "unable to generate random nonce")
		}
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		return LayerBlockCipherOptions{}, errors.Wrap(err, "aes.NewCipher failed")
	}

	bc.reader = reader
	bc.encrypt = encrypt
	bc.stream = cipher.NewCTR(block, nonce)
	bc.err = nil
	bc.hmac = hmac.New(sha256.New, key)
	bc.expHmac = opts.Public.Hmac
	bc.doneEncrypting = false

	if !encrypt && len(bc.expHmac) == 0 {
		return LayerBlockCipherOptions{}, errors.New("HMAC is not provided for decryption process")
	}

	lbco := LayerBlockCipherOptions{
		Private: PrivateLayerBlockCipherOptions{
			SymmetricKey: key,
			CipherOptions: map[string][]byte{
				"nonce": nonce,
			},
		},
	}

	return lbco, nil
}

// GenerateKey creates a symmetric key
func (bc *AESCTRLayerBlockCipher) GenerateKey() ([]byte, error) {
	key := make([]byte, bc.keylen)
	if _, err := io.ReadFull(rand.Reader, key); err != nil {
		return nil, err
	}
	return key, nil
}

// Encrypt takes in layer data and returns the ciphertext and relevant LayerBlockCipherOptions
func (bc *AESCTRLayerBlockCipher) Encrypt(plainDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, Finalizer, error) {
	lbco, err := bc.init(true, plainDataReader, opt)
	if err != nil {
		return nil, nil, err
	}

	finalizer := func() (LayerBlockCipherOptions, error) {
		if !bc.doneEncrypting {
			return LayerBlockCipherOptions{}, errors.New("Read()ing not complete, unable to finalize")
		}
		if lbco.Public.CipherOptions == nil {
			lbco.Public.CipherOptions = map[string][]byte{}
		}
		lbco.Public.Hmac = bc.hmac.Sum(nil)
		return lbco, nil
	}
	return &aesctrcryptor{bc}, finalizer, nil
}

// Decrypt takes in layer ciphertext data and returns the plaintext and relevant LayerBlockCipherOptions
func (bc *AESCTRLayerBlockCipher) Decrypt(encDataReader io.Reader, opt LayerBlockCipherOptions) (io.Reader, LayerBlockCipherOptions, error) {
	lbco, err := bc.init(false, encDataReader, opt)
	if err != nil {
		return nil, LayerBlockCipherOptions{}, err
	}

	return utils.NewDelayedReader(&aesctrcryptor{bc}, 1024*10), lbco, nil
}
114 vendor/github.com/containers/ocicrypt/config/config.go generated vendored Normal file
@ -0,0 +1,114 @@
/*
   Copyright The ocicrypt Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package config

// EncryptConfig is the container image PGP encryption configuration holding
// the identifiers of those that will be able to decrypt the container and
// the PGP public keyring file data that contains their public keys.
type EncryptConfig struct {
	// map holding 'gpg-recipients', 'gpg-pubkeyringfile', 'pubkeys', 'x509s'
	Parameters map[string][][]byte

	DecryptConfig DecryptConfig
}

// DecryptConfig wraps the Parameters map that holds the decryption key
type DecryptConfig struct {
	// map holding 'privkeys', 'x509s', 'gpg-privatekeys'
	Parameters map[string][][]byte
}

// CryptoConfig is a common wrapper for EncryptConfig and DecryptConfig that can
// be passed through functions that share much code for encryption and decryption
type CryptoConfig struct {
	EncryptConfig *EncryptConfig
	DecryptConfig *DecryptConfig
}

// InitDecryption initializes a CryptoConfig object with parameters used for decryption
func InitDecryption(dcparameters map[string][][]byte) CryptoConfig {
	return CryptoConfig{
		DecryptConfig: &DecryptConfig{
			Parameters: dcparameters,
		},
	}
}

// InitEncryption initializes a CryptoConfig object with parameters used for encryption
// It also takes dcparameters that may be needed for decryption when adding a recipient
// to an already encrypted image
func InitEncryption(parameters, dcparameters map[string][][]byte) CryptoConfig {
	return CryptoConfig{
		EncryptConfig: &EncryptConfig{
			Parameters: parameters,
			DecryptConfig: DecryptConfig{
				Parameters: dcparameters,
			},
		},
	}
}

// CombineCryptoConfigs takes a CryptoConfig list and creates a single CryptoConfig
// containing the crypto configuration of all the key bundles
func CombineCryptoConfigs(ccs []CryptoConfig) CryptoConfig {
	ecparam := map[string][][]byte{}
	ecdcparam := map[string][][]byte{}
	dcparam := map[string][][]byte{}

	for _, cc := range ccs {
		if ec := cc.EncryptConfig; ec != nil {
			addToMap(ecparam, ec.Parameters)
			addToMap(ecdcparam, ec.DecryptConfig.Parameters)
		}

		if dc := cc.DecryptConfig; dc != nil {
			addToMap(dcparam, dc.Parameters)
		}
	}

	return CryptoConfig{
		EncryptConfig: &EncryptConfig{
			Parameters: ecparam,
			DecryptConfig: DecryptConfig{
				Parameters: ecdcparam,
			},
		},
		DecryptConfig: &DecryptConfig{
			Parameters: dcparam,
		},
	}

}

// AttachDecryptConfig adds DecryptConfig to the field of EncryptConfig so that
// the decryption parameters can be used to add recipients to an existing image
// if the user is able to decrypt it.
func (ec *EncryptConfig) AttachDecryptConfig(dc *DecryptConfig) {
	if dc != nil {
		addToMap(ec.DecryptConfig.Parameters, dc.Parameters)
	}
}

func addToMap(orig map[string][][]byte, add map[string][][]byte) {
	for k, v := range add {
		if ov, ok := orig[k]; ok {
			orig[k] = append(ov, v...)
		} else {
			orig[k] = v
		}
	}
}
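A small sketch of how these pieces fit together: build an encrypt config and a decrypt config, then attach the decryption material so a recipient can later be added to an already encrypted image. The parameter keys ("pubkeys", "privkeys") and the PEM bytes are illustrative placeholders:

```
package main

import (
	"fmt"

	"github.com/containers/ocicrypt/config"
)

func main() {
	ec := config.InitEncryption(
		map[string][][]byte{"pubkeys": {[]byte("recipient-A.pem")}}, // hypothetical public key bytes
		map[string][][]byte{},
	)
	dc := config.InitDecryption(map[string][][]byte{"privkeys": {[]byte("my-key.pem")}})

	// Merge the decryption parameters into the encrypt config, as done when
	// adding a recipient to an existing encrypted image.
	ec.EncryptConfig.AttachDecryptConfig(dc.DecryptConfig)
	fmt.Println(len(ec.EncryptConfig.DecryptConfig.Parameters["privkeys"])) // 1
}
```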
134 vendor/github.com/containers/ocicrypt/config/constructors.go generated vendored Normal file
@ -0,0 +1,134 @@
/*
   Copyright The ocicrypt Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package config

import (
	"github.com/pkg/errors"
)

// EncryptWithJwe returns a CryptoConfig to encrypt with jwe public keys
func EncryptWithJwe(pubKeys [][]byte) (CryptoConfig, error) {
	dc := DecryptConfig{}
	ep := map[string][][]byte{
		"pubkeys": pubKeys,
	}

	return CryptoConfig{
		EncryptConfig: &EncryptConfig{
			Parameters: ep,
			DecryptConfig: dc,
		},
		DecryptConfig: &dc,
	}, nil
}

// EncryptWithPkcs7 returns a CryptoConfig to encrypt with pkcs7 x509 certs
func EncryptWithPkcs7(x509s [][]byte) (CryptoConfig, error) {
	dc := DecryptConfig{}

	ep := map[string][][]byte{
		"x509s": x509s,
	}

	return CryptoConfig{
		EncryptConfig: &EncryptConfig{
			Parameters: ep,
			DecryptConfig: dc,
		},
		DecryptConfig: &dc,
	}, nil
}

// EncryptWithGpg returns a CryptoConfig to encrypt with configured gpg parameters
func EncryptWithGpg(gpgRecipients [][]byte, gpgPubRingFile []byte) (CryptoConfig, error) {
	dc := DecryptConfig{}
	ep := map[string][][]byte{
		"gpg-recipients": gpgRecipients,
		"gpg-pubkeyringfile": {gpgPubRingFile},
	}

	return CryptoConfig{
		EncryptConfig: &EncryptConfig{
			Parameters: ep,
			DecryptConfig: dc,
		},
		DecryptConfig: &dc,
	}, nil
}

// DecryptWithPrivKeys returns a CryptoConfig to decrypt with configured private keys
func DecryptWithPrivKeys(privKeys [][]byte, privKeysPasswords [][]byte) (CryptoConfig, error) {
	if len(privKeys) != len(privKeysPasswords) {
		return CryptoConfig{}, errors.New("Length of privKeys should match length of privKeysPasswords")
	}

	dc := DecryptConfig{
		Parameters: map[string][][]byte{
			"privkeys": privKeys,
			"privkeys-passwords": privKeysPasswords,
		},
	}

	ep := map[string][][]byte{}

	return CryptoConfig{
		EncryptConfig: &EncryptConfig{
			Parameters: ep,
			DecryptConfig: dc,
		},
		DecryptConfig: &dc,
	}, nil
}

// DecryptWithX509s returns a CryptoConfig to decrypt with configured x509 certs
func DecryptWithX509s(x509s [][]byte) (CryptoConfig, error) {
	dc := DecryptConfig{
		Parameters: map[string][][]byte{
			"x509s": x509s,
		},
	}

	ep := map[string][][]byte{}

	return CryptoConfig{
		EncryptConfig: &EncryptConfig{
			Parameters: ep,
			DecryptConfig: dc,
		},
		DecryptConfig: &dc,
	}, nil
}

// DecryptWithGpgPrivKeys returns a CryptoConfig to decrypt with configured gpg private keys
func DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeysPwds [][]byte) (CryptoConfig, error) {
	dc := DecryptConfig{
		Parameters: map[string][][]byte{
			"gpg-privatekeys": gpgPrivKeys,
			"gpg-privatekeys-passwords": gpgPrivKeysPwds,
		},
	}

	ep := map[string][][]byte{}

	return CryptoConfig{
		EncryptConfig: &EncryptConfig{
			Parameters: ep,
			DecryptConfig: dc,
		},
		DecryptConfig: &dc,
	}, nil
}
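These constructors are typically combined when a caller holds several key bundles; a sketch of merging a JWE recipient with a set of private keys into one CryptoConfig (the PEM bytes are hypothetical):

```
package main

import (
	"log"

	"github.com/containers/ocicrypt/config"
)

func main() {
	jwePubKey := []byte("-----BEGIN PUBLIC KEY-----\n...")      // hypothetical recipient key
	privKey := []byte("-----BEGIN RSA PRIVATE KEY-----\n...")   // hypothetical private key

	encCC, err := config.EncryptWithJwe([][]byte{jwePubKey})
	if err != nil {
		log.Fatal(err)
	}
	// One (empty) password per private key, as required by the length check.
	decCC, err := config.DecryptWithPrivKeys([][]byte{privKey}, [][]byte{[]byte("")})
	if err != nil {
		log.Fatal(err)
	}

	// A single CryptoConfig whose EncryptConfig and DecryptConfig carry both bundles.
	cc := config.CombineCryptoConfigs([]config.CryptoConfig{encCC, decCC})
	_ = cc.EncryptConfig // pass to EncryptLayer
	_ = cc.DecryptConfig // pass to DecryptLayer
}
```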
325 vendor/github.com/containers/ocicrypt/encryption.go generated vendored Normal file
@ -0,0 +1,325 @@
/*
   Copyright The ocicrypt Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ocicrypt

import (
	"encoding/base64"
	"encoding/json"
	"io"
	"strings"

	"github.com/containers/ocicrypt/blockcipher"
	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap"
	"github.com/containers/ocicrypt/keywrap/jwe"
	"github.com/containers/ocicrypt/keywrap/pgp"
	"github.com/containers/ocicrypt/keywrap/pkcs7"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// EncryptLayerFinalizer is a finalizer run to return the annotations to set for
// the encrypted layer
type EncryptLayerFinalizer func() (map[string]string, error)

func init() {
	keyWrappers = make(map[string]keywrap.KeyWrapper)
	keyWrapperAnnotations = make(map[string]string)
	RegisterKeyWrapper("pgp", pgp.NewKeyWrapper())
	RegisterKeyWrapper("jwe", jwe.NewKeyWrapper())
	RegisterKeyWrapper("pkcs7", pkcs7.NewKeyWrapper())
}

var keyWrappers map[string]keywrap.KeyWrapper
var keyWrapperAnnotations map[string]string

// RegisterKeyWrapper allows registering key wrappers by their encryption scheme
func RegisterKeyWrapper(scheme string, iface keywrap.KeyWrapper) {
	keyWrappers[scheme] = iface
	keyWrapperAnnotations[iface.GetAnnotationID()] = scheme
}

// GetKeyWrapper looks up the encryptor interface given an encryption scheme (gpg, jwe)
func GetKeyWrapper(scheme string) keywrap.KeyWrapper {
	return keyWrappers[scheme]
}

// GetWrappedKeysMap returns a map of wrappedKeys as values in a
// map with the encryption scheme(s) as the key(s)
func GetWrappedKeysMap(desc ocispec.Descriptor) map[string]string {
	wrappedKeysMap := make(map[string]string)

	for annotationsID, scheme := range keyWrapperAnnotations {
		if annotation, ok := desc.Annotations[annotationsID]; ok {
			wrappedKeysMap[scheme] = annotation
		}
	}
	return wrappedKeysMap
}
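The annotation-to-scheme registry above is what lets the decrypt path discover which key wrapper produced a layer's metadata. A small sketch of reading it off a descriptor, assuming the jwe wrapper registers the annotation ID shown (the base64 blob is a hypothetical placeholder):

```
package main

import (
	"fmt"

	"github.com/containers/ocicrypt"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	desc := ocispec.Descriptor{
		Annotations: map[string]string{
			// assumed annotation ID for the jwe wrapper; the value is fabricated
			"org.opencontainers.image.enc.keys.jwe": "eyJwcm90ZWN0ZWQi...",
		},
	}
	for scheme, wrapped := range ocicrypt.GetWrappedKeysMap(desc) {
		fmt.Printf("scheme %s has %d bytes of wrapped key data\n", scheme, len(wrapped))
	}
}
```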
// EncryptLayer encrypts the layer by running one encryptor after the other
func EncryptLayer(ec *config.EncryptConfig, encOrPlainLayerReader io.Reader, desc ocispec.Descriptor) (io.Reader, EncryptLayerFinalizer, error) {
	var (
		encLayerReader io.Reader
		err error
		encrypted bool
		bcFin blockcipher.Finalizer
		privOptsData []byte
		pubOptsData []byte
	)

	if ec == nil {
		return nil, nil, errors.New("EncryptConfig must not be nil")
	}

	for annotationsID := range keyWrapperAnnotations {
		annotation := desc.Annotations[annotationsID]
		if annotation != "" {
			privOptsData, err = decryptLayerKeyOptsData(&ec.DecryptConfig, desc)
			if err != nil {
				return nil, nil, err
			}
			pubOptsData, err = getLayerPubOpts(desc)
			if err != nil {
				return nil, nil, err
			}
			// already encrypted!
			encrypted = true
		}
	}

	if !encrypted {
		encLayerReader, bcFin, err = commonEncryptLayer(encOrPlainLayerReader, desc.Digest, blockcipher.AES256CTR)
		if err != nil {
			return nil, nil, err
		}
	}

	encLayerFinalizer := func() (map[string]string, error) {
		// If layer was already encrypted, bcFin should be nil, use existing optsData
		if bcFin != nil {
			opts, err := bcFin()
			if err != nil {
				return nil, err
			}
			privOptsData, err = json.Marshal(opts.Private)
			if err != nil {
				return nil, errors.Wrapf(err, "could not JSON marshal opts")
			}
			pubOptsData, err = json.Marshal(opts.Public)
			if err != nil {
				return nil, errors.Wrapf(err, "could not JSON marshal opts")
			}
		}

		newAnnotations := make(map[string]string)
		for annotationsID, scheme := range keyWrapperAnnotations {
			b64Annotations := desc.Annotations[annotationsID]
			keywrapper := GetKeyWrapper(scheme)
			b64Annotations, err = preWrapKeys(keywrapper, ec, b64Annotations, privOptsData)
			if err != nil {
				return nil, err
			}
			if b64Annotations != "" {
				newAnnotations[annotationsID] = b64Annotations
			}
		}

		newAnnotations["org.opencontainers.image.enc.pubopts"] = base64.StdEncoding.EncodeToString(pubOptsData)

		if len(newAnnotations) == 0 {
			return nil, errors.New("no encryptor found to handle encryption")
		}

		return newAnnotations, err
	}

	// if nothing was encrypted, we just return encLayer = nil
	return encLayerReader, encLayerFinalizer, err

}

// preWrapKeys calls WrapKeys and handles the base64 encoding and concatenation of the
// annotation data
func preWrapKeys(keywrapper keywrap.KeyWrapper, ec *config.EncryptConfig, b64Annotations string, optsData []byte) (string, error) {
	newAnnotation, err := keywrapper.WrapKeys(ec, optsData)
	if err != nil || len(newAnnotation) == 0 {
		return b64Annotations, err
	}
	b64newAnnotation := base64.StdEncoding.EncodeToString(newAnnotation)
	if b64Annotations == "" {
		return b64newAnnotation, nil
	}
	return b64Annotations + "," + b64newAnnotation, nil
}
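Because EncryptLayer recognizes existing key-wrap annotations, the same entry point can add a recipient to an already encrypted layer: supply an EncryptConfig whose attached DecryptConfig can unwrap the current layer key. A sketch under that assumption, with hypothetical key bytes:

```
package main

import (
	"bytes"
	"io/ioutil"
	"log"

	"github.com/containers/ocicrypt"
	"github.com/containers/ocicrypt/config"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// addRecipient re-wraps the layer key of an already encrypted layer for newPubKey.
func addRecipient(encLayer []byte, desc ocispec.Descriptor, newPubKey, myPrivKey []byte) (map[string]string, error) {
	encCC, err := config.EncryptWithJwe([][]byte{newPubKey})
	if err != nil {
		return nil, err
	}
	decCC, err := config.DecryptWithPrivKeys([][]byte{myPrivKey}, [][]byte{[]byte("")})
	if err != nil {
		return nil, err
	}
	cc := config.CombineCryptoConfigs([]config.CryptoConfig{encCC, decCC})
	// Let the encrypt config unwrap the existing layer key.
	cc.EncryptConfig.AttachDecryptConfig(cc.DecryptConfig)

	r, finalizer, err := ocicrypt.EncryptLayer(cc.EncryptConfig, bytes.NewReader(encLayer), desc)
	if err != nil {
		return nil, err
	}
	if r != nil { // nil when the layer stays as-is and only keys are re-wrapped
		if _, err := ioutil.ReadAll(r); err != nil {
			return nil, err
		}
	}
	return finalizer() // annotations now also cover the new recipient
}

func main() { log.Println("sketch; see addRecipient") }
```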
// DecryptLayer decrypts a layer trying one keywrap.KeyWrapper after the other to see whether it
// can apply the provided private key
// If unwrapOnly is set we will only try to decrypt the layer encryption key and return
func DecryptLayer(dc *config.DecryptConfig, encLayerReader io.Reader, desc ocispec.Descriptor, unwrapOnly bool) (io.Reader, digest.Digest, error) {
	if dc == nil {
		return nil, "", errors.New("DecryptConfig must not be nil")
	}
	privOptsData, err := decryptLayerKeyOptsData(dc, desc)
	if err != nil || unwrapOnly {
		return nil, "", err
	}

	var pubOptsData []byte
	pubOptsData, err = getLayerPubOpts(desc)
	if err != nil {
		return nil, "", err
	}

	return commonDecryptLayer(encLayerReader, privOptsData, pubOptsData)
}

func decryptLayerKeyOptsData(dc *config.DecryptConfig, desc ocispec.Descriptor) ([]byte, error) {
	privKeyGiven := false
	for annotationsID, scheme := range keyWrapperAnnotations {
		b64Annotation := desc.Annotations[annotationsID]
		if b64Annotation != "" {
			keywrapper := GetKeyWrapper(scheme)

			if len(keywrapper.GetPrivateKeys(dc.Parameters)) == 0 {
				continue
			}
			privKeyGiven = true

			optsData, err := preUnwrapKey(keywrapper, dc, b64Annotation)
			if err != nil {
				// try next keywrap.KeyWrapper
				continue
			}
			if optsData == nil {
				// try next keywrap.KeyWrapper
				continue
			}
			return optsData, nil
		}
	}
	if !privKeyGiven {
		return nil, errors.New("missing private key needed for decryption")
	}
	return nil, errors.Errorf("no suitable key unwrapper found or none of the private keys could be used for decryption")
}

func getLayerPubOpts(desc ocispec.Descriptor) ([]byte, error) {
	pubOptsString := desc.Annotations["org.opencontainers.image.enc.pubopts"]
	if pubOptsString == "" {
		return json.Marshal(blockcipher.PublicLayerBlockCipherOptions{})
	}
	return base64.StdEncoding.DecodeString(pubOptsString)
}

// preUnwrapKey decodes the comma separated base64 strings and calls the Unwrap function
// of the given keywrapper with it and returns the result in case the Unwrap function
// does not return an error. If all attempts fail, an error is returned.
func preUnwrapKey(keywrapper keywrap.KeyWrapper, dc *config.DecryptConfig, b64Annotations string) ([]byte, error) {
	if b64Annotations == "" {
		return nil, nil
	}
	for _, b64Annotation := range strings.Split(b64Annotations, ",") {
		annotation, err := base64.StdEncoding.DecodeString(b64Annotation)
		if err != nil {
			return nil, errors.New("could not base64 decode the annotation")
		}
		optsData, err := keywrapper.UnwrapKey(dc, annotation)
		if err != nil {
			continue
		}
		return optsData, nil
	}
	return nil, errors.New("no suitable key found for decrypting layer key")
}
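On the read side, a consumer hands DecryptLayer the wrapped-key annotations via the descriptor and receives the plaintext stream plus the digest recorded at encryption time, which it should verify. A sketch under the same placeholder assumptions as above:

```
package main

import (
	"bytes"
	"io/ioutil"
	"log"

	"github.com/containers/ocicrypt"
	"github.com/containers/ocicrypt/config"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	privKey := []byte("-----BEGIN RSA PRIVATE KEY-----\n...") // hypothetical key material
	cc, err := config.DecryptWithPrivKeys([][]byte{privKey}, [][]byte{[]byte("")})
	if err != nil {
		log.Fatal(err)
	}

	var encData []byte          // ciphertext fetched from the registry
	var desc ocispec.Descriptor // descriptor carrying the enc annotations

	plainReader, expDigest, err := ocicrypt.DecryptLayer(cc.DecryptConfig, bytes.NewReader(encData), desc, false)
	if err != nil {
		log.Fatal(err)
	}
	plain, err := ioutil.ReadAll(plainReader) // HMAC is verified while reading
	if err != nil {
		log.Fatal(err)
	}
	_ = expDigest // compare against the digest of `plain` before trusting it
	_ = plain
}
```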
// commonEncryptLayer is a function to encrypt the plain layer using a new random
// symmetric key and return the LayerBlockCipherHandler's JSON in string form for
// later use during decryption
func commonEncryptLayer(plainLayerReader io.Reader, d digest.Digest, typ blockcipher.LayerCipherType) (io.Reader, blockcipher.Finalizer, error) {
	lbch, err := blockcipher.NewLayerBlockCipherHandler()
	if err != nil {
		return nil, nil, err
	}

	encLayerReader, bcFin, err := lbch.Encrypt(plainLayerReader, typ)
	if err != nil {
		return nil, nil, err
	}

	newBcFin := func() (blockcipher.LayerBlockCipherOptions, error) {
		lbco, err := bcFin()
		if err != nil {
			return blockcipher.LayerBlockCipherOptions{}, err
		}
		lbco.Private.Digest = d
		return lbco, nil
	}

	return encLayerReader, newBcFin, err
}

// commonDecryptLayer decrypts an encrypted layer previously encrypted with commonEncryptLayer
// by passing along the optsData
func commonDecryptLayer(encLayerReader io.Reader, privOptsData []byte, pubOptsData []byte) (io.Reader, digest.Digest, error) {
	privOpts := blockcipher.PrivateLayerBlockCipherOptions{}
	err := json.Unmarshal(privOptsData, &privOpts)
	if err != nil {
		return nil, "", errors.Wrapf(err, "could not JSON unmarshal privOptsData")
	}

	lbch, err := blockcipher.NewLayerBlockCipherHandler()
	if err != nil {
		return nil, "", err
	}

	pubOpts := blockcipher.PublicLayerBlockCipherOptions{}
	if len(pubOptsData) > 0 {
		err := json.Unmarshal(pubOptsData, &pubOpts)
		if err != nil {
			return nil, "", errors.Wrapf(err, "could not JSON unmarshal pubOptsData")
		}
	}

	opts := blockcipher.LayerBlockCipherOptions{
		Private: privOpts,
		Public:  pubOpts,
	}

	plainLayerReader, opts, err := lbch.Decrypt(encLayerReader, opts)
	if err != nil {
		return nil, "", err
	}

	return plainLayerReader, opts.Private.Digest, nil
}

// FilterOutAnnotations filters out the annotations belonging to the image encryption 'namespace'
// and returns a map with those taken out
func FilterOutAnnotations(annotations map[string]string) map[string]string {
	a := make(map[string]string)
	if len(annotations) > 0 {
		for k, v := range annotations {
			if strings.HasPrefix(k, "org.opencontainers.image.enc.") {
				continue
			}
			a[k] = v
		}
	}
	return a
}
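A minimal usage sketch of the exported FilterOutAnnotations above; the annotation keys and values are placeholders:

package main

import (
	"fmt"

	"github.com/containers/ocicrypt"
)

func main() {
	annotations := map[string]string{
		"org.opencontainers.image.enc.keys.jwe": "placeholder", // dropped
		"org.opencontainers.image.enc.pubopts":  "placeholder", // dropped
		"com.example.custom":                    "retained",    // kept
	}
	fmt.Println(ocicrypt.FilterOutAnnotations(annotations))
}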
18 vendor/github.com/containers/ocicrypt/go.mod generated vendored Normal file
@@ -0,0 +1,18 @@
module github.com/containers/ocicrypt

go 1.12

require (
	github.com/containerd/containerd v1.2.10
	github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa
	github.com/opencontainers/go-digest v1.0.0-rc1
	github.com/opencontainers/image-spec v1.0.1
	github.com/pkg/errors v0.8.1
	github.com/sirupsen/logrus v1.4.2 // indirect
	github.com/stretchr/testify v1.3.0 // indirect
	github.com/urfave/cli v1.22.1
	golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
	google.golang.org/grpc v1.24.0 // indirect
	gopkg.in/square/go-jose.v2 v2.3.1
	gotest.tools v2.2.0+incompatible // indirect
)
73 vendor/github.com/containers/ocicrypt/go.sum generated vendored Normal file
@@ -0,0 +1,73 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/containerd/containerd v1.2.10 h1:liQDhXqIn7y6cJ/7qBgOaZsiTZJc56/wkkhDBiDBRDw=
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
425 vendor/github.com/containers/ocicrypt/gpg.go generated vendored Normal file
@@ -0,0 +1,425 @@
/*
   Copyright The ocicrypt Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ocicrypt

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"regexp"
	"strconv"
	"strings"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/crypto/ssh/terminal"
)

// GPGVersion enum representing the GPG client version to use.
type GPGVersion int

const (
	// GPGv2 signifies gpgv2+
	GPGv2 GPGVersion = iota
	// GPGv1 signifies gpgv1+
	GPGv1
	// GPGVersionUndetermined signifies gpg client version undetermined
	GPGVersionUndetermined
)

// GPGClient defines an interface for wrapping the gpg command line tools
type GPGClient interface {
	// ReadGPGPubRingFile gets the byte sequence of the gpg public keyring
	ReadGPGPubRingFile() ([]byte, error)
	// GetGPGPrivateKey gets the private key bytes of a keyid given a passphrase
	GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error)
	// GetSecretKeyDetails gets the details of a secret key
	GetSecretKeyDetails(keyid uint64) ([]byte, bool, error)
	// GetKeyDetails gets the details of a public key
	GetKeyDetails(keyid uint64) ([]byte, bool, error)
	// ResolveRecipients resolves PGP key ids to user names
	ResolveRecipients([]string) []string
}

// gpgClient contains generic gpg client information
type gpgClient struct {
	gpgHomeDir string
}

// gpgv2Client is a gpg2 client
type gpgv2Client struct {
	gpgClient
}

// gpgv1Client is a gpg client
type gpgv1Client struct {
	gpgClient
}

// GuessGPGVersion guesses the version of gpg. It defaults to gpg2 if that
// exists, otherwise it falls back to regular gpg.
func GuessGPGVersion() GPGVersion {
	if err := exec.Command("gpg2", "--version").Run(); err == nil {
		return GPGv2
	} else if err := exec.Command("gpg", "--version").Run(); err == nil {
		return GPGv1
	} else {
		return GPGVersionUndetermined
	}
}

// NewGPGClient creates a new GPGClient object representing the given version
// and using the given home directory
func NewGPGClient(gpgVersion, gpgHomeDir string) (GPGClient, error) {
	v := new(GPGVersion)
	switch gpgVersion {
	case "v1":
		*v = GPGv1
	case "v2":
		*v = GPGv2
	default:
		v = nil
	}
	return newGPGClient(v, gpgHomeDir)
}

func newGPGClient(version *GPGVersion, homedir string) (GPGClient, error) {
	var gpgVersion GPGVersion
	if version != nil {
		gpgVersion = *version
	} else {
		gpgVersion = GuessGPGVersion()
	}

	switch gpgVersion {
	case GPGv1:
		return &gpgv1Client{
			gpgClient: gpgClient{gpgHomeDir: homedir},
		}, nil
	case GPGv2:
		return &gpgv2Client{
			gpgClient: gpgClient{gpgHomeDir: homedir},
		}, nil
	case GPGVersionUndetermined:
		return nil, fmt.Errorf("unable to determine GPG version")
	default:
		return nil, fmt.Errorf("unhandled case: NewGPGClient")
	}
}
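A hedged usage sketch of NewGPGClient: an empty version string makes newGPGClient fall back to GuessGPGVersion, and an empty homedir leaves gpg's default in place. This mirrors the NewGPGClient("", "") call in parse_helpers.go further down; the code requires a gpg installation to succeed at runtime.

package main

import (
	"fmt"

	"github.com/containers/ocicrypt"
)

func main() {
	// Empty version => newGPGClient guesses between gpg2 and gpg;
	// empty homedir => gpg's default (usually ~/.gnupg) is used.
	client, err := ocicrypt.NewGPGClient("", "")
	if err != nil {
		fmt.Println("no usable gpg installation:", err)
		return
	}
	pubRing, err := client.ReadGPGPubRingFile()
	fmt.Println(len(pubRing), err)
}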
// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase
func (gc *gpgv2Client) GetGPGPrivateKey(keyid uint64, passphrase string) ([]byte, error) {
	var args []string

	if gc.gpgHomeDir != "" {
		args = append(args, []string{"--homedir", gc.gpgHomeDir}...)
	}

	rfile, wfile, err := os.Pipe()
	if err != nil {
		return nil, errors.Wrapf(err, "could not create pipe")
	}
	defer func() {
		rfile.Close()
		wfile.Close()
	}()
	// fill pipe in background
	go func(passphrase string) {
		_, _ = wfile.Write([]byte(passphrase))
		wfile.Close()
	}(passphrase)

	args = append(args, []string{"--pinentry-mode", "loopback", "--batch", "--passphrase-fd", fmt.Sprintf("%d", 3), "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...)

	cmd := exec.Command("gpg2", args...)
	cmd.ExtraFiles = []*os.File{rfile}

	return runGPGGetOutput(cmd)
}

// ReadGPGPubRingFile reads the GPG public key ring file
func (gc *gpgv2Client) ReadGPGPubRingFile() ([]byte, error) {
	var args []string

	if gc.gpgHomeDir != "" {
		args = append(args, []string{"--homedir", gc.gpgHomeDir}...)
	}
	args = append(args, []string{"--batch", "--export"}...)

	cmd := exec.Command("gpg2", args...)

	return runGPGGetOutput(cmd)
}

func (gc *gpgv2Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) {
	var args []string

	if gc.gpgHomeDir != "" {
		args = append(args, "--homedir", gc.gpgHomeDir)
	}
	args = append(args, option, fmt.Sprintf("0x%x", keyid))

	cmd := exec.Command("gpg2", args...)

	keydata, err := runGPGGetOutput(cmd)
	return keydata, err == nil, err
}

// GetSecretKeyDetails retrieves the secret key details of key with keyid.
// returns a byte array of the details and a bool if the key exists
func (gc *gpgv2Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) {
	return gc.getKeyDetails("-K", keyid)
}

// GetKeyDetails retrieves the public key details of key with keyid.
// returns a byte array of the details and a bool if the key exists
func (gc *gpgv2Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) {
	return gc.getKeyDetails("-k", keyid)
}

// ResolveRecipients converts PGP keyids to email addresses, if possible
func (gc *gpgv2Client) ResolveRecipients(recipients []string) []string {
	return resolveRecipients(gc, recipients)
}

// GetGPGPrivateKey gets the bytes of a specified keyid, supplying a passphrase
func (gc *gpgv1Client) GetGPGPrivateKey(keyid uint64, _ string) ([]byte, error) {
	var args []string

	if gc.gpgHomeDir != "" {
		args = append(args, []string{"--homedir", gc.gpgHomeDir}...)
	}
	args = append(args, []string{"--batch", "--export-secret-key", fmt.Sprintf("0x%x", keyid)}...)

	cmd := exec.Command("gpg", args...)

	return runGPGGetOutput(cmd)
}

// ReadGPGPubRingFile reads the GPG public key ring file
func (gc *gpgv1Client) ReadGPGPubRingFile() ([]byte, error) {
	var args []string

	if gc.gpgHomeDir != "" {
		args = append(args, []string{"--homedir", gc.gpgHomeDir}...)
	}
	args = append(args, []string{"--batch", "--export"}...)

	cmd := exec.Command("gpg", args...)

	return runGPGGetOutput(cmd)
}

func (gc *gpgv1Client) getKeyDetails(option string, keyid uint64) ([]byte, bool, error) {
	var args []string

	if gc.gpgHomeDir != "" {
		args = append(args, "--homedir", gc.gpgHomeDir)
	}
	args = append(args, option, fmt.Sprintf("0x%x", keyid))

	cmd := exec.Command("gpg", args...)

	keydata, err := runGPGGetOutput(cmd)

	return keydata, err == nil, err
}

// GetSecretKeyDetails retrieves the secret key details of key with keyid.
// returns a byte array of the details and a bool if the key exists
func (gc *gpgv1Client) GetSecretKeyDetails(keyid uint64) ([]byte, bool, error) {
	return gc.getKeyDetails("-K", keyid)
}

// GetKeyDetails retrieves the public key details of key with keyid.
// returns a byte array of the details and a bool if the key exists
func (gc *gpgv1Client) GetKeyDetails(keyid uint64) ([]byte, bool, error) {
	return gc.getKeyDetails("-k", keyid)
}

// ResolveRecipients converts PGP keyids to email addresses, if possible
func (gc *gpgv1Client) ResolveRecipients(recipients []string) []string {
	return resolveRecipients(gc, recipients)
}

// runGPGGetOutput runs the GPG commandline and returns stdout as byte array
// and any stderr in the error
func runGPGGetOutput(cmd *exec.Cmd) ([]byte, error) {
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	if err := cmd.Start(); err != nil {
		return nil, err
	}

	stdoutstr, err2 := ioutil.ReadAll(stdout)
	stderrstr, _ := ioutil.ReadAll(stderr)

	if err := cmd.Wait(); err != nil {
		return nil, fmt.Errorf("error from %s: %s", cmd.Path, string(stderrstr))
	}

	return stdoutstr, err2
}

// resolveRecipients walks the list of recipients and attempts to convert
// all keyIds to email addresses; if something goes wrong during the
// conversion of a recipient, the original string is returned for that
// recipient
func resolveRecipients(gc GPGClient, recipients []string) []string {
	var result []string

	for _, recipient := range recipients {
		keyID, err := strconv.ParseUint(recipient, 0, 64)
		if err != nil {
			result = append(result, recipient)
		} else {
			details, found, _ := gc.GetKeyDetails(keyID)
			if !found {
				result = append(result, recipient)
			} else {
				email := extractEmailFromDetails(details)
				if email == "" {
					result = append(result, recipient)
				} else {
					result = append(result, email)
				}
			}
		}
	}
	return result
}

var emailPattern = regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)

func extractEmailFromDetails(details []byte) string {
	loc := emailPattern.FindSubmatchIndex(details)
	if len(loc) == 0 {
		return ""
	}
	return string(emailPattern.Expand(nil, []byte("$email"), details, loc))
}
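For illustration: emailPattern is built to match the uid lines gpg prints in key details. A standalone sketch against a hypothetical uid line (not taken from this diff):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	emailPattern := regexp.MustCompile(`uid\s+\[.*\]\s.*\s<(?P<email>.+)>`)
	// A hypothetical line of the kind printed by `gpg2 -k` for a key.
	details := []byte("uid           [ultimate] Alice Example <alice@example.com>")
	loc := emailPattern.FindSubmatchIndex(details)
	if len(loc) == 0 {
		fmt.Println("no match")
		return
	}
	// Prints: alice@example.com
	fmt.Println(string(emailPattern.Expand(nil, []byte("$email"), details, loc)))
}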
// uint64ToStringArray converts an array of uint64's to an array of strings
// by applying a format string to each uint64
func uint64ToStringArray(format string, in []uint64) []string {
	var ret []string

	for _, v := range in {
		ret = append(ret, fmt.Sprintf(format, v))
	}
	return ret
}

// GPGGetPrivateKey walks the list of layerInfos and tries to decrypt the
// wrapped symmetric keys. For this it determines whether a private key is
// in the GPGVault or on this system and prompts for the passwords for those
// that are available. If we do not find a private key on the system for
// getting to the symmetric key of a layer then an error is generated.
func GPGGetPrivateKey(descs []ocispec.Descriptor, gpgClient GPGClient, gpgVault GPGVault, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) {
	// PrivateKeyData describes a private key
	type PrivateKeyData struct {
		KeyData         []byte
		KeyDataPassword []byte
	}
	var pkd PrivateKeyData
	keyIDPasswordMap := make(map[uint64]PrivateKeyData)

	for _, desc := range descs {
		for scheme, b64pgpPackets := range GetWrappedKeysMap(desc) {
			if scheme != "pgp" {
				continue
			}
			keywrapper := GetKeyWrapper(scheme)
			if keywrapper == nil {
				return nil, nil, errors.Errorf("could not get KeyWrapper for %s\n", scheme)
			}
			keyIds, err := keywrapper.GetKeyIdsFromPacket(b64pgpPackets)
			if err != nil {
				return nil, nil, err
			}

			found := false
			for _, keyid := range keyIds {
				// do we have this key? -- first check the vault
				if gpgVault != nil {
					_, keydata := gpgVault.GetGPGPrivateKey(keyid)
					if len(keydata) > 0 {
						pkd = PrivateKeyData{
							KeyData:         keydata,
							KeyDataPassword: nil, // password not supported in this case
						}
						keyIDPasswordMap[keyid] = pkd
						found = true
						break
					}
				} else if gpgClient != nil {
					// check the local system's gpg installation
					keyinfo, haveKey, _ := gpgClient.GetSecretKeyDetails(keyid)
					// this may fail if the key is not here; we ignore the error
					if !haveKey {
						// key not on this system
						continue
					}

					_, found = keyIDPasswordMap[keyid]
					if !found {
						fmt.Printf("Passphrase required for Key id 0x%x: \n%v", keyid, string(keyinfo))
						fmt.Printf("Enter passphrase for key with Id 0x%x: ", keyid)

						password, err := terminal.ReadPassword(int(os.Stdin.Fd()))
						fmt.Printf("\n")
						if err != nil {
							return nil, nil, err
						}
						keydata, err := gpgClient.GetGPGPrivateKey(keyid, string(password))
						if err != nil {
							return nil, nil, err
						}
						pkd = PrivateKeyData{
							KeyData:         keydata,
							KeyDataPassword: password,
						}
						keyIDPasswordMap[keyid] = pkd
						found = true
					}
					break
				} else {
					return nil, nil, errors.New("no GPGVault or GPGClient passed")
				}
			}
			if !found && len(b64pgpPackets) > 0 && mustFindKey {
				ids := uint64ToStringArray("0x%x", keyIds)

				return nil, nil, errors.Errorf("missing key for decryption of layer %x of %s. Need one of the following keys: %s", desc.Digest, desc.Platform, strings.Join(ids, ", "))
			}
		}
	}

	for _, pkd := range keyIDPasswordMap {
		gpgPrivKeys = append(gpgPrivKeys, pkd.KeyData)
		gpgPrivKeysPwds = append(gpgPrivKeysPwds, pkd.KeyDataPassword)
	}

	return gpgPrivKeys, gpgPrivKeysPwds, nil
}
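A sketch (illustrative only; the keyring path is a placeholder) of feeding a GPGVault, defined in gpgvault.go below, into GPGGetPrivateKey; descs would normally be the encrypted image's layer descriptors:

package main

import (
	"fmt"

	"github.com/containers/ocicrypt"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	vault := ocicrypt.NewGPGVault()
	// Placeholder path; a real caller passes an exported secret keyring.
	if err := vault.AddSecretKeyRingFiles([]string{"/path/to/secring.gpg"}); err != nil {
		fmt.Println(err)
		return
	}
	// descs would normally come from the image manifest; empty here.
	var descs []ocispec.Descriptor
	privKeys, privKeyPwds, err := ocicrypt.GPGGetPrivateKey(descs, nil, vault, false)
	fmt.Println(len(privKeys), len(privKeyPwds), err)
}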
100 vendor/github.com/containers/ocicrypt/gpgvault.go generated vendored Normal file
@@ -0,0 +1,100 @@
/*
   Copyright The ocicrypt Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package ocicrypt

import (
	"bytes"
	"io/ioutil"

	"github.com/pkg/errors"
	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

// GPGVault defines an interface for wrapping multiple secret key rings
type GPGVault interface {
	// AddSecretKeyRingData adds a secret keyring via its raw byte array
	AddSecretKeyRingData(gpgSecretKeyRingData []byte) error
	// AddSecretKeyRingDataArray adds secret keyrings via their raw byte arrays
	AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error
	// AddSecretKeyRingFiles adds secret keyrings given their filenames
	AddSecretKeyRingFiles(filenames []string) error
	// GetGPGPrivateKey gets the decryption keys and raw keyring data for a keyid
	GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte)
}

// gpgVault wraps an array of gpgSecretKeyRing
type gpgVault struct {
	entityLists []openpgp.EntityList
	keyDataList [][]byte // the raw data originally passed in
}

// NewGPGVault creates an empty GPGVault
func NewGPGVault() GPGVault {
	return &gpgVault{}
}

// AddSecretKeyRingData adds a secret keyring to the gpgVault; the raw byte
// array read from the file must be passed and will be parsed by this function
func (g *gpgVault) AddSecretKeyRingData(gpgSecretKeyRingData []byte) error {
	// read the private keys
	r := bytes.NewReader(gpgSecretKeyRingData)
	entityList, err := openpgp.ReadKeyRing(r)
	if err != nil {
		return errors.Wrapf(err, "could not read keyring")
	}
	g.entityLists = append(g.entityLists, entityList)
	g.keyDataList = append(g.keyDataList, gpgSecretKeyRingData)
	return nil
}

// AddSecretKeyRingDataArray adds secret keyrings to the gpgVault; the raw byte
// arrays read from files must be passed
func (g *gpgVault) AddSecretKeyRingDataArray(gpgSecretKeyRingDataArray [][]byte) error {
	for _, gpgSecretKeyRingData := range gpgSecretKeyRingDataArray {
		if err := g.AddSecretKeyRingData(gpgSecretKeyRingData); err != nil {
			return err
		}
	}
	return nil
}

// AddSecretKeyRingFiles adds the secret key rings given their filenames
func (g *gpgVault) AddSecretKeyRingFiles(filenames []string) error {
	for _, filename := range filenames {
		gpgSecretKeyRingData, err := ioutil.ReadFile(filename)
		if err != nil {
			return err
		}
		err = g.AddSecretKeyRingData(gpgSecretKeyRingData)
		if err != nil {
			return err
		}
	}
	return nil
}

// GetGPGPrivateKey gets the decryption keys and raw keyring data for a specified keyid
func (g *gpgVault) GetGPGPrivateKey(keyid uint64) ([]openpgp.Key, []byte) {
	for i, el := range g.entityLists {
		decKeys := el.KeysByIdUsage(keyid, packet.KeyFlagEncryptCommunications)
		if len(decKeys) > 0 {
			return decKeys, g.keyDataList[i]
		}
	}
	return nil, nil
}
313 vendor/github.com/containers/ocicrypt/helpers/parse_helpers.go generated vendored Normal file
@@ -0,0 +1,313 @@
package helpers

import (
	"fmt"
	"io/ioutil"
	"os"
	"strconv"
	"strings"

	"github.com/containerd/containerd/platforms"
	"github.com/containers/ocicrypt"
	encconfig "github.com/containers/ocicrypt/config"
	encutils "github.com/containers/ocicrypt/utils"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"github.com/urfave/cli"
)

// processRecipientKeys sorts the array of recipients by type. Recipients may be either
// x509 certificates, public keys, or PGP public keys identified by email address or name
func processRecipientKeys(recipients []string) ([][]byte, [][]byte, [][]byte, error) {
	var (
		gpgRecipients [][]byte
		pubkeys       [][]byte
		x509s         [][]byte
	)
	for _, recipient := range recipients {

		idx := strings.Index(recipient, ":")
		if idx < 0 {
			return nil, nil, nil, errors.New("Invalid recipient format")
		}

		protocol := recipient[:idx]
		value := recipient[idx+1:]

		switch protocol {
		case "pgp":
			gpgRecipients = append(gpgRecipients, []byte(value))

		case "jwe":
			tmp, err := ioutil.ReadFile(value)
			if err != nil {
				return nil, nil, nil, errors.Wrap(err, "Unable to read file")
			}
			if !encutils.IsPublicKey(tmp) {
				return nil, nil, nil, errors.New("File provided is not a public key")
			}
			pubkeys = append(pubkeys, tmp)

		case "pkcs7":
			tmp, err := ioutil.ReadFile(value)
			if err != nil {
				return nil, nil, nil, errors.Wrap(err, "Unable to read file")
			}
			if !encutils.IsCertificate(tmp) {
				return nil, nil, nil, errors.New("File provided is not an x509 cert")
			}
			x509s = append(x509s, tmp)

		default:
			return nil, nil, nil, errors.New("Provided protocol not recognized")
		}
	}
	return gpgRecipients, pubkeys, x509s, nil
}

// processPwdString processes a password that may be in any of the following formats:
// - file=<passwordfile>
// - pass=<password>
// - fd=<filedescriptor>
// - <password>
func processPwdString(pwdString string) ([]byte, error) {
	if strings.HasPrefix(pwdString, "file=") {
		return ioutil.ReadFile(pwdString[5:])
	} else if strings.HasPrefix(pwdString, "pass=") {
		return []byte(pwdString[5:]), nil
	} else if strings.HasPrefix(pwdString, "fd=") {
		fdStr := pwdString[3:]
		fd, err := strconv.Atoi(fdStr)
		if err != nil {
			return nil, errors.Wrapf(err, "could not parse file descriptor %s", fdStr)
		}
		f := os.NewFile(uintptr(fd), "pwdfile")
		if f == nil {
			return nil, fmt.Errorf("%s is not a valid file descriptor", fdStr)
		}
		defer f.Close()
		pwd := make([]byte, 64)
		n, err := f.Read(pwd)
		if err != nil {
			return nil, errors.Wrapf(err, "could not read from file descriptor")
		}
		return pwd[:n], nil
	}
	return []byte(pwdString), nil
}
// processPrivateKeyFiles sorts the different types of private key files; private key files may either be
// private keys or GPG private key ring files. The private key files may include the password for the
// private key and take any of the following forms:
// - <filename>
// - <filename>:file=<passwordfile>
// - <filename>:pass=<password>
// - <filename>:fd=<filedescriptor>
// - <filename>:<password>
func processPrivateKeyFiles(keyFilesAndPwds []string) ([][]byte, [][]byte, [][]byte, [][]byte, error) {
	var (
		gpgSecretKeyRingFiles [][]byte
		gpgSecretKeyPasswords [][]byte
		privkeys              [][]byte
		privkeysPasswords     [][]byte
		err                   error
	)
	// keys needed for decryption in case of adding a recipient
	for _, keyfileAndPwd := range keyFilesAndPwds {
		var password []byte

		parts := strings.Split(keyfileAndPwd, ":")
		if len(parts) == 2 {
			password, err = processPwdString(parts[1])
			if err != nil {
				return nil, nil, nil, nil, err
			}
		}

		keyfile := parts[0]
		tmp, err := ioutil.ReadFile(keyfile)
		if err != nil {
			return nil, nil, nil, nil, err
		}
		isPrivKey, err := encutils.IsPrivateKey(tmp, password)
		if encutils.IsPasswordError(err) {
			return nil, nil, nil, nil, err
		}
		if isPrivKey {
			privkeys = append(privkeys, tmp)
			privkeysPasswords = append(privkeysPasswords, password)
		} else if encutils.IsGPGPrivateKeyRing(tmp) {
			gpgSecretKeyRingFiles = append(gpgSecretKeyRingFiles, tmp)
			gpgSecretKeyPasswords = append(gpgSecretKeyPasswords, password)
		} else {
			return nil, nil, nil, nil, fmt.Errorf("unidentified private key in file %s (password=%s)", keyfile, string(password))
		}
	}
	return gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privkeys, privkeysPasswords, nil
}

func createGPGClient(context *cli.Context) (ocicrypt.GPGClient, error) {
	return ocicrypt.NewGPGClient(context.String("gpg-version"), context.String("gpg-homedir"))
}

func getGPGPrivateKeys(context *cli.Context, gpgSecretKeyRingFiles [][]byte, descs []ocispec.Descriptor, mustFindKey bool) (gpgPrivKeys [][]byte, gpgPrivKeysPwds [][]byte, err error) {
	gpgClient, err := createGPGClient(context)
	if err != nil {
		return nil, nil, err
	}

	var gpgVault ocicrypt.GPGVault
	if len(gpgSecretKeyRingFiles) > 0 {
		gpgVault = ocicrypt.NewGPGVault()
		err = gpgVault.AddSecretKeyRingDataArray(gpgSecretKeyRingFiles)
		if err != nil {
			return nil, nil, err
		}
	}
	return ocicrypt.GPGGetPrivateKey(descs, gpgClient, gpgVault, mustFindKey)
}

// CreateDecryptCryptoConfig creates a CryptoConfig object containing the necessary
// information to perform decryption, built from command line options and, possibly,
// LayerInfos describing the image and helping us to query for the PGP decryption keys
func CreateDecryptCryptoConfig(keys []string, decRecipients []string) (encconfig.CryptoConfig, error) {
	ccs := []encconfig.CryptoConfig{}

	// x509 cert is needed for PKCS7 decryption
	_, _, x509s, err := processRecipientKeys(decRecipients)
	if err != nil {
		return encconfig.CryptoConfig{}, err
	}

	gpgSecretKeyRingFiles, gpgSecretKeyPasswords, privKeys, privKeysPasswords, err := processPrivateKeyFiles(keys)
	if err != nil {
		return encconfig.CryptoConfig{}, err
	}

	if len(gpgSecretKeyRingFiles) > 0 {
		gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords)
		if err != nil {
			return encconfig.CryptoConfig{}, err
		}
		ccs = append(ccs, gpgCc)
	}

	/* TODO: Add in GPG client query for secret keys in the future.
	_, err = createGPGClient(context)
	gpgInstalled := err == nil
	if gpgInstalled {
		if len(gpgSecretKeyRingFiles) == 0 && len(privKeys) == 0 && descs != nil {
			// Get pgp private keys from keyring only if no private key was passed
			gpgPrivKeys, gpgPrivKeyPasswords, err := getGPGPrivateKeys(context, gpgSecretKeyRingFiles, descs, true)
			if err != nil {
				return encconfig.CryptoConfig{}, err
			}

			gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgPrivKeys, gpgPrivKeyPasswords)
			if err != nil {
				return encconfig.CryptoConfig{}, err
			}
			ccs = append(ccs, gpgCc)

		} else if len(gpgSecretKeyRingFiles) > 0 {
			gpgCc, err := encconfig.DecryptWithGpgPrivKeys(gpgSecretKeyRingFiles, gpgSecretKeyPasswords)
			if err != nil {
				return encconfig.CryptoConfig{}, err
			}
			ccs = append(ccs, gpgCc)

		}
	}
	*/

	x509sCc, err := encconfig.DecryptWithX509s(x509s)
	if err != nil {
		return encconfig.CryptoConfig{}, err
	}
	ccs = append(ccs, x509sCc)

	privKeysCc, err := encconfig.DecryptWithPrivKeys(privKeys, privKeysPasswords)
	if err != nil {
		return encconfig.CryptoConfig{}, err
	}
	ccs = append(ccs, privKeysCc)

	return encconfig.CombineCryptoConfigs(ccs), nil
}
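A hedged usage sketch of CreateDecryptCryptoConfig, using the `<filename>:pass=<password>` form accepted by processPrivateKeyFiles; the path and password are placeholders, so at runtime this only succeeds once a real key file exists:

package main

import (
	"fmt"

	enchelpers "github.com/containers/ocicrypt/helpers"
)

func main() {
	// Placeholder key path; the optional ":pass=..." suffix supplies the
	// key's password inline, per processPrivateKeyFiles above.
	cc, err := enchelpers.CreateDecryptCryptoConfig(
		[]string{"/path/to/private-key.pem:pass=mypassword"}, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cc.DecryptConfig != nil)
}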
// parsePlatformArray parses an array of specifiers and converts them into an array of specs.Platform
func parsePlatformArray(specifiers []string) ([]ocispec.Platform, error) {
	var speclist []ocispec.Platform

	for _, specifier := range specifiers {
		spec, err := platforms.Parse(specifier)
		if err != nil {
			return []ocispec.Platform{}, err
		}
		speclist = append(speclist, spec)
	}
	return speclist, nil
}

// CreateCryptoConfig creates a CryptoConfig from a list of recipient strings and
// a list of key paths of private keys
func CreateCryptoConfig(recipients []string, keys []string) (encconfig.CryptoConfig, error) {
	var decryptCc *encconfig.CryptoConfig
	ccs := []encconfig.CryptoConfig{}
	if len(keys) > 0 {
		dcc, err := CreateDecryptCryptoConfig(keys, []string{})
		if err != nil {
			return encconfig.CryptoConfig{}, err
		}
		decryptCc = &dcc
		ccs = append(ccs, dcc)
	}

	if len(recipients) > 0 {
		gpgRecipients, pubKeys, x509s, err := processRecipientKeys(recipients)
		if err != nil {
			return encconfig.CryptoConfig{}, err
		}
		encryptCcs := []encconfig.CryptoConfig{}

		// Create GPG client with guessed GPG version and default homedir
		gpgClient, err := ocicrypt.NewGPGClient("", "")
		gpgInstalled := err == nil
		if len(gpgRecipients) > 0 && gpgInstalled {
			gpgPubRingFile, err := gpgClient.ReadGPGPubRingFile()
			if err != nil {
				return encconfig.CryptoConfig{}, err
			}

			gpgCc, err := encconfig.EncryptWithGpg(gpgRecipients, gpgPubRingFile)
			if err != nil {
				return encconfig.CryptoConfig{}, err
			}
			encryptCcs = append(encryptCcs, gpgCc)
		}

		// Create Encryption Crypto Config
		pkcs7Cc, err := encconfig.EncryptWithPkcs7(x509s)
		if err != nil {
			return encconfig.CryptoConfig{}, err
		}
		encryptCcs = append(encryptCcs, pkcs7Cc)

		jweCc, err := encconfig.EncryptWithJwe(pubKeys)
		if err != nil {
			return encconfig.CryptoConfig{}, err
		}
		encryptCcs = append(encryptCcs, jweCc)
		ecc := encconfig.CombineCryptoConfigs(encryptCcs)
		if decryptCc != nil {
			ecc.EncryptConfig.AttachDecryptConfig(decryptCc.DecryptConfig)
		}
		ccs = append(ccs, ecc)
	}

	if len(ccs) > 0 {
		return encconfig.CombineCryptoConfigs(ccs), nil
	}
	return encconfig.CryptoConfig{}, nil
}
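A hedged usage sketch of the encryption side: recipients use the `protocol:value` form parsed by processRecipientKeys, here the `jwe:` protocol with a placeholder public-key path:

package main

import (
	"fmt"

	enchelpers "github.com/containers/ocicrypt/helpers"
)

func main() {
	// "jwe:<path>" selects the JWE key wrapper; the path is a placeholder
	// for a PEM-encoded public key on disk.
	cc, err := enchelpers.CreateCryptoConfig([]string{"jwe:/path/to/pubkey.pem"}, []string{})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(cc.EncryptConfig != nil)
}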
132 vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go generated vendored Normal file
@@ -0,0 +1,132 @@
/*
   Copyright The ocicrypt Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package jwe

import (
	"crypto/ecdsa"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap"
	"github.com/containers/ocicrypt/utils"
	"github.com/pkg/errors"
	jose "gopkg.in/square/go-jose.v2"
)

type jweKeyWrapper struct {
}

func (kw *jweKeyWrapper) GetAnnotationID() string {
	return "org.opencontainers.image.enc.keys.jwe"
}

// NewKeyWrapper returns a new key wrapping interface using jwe
func NewKeyWrapper() keywrap.KeyWrapper {
	return &jweKeyWrapper{}
}

// WrapKeys wraps the session key for recipients and encrypts the optsData, which
// describe the symmetric key used for encrypting the layer
func (kw *jweKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
	var joseRecipients []jose.Recipient

	err := addPubKeys(&joseRecipients, ec.Parameters["pubkeys"])
	if err != nil {
		return nil, err
	}
	// no recipients is not an error...
	if len(joseRecipients) == 0 {
		return nil, nil
	}

	encrypter, err := jose.NewMultiEncrypter(jose.A256GCM, joseRecipients, nil)
	if err != nil {
		return nil, errors.Wrapf(err, "jose.NewMultiEncrypter failed")
	}
	jwe, err := encrypter.Encrypt(optsData)
	if err != nil {
		return nil, errors.Wrapf(err, "JWE Encrypt failed")
	}
	return []byte(jwe.FullSerialize()), nil
}

func (kw *jweKeyWrapper) UnwrapKey(dc *config.DecryptConfig, jweString []byte) ([]byte, error) {
	jwe, err := jose.ParseEncrypted(string(jweString))
	if err != nil {
		return nil, errors.New("jose.ParseEncrypted failed")
	}

	privKeys := kw.GetPrivateKeys(dc.Parameters)
	if len(privKeys) == 0 {
		return nil, errors.New("No private keys found for JWE decryption")
	}
	privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters)
	if len(privKeysPasswords) != len(privKeys) {
		return nil, errors.New("Private key password array length must be same as that of private keys")
	}

	for idx, privKey := range privKeys {
		key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "JWE")
		if err != nil {
			return nil, err
		}
		_, _, plain, err := jwe.DecryptMulti(key)
		if err == nil {
			return plain, nil
		}
	}
	return nil, errors.New("JWE: No suitable private key found for decryption")
}

func (kw *jweKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
	return dcparameters["privkeys"]
}

func (kw *jweKeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte {
	return dcparameters["privkeys-passwords"]
}

func (kw *jweKeyWrapper) GetKeyIdsFromPacket(b64jwes string) ([]uint64, error) {
	return nil, nil
}

func (kw *jweKeyWrapper) GetRecipients(b64jwes string) ([]string, error) {
	return []string{"[jwe]"}, nil
}

func addPubKeys(joseRecipients *[]jose.Recipient, pubKeys [][]byte) error {
	if len(pubKeys) == 0 {
		return nil
	}
	for _, pubKey := range pubKeys {
		key, err := utils.ParsePublicKey(pubKey, "JWE")
		if err != nil {
			return err
		}

		alg := jose.RSA_OAEP
		switch key.(type) {
		case *ecdsa.PublicKey:
			alg = jose.ECDH_ES_A256KW
		}

		*joseRecipients = append(*joseRecipients, jose.Recipient{
			Algorithm: alg,
			Key:       key,
		})
	}
	return nil
}
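A sketch of wrapping layer-key options with the JWE wrapper above, using a freshly generated RSA key. It assumes utils.ParsePublicKey accepts a PKIX "PUBLIC KEY" PEM block (the same form the jwe: recipient files are read in elsewhere in this change) and that config.EncryptConfig can be populated directly through its Parameters field:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap/jwe"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	pubDER, err := x509.MarshalPKIXPublicKey(&priv.PublicKey)
	if err != nil {
		panic(err)
	}
	pubPEM := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pubDER})

	kw := jwe.NewKeyWrapper()
	ec := &config.EncryptConfig{
		Parameters: map[string][][]byte{"pubkeys": {pubPEM}},
	}
	// optsData stands in for the serialized symmetric layer-key options.
	wrapped, err := kw.WrapKeys(ec, []byte("layer key options"))
	fmt.Println(len(wrapped), err)
}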
40 vendor/github.com/containers/ocicrypt/keywrap/keywrap.go generated vendored Normal file
@@ -0,0 +1,40 @@
/*
   Copyright The ocicrypt Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package keywrap

import (
	"github.com/containers/ocicrypt/config"
)

// KeyWrapper is the interface used for wrapping keys using
// a specific encryption technology (pgp, jwe)
type KeyWrapper interface {
	WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error)
	UnwrapKey(dc *config.DecryptConfig, annotation []byte) ([]byte, error)
	GetAnnotationID() string
	// GetPrivateKeys (optional) gets the array of private keys. It is an optional implementation
	// as in some key services a private key may not be exportable (e.g. HSM)
	GetPrivateKeys(dcparameters map[string][][]byte) [][]byte

	// GetKeyIdsFromPacket (optional) gets a list of key IDs. This is optional as some encryption
	// schemes may not have a notion of key IDs
	GetKeyIdsFromPacket(packet string) ([]uint64, error)

	// GetRecipients (optional) gets a list of recipients. It is optional because recipients
	// may not be a meaningful concept in a particular encryption scheme
	GetRecipients(packet string) ([]string, error)
}
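Illustrative only: the concrete wrappers in this change (jwe above, pgp and pkcs7 below) implement this interface, and the ocicrypt package resolves them by scheme via GetKeyWrapper, as gpg.go does for "pgp":

package main

import (
	"fmt"

	"github.com/containers/ocicrypt"
)

func main() {
	// GetKeyWrapper resolves a scheme name to its keywrap.KeyWrapper
	// implementation; gpg.go above checks the result against nil the same way.
	kw := ocicrypt.GetKeyWrapper("pgp")
	if kw == nil {
		fmt.Println("no wrapper registered for scheme")
		return
	}
	fmt.Println(kw.GetAnnotationID()) // org.opencontainers.image.enc.keys.pgp
}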
269 vendor/github.com/containers/ocicrypt/keywrap/pgp/keywrapper_gpg.go generated vendored Normal file
@@ -0,0 +1,269 @@
/*
   Copyright The ocicrypt Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package pgp

import (
	"bytes"
	"crypto"
	"crypto/rand"
	"encoding/base64"
	"fmt"
	"io"
	"io/ioutil"
	"net/mail"
	"strconv"
	"strings"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap"
	"github.com/pkg/errors"
	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

type gpgKeyWrapper struct {
}

// NewKeyWrapper returns a new key wrapping interface for pgp
func NewKeyWrapper() keywrap.KeyWrapper {
	return &gpgKeyWrapper{}
}

var (
	// GPGDefaultEncryptConfig is the default configuration for layer encryption/decryption
	GPGDefaultEncryptConfig = &packet.Config{
		Rand:              rand.Reader,
		DefaultHash:       crypto.SHA256,
		DefaultCipher:     packet.CipherAES256,
		CompressionConfig: &packet.CompressionConfig{Level: 0}, // No compression
		RSABits:           2048,
	}
)

func (kw *gpgKeyWrapper) GetAnnotationID() string {
	return "org.opencontainers.image.enc.keys.pgp"
}

// WrapKeys wraps the session key for recipients and encrypts the optsData, which
// describe the symmetric key used for encrypting the layer
func (kw *gpgKeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
	ciphertext := new(bytes.Buffer)
	el, err := kw.createEntityList(ec)
	if err != nil {
		return nil, errors.Wrap(err, "unable to create entity list")
	}
	if len(el) == 0 {
		// nothing to do -- not an error
		return nil, nil
	}

	plaintextWriter, err := openpgp.Encrypt(ciphertext,
		el,  /*EntityList*/
		nil, /* Sign*/
		nil, /* FileHint */
		GPGDefaultEncryptConfig)
	if err != nil {
		return nil, err
	}

	if _, err = plaintextWriter.Write(optsData); err != nil {
		return nil, err
	} else if err = plaintextWriter.Close(); err != nil {
		return nil, err
	}
	return ciphertext.Bytes(), err
}

// UnwrapKey unwraps the symmetric key with which the layer is encrypted
// This symmetric key is encrypted in the PGP payload.
func (kw *gpgKeyWrapper) UnwrapKey(dc *config.DecryptConfig, pgpPacket []byte) ([]byte, error) {
	pgpPrivateKeys, pgpPrivateKeysPwd, err := kw.getKeyParameters(dc.Parameters)
	if err != nil {
		return nil, err
	}

	for idx, pgpPrivateKey := range pgpPrivateKeys {
		r := bytes.NewBuffer(pgpPrivateKey)
		entityList, err := openpgp.ReadKeyRing(r)
		if err != nil {
			return nil, errors.Wrap(err, "unable to parse private keys")
		}

		var prompt openpgp.PromptFunction
		if len(pgpPrivateKeysPwd) > idx {
			responded := false
			prompt = func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
				if responded {
					return nil, fmt.Errorf("don't seem to have the right password")
				}
				responded = true
				for _, key := range keys {
					if key.PrivateKey != nil {
						_ = key.PrivateKey.Decrypt(pgpPrivateKeysPwd[idx])
					}
				}
				return pgpPrivateKeysPwd[idx], nil
			}
		}

		r = bytes.NewBuffer(pgpPacket)
		md, err := openpgp.ReadMessage(r, entityList, prompt, GPGDefaultEncryptConfig)
		if err != nil {
			continue
		}
		// we get the plain key options back
		optsData, err := ioutil.ReadAll(md.UnverifiedBody)
		if err != nil {
			continue
		}
		return optsData, nil
	}
	return nil, errors.New("PGP: No suitable key found to unwrap key")
}
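For illustration only (not part of the vendored code): the openpgp.Encrypt/ReadMessage pairing that WrapKeys and UnwrapKey build on can be exercised end to end with a throwaway in-memory entity; all names and strings are placeholders:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/containers/ocicrypt/keywrap/pgp"
	"golang.org/x/crypto/openpgp"
)

func main() {
	// Generate a throwaway key pair with the same packet.Config the wrapper uses.
	entity, err := openpgp.NewEntity("Alice Example", "", "alice@example.com", pgp.GPGDefaultEncryptConfig)
	if err != nil {
		panic(err)
	}

	// Encrypt the stand-in layer-key options to that recipient...
	ciphertext := new(bytes.Buffer)
	w, err := openpgp.Encrypt(ciphertext, openpgp.EntityList{entity}, nil, nil, pgp.GPGDefaultEncryptConfig)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("layer key options"))
	w.Close()

	// ...and read them back, as UnwrapKey does with its keyring entities.
	md, err := openpgp.ReadMessage(ciphertext, openpgp.EntityList{entity}, nil, pgp.GPGDefaultEncryptConfig)
	if err != nil {
		panic(err)
	}
	plain, _ := ioutil.ReadAll(md.UnverifiedBody)
	fmt.Println(string(plain))
}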
// GetKeyIdsFromPacket converts the base64 encoded PGPPacket to uint64 keyIds
func (kw *gpgKeyWrapper) GetKeyIdsFromPacket(b64pgpPackets string) ([]uint64, error) {

	var keyids []uint64
	for _, b64pgpPacket := range strings.Split(b64pgpPackets, ",") {
		pgpPacket, err := base64.StdEncoding.DecodeString(b64pgpPacket)
		if err != nil {
			return nil, errors.Wrapf(err, "could not decode base64 encoded PGP packet")
		}
		newids, err := kw.getKeyIDs(pgpPacket)
		if err != nil {
			return nil, err
		}
		keyids = append(keyids, newids...)
	}
	return keyids, nil
}

// getKeyIDs parses a PGPPacket and gets the list of recipients' key IDs
func (kw *gpgKeyWrapper) getKeyIDs(pgpPacket []byte) ([]uint64, error) {
	var keyids []uint64

	kbuf := bytes.NewBuffer(pgpPacket)
	packets := packet.NewReader(kbuf)
ParsePackets:
	for {
		p, err := packets.Next()
		if err == io.EOF {
			break ParsePackets
		}
		if err != nil {
			return []uint64{}, errors.Wrapf(err, "packets.Next() failed")
		}
		switch p := p.(type) {
		case *packet.EncryptedKey:
			keyids = append(keyids, p.KeyId)
		case *packet.SymmetricallyEncrypted:
			break ParsePackets
		}
	}
	return keyids, nil
}

// GetRecipients converts the wrappedKeys to an array of recipients
func (kw *gpgKeyWrapper) GetRecipients(b64pgpPackets string) ([]string, error) {
	keyIds, err := kw.GetKeyIdsFromPacket(b64pgpPackets)
	if err != nil {
		return nil, err
	}
	var array []string
	for _, keyid := range keyIds {
		array = append(array, "0x"+strconv.FormatUint(keyid, 16))
	}
	return array, nil
}

func (kw *gpgKeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
	return dcparameters["gpg-privatekeys"]
}

func (kw *gpgKeyWrapper) getKeyParameters(dcparameters map[string][][]byte) ([][]byte, [][]byte, error) {

	privKeys := kw.GetPrivateKeys(dcparameters)
	if len(privKeys) == 0 {
		return nil, nil, errors.New("GPG: Missing private key parameter")
	}

	return privKeys, dcparameters["gpg-privatekeys-passwords"], nil
}

// createEntityList creates the openpgp EntityList by reading the KeyRing
// first and then filtering out recipients' keys
func (kw *gpgKeyWrapper) createEntityList(ec *config.EncryptConfig) (openpgp.EntityList, error) {
	pgpPubringFile := ec.Parameters["gpg-pubkeyringfile"]
	if len(pgpPubringFile) == 0 {
		return nil, nil
	}
	r := bytes.NewReader(pgpPubringFile[0])

	entityList, err := openpgp.ReadKeyRing(r)
	if err != nil {
		return nil, err
	}

	gpgRecipients := ec.Parameters["gpg-recipients"]
	if len(gpgRecipients) == 0 {
		return nil, nil
	}

	rSet := make(map[string]int)
	for _, r := range gpgRecipients {
		rSet[string(r)] = 0
	}

	var filteredList openpgp.EntityList
	for _, entity := range entityList {
		for k := range entity.Identities {
			addr, err := mail.ParseAddress(k)
			if err != nil {
				return nil, err
			}
			for _, r := range gpgRecipients {
				recp := string(r)
				if strings.Compare(addr.Name, recp) == 0 || strings.Compare(addr.Address, recp) == 0 {
					filteredList = append(filteredList, entity)
					rSet[recp] = rSet[recp] + 1
				}
			}
		}
	}

	// make sure we found keys for all the Recipients...
	var buffer bytes.Buffer
	notFound := false
	buffer.WriteString("PGP: No key found for the following recipients: ")

	for k, v := range rSet {
		if v == 0 {
			if notFound {
				buffer.WriteString(", ")
			}
			buffer.WriteString(k)
			notFound = true
		}
	}

	if notFound {
		return nil, errors.New(buffer.String())
	}

	return filteredList, nil
}
132 vendor/github.com/containers/ocicrypt/keywrap/pkcs7/keywrapper_pkcs7.go generated vendored Normal file
@@ -0,0 +1,132 @@
/*
Copyright The ocicrypt Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package pkcs7

import (
	"crypto"
	"crypto/x509"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap"
	"github.com/containers/ocicrypt/utils"
	"github.com/fullsailor/pkcs7"
	"github.com/pkg/errors"
)

type pkcs7KeyWrapper struct {
}

// NewKeyWrapper returns a new key wrapping interface using pkcs7
func NewKeyWrapper() keywrap.KeyWrapper {
	return &pkcs7KeyWrapper{}
}

func (kw *pkcs7KeyWrapper) GetAnnotationID() string {
	return "org.opencontainers.image.enc.keys.pkcs7"
}

// WrapKeys wraps the session key for recipients and encrypts the optsData, which
// describes the symmetric key used for encrypting the layer
func (kw *pkcs7KeyWrapper) WrapKeys(ec *config.EncryptConfig, optsData []byte) ([]byte, error) {
	x509Certs, err := collectX509s(ec.Parameters["x509s"])
	if err != nil {
		return nil, err
	}
	// no recipients is not an error...
	if len(x509Certs) == 0 {
		return nil, nil
	}

	pkcs7.ContentEncryptionAlgorithm = pkcs7.EncryptionAlgorithmAES128GCM
	return pkcs7.Encrypt(optsData, x509Certs)
}

func collectX509s(x509s [][]byte) ([]*x509.Certificate, error) {
	if len(x509s) == 0 {
		return nil, nil
	}
	var x509Certs []*x509.Certificate
	for _, x509 := range x509s {
		x509Cert, err := utils.ParseCertificate(x509, "PKCS7")
		if err != nil {
			return nil, err
		}
		x509Certs = append(x509Certs, x509Cert)
	}
	return x509Certs, nil
}

func (kw *pkcs7KeyWrapper) GetPrivateKeys(dcparameters map[string][][]byte) [][]byte {
	return dcparameters["privkeys"]
}

func (kw *pkcs7KeyWrapper) getPrivateKeysPasswords(dcparameters map[string][][]byte) [][]byte {
	return dcparameters["privkeys-passwords"]
}

// UnwrapKey unwraps the symmetric key with which the layer is encrypted
// This symmetric key is encrypted in the PKCS7 payload.
func (kw *pkcs7KeyWrapper) UnwrapKey(dc *config.DecryptConfig, pkcs7Packet []byte) ([]byte, error) {
	privKeys := kw.GetPrivateKeys(dc.Parameters)
	if len(privKeys) == 0 {
		return nil, errors.New("no private keys found for PKCS7 decryption")
	}
	privKeysPasswords := kw.getPrivateKeysPasswords(dc.Parameters)
	if len(privKeysPasswords) != len(privKeys) {
		return nil, errors.New("private key password array length must be same as that of private keys")
	}

	x509Certs, err := collectX509s(dc.Parameters["x509s"])
	if err != nil {
		return nil, err
	}
	if len(x509Certs) == 0 {
		return nil, errors.New("no x509 certificates found needed for PKCS7 decryption")
	}

	p7, err := pkcs7.Parse(pkcs7Packet)
	if err != nil {
		return nil, errors.Wrapf(err, "could not parse PKCS7 packet")
	}

	for idx, privKey := range privKeys {
		key, err := utils.ParsePrivateKey(privKey, privKeysPasswords[idx], "PKCS7")
		if err != nil {
			return nil, err
		}
		for _, x509Cert := range x509Certs {
			optsData, err := p7.Decrypt(x509Cert, crypto.PrivateKey(key))
			if err != nil {
				continue
			}
			return optsData, nil
		}
	}
	return nil, errors.New("PKCS7: No suitable private key found for decryption")
}

// GetKeyIdsFromPacket converts the base64 encoded Packet to uint64 keyIds;
// We cannot do this with pkcs7
func (kw *pkcs7KeyWrapper) GetKeyIdsFromPacket(b64pkcs7Packets string) ([]uint64, error) {
	return nil, nil
}

// GetRecipients converts the wrappedKeys to an array of recipients
// We cannot do this with pkcs7
func (kw *pkcs7KeyWrapper) GetRecipients(b64pkcs7Packets string) ([]string, error) {
	return []string{"[pkcs7]"}, nil
}
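A minimal sketch (not part of the vendored file) of driving this key wrapper end to end. The file paths are hypothetical placeholders for a recipient's DER certificate and private key; the Parameters keys mirror the maps the wrapper reads above:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/ocicrypt/config"
	"github.com/containers/ocicrypt/keywrap/pkcs7"
)

func main() {
	certDER, _ := ioutil.ReadFile("recipient-cert.der") // hypothetical path
	keyDER, _ := ioutil.ReadFile("recipient-key.der")   // hypothetical path

	kw := pkcs7.NewKeyWrapper()

	// Encrypt side: wrap the layer-key options for the recipient certificate.
	ec := &config.EncryptConfig{Parameters: map[string][][]byte{
		"x509s": {certDER},
	}}
	packet, err := kw.WrapKeys(ec, []byte("per-layer symmetric key options"))
	if err != nil {
		panic(err)
	}

	// Decrypt side: passwords must be parallel to privkeys (nil = no password).
	dc := &config.DecryptConfig{Parameters: map[string][][]byte{
		"privkeys":           {keyDER},
		"privkeys-passwords": {nil},
		"x509s":              {certDER},
	}}
	optsData, err := kw.UnwrapKey(dc, packet)
	fmt.Println(string(optsData), err)
}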
40 vendor/github.com/containers/ocicrypt/reader.go generated vendored Normal file
@ -0,0 +1,40 @@
/*
Copyright The ocicrypt Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ocicrypt

import (
	"io"
)

type readerAtReader struct {
	r   io.ReaderAt
	off int64
}

// ReaderFromReaderAt takes an io.ReaderAt and returns an io.Reader
func ReaderFromReaderAt(r io.ReaderAt) io.Reader {
	return &readerAtReader{
		r:   r,
		off: 0,
	}
}

func (rar *readerAtReader) Read(p []byte) (n int, err error) {
	n, err = rar.r.ReadAt(p, rar.off)
	rar.off += int64(n)
	return n, err
}
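Usage is straightforward: the adapter turns any random-access io.ReaderAt into a sequential io.Reader by tracking the offset internally. A small illustrative sketch (not part of the vendored file):

package main

import (
	"bytes"
	"io"
	"os"

	"github.com/containers/ocicrypt"
)

func main() {
	ra := bytes.NewReader([]byte("layer bytes")) // any io.ReaderAt works here
	r := ocicrypt.ReaderFromReaderAt(ra)
	io.Copy(os.Stdout, r) // sequential reads advance the internal offset
}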
12 vendor/github.com/containers/ocicrypt/spec/spec.go generated vendored Normal file
@ -0,0 +1,12 @@
package spec

const (
	// MediaTypeLayerEnc is MIME type used for encrypted layers.
	MediaTypeLayerEnc = "application/vnd.oci.image.layer.v1.tar+encrypted"
	// MediaTypeLayerGzipEnc is MIME type used for encrypted compressed layers.
	MediaTypeLayerGzipEnc = "application/vnd.oci.image.layer.v1.tar+gzip+encrypted"
	// MediaTypeLayerNonDistributableEnc is MIME type used for non distributable encrypted layers.
	MediaTypeLayerNonDistributableEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+encrypted"
	// MediaTypeLayerNonDistributableGzipEnc is MIME type used for non distributable encrypted compressed layers.
	MediaTypeLayerNonDistributableGzipEnc = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip+encrypted"
)
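Each encrypted media type is its plaintext OCI counterpart with a "+encrypted" suffix. A small illustrative mapping (not part of the vendored file), pairing the standard image-spec constants with the encrypted ones:

package main

import (
	"fmt"

	"github.com/containers/ocicrypt/spec"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	enc := map[string]string{
		imgspecv1.MediaTypeImageLayer:     spec.MediaTypeLayerEnc,
		imgspecv1.MediaTypeImageLayerGzip: spec.MediaTypeLayerGzipEnc,
	}
	fmt.Println(enc[imgspecv1.MediaTypeImageLayerGzip]) // ...tar+gzip+encrypted
}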
109 vendor/github.com/containers/ocicrypt/utils/delayedreader.go generated vendored Normal file
@ -0,0 +1,109 @@
/*
Copyright The ocicrypt Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"io"
)

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// DelayedReader wraps an io.Reader and allows a client to use the Reader
// interface. The DelayedReader holds back some buffer to the client
// so that it can report any error that occurred on the Reader it wraps
// early to the client while it may still have held some data back.
type DelayedReader struct {
	reader   io.Reader // Reader to Read() bytes from and delay them
	err      error     // error that occurred on the reader
	buffer   []byte    // delay buffer
	bufbytes int       // number of bytes in the delay buffer to give to Read(); on '0' we return 'EOF' to caller
	bufoff   int       // offset in the delay buffer to give to Read()
}

// NewDelayedReader wraps an io.Reader and allocates a delay buffer of bufsize bytes
func NewDelayedReader(reader io.Reader, bufsize uint) io.Reader {
	return &DelayedReader{
		reader: reader,
		buffer: make([]byte, bufsize),
	}
}

// Read implements the io.Reader interface
func (dr *DelayedReader) Read(p []byte) (int, error) {
	if dr.err != nil && dr.err != io.EOF {
		return 0, dr.err
	}

	// if we are completely drained, return io.EOF
	if dr.err == io.EOF && dr.bufbytes == 0 {
		return 0, io.EOF
	}

	// only at the beginning we fill our delay buffer in an extra step
	if dr.bufbytes < len(dr.buffer) && dr.err == nil {
		dr.bufbytes, dr.err = FillBuffer(dr.reader, dr.buffer)
		if dr.err != nil && dr.err != io.EOF {
			return 0, dr.err
		}
	}
	// dr.err != nil means we have EOF and can drain the delay buffer
	// otherwise we need to still read from the reader

	var tmpbuf []byte
	tmpbufbytes := 0
	if dr.err == nil {
		tmpbuf = make([]byte, len(p))
		tmpbufbytes, dr.err = FillBuffer(dr.reader, tmpbuf)
		if dr.err != nil && dr.err != io.EOF {
			return 0, dr.err
		}
	}

	// copy out of the delay buffer into 'p'
	tocopy1 := min(len(p), dr.bufbytes)
	c1 := copy(p[:tocopy1], dr.buffer[dr.bufoff:])
	dr.bufoff += c1
	dr.bufbytes -= c1

	c2 := 0
	// can p still hold more data?
	if c1 < len(p) {
		// copy out of the tmpbuf into 'p'
		c2 = copy(p[tocopy1:], tmpbuf[:tmpbufbytes])
	}

	// if tmpbuf holds data we need to hold onto, copy them
	// into the delay buffer
	if tmpbufbytes-c2 > 0 {
		// left-shift the delay buffer and append the tmpbuf's remaining data
		dr.buffer = dr.buffer[dr.bufoff : dr.bufoff+dr.bufbytes]
		dr.buffer = append(dr.buffer, tmpbuf[c2:tmpbufbytes]...)
		dr.bufoff = 0
		dr.bufbytes = len(dr.buffer)
	}

	var err error
	if dr.bufbytes == 0 {
		err = io.EOF
	}
	return c1 + c2, err
}
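The point of the delay buffer is that a consumer sees a read error from the wrapped Reader before it has consumed the last bufsize bytes. A minimal usage sketch (not part of the vendored file):

package main

import (
	"io"
	"os"
	"strings"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	src := strings.NewReader("some layer data")
	dr := utils.NewDelayedReader(src, 1024) // hold back up to 1024 bytes
	io.Copy(os.Stdout, dr)                  // errors on src would surface early
}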
31 vendor/github.com/containers/ocicrypt/utils/ioutils.go generated vendored Normal file
@ -0,0 +1,31 @@
/*
Copyright The ocicrypt Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"io"
)

// FillBuffer fills the given buffer with as many bytes from the reader as possible.
// It returns io.EOF if an EOF was encountered, or any other error that occurred.
func FillBuffer(reader io.Reader, buffer []byte) (int, error) {
	n, err := io.ReadFull(reader, buffer)
	if err == io.ErrUnexpectedEOF {
		return n, io.EOF
	}
	return n, err
}
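The helper's only twist over io.ReadFull is normalizing a short final read (io.ErrUnexpectedEOF) to a plain io.EOF, which is what DelayedReader above relies on. A minimal sketch (not part of the vendored file):

package main

import (
	"fmt"
	"strings"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	buf := make([]byte, 32)
	// Source is shorter than the buffer: io.ReadFull alone would report
	// io.ErrUnexpectedEOF; FillBuffer reports io.EOF instead.
	n, err := utils.FillBuffer(strings.NewReader("short"), buf)
	fmt.Println(n, err) // 5 EOF
}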
166 vendor/github.com/containers/ocicrypt/utils/testing.go generated vendored Normal file
@ -0,0 +1,166 @@
/*
Copyright The ocicrypt Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"time"

	"github.com/pkg/errors"
)

// CreateRSAKey creates an RSA key
func CreateRSAKey(bits int) (*rsa.PrivateKey, error) {
	key, err := rsa.GenerateKey(rand.Reader, bits)
	if err != nil {
		return nil, errors.Wrap(err, "rsa.GenerateKey failed")
	}
	return key, nil
}

// CreateRSATestKey creates an RSA key of the given size and returns
// the public and private key in PEM or DER format
func CreateRSATestKey(bits int, password []byte, pemencode bool) ([]byte, []byte, error) {
	key, err := CreateRSAKey(bits)
	if err != nil {
		return nil, nil, err
	}

	pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		return nil, nil, errors.Wrap(err, "x509.MarshalPKIXPublicKey failed")
	}
	privData := x509.MarshalPKCS1PrivateKey(key)

	// no more encoding needed for DER
	if !pemencode {
		return pubData, privData, nil
	}

	publicKey := pem.EncodeToMemory(&pem.Block{
		Type:  "PUBLIC KEY",
		Bytes: pubData,
	})

	var block *pem.Block

	typ := "RSA PRIVATE KEY"
	if len(password) > 0 {
		block, err = x509.EncryptPEMBlock(rand.Reader, typ, privData, password, x509.PEMCipherAES256)
		if err != nil {
			return nil, nil, errors.Wrap(err, "x509.EncryptPEMBlock failed")
		}
	} else {
		block = &pem.Block{
			Type:  typ,
			Bytes: privData,
		}
	}

	privateKey := pem.EncodeToMemory(block)

	return publicKey, privateKey, nil
}

// CreateECDSATestKey creates an elliptic curve key for the given curve and returns
// the public and private key in DER format
func CreateECDSATestKey(curve elliptic.Curve) ([]byte, []byte, error) {
	key, err := ecdsa.GenerateKey(curve, rand.Reader)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "ecdsa.GenerateKey failed")
	}

	pubData, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "x509.MarshalPKIXPublicKey failed")
	}

	privData, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return nil, nil, errors.Wrapf(err, "x509.MarshalECPrivateKey failed")
	}

	return pubData, privData, nil
}

// CreateTestCA creates a root CA for testing
func CreateTestCA() (*rsa.PrivateKey, *x509.Certificate, error) {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, errors.Wrap(err, "rsa.GenerateKey failed")
	}

	ca := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName: "test-ca",
		},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(1, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		BasicConstraintsValid: true,
	}
	caCert, err := certifyKey(&key.PublicKey, ca, key, ca)

	return key, caCert, err
}

// CertifyKey certifies a public key using the given CA's private key and cert;
// The certificate template for the public key is optional
func CertifyKey(pubbytes []byte, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) {
	pubKey, err := ParsePublicKey(pubbytes, "CertifyKey")
	if err != nil {
		return nil, err
	}
	return certifyKey(pubKey, template, caKey, caCert)
}

func certifyKey(pub interface{}, template *x509.Certificate, caKey *rsa.PrivateKey, caCert *x509.Certificate) (*x509.Certificate, error) {
	if template == nil {
		template = &x509.Certificate{
			SerialNumber: big.NewInt(1),
			Subject: pkix.Name{
				CommonName: "testkey",
			},
			NotBefore:             time.Now(),
			NotAfter:              time.Now().Add(time.Hour),
			IsCA:                  false,
			KeyUsage:              x509.KeyUsageDigitalSignature,
			BasicConstraintsValid: true,
		}
	}

	certDER, err := x509.CreateCertificate(rand.Reader, template, caCert, pub, caKey)
	if err != nil {
		return nil, errors.Wrap(err, "x509.CreateCertificate failed")
	}

	cert, err := x509.ParseCertificate(certDER)
	if err != nil {
		return nil, errors.Wrap(err, "x509.ParseCertificate failed")
	}

	return cert, nil
}
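A sketch of the intended test-fixture flow (not part of the vendored file): mint a throwaway CA, generate a fresh RSA test key in DER, and certify it against the CA.

package main

import (
	"fmt"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	caKey, caCert, err := utils.CreateTestCA()
	if err != nil {
		panic(err)
	}
	pubDER, _, err := utils.CreateRSATestKey(2048, nil, false) // DER, no password
	if err != nil {
		panic(err)
	}
	// nil template falls back to the default "testkey" template above.
	cert, err := utils.CertifyKey(pubDER, nil, caKey, caCert)
	if err != nil {
		panic(err)
	}
	fmt.Println(cert.Subject.CommonName) // "testkey"
}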
220 vendor/github.com/containers/ocicrypt/utils/utils.go generated vendored Normal file
@ -0,0 +1,220 @@
/*
Copyright The ocicrypt Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"bytes"
	"crypto/x509"
	"encoding/base64"
	"encoding/pem"
	"fmt"
	"strings"

	"github.com/pkg/errors"
	"golang.org/x/crypto/openpgp"
	json "gopkg.in/square/go-jose.v2"
)

// parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key
func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) {
	jwk := json.JSONWebKey{}
	err := jwk.UnmarshalJSON(privKey)
	if err != nil {
		return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix)
	}
	if jwk.IsPublic() {
		return nil, fmt.Errorf("%s: JWK is not a private key", prefix)
	}
	return &jwk, nil
}

// parseJWKPublicKey parses the input byte array as a JWK
func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) {
	jwk := json.JSONWebKey{}
	err := jwk.UnmarshalJSON(privKey)
	if err != nil {
		return nil, errors.Wrapf(err, "%s: Could not parse input as JWK", prefix)
	}
	if !jwk.IsPublic() {
		return nil, fmt.Errorf("%s: JWK is not a public key", prefix)
	}
	return &jwk, nil
}

// IsPasswordError checks whether an error is related to a missing or wrong
// password
func IsPasswordError(err error) bool {
	if err == nil {
		return false
	}
	msg := strings.ToLower(err.Error())

	return strings.Contains(msg, "password") &&
		(strings.Contains(msg, "missing") || strings.Contains(msg, "wrong"))
}

// ParsePrivateKey tries to parse a private key in DER format first and
// PEM format after, returning an error if the parsing failed
func ParsePrivateKey(privKey, privKeyPassword []byte, prefix string) (interface{}, error) {
	key, err := x509.ParsePKCS8PrivateKey(privKey)
	if err != nil {
		key, err = x509.ParsePKCS1PrivateKey(privKey)
		if err != nil {
			key, err = x509.ParseECPrivateKey(privKey)
		}
	}
	if err != nil {
		block, _ := pem.Decode(privKey)
		if block != nil {
			var der []byte
			if x509.IsEncryptedPEMBlock(block) {
				if privKeyPassword == nil {
					return nil, errors.Errorf("%s: Missing password for encrypted private key", prefix)
				}
				der, err = x509.DecryptPEMBlock(block, privKeyPassword)
				if err != nil {
					return nil, errors.Errorf("%s: Wrong password: could not decrypt private key", prefix)
				}
			} else {
				der = block.Bytes
			}

			key, err = x509.ParsePKCS8PrivateKey(der)
			if err != nil {
				key, err = x509.ParsePKCS1PrivateKey(der)
				if err != nil {
					return nil, errors.Wrapf(err, "%s: Could not parse private key", prefix)
				}
			}
		} else {
			key, err = parseJWKPrivateKey(privKey, prefix)
		}
	}
	return key, err
}

// IsPrivateKey returns true in case the given byte array represents a private key
// It returns an error if for example the password is wrong
func IsPrivateKey(data []byte, password []byte) (bool, error) {
	_, err := ParsePrivateKey(data, password, "")
	return err == nil, err
}

// ParsePublicKey tries to parse a public key in DER format first and
// PEM format after, returning an error if the parsing failed
func ParsePublicKey(pubKey []byte, prefix string) (interface{}, error) {
	key, err := x509.ParsePKIXPublicKey(pubKey)
	if err != nil {
		block, _ := pem.Decode(pubKey)
		if block != nil {
			key, err = x509.ParsePKIXPublicKey(block.Bytes)
			if err != nil {
				return nil, errors.Wrapf(err, "%s: Could not parse public key", prefix)
			}
		} else {
			key, err = parseJWKPublicKey(pubKey, prefix)
		}
	}
	return key, err
}

// IsPublicKey returns true in case the given byte array represents a public key
func IsPublicKey(data []byte) bool {
	_, err := ParsePublicKey(data, "")
	return err == nil
}

// ParseCertificate tries to parse a certificate in DER format first and
// PEM format after, returning an error if the parsing failed
func ParseCertificate(certBytes []byte, prefix string) (*x509.Certificate, error) {
	x509Cert, err := x509.ParseCertificate(certBytes)
	if err != nil {
		block, _ := pem.Decode(certBytes)
		if block == nil {
			return nil, fmt.Errorf("%s: Could not PEM decode x509 certificate", prefix)
		}
		x509Cert, err = x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, errors.Wrapf(err, "%s: Could not parse x509 certificate", prefix)
		}
	}
	return x509Cert, err
}

// IsCertificate returns true in case the given byte array represents an x.509 certificate
func IsCertificate(data []byte) bool {
	_, err := ParseCertificate(data, "")
	return err == nil
}

// IsGPGPrivateKeyRing returns true in case the given byte array represents a GPG private key ring file
func IsGPGPrivateKeyRing(data []byte) bool {
	r := bytes.NewBuffer(data)
	_, err := openpgp.ReadKeyRing(r)
	return err == nil
}

// SortDecryptionKeys parses a list of comma separated base64 entries and sorts the data into
// a map. Each entry in the list may be either a GPG private key ring, private key, or x.509
// certificate
func SortDecryptionKeys(b64ItemList string) (map[string][][]byte, error) {
	dcparameters := make(map[string][][]byte)

	for _, b64Item := range strings.Split(b64ItemList, ",") {
		var password []byte
		b64Data := strings.Split(b64Item, ":")
		keyData, err := base64.StdEncoding.DecodeString(b64Data[0])
		if err != nil {
			return nil, errors.New("Could not base64 decode a passed decryption key")
		}
		if len(b64Data) == 2 {
			password, err = base64.StdEncoding.DecodeString(b64Data[1])
			if err != nil {
				return nil, errors.New("Could not base64 decode a passed decryption key password")
			}
		}
		var key string
		isPrivKey, err := IsPrivateKey(keyData, password)
		if IsPasswordError(err) {
			return nil, err
		}
		if isPrivKey {
			key = "privkeys"
			if _, ok := dcparameters["privkeys-passwords"]; !ok {
				dcparameters["privkeys-passwords"] = [][]byte{password}
			} else {
				dcparameters["privkeys-passwords"] = append(dcparameters["privkeys-passwords"], password)
			}
		} else if IsCertificate(keyData) {
			key = "x509s"
		} else if IsGPGPrivateKeyRing(keyData) {
			key = "gpg-privatekeys"
		}
		if key != "" {
			values := dcparameters[key]
			if values == nil {
				dcparameters[key] = [][]byte{keyData}
			} else {
				dcparameters[key] = append(dcparameters[key], keyData)
			}
		} else {
			return nil, errors.New("Unknown decryption key type")
		}
	}

	return dcparameters, nil
}
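A sketch (not part of the vendored file) of how decryption-key material flows into the dcparameters map: each comma-separated entry is base64(key), optionally followed by :base64(password). A throwaway PEM key is generated here so the sketch is self-contained.

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/containers/ocicrypt/utils"
)

func main() {
	// Generate a throwaway PEM private key (no password) to use as input.
	_, privPEM, err := utils.CreateRSATestKey(2048, nil, true)
	if err != nil {
		panic(err)
	}
	item := base64.StdEncoding.EncodeToString(privPEM)

	dcparameters, err := utils.SortDecryptionKeys(item)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(dcparameters["privkeys"]))           // 1
	fmt.Println(len(dcparameters["privkeys-passwords"])) // 1 (empty password)
}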
14 vendor/github.com/containers/storage/.cirrus.yml generated vendored
@ -19,9 +19,11 @@ env:
    ####
    # GCE project where images live
    IMAGE_PROJECT: "libpod-218412"
    FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-30-1-2-1556821664"
    PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-cloud-base-29-1-2-1541789245"
    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-1904-disco-v20190514"
    _BUILT_IMAGE_SUFFIX: "libpod-6228273469587456"
    FEDORA_CACHE_IMAGE_NAME: "fedora-31-${_BUILT_IMAGE_SUFFIX}"
    PRIOR_FEDORA_CACHE_IMAGE_NAME: "fedora-30-${_BUILT_IMAGE_SUFFIX}"
    UBUNTU_CACHE_IMAGE_NAME: "ubuntu-19-${_BUILT_IMAGE_SUFFIX}"
    PRIOR_UBUNTU_CACHE_IMAGE_NAME: "ubuntu-18-${_BUILT_IMAGE_SUFFIX}"

    ####
    #### Command variables to help avoid duplication
@ -49,11 +51,14 @@ gce_instance:
    image_name: "${FEDORA_CACHE_IMAGE_NAME}"

testing_task:
    depends_on:
        - lint
    gce_instance:  # Only need to specify differences from defaults (above)
        matrix:  # Duplicate this task for each matrix product.
            image_name: "${FEDORA_CACHE_IMAGE_NAME}"
            image_name: "${PRIOR_FEDORA_CACHE_IMAGE_NAME}"
            image_name: "${UBUNTU_CACHE_IMAGE_NAME}"
            # image_name: "${PRIOR_UBUNTU_CACHE_IMAGE_NAME}"  # No fuse3 support

    # Separate scripts for separate outputs, makes debugging easier.
    setup_script: '${CIRRUS_WORKING_DIR}/${SCRIPT_BASE}/setup.sh |& ${_TIMESTAMP}'
@ -99,6 +104,7 @@ meta_task:
            ${FEDORA_CACHE_IMAGE_NAME}
            ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
            ${UBUNTU_CACHE_IMAGE_NAME}
            ${PRIOR_UBUNTU_CACHE_IMAGE_NAME}
        BUILDID: "${CIRRUS_BUILD_ID}"
        REPOREF: "${CIRRUS_CHANGE_IN_REPO}"
        GCPJSON: ENCRYPTED[244a93fe8b386b48b96f748342bf741350e43805eee81dd04b45093bdf737e540b993fc735df41f131835fa0f9b65826]
@ -110,7 +116,7 @@ meta_task:

vendor_task:
    container:
        image: golang:1.12
        image: golang:1.13
    modules_cache:
        fingerprint_script: cat go.sum
        folder: $GOPATH/pkg/mod
15 vendor/github.com/containers/storage/.travis.yml generated vendored
@ -15,30 +15,21 @@ env:
    - GO_VERSION="stable"
      DISTRO="ubuntu"

    - GO_VERSION="1.11"
      DISTRO="ubuntu"

    - GO_VERSION="1.12"
    - GO_VERSION="1.12.12"
      DISTRO="ubuntu"

    # Fedora
    - GO_VERSION="stable"
      DISTRO="fedora"

    - GO_VERSION="1.11"
      DISTRO="fedora"

    - GO_VERSION="1.12"
    - GO_VERSION="1.12.12"
      DISTRO="fedora"

    # CentOS
    - GO_VERSION="stable"
      DISTRO="centos"

    - GO_VERSION="1.11"
      DISTRO="centos"

    - GO_VERSION="1.12"
    - GO_VERSION="1.12.12"
      DISTRO="centos"

# GO_VERSION="stable" builds successfully, but tests fail on all platforms.
3 vendor/github.com/containers/storage/Makefile generated vendored
@ -127,6 +127,9 @@ lint: install.tools
help: ## this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-z A-Z_-]+:.*?## / {gsub(" ",",",$$1);gsub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-21s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

vendor-in-container:
	podman run --privileged --rm --env HOME=/root -v `pwd`:/src -w /src golang make vendor

vendor:
	export GO111MODULE=on \
	$(GO) mod tidy && \
2 vendor/github.com/containers/storage/VERSION generated vendored
@ -1 +1 @@
1.13.5
1.14.0
2 vendor/github.com/containers/storage/drivers/copy/copy_linux.go generated vendored
@ -155,7 +155,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
	switch mode := f.Mode(); {
	case mode.IsRegular():
		id := fileID{dev: stat.Dev, ino: stat.Ino}
		id := fileID{dev: uint64(stat.Dev), ino: stat.Ino}
		if copyMode == Hardlink {
			isHardlink = true
			if err2 := os.Link(srcPath, dstPath); err2 != nil {
2 vendor/github.com/containers/storage/drivers/driver_linux.go generated vendored
@ -48,6 +48,8 @@ const (
	FsMagicZfs = FsMagic(0x2fc12fc1)
	// FsMagicOverlay filesystem id for overlay
	FsMagicOverlay = FsMagic(0x794C7630)
	// FsMagicFUSE filesystem id for FUSE
	FsMagicFUSE = FsMagic(0x65735546)
)

var (
42 vendor/github.com/containers/storage/drivers/overlay/overlay.go generated vendored
@ -231,13 +231,18 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
		}
	}

	fileSystemType := graphdriver.FsMagicOverlay
	if opts.mountProgram != "" {
		fileSystemType = graphdriver.FsMagicFUSE
	}

	d := &Driver{
		name:          "overlay",
		home:          home,
		runhome:       runhome,
		uidMaps:       options.UIDMaps,
		gidMaps:       options.GIDMaps,
		ctr:           graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
		ctr:           graphdriver.NewRefCounter(graphdriver.NewFsChecker(fileSystemType)),
		supportsDType: supportsDType,
		usingMetacopy: usingMetacopy,
		locker:        locker.New(),
@ -1016,8 +1021,39 @@ func (d *Driver) Put(id string) error {
	if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
		return err
	}
	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
		logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)

	unmounted := false

	if d.options.mountProgram != "" {
		// Attempt to unmount the FUSE mount using either fusermount or fusermount3.
		// If they fail, fallback to unix.Unmount
		for _, v := range []string{"fusermount3", "fusermount"} {
			err := exec.Command(v, "-u", mountpoint).Run()
			if err != nil && !os.IsNotExist(err) {
				logrus.Debugf("Error unmounting %s with %s - %v", mountpoint, v, err)
			}
			if err == nil {
				unmounted = true
				break
			}
		}
		// If fusermount|fusermount3 failed to unmount the FUSE file system, make sure all
		// pending changes are propagated to the file system
		if !unmounted {
			fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0)
			if err == nil {
				if err := unix.Syncfs(fd); err != nil {
					logrus.Debugf("Error Syncfs(%s) - %v", mountpoint, err)
				}
				unix.Close(fd)
			}
		}
	}

	if !unmounted {
		if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) {
			logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err)
		}
	}

	if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
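A condensed sketch of the unmount strategy this hunk introduces (not the driver's actual code, and the mountpoint path is hypothetical): prefer fusermount3/fusermount for FUSE mounts, sync pending changes if both fail, and only then fall back to a lazy detach.

package main

import (
	"log"
	"os/exec"

	"golang.org/x/sys/unix"
)

func unmountFuse(mountpoint string) {
	for _, v := range []string{"fusermount3", "fusermount"} {
		if err := exec.Command(v, "-u", mountpoint).Run(); err == nil {
			return // unmounted cleanly
		}
	}
	// fusermount failed: flush pending changes, then force a lazy unmount.
	if fd, err := unix.Open(mountpoint, unix.O_DIRECTORY, 0); err == nil {
		if err := unix.Syncfs(fd); err != nil {
			log.Printf("Syncfs(%s): %v", mountpoint, err)
		}
		unix.Close(fd)
	}
	if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
		log.Printf("Unmount(%s): %v", mountpoint, err)
	}
}

func main() { unmountFuse("/var/lib/containers/storage/overlay/ID/merged") }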
16 vendor/github.com/containers/storage/go.mod generated vendored
@ -3,22 +3,22 @@ module github.com/containers/storage
require (
	github.com/BurntSushi/toml v0.3.1
	github.com/DataDog/zstd v1.4.0 // indirect
	github.com/Microsoft/go-winio v0.4.12
	github.com/Microsoft/go-winio v0.4.14
	github.com/Microsoft/hcsshim v0.8.6
	github.com/docker/docker v0.0.0-20171019062838-86f080cff091
	github.com/docker/docker v0.0.0-20171019062838-86f080cff091 // indirect
	github.com/docker/go-units v0.4.0
	github.com/klauspost/compress v1.7.2
	github.com/klauspost/compress v1.9.2
	github.com/klauspost/cpuid v1.2.1 // indirect
	github.com/klauspost/pgzip v1.2.1
	github.com/mattn/go-shellwords v1.0.5
	github.com/mattn/go-shellwords v1.0.6
	github.com/mistifyio/go-zfs v2.1.1+incompatible
	github.com/opencontainers/go-digest v1.0.0-rc1
	github.com/opencontainers/runc v1.0.0-rc8
	github.com/opencontainers/selinux v1.2.2
	github.com/opencontainers/runc v1.0.0-rc9
	github.com/opencontainers/selinux v1.3.0
	github.com/pkg/errors v0.8.1
	github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7
	github.com/sirupsen/logrus v1.4.2
	github.com/stretchr/testify v1.3.0
	github.com/stretchr/testify v1.4.0
	github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2
	github.com/tchap/go-patricia v2.3.0+incompatible
	github.com/vbatts/tar-split v0.11.1
@ -26,3 +26,5 @@ require (
	golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb
	gotest.tools v0.0.0-20190624233834-05ebafbffc79
)

go 1.13
51 vendor/github.com/containers/storage/go.sum generated vendored
@ -4,8 +4,20 @@ github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo=
github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
github.com/Microsoft/go-winio v0.4.12 h1:xAfWHN1IrQ0NJ9TBC0KBZoqLjzDTr1ML+4MywiUOryc=
github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA=
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b h1:T4nWG1TXIxeor8mAu5bFguPJgSIGhZqv/f0z55KCrJM=
github.com/checkpoint-restore/go-criu v0.0.0-20190109184317-bdb7599cd87b/go.mod h1:TrMrLQfeENAPYPRsJuq3jsqdlRh3lvi6trTZJG8+tho=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50 h1:WMpHmC6AxwWb9hMqhudkqG7A/p14KiMnl6d3r1iUMjU=
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -13,10 +25,18 @@ github.com/docker/docker v0.0.0-20171019062838-86f080cff091 h1:QpxpTw4MJeOzbC7X0
github.com/docker/docker v0.0.0-20171019062838-86f080cff091/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4=
github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/klauspost/compress v1.7.2 h1:liMOoeIvFpr9kEvalrZ7VVBA4wGf7zfOgwBjzz/5g2Y=
github.com/klauspost/compress v1.7.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.1 h1:TWy0o9J9c6LK9C8t7Msh6IAJNXbsU/nvKLTQUU5HdaY=
github.com/klauspost/compress v1.9.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w=
github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
@ -25,16 +45,26 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGi
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/mattn/go-shellwords v1.0.5 h1:JhhFTIOslh5ZsPrpa3Wdg8bF0WI3b44EMblmU9wIsXc=
github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-shellwords v1.0.6 h1:9Jok5pILi5S1MnDirGVTufYGtksUs/V2BWUP3ZkeUUI=
github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618 h1:7InQ7/zrOh6SlFjaXFubv0xX0HsuC9qJsdqm7bNQpYM=
github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8 h1:dDCFes8Hj1r/i5qnypONo5jdOme/8HWZC/aNDyhECt0=
github.com/opencontainers/runc v1.0.0-rc8/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runtime-spec v1.0.1 h1:wY4pOY8fBdSIvs9+IDHC55thBuEulhzfSgKeC1yFvzQ=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.2.2 h1:Kx9J6eDG5/24A6DtUquGSpJQ+m2MUTahn4FtGEe8bFg=
github.com/opencontainers/selinux v1.2.2/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/opencontainers/selinux v1.3.0 h1:xsI95WzPZu5exzA6JzkLSfdr/DilzOhCJOqGe5TgR0g=
github.com/opencontainers/selinux v1.3.0/go.mod h1:+BLncwf63G4dgOzykXAxcmnFlUaOlkDdmw/CqsW6pjs=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -42,28 +72,49 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7 h1:gGBSHPOU7g8YjTbhwn+lvFm2VDEhhA+PwDIlstkgSxE=
github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/seccomp/libseccomp-golang v0.9.1 h1:NJjM5DNFOs0s3kYE1WUOr6G8V97sdt46rlXTMfXGWBo=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.1 h1:0Odu65rhcZ3JZaPHxl7tCI3V/C/Q9Zf82UFravl02dE=
github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g=
github.com/vishvananda/netlink v1.0.0 h1:bqNY2lgheFIu1meHUFSH3d7vG93AFyqg3oGbJCOJgSM=
github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f h1:nBX3nTcmxEtHSERBJaIo1Qa26VwRaopnZmfDQUXsF4I=
github.com/vishvananda/netns v0.0.0-20190625233234-7109fa855b0f/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180810170437-e96c4e24768d/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gotest.tools v0.0.0-20190624233834-05ebafbffc79 h1:C+K4iPg1rIvmCf4JjelkbWv2jeWevEwp05Lz8XfTYgE=
gotest.tools v0.0.0-20190624233834-05ebafbffc79/go.mod h1:R//lfYlUuTOTfblYI3lGoAAAebUdzjvbmQsuB7Ykd90=
3 vendor/github.com/containers/storage/pkg/archive/archive.go generated vendored
@ -821,11 +821,12 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
		// is asking for that file no matter what - which is true
		// for some files, like .dockerignore and Dockerfile (sometimes)
		if include != relFilePath {
			skip, err = pm.Matches(relFilePath)
			matches, err := pm.IsMatch(relFilePath)
			if err != nil {
				logrus.Errorf("Error matching %s: %v", relFilePath, err)
				return err
			}
			skip = matches
		}

		if skip {
13 vendor/github.com/containers/storage/pkg/archive/archive_linux.go generated vendored
@ -61,10 +61,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
	}
	if statErr == nil {
		if stat.Mode()&os.ModeCharDevice != 0 {
			// It's a whiteout for this directory, so it can't have been
			// both deleted and recreated in the layer we're diffing.
			s := stat.Sys().(*syscall.Stat_t)
			if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
			if isWhiteOut(stat) {
				return nil, nil
			}
		}
@ -98,8 +95,7 @@ func (o overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi
			// If it's whiteout for a parent directory, then the
			// original directory wasn't inherited into this layer,
			// so we don't need to emit whiteout for it.
			s := stat.Sys().(*syscall.Stat_t)
			if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
			if isWhiteOut(stat) {
				return nil, nil
			}
		}
@ -141,3 +137,8 @@ func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool,

	return true, nil
}

func isWhiteOut(stat os.FileInfo) bool {
	s := stat.Sys().(*syscall.Stat_t)
	return major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0
}
17 vendor/github.com/containers/storage/pkg/archive/changes_linux.go generated vendored
@ -307,9 +307,7 @@ func overlayLowerContainsWhiteout(root, path string) (bool, error) {
		return false, err
	}
	if err == nil && stat.Mode()&os.ModeCharDevice != 0 {
		// Check if there's whiteout for the specified item in the specified layer.
		s := stat.Sys().(*syscall.Stat_t)
		if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
		if isWhiteOut(stat) {
			return true, nil
		}
	}
@ -319,8 +317,7 @@ func overlayLowerContainsWhiteout(root, path string) (bool, error) {
func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (string, error) {
	// If it's a whiteout item, then a file or directory with that name is removed by this layer.
	if fi.Mode()&os.ModeCharDevice != 0 {
		s := fi.Sys().(*syscall.Stat_t)
		if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
		if isWhiteOut(fi) {
			return path, nil
		}
	}
@ -350,10 +347,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str
	}
	if err == nil {
		if stat.Mode()&os.ModeCharDevice != 0 {
			// It's a whiteout for this directory, so it can't have been
			// deleted in this layer.
			s := stat.Sys().(*syscall.Stat_t)
			if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
			if isWhiteOut(stat) {
				return "", nil
			}
		}
@ -370,10 +364,7 @@ func overlayDeletedFile(layers []string, root, path string, fi os.FileInfo) (str
	}
	if err == nil {
		if stat.Mode()&os.ModeCharDevice != 0 {
			// If it's whiteout for a parent directory, then the
			// original directory wasn't inherited into the top layer.
			s := stat.Sys().(*syscall.Stat_t)
			if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
			if isWhiteOut(stat) {
				return "", nil
			}
		}
194 vendor/github.com/containers/storage/pkg/config/config.go generated vendored
@ -1,5 +1,9 @@
package config

import (
	"fmt"
)

// ThinpoolOptionsConfig represents the "storage.options.thinpool"
// TOML config table.
type ThinpoolOptionsConfig struct {
@ -47,6 +51,9 @@ type ThinpoolOptionsConfig struct {
	// devices.
	MountOpt string `toml:"mountopt"`

	// Size
	Size string `toml:"size"`

	// UseDeferredDeletion marks device for deferred deletion
	UseDeferredDeletion string `toml:"use_deferred_deletion"`

@ -59,6 +66,47 @@ type ThinpoolOptionsConfig struct {
	XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
}

type AufsOptionsConfig struct {
	// MountOpt specifies extra mount options used when mounting
	MountOpt string `toml:"mountopt"`
}

type BtrfsOptionsConfig struct {
	// MinSpace is the minimal spaces allocated to the device
	MinSpace string `toml:"min_space"`
	// Size
	Size string `toml:"size"`
}

type OverlayOptionsConfig struct {
	// IgnoreChownErrors is a flag for whether chown errors should be
	// ignored when building an image.
	IgnoreChownErrors string `toml:"ignore_chown_errors"`
	// MountOpt specifies extra mount options used when mounting
	MountOpt string `toml:"mountopt"`
	// Alternative program to use for the mount of the file system
	MountProgram string `toml:"mount_program"`
	// Size
	Size string `toml:"size"`
	// Do not create a bind mount on the storage home
	SkipMountHome string `toml:"skip_mount_home"`
}

type VfsOptionsConfig struct {
	// IgnoreChownErrors is a flag for whether chown errors should be
	// ignored when building an image.
	IgnoreChownErrors string `toml:"ignore_chown_errors"`
}

type ZfsOptionsConfig struct {
	// MountOpt specifies extra mount options used when mounting
	MountOpt string `toml:"mountopt"`
	// Name is the File System name of the ZFS File system
	Name string `toml:"fsname"`
	// Size
	Size string `toml:"size"`
}

// OptionsConfig represents the "storage.options" TOML config table.
type OptionsConfig struct {
	// AdditionalImagesStores is the location of additional read/only
@ -83,12 +131,158 @@ type OptionsConfig struct {
	// RemapGroup is the name of one or more entries in /etc/subgid which
	// should be used to set up default GID mappings.
	RemapGroup string `toml:"remap-group"`

	// Aufs container options to be handed to aufs drivers
	Aufs struct{ AufsOptionsConfig } `toml:"aufs"`

	// Btrfs container options to be handed to btrfs drivers
	Btrfs struct{ BtrfsOptionsConfig } `toml:"btrfs"`

	// Thinpool container options to be handed to thinpool drivers
	Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"`

	// Overlay container options to be handed to overlay drivers
	Overlay struct{ OverlayOptionsConfig } `toml:"overlay"`

	// Vfs container options to be handed to VFS drivers
	Vfs struct{ VfsOptionsConfig } `toml:"vfs"`

	// Zfs container options to be handed to ZFS drivers
	Zfs struct{ ZfsOptionsConfig } `toml:"zfs"`

	// Do not create a bind mount on the storage home
	SkipMountHome string `toml:"skip_mount_home"`

	// Alternative program to use for the mount of the file system
	MountProgram string `toml:"mount_program"`

	// MountOpt specifies extra mount options used when mounting
	MountOpt string `toml:"mountopt"`
}

// GetGraphDriverOptions returns the driver specific options
func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
	var doptions []string
	switch driverName {
	case "aufs":
		if options.Aufs.MountOpt != "" {
			return append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Aufs.MountOpt))
		} else if options.MountOpt != "" {
			doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
		}

	case "btrfs":
		if options.Btrfs.MinSpace != "" {
			return append(doptions, fmt.Sprintf("%s.min_space=%s", driverName, options.Btrfs.MinSpace))
		}
		if options.Btrfs.Size != "" {
			doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Btrfs.Size))
		} else if options.Size != "" {
			doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
		}

	case "devicemapper":
		if options.Thinpool.AutoExtendPercent != "" {
			doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", options.Thinpool.AutoExtendPercent))
		}
		if options.Thinpool.AutoExtendThreshold != "" {
			doptions = append(doptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", options.Thinpool.AutoExtendThreshold))
		}
		if options.Thinpool.BaseSize != "" {
			doptions = append(doptions, fmt.Sprintf("dm.basesize=%s", options.Thinpool.BaseSize))
		}
		if options.Thinpool.BlockSize != "" {
			doptions = append(doptions, fmt.Sprintf("dm.blocksize=%s", options.Thinpool.BlockSize))
		}
		if options.Thinpool.DirectLvmDevice != "" {
			doptions = append(doptions, fmt.Sprintf("dm.directlvm_device=%s", options.Thinpool.DirectLvmDevice))
		}
		if options.Thinpool.DirectLvmDeviceForce != "" {
			doptions = append(doptions, fmt.Sprintf("dm.directlvm_device_force=%s", options.Thinpool.DirectLvmDeviceForce))
		}
		if options.Thinpool.Fs != "" {
			doptions = append(doptions, fmt.Sprintf("dm.fs=%s", options.Thinpool.Fs))
		}
		if options.Thinpool.LogLevel != "" {
			doptions = append(doptions, fmt.Sprintf("dm.libdm_log_level=%s", options.Thinpool.LogLevel))
		}
		if options.Thinpool.MinFreeSpace != "" {
			doptions = append(doptions, fmt.Sprintf("dm.min_free_space=%s", options.Thinpool.MinFreeSpace))
		}
		if options.Thinpool.MkfsArg != "" {
			doptions = append(doptions, fmt.Sprintf("dm.mkfsarg=%s", options.Thinpool.MkfsArg))
		}
		if options.Thinpool.MountOpt != "" {
			doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Thinpool.MountOpt))
		} else if options.MountOpt != "" {
			doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
		}

		if options.Thinpool.Size != "" {
			doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Thinpool.Size))
		} else if options.Size != "" {
			doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
		}

		if options.Thinpool.UseDeferredDeletion != "" {
			doptions = append(doptions, fmt.Sprintf("dm.use_deferred_deletion=%s", options.Thinpool.UseDeferredDeletion))
		}
		if options.Thinpool.UseDeferredRemoval != "" {
			doptions = append(doptions, fmt.Sprintf("dm.use_deferred_removal=%s", options.Thinpool.UseDeferredRemoval))
		}
		if options.Thinpool.XfsNoSpaceMaxRetries != "" {
			doptions = append(doptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", options.Thinpool.XfsNoSpaceMaxRetries))
		}

	case "overlay":
		if options.Overlay.IgnoreChownErrors != "" {
			doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors))
		} else if options.IgnoreChownErrors != "" {
			doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors))
		}
		if options.Overlay.MountProgram != "" {
			doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.Overlay.MountProgram))
		} else if options.MountProgram != "" {
			doptions = append(doptions, fmt.Sprintf("%s.mount_program=%s", driverName, options.MountProgram))
		}
		if options.Overlay.MountOpt != "" {
			doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Overlay.MountOpt))
		} else if options.MountOpt != "" {
			doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
		}
		if options.Overlay.Size != "" {
			doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Overlay.Size))
		} else if options.Size != "" {
			doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
		}

		if options.Overlay.SkipMountHome != "" {
			doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.Overlay.SkipMountHome))
		} else if options.SkipMountHome != "" {
			doptions = append(doptions, fmt.Sprintf("%s.skip_mount_home=%s", driverName, options.SkipMountHome))
		}

	case "vfs":
		if options.Vfs.IgnoreChownErrors != "" {
			doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors))
		} else if options.IgnoreChownErrors != "" {
			doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.IgnoreChownErrors))
		}

	case "zfs":
		if options.Zfs.Name != "" {
			doptions = append(doptions, fmt.Sprintf("%s.fsname=%s", driverName, options.Zfs.Name))
		}
		if options.Zfs.MountOpt != "" {
			doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.Zfs.MountOpt))
		} else if options.MountOpt != "" {
			doptions = append(doptions, fmt.Sprintf("%s.mountopt=%s", driverName, options.MountOpt))
|
||||
if options.Zfs.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Zfs.Size))
|
||||
} else if options.Size != "" {
|
||||
doptions = append(doptions, fmt.Sprintf("%s.size=%s", driverName, options.Size))
|
||||
}
|
||||
}
|
||||
return doptions
|
||||
}
|
||||
|
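For orientation, a minimal sketch of how these driver options get consumed, assuming the `config` package layout shown above (the `OptionsConfig` fields and `GetGraphDriverOptions`); the populated values here are illustrative only, not part of this diff:

package main

import (
    "fmt"

    cfg "github.com/containers/storage/pkg/config"
)

func main() {
    // Hypothetical values; real settings come from storage.conf.
    var options cfg.OptionsConfig
    options.Overlay.MountProgram = "/usr/bin/fuse-overlayfs"
    options.Size = "120G"

    // Should yield:
    //   [overlay.mount_program=/usr/bin/fuse-overlayfs overlay.size=120G]
    // since the driver-specific Overlay.Size is unset and the generic Size
    // acts as the fallback in the overlay case above.
    fmt.Println(cfg.GetGraphDriverOptions("overlay", options))
}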
82  vendor/github.com/containers/storage/pkg/fileutils/fileutils.go  (generated, vendored)
@@ -57,6 +57,7 @@ func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
    return pm, nil
}

// Deprecated: Please use the `MatchesResult` method instead.
// Matches matches path against all the patterns. Matches is not safe to be
// called concurrently.
func (pm *PatternMatcher) Matches(file string) (bool, error) {
@@ -96,6 +97,85 @@ func (pm *PatternMatcher) Matches(file string) (bool, error) {
    return matched, nil
}

type MatchResult struct {
    isMatched         bool
    matches, excludes uint
}

// IsMatched returns true if the overall result is matched.
func (m *MatchResult) IsMatched() bool {
    return m.isMatched
}

// Matches returns the number of matches of a MatchResult.
func (m *MatchResult) Matches() uint {
    return m.matches
}

// Excludes returns the number of excludes of a MatchResult.
func (m *MatchResult) Excludes() uint {
    return m.excludes
}

// MatchesResult verifies the provided filepath against all patterns.
// It returns the `*MatchResult` result for the patterns on success, otherwise
// an error. This method is not safe to be called concurrently.
func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) {
    file = filepath.FromSlash(file)
    parentPath := filepath.Dir(file)
    parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
    res = &MatchResult{false, 0, 0}

    for _, pattern := range pm.patterns {
        negative := false

        if pattern.exclusion {
            negative = true
        }

        match, err := pattern.match(file)
        if err != nil {
            return nil, err
        }

        if !match && parentPath != "." {
            // Check to see if the pattern matches one of our parent dirs.
            if len(pattern.dirs) <= len(parentPathDirs) {
                match, _ = pattern.match(strings.Join(
                    parentPathDirs[:len(pattern.dirs)],
                    string(os.PathSeparator)),
                )
            }
        }

        if match {
            res.isMatched = !negative
            if negative {
                res.excludes++
            } else {
                res.matches++
            }
        }
    }

    if res.matches > 0 {
        logrus.Debugf("Skipping excluded path: %s", file)
    }

    return res, nil
}

// IsMatch verifies the provided filepath against all patterns and returns true
// if it matches. A match is valid if the last match is a positive one.
// It returns an error on failure and is not safe to be called concurrently.
func (pm *PatternMatcher) IsMatch(file string) (matched bool, err error) {
    res, err := pm.MatchesResult(file)
    if err != nil {
        return false, err
    }
    return res.isMatched, nil
}
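A hedged usage sketch of the new API added above, assuming the exported `NewPatternMatcher` constructor from the same file; the patterns are examples only (the `!` prefix marks an exclusion, as in .dockerignore files):

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/fileutils"
)

func main() {
    pm, err := fileutils.NewPatternMatcher([]string{"vendor/**", "!vendor/README.md"})
    if err != nil {
        panic(err)
    }

    res, err := pm.MatchesResult("vendor/github.com/foo/bar.go")
    if err != nil {
        panic(err)
    }
    // IsMatched reports the overall verdict; Matches/Excludes count the hits
    // on positive and negated patterns respectively.
    fmt.Println(res.IsMatched(), res.Matches(), res.Excludes())
}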
// Exclusions returns true if any of the patterns define exclusions
func (pm *PatternMatcher) Exclusions() bool {
    return pm.exclusions
@@ -228,7 +308,7 @@ func Matches(file string, patterns []string) (bool, error) {
        return false, nil
    }

    return pm.Matches(file)
    return pm.IsMatch(file)
}

// CopyFile copies from src to dst until either EOF is reached
2  vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go  (generated, vendored)
@@ -63,7 +63,7 @@ func GetKernelVersion() (*VersionInfo, error) {
    }

    KVI.major = int(dwVersion & 0xFF)
    KVI.minor = int((dwVersion & 0XFF00) >> 8)
    KVI.minor = int((dwVersion & 0xFF00) >> 8)
    KVI.build = int((dwVersion & 0xFFFF0000) >> 16)

    return KVI, nil
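The hunk above only normalizes the hex-literal casing (`0XFF00` → `0xFF00`; both are legal Go). For reference, a standalone sketch of the bit layout being decoded — the packed dword value here is hypothetical:

package main

import "fmt"

func main() {
    // GetVersion()-style packed dword: build number in the high word,
    // minor version in bits 8-15, major version in the low byte.
    var dwVersion uint32 = 0x23F00206 // build 9200, minor 2, major 6

    major := int(dwVersion & 0xFF)
    minor := int((dwVersion & 0xFF00) >> 8)
    build := int((dwVersion & 0xFFFF0000) >> 16)

    fmt.Printf("%d.%d (build %d)\n", major, minor, build) // 6.2 (build 9200)
}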
2  vendor/github.com/containers/storage/pkg/system/stat_linux.go  (generated, vendored)
@@ -8,7 +8,7 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
        mode: s.Mode,
        uid:  s.Uid,
        gid:  s.Gid,
        rdev: s.Rdev,
        rdev: uint64(s.Rdev),
        mtim: s.Mtim}, nil
}
44  vendor/github.com/containers/storage/storage.conf  (generated, vendored)
@@ -21,25 +21,6 @@ graphroot = "/var/lib/containers/storage"
additionalimagestores = [
]

# Size is used to set a maximum size of the container image. Only supported by
# certain container storage drivers.
size = ""

# Path to a helper program to use for mounting the file system instead of mounting it
# directly.
#mount_program = "/usr/bin/fuse-overlayfs"

# mountopt specifies comma separated list of extra mount options
mountopt = "nodev"

# ignore_chown_errors can be set to allow a non privileged user running with
# a single UID within a user namespace to run containers. The user can pull
# and use any image even those with multiple uids. Note multiple UIDs will be
# squashed down to the default uid in the container. These images will have no
# separation between the users in the container. Only supported for the overlay
# and vfs drivers.
#ignore_chown_errors = false

# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
# and the length of the range of UIDs/GIDs. Additional mapped sets can be
@@ -61,6 +42,28 @@ mountopt = "nodev"
# remap-user = "storage"
# remap-group = "storage"

[storage.options.overlay]
# ignore_chown_errors can be set to allow a non privileged user running with
# a single UID within a user namespace to run containers. The user can pull
# and use any image even those with multiple uids. Note multiple UIDs will be
# squashed down to the default uid in the container. These images will have no
# separation between the users in the container. Only supported for the overlay
# and vfs drivers.
#ignore_chown_errors = false

# Path to a helper program to use for mounting the file system instead of mounting it
# directly.
#mount_program = "/usr/bin/fuse-overlayfs"

# mountopt specifies comma separated list of extra mount options
mountopt = "nodev"

# Set to skip a PRIVATE bind mount on the storage home directory.
skip_mount_home = "false"

# Size is used to set a maximum size of the container image.
# size = ""

[storage.options.thinpool]
# Storage Options for thinpool

@@ -111,6 +114,9 @@ mountopt = "nodev"
# device.
# mkfsarg = ""

# Size is used to set a maximum size of the container image.
# size = ""

# use_deferred_removal marks devicemapper block device for deferred removal.
# If the thinpool is in use when the driver attempts to remove it, the driver
# tells the kernel to remove it as soon as possible. Note this does not free
57  vendor/github.com/containers/storage/store.go  (generated, vendored)
@@ -18,7 +18,7 @@ import (
    "github.com/BurntSushi/toml"
    drivers "github.com/containers/storage/drivers"
    "github.com/containers/storage/pkg/archive"
    "github.com/containers/storage/pkg/config"
    cfg "github.com/containers/storage/pkg/config"
    "github.com/containers/storage/pkg/directory"
    "github.com/containers/storage/pkg/idtools"
    "github.com/containers/storage/pkg/ioutils"
@@ -3274,10 +3274,10 @@ func DefaultConfigFile(rootless bool) (string, error) {
// TOML-friendly explicit tables used for conversions.
type tomlConfig struct {
    Storage struct {
        Driver    string                         `toml:"driver"`
        RunRoot   string                         `toml:"runroot"`
        GraphRoot string                         `toml:"graphroot"`
        Options   struct{ config.OptionsConfig } `toml:"options"`
        Driver    string            `toml:"driver"`
        RunRoot   string            `toml:"runroot"`
        GraphRoot string            `toml:"graphroot"`
        Options   cfg.OptionsConfig `toml:"options"`
    } `toml:"storage"`
}

@@ -3307,50 +3307,6 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
    if config.Storage.GraphRoot != "" {
        storeOptions.GraphRoot = config.Storage.GraphRoot
    }
    if config.Storage.Options.Thinpool.AutoExtendPercent != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_percent=%s", config.Storage.Options.Thinpool.AutoExtendPercent))
    }

    if config.Storage.Options.Thinpool.AutoExtendThreshold != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.thinp_autoextend_threshold=%s", config.Storage.Options.Thinpool.AutoExtendThreshold))
    }

    if config.Storage.Options.Thinpool.BaseSize != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.basesize=%s", config.Storage.Options.Thinpool.BaseSize))
    }
    if config.Storage.Options.Thinpool.BlockSize != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.blocksize=%s", config.Storage.Options.Thinpool.BlockSize))
    }
    if config.Storage.Options.Thinpool.DirectLvmDevice != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device=%s", config.Storage.Options.Thinpool.DirectLvmDevice))
    }
    if config.Storage.Options.Thinpool.DirectLvmDeviceForce != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.directlvm_device_force=%s", config.Storage.Options.Thinpool.DirectLvmDeviceForce))
    }
    if config.Storage.Options.Thinpool.Fs != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.fs=%s", config.Storage.Options.Thinpool.Fs))
    }
    if config.Storage.Options.Thinpool.LogLevel != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.libdm_log_level=%s", config.Storage.Options.Thinpool.LogLevel))
    }
    if config.Storage.Options.Thinpool.MinFreeSpace != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.min_free_space=%s", config.Storage.Options.Thinpool.MinFreeSpace))
    }
    if config.Storage.Options.Thinpool.MkfsArg != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.mkfsarg=%s", config.Storage.Options.Thinpool.MkfsArg))
    }
    if config.Storage.Options.Thinpool.MountOpt != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.Thinpool.MountOpt))
    }
    if config.Storage.Options.Thinpool.UseDeferredDeletion != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_deletion=%s", config.Storage.Options.Thinpool.UseDeferredDeletion))
    }
    if config.Storage.Options.Thinpool.UseDeferredRemoval != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.use_deferred_removal=%s", config.Storage.Options.Thinpool.UseDeferredRemoval))
    }
    if config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("dm.xfs_nospace_max_retries=%s", config.Storage.Options.Thinpool.XfsNoSpaceMaxRetries))
    }
    for _, s := range config.Storage.Options.AdditionalImageStores {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
    }
@@ -3397,6 +3353,9 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
    if os.Getenv("STORAGE_DRIVER") != "" {
        storeOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER")
    }

    storeOptions.GraphDriverOptions = cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)

    if os.Getenv("STORAGE_OPTS") != "" {
        storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...)
    }
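A minimal sketch of the new flow in `ReloadConfigurationFile`: decode `storage.conf` into the `tomlConfig` shape above, then hand the options to `cfg.GetGraphDriverOptions` in one call, replacing the long per-option if-chain this diff removes. The local `tomlConfig` mirror is illustrative (store.go's own type is unexported); the decoder is the BurntSushi/toml package store.go already imports:

package main

import (
    "fmt"

    "github.com/BurntSushi/toml"
    cfg "github.com/containers/storage/pkg/config"
)

// Local mirror of store.go's unexported tomlConfig, for illustration only.
type tomlConfig struct {
    Storage struct {
        Driver    string            `toml:"driver"`
        RunRoot   string            `toml:"runroot"`
        GraphRoot string            `toml:"graphroot"`
        Options   cfg.OptionsConfig `toml:"options"`
    } `toml:"storage"`
}

func main() {
    var config tomlConfig
    if _, err := toml.DecodeFile("/etc/containers/storage.conf", &config); err != nil {
        panic(err)
    }
    // All driver-specific knobs now flow through the shared helper.
    opts := cfg.GetGraphDriverOptions(config.Storage.Driver, config.Storage.Options)
    fmt.Println(opts)
}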
2  vendor/github.com/docker/docker-credential-helpers/credentials/version.go  (generated, vendored)
@@ -1,4 +1,4 @@
package credentials

// Version holds a string describing the current version
const Version = "0.6.0"
const Version = "0.6.3"
7  vendor/github.com/fullsailor/pkcs7/.travis.yml  (generated, vendored, new file)
@@ -0,0 +1,7 @@
language: go

go:
  - 1.8
  - 1.9
  - "1.10"
  - tip
@@ -1,6 +1,6 @@
The MIT License (MIT)

Copyright (c) 2015 Klaus Post
Copyright (c) 2015 Andrew Smith

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
8  vendor/github.com/fullsailor/pkcs7/README.md  (generated, vendored, new file)
@@ -0,0 +1,8 @@
# pkcs7

[GoDoc](https://godoc.org/github.com/fullsailor/pkcs7)
[Build Status](https://travis-ci.org/fullsailor/pkcs7)

pkcs7 implements parsing and creating signed and enveloped messages.

- Documentation on [GoDoc](http://godoc.org/github.com/fullsailor/pkcs7)
248  vendor/github.com/fullsailor/pkcs7/ber.go  (generated, vendored, new file)
@@ -0,0 +1,248 @@
package pkcs7

import (
    "bytes"
    "errors"
)

// var encodeIndent = 0

type asn1Object interface {
    EncodeTo(writer *bytes.Buffer) error
}

type asn1Structured struct {
    tagBytes []byte
    content  []asn1Object
}

func (s asn1Structured) EncodeTo(out *bytes.Buffer) error {
    //fmt.Printf("%s--> tag: % X\n", strings.Repeat("| ", encodeIndent), s.tagBytes)
    //encodeIndent++
    inner := new(bytes.Buffer)
    for _, obj := range s.content {
        err := obj.EncodeTo(inner)
        if err != nil {
            return err
        }
    }
    //encodeIndent--
    out.Write(s.tagBytes)
    encodeLength(out, inner.Len())
    out.Write(inner.Bytes())
    return nil
}

type asn1Primitive struct {
    tagBytes []byte
    length   int
    content  []byte
}

func (p asn1Primitive) EncodeTo(out *bytes.Buffer) error {
    _, err := out.Write(p.tagBytes)
    if err != nil {
        return err
    }
    if err = encodeLength(out, p.length); err != nil {
        return err
    }
    //fmt.Printf("%s--> tag: % X length: %d\n", strings.Repeat("| ", encodeIndent), p.tagBytes, p.length)
    //fmt.Printf("%s--> content length: %d\n", strings.Repeat("| ", encodeIndent), len(p.content))
    out.Write(p.content)

    return nil
}

func ber2der(ber []byte) ([]byte, error) {
    if len(ber) == 0 {
        return nil, errors.New("ber2der: input ber is empty")
    }
    //fmt.Printf("--> ber2der: Transcoding %d bytes\n", len(ber))
    out := new(bytes.Buffer)

    obj, _, err := readObject(ber, 0)
    if err != nil {
        return nil, err
    }
    obj.EncodeTo(out)

    // if offset < len(ber) {
    //	return nil, fmt.Errorf("ber2der: Content longer than expected. Got %d, expected %d", offset, len(ber))
    //}

    return out.Bytes(), nil
}

// encodes lengths that are longer than 127 into string of bytes
func marshalLongLength(out *bytes.Buffer, i int) (err error) {
    n := lengthLength(i)

    for ; n > 0; n-- {
        err = out.WriteByte(byte(i >> uint((n-1)*8)))
        if err != nil {
            return
        }
    }

    return nil
}

// computes the byte length of an encoded length value
func lengthLength(i int) (numBytes int) {
    numBytes = 1
    for i > 255 {
        numBytes++
        i >>= 8
    }
    return
}

// encodes the length in DER format.
// If the length fits in 7 bits, the value is encoded directly.
//
// Otherwise, the number of bytes to encode the length is first determined.
// This number is likely to be 4 or less for a 32bit length. This number is
// added to 0x80. The length is encoded in big endian encoding and follows after.
//
// Examples:
//  length | byte 1 | bytes n
//  0      | 0x00   | -
//  120    | 0x78   | -
//  200    | 0x81   | 0xC8
//  500    | 0x82   | 0x01 0xF4
//
func encodeLength(out *bytes.Buffer, length int) (err error) {
    if length >= 128 {
        l := lengthLength(length)
        err = out.WriteByte(0x80 | byte(l))
        if err != nil {
            return
        }
        err = marshalLongLength(out, length)
        if err != nil {
            return
        }
    } else {
        err = out.WriteByte(byte(length))
        if err != nil {
            return
        }
    }
    return
}
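The examples in the comment above are easy to verify with a standalone re-implementation of the same rule (the real `encodeLength` is unexported); this sketch assumes nothing beyond the standard library:

package main

import (
    "bytes"
    "fmt"
)

// derLength re-implements the DER length rule: short form for lengths
// below 128, otherwise 0x80|n followed by n big-endian length bytes.
func derLength(length int) []byte {
    var out bytes.Buffer
    if length < 128 {
        out.WriteByte(byte(length))
        return out.Bytes()
    }
    n := 1
    for i := length; i > 255; i >>= 8 {
        n++
    }
    out.WriteByte(0x80 | byte(n))
    for ; n > 0; n-- {
        out.WriteByte(byte(length >> uint((n-1)*8)))
    }
    return out.Bytes()
}

func main() {
    fmt.Printf("% X\n", derLength(120)) // 78
    fmt.Printf("% X\n", derLength(200)) // 81 C8
    fmt.Printf("% X\n", derLength(500)) // 82 01 F4
}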
func readObject(ber []byte, offset int) (asn1Object, int, error) {
    //fmt.Printf("\n====> Starting readObject at offset: %d\n\n", offset)
    tagStart := offset
    b := ber[offset]
    offset++
    tag := b & 0x1F // last 5 bits
    if tag == 0x1F {
        tag = 0
        for ber[offset] >= 0x80 {
            tag = tag*128 + ber[offset] - 0x80
            offset++
        }
        tag = tag*128 + ber[offset] - 0x80
        offset++
    }
    tagEnd := offset

    kind := b & 0x20
    /*
        if kind == 0 {
            fmt.Print("--> Primitive\n")
        } else {
            fmt.Print("--> Constructed\n")
        }
    */
    // read length
    var length int
    l := ber[offset]
    offset++
    indefinite := false
    if l > 0x80 {
        numberOfBytes := (int)(l & 0x7F)
        if numberOfBytes > 4 { // int is only guaranteed to be 32bit
            return nil, 0, errors.New("ber2der: BER tag length too long")
        }
        if numberOfBytes == 4 && (int)(ber[offset]) > 0x7F {
            return nil, 0, errors.New("ber2der: BER tag length is negative")
        }
        if 0x0 == (int)(ber[offset]) {
            return nil, 0, errors.New("ber2der: BER tag length has leading zero")
        }
        //fmt.Printf("--> (compute length) indicator byte: %x\n", l)
        //fmt.Printf("--> (compute length) length bytes: % X\n", ber[offset:offset+numberOfBytes])
        for i := 0; i < numberOfBytes; i++ {
            length = length*256 + (int)(ber[offset])
            offset++
        }
    } else if l == 0x80 {
        indefinite = true
    } else {
        length = (int)(l)
    }

    //fmt.Printf("--> length        : %d\n", length)
    contentEnd := offset + length
    if contentEnd > len(ber) {
        return nil, 0, errors.New("ber2der: BER tag length is more than available data")
    }
    //fmt.Printf("--> content start : %d\n", offset)
    //fmt.Printf("--> content end   : %d\n", contentEnd)
    //fmt.Printf("--> content       : % X\n", ber[offset:contentEnd])
    var obj asn1Object
    if indefinite && kind == 0 {
        return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding")
    }
    if kind == 0 {
        obj = asn1Primitive{
            tagBytes: ber[tagStart:tagEnd],
            length:   length,
            content:  ber[offset:contentEnd],
        }
    } else {
        var subObjects []asn1Object
        for (offset < contentEnd) || indefinite {
            var subObj asn1Object
            var err error
            subObj, offset, err = readObject(ber, offset)
            if err != nil {
                return nil, 0, err
            }
            subObjects = append(subObjects, subObj)

            if indefinite {
                terminated, err := isIndefiniteTermination(ber, offset)
                if err != nil {
                    return nil, 0, err
                }

                if terminated {
                    break
                }
            }
        }
        obj = asn1Structured{
            tagBytes: ber[tagStart:tagEnd],
            content:  subObjects,
        }
    }

    // Apply indefinite form length with 0x0000 terminator.
    if indefinite {
        contentEnd = offset + 2
    }

    return obj, contentEnd, nil
}

func isIndefiniteTermination(ber []byte, offset int) (bool, error) {
    if len(ber)-offset < 2 {
        return false, errors.New("ber2der: Invalid BER format")
    }

    return bytes.Index(ber[offset:], []byte{0x0, 0x0}) == 0, nil
}
962  vendor/github.com/fullsailor/pkcs7/pkcs7.go  (generated, vendored, new file)
@@ -0,0 +1,962 @@
// Package pkcs7 implements parsing and generation of some PKCS#7 structures.
package pkcs7

import (
    "bytes"
    "crypto"
    "crypto/aes"
    "crypto/cipher"
    "crypto/des"
    "crypto/hmac"
    "crypto/rand"
    "crypto/rsa"
    "crypto/x509"
    "crypto/x509/pkix"
    "encoding/asn1"
    "errors"
    "fmt"
    "math/big"
    "sort"
    "time"

    _ "crypto/sha1" // for crypto.SHA1
)

// PKCS7 Represents a PKCS7 structure
type PKCS7 struct {
    Content      []byte
    Certificates []*x509.Certificate
    CRLs         []pkix.CertificateList
    Signers      []signerInfo
    raw          interface{}
}

type contentInfo struct {
    ContentType asn1.ObjectIdentifier
    Content     asn1.RawValue `asn1:"explicit,optional,tag:0"`
}

// ErrUnsupportedContentType is returned when a PKCS7 content is not supported.
// Currently only Data (1.2.840.113549.1.7.1), Signed Data (1.2.840.113549.1.7.2),
// and Enveloped Data are supported (1.2.840.113549.1.7.3)
var ErrUnsupportedContentType = errors.New("pkcs7: cannot parse data: unimplemented content type")

type unsignedData []byte

var (
    oidData                   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 1}
    oidSignedData             = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}
    oidEnvelopedData          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 3}
    oidSignedAndEnvelopedData = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 4}
    oidDigestedData           = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 5}
    oidEncryptedData          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 6}
    oidAttributeContentType   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 3}
    oidAttributeMessageDigest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 4}
    oidAttributeSigningTime   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 5}
)

type signedData struct {
    Version                    int                        `asn1:"default:1"`
    DigestAlgorithmIdentifiers []pkix.AlgorithmIdentifier `asn1:"set"`
    ContentInfo                contentInfo
    Certificates               rawCertificates        `asn1:"optional,tag:0"`
    CRLs                       []pkix.CertificateList `asn1:"optional,tag:1"`
    SignerInfos                []signerInfo           `asn1:"set"`
}

type rawCertificates struct {
    Raw asn1.RawContent
}

type envelopedData struct {
    Version              int
    RecipientInfos       []recipientInfo `asn1:"set"`
    EncryptedContentInfo encryptedContentInfo
}

type recipientInfo struct {
    Version                int
    IssuerAndSerialNumber  issuerAndSerial
    KeyEncryptionAlgorithm pkix.AlgorithmIdentifier
    EncryptedKey           []byte
}

type encryptedContentInfo struct {
    ContentType                asn1.ObjectIdentifier
    ContentEncryptionAlgorithm pkix.AlgorithmIdentifier
    EncryptedContent           asn1.RawValue `asn1:"tag:0,optional"`
}

type attribute struct {
    Type  asn1.ObjectIdentifier
    Value asn1.RawValue `asn1:"set"`
}

type issuerAndSerial struct {
    IssuerName   asn1.RawValue
    SerialNumber *big.Int
}

// MessageDigestMismatchError is returned when the signer data digest does not
// match the computed digest for the contained content
type MessageDigestMismatchError struct {
    ExpectedDigest []byte
    ActualDigest   []byte
}

func (err *MessageDigestMismatchError) Error() string {
    return fmt.Sprintf("pkcs7: Message digest mismatch\n\tExpected: %X\n\tActual  : %X", err.ExpectedDigest, err.ActualDigest)
}

type signerInfo struct {
    Version                   int `asn1:"default:1"`
    IssuerAndSerialNumber     issuerAndSerial
    DigestAlgorithm           pkix.AlgorithmIdentifier
    AuthenticatedAttributes   []attribute `asn1:"optional,tag:0"`
    DigestEncryptionAlgorithm pkix.AlgorithmIdentifier
    EncryptedDigest           []byte
    UnauthenticatedAttributes []attribute `asn1:"optional,tag:1"`
}

// Parse decodes a DER encoded PKCS7 package
func Parse(data []byte) (p7 *PKCS7, err error) {
    if len(data) == 0 {
        return nil, errors.New("pkcs7: input data is empty")
    }
    var info contentInfo
    der, err := ber2der(data)
    if err != nil {
        return nil, err
    }
    rest, err := asn1.Unmarshal(der, &info)
    if len(rest) > 0 {
        err = asn1.SyntaxError{Msg: "trailing data"}
        return
    }
    if err != nil {
        return
    }

    // fmt.Printf("--> Content Type: %s", info.ContentType)
    switch {
    case info.ContentType.Equal(oidSignedData):
        return parseSignedData(info.Content.Bytes)
    case info.ContentType.Equal(oidEnvelopedData):
        return parseEnvelopedData(info.Content.Bytes)
    }
    return nil, ErrUnsupportedContentType
}

func parseSignedData(data []byte) (*PKCS7, error) {
    var sd signedData
    asn1.Unmarshal(data, &sd)
    certs, err := sd.Certificates.Parse()
    if err != nil {
        return nil, err
    }
    // fmt.Printf("--> Signed Data Version %d\n", sd.Version)

    var compound asn1.RawValue
    var content unsignedData

    // The Content.Bytes may be empty on PKI responses.
    if len(sd.ContentInfo.Content.Bytes) > 0 {
        if _, err := asn1.Unmarshal(sd.ContentInfo.Content.Bytes, &compound); err != nil {
            return nil, err
        }
    }
    // Compound octet string
    if compound.IsCompound {
        if _, err = asn1.Unmarshal(compound.Bytes, &content); err != nil {
            return nil, err
        }
    } else {
        // assuming this is tag 04
        content = compound.Bytes
    }
    return &PKCS7{
        Content:      content,
        Certificates: certs,
        CRLs:         sd.CRLs,
        Signers:      sd.SignerInfos,
        raw:          sd}, nil
}

func (raw rawCertificates) Parse() ([]*x509.Certificate, error) {
    if len(raw.Raw) == 0 {
        return nil, nil
    }

    var val asn1.RawValue
    if _, err := asn1.Unmarshal(raw.Raw, &val); err != nil {
        return nil, err
    }

    return x509.ParseCertificates(val.Bytes)
}

func parseEnvelopedData(data []byte) (*PKCS7, error) {
    var ed envelopedData
    if _, err := asn1.Unmarshal(data, &ed); err != nil {
        return nil, err
    }
    return &PKCS7{
        raw: ed,
    }, nil
}

// Verify checks the signatures of a PKCS7 object
// WARNING: Verify does not check signing time or verify certificate chains at
// this time.
func (p7 *PKCS7) Verify() (err error) {
    if len(p7.Signers) == 0 {
        return errors.New("pkcs7: Message has no signers")
    }
    for _, signer := range p7.Signers {
        if err := verifySignature(p7, signer); err != nil {
            return err
        }
    }
    return nil
}
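A hedged sketch of the public entry points so far — `Parse` a DER-encoded blob, then `Verify` its signatures; the input file name is hypothetical:

package main

import (
    "fmt"
    "io/ioutil"

    "github.com/fullsailor/pkcs7"
)

func main() {
    // Hypothetical input: a DER-encoded PKCS#7 signedData blob.
    der, err := ioutil.ReadFile("signed.p7")
    if err != nil {
        panic(err)
    }
    p7, err := pkcs7.Parse(der)
    if err != nil {
        panic(err)
    }
    // Verify checks each signer's digest and signature; per the warning
    // above, it does not validate certificate chains or signing time.
    if err := p7.Verify(); err != nil {
        panic(err)
    }
    fmt.Printf("verified %d byte(s) of content\n", len(p7.Content))
}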
func verifySignature(p7 *PKCS7, signer signerInfo) error {
    signedData := p7.Content
    hash, err := getHashForOID(signer.DigestAlgorithm.Algorithm)
    if err != nil {
        return err
    }
    if len(signer.AuthenticatedAttributes) > 0 {
        // TODO(fullsailor): First check the content type match
        var digest []byte
        err := unmarshalAttribute(signer.AuthenticatedAttributes, oidAttributeMessageDigest, &digest)
        if err != nil {
            return err
        }
        h := hash.New()
        h.Write(p7.Content)
        computed := h.Sum(nil)
        if !hmac.Equal(digest, computed) {
            return &MessageDigestMismatchError{
                ExpectedDigest: digest,
                ActualDigest:   computed,
            }
        }
        // TODO(fullsailor): Optionally verify certificate chain
        // TODO(fullsailor): Optionally verify signingTime against certificate NotAfter/NotBefore
        signedData, err = marshalAttributes(signer.AuthenticatedAttributes)
        if err != nil {
            return err
        }
    }
    cert := getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
    if cert == nil {
        return errors.New("pkcs7: No certificate for signer")
    }

    algo := getSignatureAlgorithmFromAI(signer.DigestEncryptionAlgorithm)
    if algo == x509.UnknownSignatureAlgorithm {
        // I'm not sure what the spec here is, and the openssl sources were not
        // helpful. But, this is what App Store receipts appear to do.
        // The DigestEncryptionAlgorithm is just "rsaEncryption (PKCS #1)"
        // But we're expecting a digest + encryption algorithm. So... we're going
        // to determine an algorithm based on the DigestAlgorithm and this
        // encryption algorithm.
        if signer.DigestEncryptionAlgorithm.Algorithm.Equal(oidEncryptionAlgorithmRSA) {
            algo = getRSASignatureAlgorithmForDigestAlgorithm(hash)
        }
    }
    return cert.CheckSignature(algo, signedData, signer.EncryptedDigest)
}

func marshalAttributes(attrs []attribute) ([]byte, error) {
    encodedAttributes, err := asn1.Marshal(struct {
        A []attribute `asn1:"set"`
    }{A: attrs})
    if err != nil {
        return nil, err
    }

    // Remove the leading sequence octets
    var raw asn1.RawValue
    asn1.Unmarshal(encodedAttributes, &raw)
    return raw.Bytes, nil
}

var (
    oidDigestAlgorithmSHA1    = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
    oidEncryptionAlgorithmRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
)

func getCertFromCertsByIssuerAndSerial(certs []*x509.Certificate, ias issuerAndSerial) *x509.Certificate {
    for _, cert := range certs {
        if isCertMatchForIssuerAndSerial(cert, ias) {
            return cert
        }
    }
    return nil
}

func getHashForOID(oid asn1.ObjectIdentifier) (crypto.Hash, error) {
    switch {
    case oid.Equal(oidDigestAlgorithmSHA1):
        return crypto.SHA1, nil
    case oid.Equal(oidSHA256):
        return crypto.SHA256, nil
    }
    return crypto.Hash(0), ErrUnsupportedAlgorithm
}

func getRSASignatureAlgorithmForDigestAlgorithm(hash crypto.Hash) x509.SignatureAlgorithm {
    for _, details := range signatureAlgorithmDetails {
        if details.pubKeyAlgo == x509.RSA && details.hash == hash {
            return details.algo
        }
    }
    return x509.UnknownSignatureAlgorithm
}

// GetOnlySigner returns an x509.Certificate for the first signer of the signed
// data payload. If there are more or less than one signer, nil is returned
func (p7 *PKCS7) GetOnlySigner() *x509.Certificate {
    if len(p7.Signers) != 1 {
        return nil
    }
    signer := p7.Signers[0]
    return getCertFromCertsByIssuerAndSerial(p7.Certificates, signer.IssuerAndSerialNumber)
}

// ErrUnsupportedAlgorithm tells you when our quick dev assumptions have failed
var ErrUnsupportedAlgorithm = errors.New("pkcs7: cannot decrypt data: only RSA, DES, DES-EDE3, AES-256-CBC and AES-128-GCM supported")

// ErrNotEncryptedContent is returned when attempting to Decrypt data that is not encrypted data
var ErrNotEncryptedContent = errors.New("pkcs7: content data is a decryptable data type")

// Decrypt decrypts encrypted content info for recipient cert and private key
func (p7 *PKCS7) Decrypt(cert *x509.Certificate, pk crypto.PrivateKey) ([]byte, error) {
    data, ok := p7.raw.(envelopedData)
    if !ok {
        return nil, ErrNotEncryptedContent
    }
    recipient := selectRecipientForCertificate(data.RecipientInfos, cert)
    if recipient.EncryptedKey == nil {
        return nil, errors.New("pkcs7: no enveloped recipient for provided certificate")
    }
    if priv := pk.(*rsa.PrivateKey); priv != nil {
        var contentKey []byte
        contentKey, err := rsa.DecryptPKCS1v15(rand.Reader, priv, recipient.EncryptedKey)
        if err != nil {
            return nil, err
        }
        return data.EncryptedContentInfo.decrypt(contentKey)
    }
    fmt.Printf("Unsupported Private Key: %v\n", pk)
    return nil, ErrUnsupportedAlgorithm
}
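And the consuming side for enveloped data, as a sketch — the certificate/key file names are hypothetical, and only an RSA private key works with `Decrypt` as written above:

package main

import (
    "crypto/tls"
    "crypto/x509"
    "fmt"
    "io/ioutil"

    "github.com/fullsailor/pkcs7"
)

func main() {
    // Hypothetical recipient credentials in PEM files.
    pair, err := tls.LoadX509KeyPair("recipient.crt", "recipient.key")
    if err != nil {
        panic(err)
    }
    cert, err := x509.ParseCertificate(pair.Certificate[0])
    if err != nil {
        panic(err)
    }

    der, err := ioutil.ReadFile("envelope.p7")
    if err != nil {
        panic(err)
    }
    p7, err := pkcs7.Parse(der)
    if err != nil {
        panic(err)
    }
    // Decrypt unwraps the content key for this recipient certificate and
    // then decrypts the payload with it.
    plaintext, err := p7.Decrypt(cert, pair.PrivateKey)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s\n", plaintext)
}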
var oidEncryptionAlgorithmDESCBC = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 7}
var oidEncryptionAlgorithmDESEDE3CBC = asn1.ObjectIdentifier{1, 2, 840, 113549, 3, 7}
var oidEncryptionAlgorithmAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
var oidEncryptionAlgorithmAES128GCM = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 6}
var oidEncryptionAlgorithmAES128CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 2}

func (eci encryptedContentInfo) decrypt(key []byte) ([]byte, error) {
    alg := eci.ContentEncryptionAlgorithm.Algorithm
    if !alg.Equal(oidEncryptionAlgorithmDESCBC) &&
        !alg.Equal(oidEncryptionAlgorithmDESEDE3CBC) &&
        !alg.Equal(oidEncryptionAlgorithmAES256CBC) &&
        !alg.Equal(oidEncryptionAlgorithmAES128CBC) &&
        !alg.Equal(oidEncryptionAlgorithmAES128GCM) {
        fmt.Printf("Unsupported Content Encryption Algorithm: %s\n", alg)
        return nil, ErrUnsupportedAlgorithm
    }

    // EncryptedContent can either be constructed of multiple OCTET STRINGs
    // or _be_ a tagged OCTET STRING
    var cyphertext []byte
    if eci.EncryptedContent.IsCompound {
        // Complex case to concat all of the children OCTET STRINGs
        var buf bytes.Buffer
        cypherbytes := eci.EncryptedContent.Bytes
        for {
            var part []byte
            cypherbytes, _ = asn1.Unmarshal(cypherbytes, &part)
            buf.Write(part)
            if cypherbytes == nil {
                break
            }
        }
        cyphertext = buf.Bytes()
    } else {
        // Simple case, the bytes _are_ the cyphertext
        cyphertext = eci.EncryptedContent.Bytes
    }

    var block cipher.Block
    var err error

    switch {
    case alg.Equal(oidEncryptionAlgorithmDESCBC):
        block, err = des.NewCipher(key)
    case alg.Equal(oidEncryptionAlgorithmDESEDE3CBC):
        block, err = des.NewTripleDESCipher(key)
    case alg.Equal(oidEncryptionAlgorithmAES256CBC):
        fallthrough
    case alg.Equal(oidEncryptionAlgorithmAES128GCM), alg.Equal(oidEncryptionAlgorithmAES128CBC):
        block, err = aes.NewCipher(key)
    }

    if err != nil {
        return nil, err
    }

    if alg.Equal(oidEncryptionAlgorithmAES128GCM) {
        params := aesGCMParameters{}
        paramBytes := eci.ContentEncryptionAlgorithm.Parameters.Bytes

        _, err := asn1.Unmarshal(paramBytes, &params)
        if err != nil {
            return nil, err
        }

        gcm, err := cipher.NewGCM(block)
        if err != nil {
            return nil, err
        }

        if len(params.Nonce) != gcm.NonceSize() {
            return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
        }
        if params.ICVLen != gcm.Overhead() {
            return nil, errors.New("pkcs7: encryption algorithm parameters are incorrect")
        }

        plaintext, err := gcm.Open(nil, params.Nonce, cyphertext, nil)
        if err != nil {
            return nil, err
        }

        return plaintext, nil
    }

    iv := eci.ContentEncryptionAlgorithm.Parameters.Bytes
    if len(iv) != block.BlockSize() {
        return nil, errors.New("pkcs7: encryption algorithm parameters are malformed")
    }
    mode := cipher.NewCBCDecrypter(block, iv)
    plaintext := make([]byte, len(cyphertext))
    mode.CryptBlocks(plaintext, cyphertext)
    if plaintext, err = unpad(plaintext, mode.BlockSize()); err != nil {
        return nil, err
    }
    return plaintext, nil
}
func selectRecipientForCertificate(recipients []recipientInfo, cert *x509.Certificate) recipientInfo {
    for _, recp := range recipients {
        if isCertMatchForIssuerAndSerial(cert, recp.IssuerAndSerialNumber) {
            return recp
        }
    }
    return recipientInfo{}
}

func isCertMatchForIssuerAndSerial(cert *x509.Certificate, ias issuerAndSerial) bool {
    return cert.SerialNumber.Cmp(ias.SerialNumber) == 0 && bytes.Compare(cert.RawIssuer, ias.IssuerName.FullBytes) == 0
}

func pad(data []byte, blocklen int) ([]byte, error) {
    if blocklen < 1 {
        return nil, fmt.Errorf("invalid blocklen %d", blocklen)
    }
    padlen := blocklen - (len(data) % blocklen)
    if padlen == 0 {
        padlen = blocklen
    }
    pad := bytes.Repeat([]byte{byte(padlen)}, padlen)
    return append(data, pad...), nil
}

func unpad(data []byte, blocklen int) ([]byte, error) {
    if blocklen < 1 {
        return nil, fmt.Errorf("invalid blocklen %d", blocklen)
    }
    if len(data)%blocklen != 0 || len(data) == 0 {
        return nil, fmt.Errorf("invalid data len %d", len(data))
    }

    // the last byte is the length of padding
    padlen := int(data[len(data)-1])

    // check padding integrity, all bytes should be the same
    pad := data[len(data)-padlen:]
    for _, padbyte := range pad {
        if padbyte != byte(padlen) {
            return nil, errors.New("invalid padding")
        }
    }

    return data[:len(data)-padlen], nil
}
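The `pad`/`unpad` pair above implements standard PKCS#7 block padding: every pad byte equals the pad length, and a full extra block is appended when the data is already block-aligned. A self-contained sketch of the same rule, with an added bounds check on the claimed pad length:

package main

import (
    "bytes"
    "errors"
    "fmt"
)

// pkcs7Pad appends 1..blocklen bytes, each equal to the pad length.
func pkcs7Pad(data []byte, blocklen int) []byte {
    padlen := blocklen - len(data)%blocklen // full block when already aligned
    return append(data, bytes.Repeat([]byte{byte(padlen)}, padlen)...)
}

// pkcs7Unpad strips and validates the padding written by pkcs7Pad.
func pkcs7Unpad(data []byte, blocklen int) ([]byte, error) {
    if len(data) == 0 || len(data)%blocklen != 0 {
        return nil, errors.New("invalid data length")
    }
    padlen := int(data[len(data)-1])
    if padlen < 1 || padlen > blocklen {
        return nil, errors.New("invalid padding")
    }
    for _, b := range data[len(data)-padlen:] {
        if b != byte(padlen) {
            return nil, errors.New("invalid padding")
        }
    }
    return data[:len(data)-padlen], nil
}

func main() {
    padded := pkcs7Pad([]byte("YELLOW SUBMARINE!"), 8) // 17 -> 24 bytes, pad byte 0x07
    fmt.Printf("% X\n", padded)
    orig, _ := pkcs7Unpad(padded, 8)
    fmt.Printf("%s\n", orig)
}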
func unmarshalAttribute(attrs []attribute, attributeType asn1.ObjectIdentifier, out interface{}) error {
    for _, attr := range attrs {
        if attr.Type.Equal(attributeType) {
            _, err := asn1.Unmarshal(attr.Value.Bytes, out)
            return err
        }
    }
    return errors.New("pkcs7: attribute type not in attributes")
}

// UnmarshalSignedAttribute decodes a single attribute from the signer info
func (p7 *PKCS7) UnmarshalSignedAttribute(attributeType asn1.ObjectIdentifier, out interface{}) error {
    sd, ok := p7.raw.(signedData)
    if !ok {
        return errors.New("pkcs7: payload is not signedData content")
    }
    if len(sd.SignerInfos) < 1 {
        return errors.New("pkcs7: payload has no signers")
    }
    attributes := sd.SignerInfos[0].AuthenticatedAttributes
    return unmarshalAttribute(attributes, attributeType, out)
}

// SignedData is an opaque data structure for creating signed data payloads
type SignedData struct {
    sd            signedData
    certs         []*x509.Certificate
    messageDigest []byte
}

// Attribute represents a key value pair attribute. Value must be marshalable by
// `encoding/asn1`
type Attribute struct {
    Type  asn1.ObjectIdentifier
    Value interface{}
}

// SignerInfoConfig are optional values to include when adding a signer
type SignerInfoConfig struct {
    ExtraSignedAttributes []Attribute
}

// NewSignedData initializes a SignedData with content
func NewSignedData(data []byte) (*SignedData, error) {
    content, err := asn1.Marshal(data)
    if err != nil {
        return nil, err
    }
    ci := contentInfo{
        ContentType: oidData,
        Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
    }
    digAlg := pkix.AlgorithmIdentifier{
        Algorithm: oidDigestAlgorithmSHA1,
    }
    h := crypto.SHA1.New()
    h.Write(data)
    md := h.Sum(nil)
    sd := signedData{
        ContentInfo:                ci,
        Version:                    1,
        DigestAlgorithmIdentifiers: []pkix.AlgorithmIdentifier{digAlg},
    }
    return &SignedData{sd: sd, messageDigest: md}, nil
}
type attributes struct {
    types  []asn1.ObjectIdentifier
    values []interface{}
}

// Add adds the attribute, maintaining insertion order
func (attrs *attributes) Add(attrType asn1.ObjectIdentifier, value interface{}) {
    attrs.types = append(attrs.types, attrType)
    attrs.values = append(attrs.values, value)
}

type sortableAttribute struct {
    SortKey   []byte
    Attribute attribute
}

type attributeSet []sortableAttribute

func (sa attributeSet) Len() int {
    return len(sa)
}

func (sa attributeSet) Less(i, j int) bool {
    return bytes.Compare(sa[i].SortKey, sa[j].SortKey) < 0
}

func (sa attributeSet) Swap(i, j int) {
    sa[i], sa[j] = sa[j], sa[i]
}

func (sa attributeSet) Attributes() []attribute {
    attrs := make([]attribute, len(sa))
    for i, attr := range sa {
        attrs[i] = attr.Attribute
    }
    return attrs
}

func (attrs *attributes) ForMarshaling() ([]attribute, error) {
    sortables := make(attributeSet, len(attrs.types))
    for i := range sortables {
        attrType := attrs.types[i]
        attrValue := attrs.values[i]
        asn1Value, err := asn1.Marshal(attrValue)
        if err != nil {
            return nil, err
        }
        attr := attribute{
            Type:  attrType,
            Value: asn1.RawValue{Tag: 17, IsCompound: true, Bytes: asn1Value}, // 17 == SET tag
        }
        encoded, err := asn1.Marshal(attr)
        if err != nil {
            return nil, err
        }
        sortables[i] = sortableAttribute{
            SortKey:   encoded,
            Attribute: attr,
        }
    }
    sort.Sort(sortables)
    return sortables.Attributes(), nil
}

// AddSigner signs attributes about the content and adds certificate to payload
func (sd *SignedData) AddSigner(cert *x509.Certificate, pkey crypto.PrivateKey, config SignerInfoConfig) error {
    attrs := &attributes{}
    attrs.Add(oidAttributeContentType, sd.sd.ContentInfo.ContentType)
    attrs.Add(oidAttributeMessageDigest, sd.messageDigest)
    attrs.Add(oidAttributeSigningTime, time.Now())
    for _, attr := range config.ExtraSignedAttributes {
        attrs.Add(attr.Type, attr.Value)
    }
    finalAttrs, err := attrs.ForMarshaling()
    if err != nil {
        return err
    }
    signature, err := signAttributes(finalAttrs, pkey, crypto.SHA1)
    if err != nil {
        return err
    }

    ias, err := cert2issuerAndSerial(cert)
    if err != nil {
        return err
    }

    signer := signerInfo{
        AuthenticatedAttributes:   finalAttrs,
        DigestAlgorithm:           pkix.AlgorithmIdentifier{Algorithm: oidDigestAlgorithmSHA1},
        DigestEncryptionAlgorithm: pkix.AlgorithmIdentifier{Algorithm: oidSignatureSHA1WithRSA},
        IssuerAndSerialNumber:     ias,
        EncryptedDigest:           signature,
        Version:                   1,
    }
    // create signature of signed attributes
    sd.certs = append(sd.certs, cert)
    sd.sd.SignerInfos = append(sd.sd.SignerInfos, signer)
    return nil
}

// AddCertificate adds the certificate to the payload. Useful for parent certificates
func (sd *SignedData) AddCertificate(cert *x509.Certificate) {
    sd.certs = append(sd.certs, cert)
}

// Detach removes content from the signed data struct to make it a detached signature.
// This must be called right before Finish()
func (sd *SignedData) Detach() {
    sd.sd.ContentInfo = contentInfo{ContentType: oidData}
}

// Finish marshals the content and its signers
func (sd *SignedData) Finish() ([]byte, error) {
    sd.sd.Certificates = marshalCertificates(sd.certs)
    inner, err := asn1.Marshal(sd.sd)
    if err != nil {
        return nil, err
    }
    outer := contentInfo{
        ContentType: oidSignedData,
        Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: inner, IsCompound: true},
    }
    return asn1.Marshal(outer)
}
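Putting the signing half together, a sketch of the intended flow — `NewSignedData` → `AddSigner` → (optionally `Detach`) → `Finish`; the certificate and key file names are hypothetical:

package main

import (
    "crypto/tls"
    "crypto/x509"
    "io/ioutil"

    "github.com/fullsailor/pkcs7"
)

func main() {
    pair, err := tls.LoadX509KeyPair("signer.crt", "signer.key")
    if err != nil {
        panic(err)
    }
    cert, err := x509.ParseCertificate(pair.Certificate[0])
    if err != nil {
        panic(err)
    }

    sd, err := pkcs7.NewSignedData([]byte("hello, world"))
    if err != nil {
        panic(err)
    }
    // AddSigner hashes and signs the authenticated attributes (content
    // type, message digest, signing time) with SHA-1/RSA, per the code above.
    if err := sd.AddSigner(cert, pair.PrivateKey, pkcs7.SignerInfoConfig{}); err != nil {
        panic(err)
    }
    der, err := sd.Finish()
    if err != nil {
        panic(err)
    }
    if err := ioutil.WriteFile("signed.p7", der, 0644); err != nil {
        panic(err)
    }
}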
func cert2issuerAndSerial(cert *x509.Certificate) (issuerAndSerial, error) {
    var ias issuerAndSerial
    // The issuer RDNSequence has to match exactly the sequence in the certificate
    // We cannot use cert.Issuer.ToRDNSequence() here since it mangles the sequence
    ias.IssuerName = asn1.RawValue{FullBytes: cert.RawIssuer}
    ias.SerialNumber = cert.SerialNumber

    return ias, nil
}

// signs the DER encoded form of the attributes with the private key
func signAttributes(attrs []attribute, pkey crypto.PrivateKey, hash crypto.Hash) ([]byte, error) {
    attrBytes, err := marshalAttributes(attrs)
    if err != nil {
        return nil, err
    }
    h := hash.New()
    h.Write(attrBytes)
    hashed := h.Sum(nil)
    switch priv := pkey.(type) {
    case *rsa.PrivateKey:
        return rsa.SignPKCS1v15(rand.Reader, priv, crypto.SHA1, hashed)
    }
    return nil, ErrUnsupportedAlgorithm
}

// concats and wraps the certificates in the RawValue structure
func marshalCertificates(certs []*x509.Certificate) rawCertificates {
    var buf bytes.Buffer
    for _, cert := range certs {
        buf.Write(cert.Raw)
    }
    rawCerts, _ := marshalCertificateBytes(buf.Bytes())
    return rawCerts
}

// Even though the tag & length are stripped out during marshalling the
// RawContent, we have to encode it into the RawContent. If it's missing,
// then `asn1.Marshal()` will strip out the certificate wrapper instead.
func marshalCertificateBytes(certs []byte) (rawCertificates, error) {
    var val = asn1.RawValue{Bytes: certs, Class: 2, Tag: 0, IsCompound: true}
    b, err := asn1.Marshal(val)
    if err != nil {
        return rawCertificates{}, err
    }
    return rawCertificates{Raw: b}, nil
}

// DegenerateCertificate creates a signed data structure containing only the
// provided certificate or certificate chain.
func DegenerateCertificate(cert []byte) ([]byte, error) {
    rawCert, err := marshalCertificateBytes(cert)
    if err != nil {
        return nil, err
    }
    emptyContent := contentInfo{ContentType: oidData}
    sd := signedData{
        Version:      1,
        ContentInfo:  emptyContent,
        Certificates: rawCert,
        CRLs:         []pkix.CertificateList{},
    }
    content, err := asn1.Marshal(sd)
    if err != nil {
        return nil, err
    }
    signedContent := contentInfo{
        ContentType: oidSignedData,
        Content:     asn1.RawValue{Class: 2, Tag: 0, Bytes: content, IsCompound: true},
    }
    return asn1.Marshal(signedContent)
}
const (
|
||||
EncryptionAlgorithmDESCBC = iota
|
||||
EncryptionAlgorithmAES128GCM
|
||||
)
|
||||
|
||||
// ContentEncryptionAlgorithm determines the algorithm used to encrypt the
|
||||
// plaintext message. Change the value of this variable to change which
|
||||
// algorithm is used in the Encrypt() function.
|
||||
var ContentEncryptionAlgorithm = EncryptionAlgorithmDESCBC
|
||||
|
||||
// ErrUnsupportedEncryptionAlgorithm is returned when attempting to encrypt
|
||||
// content with an unsupported algorithm.
|
||||
var ErrUnsupportedEncryptionAlgorithm = errors.New("pkcs7: cannot encrypt content: only DES-CBC and AES-128-GCM supported")
|
||||
|
||||
const nonceSize = 12
|
||||
|
||||
type aesGCMParameters struct {
|
||||
Nonce []byte `asn1:"tag:4"`
|
||||
ICVLen int
|
||||
}
|
||||
|
||||
func encryptAES128GCM(content []byte) ([]byte, *encryptedContentInfo, error) {
|
||||
// Create AES key and nonce
|
||||
key := make([]byte, 16)
|
||||
nonce := make([]byte, nonceSize)
|
||||
|
||||
_, err := rand.Read(key)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
_, err = rand.Read(nonce)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Encrypt content
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
gcm, err := cipher.NewGCM(block)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ciphertext := gcm.Seal(nil, nonce, content, nil)
|
||||
|
||||
// Prepare ASN.1 Encrypted Content Info
|
||||
paramSeq := aesGCMParameters{
|
||||
Nonce: nonce,
|
||||
ICVLen: gcm.Overhead(),
|
||||
}
|
||||
|
||||
paramBytes, err := asn1.Marshal(paramSeq)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
eci := encryptedContentInfo{
|
||||
ContentType: oidData,
|
||||
ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
|
||||
Algorithm: oidEncryptionAlgorithmAES128GCM,
|
||||
Parameters: asn1.RawValue{
|
||||
Tag: asn1.TagSequence,
|
||||
Bytes: paramBytes,
|
||||
},
|
||||
},
|
||||
EncryptedContent: marshalEncryptedContent(ciphertext),
|
||||
}
|
||||
|
||||
return key, &eci, nil
|
||||
}
|
||||
|
||||
func encryptDESCBC(content []byte) ([]byte, *encryptedContentInfo, error) {
|
||||
// Create DES key & CBC IV
|
||||
key := make([]byte, 8)
|
||||
iv := make([]byte, des.BlockSize)
|
||||
_, err := rand.Read(key)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
_, err = rand.Read(iv)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Encrypt padded content
|
||||
block, err := des.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
mode := cipher.NewCBCEncrypter(block, iv)
|
||||
plaintext, err := pad(content, mode.BlockSize())
|
||||
cyphertext := make([]byte, len(plaintext))
|
||||
mode.CryptBlocks(cyphertext, plaintext)

    // Prepare ASN.1 Encrypted Content Info
    eci := encryptedContentInfo{
        ContentType: oidData,
        ContentEncryptionAlgorithm: pkix.AlgorithmIdentifier{
            Algorithm:  oidEncryptionAlgorithmDESCBC,
            Parameters: asn1.RawValue{Tag: 4, Bytes: iv},
        },
        EncryptedContent: marshalEncryptedContent(cyphertext),
    }

    return key, &eci, nil
}

// Encrypt creates and returns an envelope data PKCS7 structure with encrypted
// recipient keys for each recipient public key.
//
// The algorithm used to perform encryption is determined by the current value
// of the global ContentEncryptionAlgorithm package variable. By default, the
// value is EncryptionAlgorithmDESCBC. To use a different algorithm, change the
// value before calling Encrypt(). For example:
//
//     ContentEncryptionAlgorithm = EncryptionAlgorithmAES128GCM
//
// TODO(fullsailor): Add support for encrypting content with other algorithms
func Encrypt(content []byte, recipients []*x509.Certificate) ([]byte, error) {
    var eci *encryptedContentInfo
    var key []byte
    var err error

    // Apply chosen symmetric encryption method
    switch ContentEncryptionAlgorithm {
    case EncryptionAlgorithmDESCBC:
        key, eci, err = encryptDESCBC(content)

    case EncryptionAlgorithmAES128GCM:
        key, eci, err = encryptAES128GCM(content)

    default:
        return nil, ErrUnsupportedEncryptionAlgorithm
    }

    if err != nil {
        return nil, err
    }

    // Prepare each recipient's encrypted cipher key
    recipientInfos := make([]recipientInfo, len(recipients))
    for i, recipient := range recipients {
        encrypted, err := encryptKey(key, recipient)
        if err != nil {
            return nil, err
        }
        ias, err := cert2issuerAndSerial(recipient)
        if err != nil {
            return nil, err
        }
        info := recipientInfo{
            Version:               0,
            IssuerAndSerialNumber: ias,
            KeyEncryptionAlgorithm: pkix.AlgorithmIdentifier{
                Algorithm: oidEncryptionAlgorithmRSA,
            },
            EncryptedKey: encrypted,
        }
        recipientInfos[i] = info
    }

    // Prepare envelope content
    envelope := envelopedData{
        EncryptedContentInfo: *eci,
        Version:              0,
        RecipientInfos:       recipientInfos,
    }
    innerContent, err := asn1.Marshal(envelope)
    if err != nil {
        return nil, err
    }

    // Prepare outer payload structure
    wrapper := contentInfo{
        ContentType: oidEnvelopedData,
        Content:     asn1.RawValue{Class: 2, Tag: 0, IsCompound: true, Bytes: innerContent},
    }

    return asn1.Marshal(wrapper)
}

func marshalEncryptedContent(content []byte) asn1.RawValue {
    asn1Content, _ := asn1.Marshal(content)
    return asn1.RawValue{Tag: 0, Class: 2, Bytes: asn1Content, IsCompound: true}
}

func encryptKey(key []byte, recipient *x509.Certificate) ([]byte, error) {
    if pub, ok := recipient.PublicKey.(*rsa.PublicKey); ok { // comma-ok form; a bare type assertion would panic on non-RSA keys
        return rsa.EncryptPKCS1v15(rand.Reader, pub, key)
    }
    return nil, ErrUnsupportedAlgorithm
}
133 vendor/github.com/fullsailor/pkcs7/x509.go generated vendored Normal file
@ -0,0 +1,133 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the go/golang LICENSE file.

package pkcs7

// These are private constants and functions from the crypto/x509 package that
// are useful when dealing with signatures verified by x509 certificates

import (
    "bytes"
    "crypto"
    "crypto/x509"
    "crypto/x509/pkix"
    "encoding/asn1"
)

var (
    oidSignatureMD2WithRSA      = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
    oidSignatureMD5WithRSA      = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
    oidSignatureSHA1WithRSA     = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
    oidSignatureSHA256WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
    oidSignatureSHA384WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
    oidSignatureSHA512WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
    oidSignatureRSAPSS          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10}
    oidSignatureDSAWithSHA1     = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
    oidSignatureDSAWithSHA256   = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
    oidSignatureECDSAWithSHA1   = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
    oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
    oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
    oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}

    oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
    oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
    oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}

    oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8}

    // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA
    // but it's specified by ISO. Microsoft's makecert.exe has been known
    // to produce certificates with this OID.
    oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29}
)

var signatureAlgorithmDetails = []struct {
    algo       x509.SignatureAlgorithm
    name       string
    oid        asn1.ObjectIdentifier
    pubKeyAlgo x509.PublicKeyAlgorithm
    hash       crypto.Hash
}{
    {x509.MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */},
    {x509.MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, x509.RSA, crypto.MD5},
    {x509.SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
    {x509.SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, x509.RSA, crypto.SHA1},
    {x509.SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256},
    {x509.SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384},
    {x509.SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512},
    {x509.SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA256},
    {x509.SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA384},
    {x509.SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, x509.RSA, crypto.SHA512},
    {x509.DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1},
    {x509.DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256},
    {x509.ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1},
    {x509.ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256},
    {x509.ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384},
    {x509.ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512},
}

// pssParameters reflects the parameters in an AlgorithmIdentifier that
// specifies RSA PSS. See https://tools.ietf.org/html/rfc3447#appendix-A.2.3
type pssParameters struct {
    // The following three fields are not marked as
    // optional because the default values specify SHA-1,
    // which is no longer suitable for use in signatures.
    Hash         pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"`
    MGF          pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"`
    SaltLength   int                      `asn1:"explicit,tag:2"`
    TrailerField int                      `asn1:"optional,explicit,tag:3,default:1"`
}

// asn1.NullBytes is not available prior to Go 1.9
var nullBytes = []byte{5, 0}

func getSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) x509.SignatureAlgorithm {
    if !ai.Algorithm.Equal(oidSignatureRSAPSS) {
        for _, details := range signatureAlgorithmDetails {
            if ai.Algorithm.Equal(details.oid) {
                return details.algo
            }
        }
        return x509.UnknownSignatureAlgorithm
    }

    // RSA PSS is special because it encodes important parameters
    // in the Parameters.

    var params pssParameters
    if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, &params); err != nil {
        return x509.UnknownSignatureAlgorithm
    }

    var mgf1HashFunc pkix.AlgorithmIdentifier
    if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil {
        return x509.UnknownSignatureAlgorithm
    }

    // PSS is greatly overburdened with options. This code forces
    // them into three buckets by requiring that the MGF1 hash
    // function always match the message hash function (as
    // recommended in
    // https://tools.ietf.org/html/rfc3447#section-8.1), that the
    // salt length matches the hash length, and that the trailer
    // field has the default value.
    if !bytes.Equal(params.Hash.Parameters.FullBytes, nullBytes) ||
        !params.MGF.Algorithm.Equal(oidMGF1) ||
        !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
        !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, nullBytes) ||
        params.TrailerField != 1 {
        return x509.UnknownSignatureAlgorithm
    }

    switch {
    case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32:
        return x509.SHA256WithRSAPSS
    case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48:
        return x509.SHA384WithRSAPSS
    case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64:
        return x509.SHA512WithRSAPSS
    }

    return x509.UnknownSignatureAlgorithm
}
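A small sketch of the fast path above: any non-PSS OID resolves directly through the lookup table. The example function is assumed to live inside package pkcs7 (getSignatureAlgorithmFromAI is unexported) and is not part of the vendored file.

func exampleSignatureAlgorithmLookup() {
    // Plain PKCS#1 v1.5 signatures carry no meaningful parameters,
    // so the OID alone decides the algorithm.
    ai := pkix.AlgorithmIdentifier{Algorithm: oidSignatureSHA256WithRSA}
    if getSignatureAlgorithmFromAI(ai) != x509.SHA256WithRSA {
        panic("table lookup should map the SHA256-RSA OID directly")
    }
}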
2 vendor/github.com/imdario/mergo/.travis.yml generated vendored
@ -4,4 +4,6 @@ install:
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- go test -race -v ./...
after_script:
- $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
18 vendor/github.com/imdario/mergo/README.md generated vendored
@ -13,6 +13,7 @@ It is ready for production use. [It is used in several projects by Docker, Googl
[![Build Status][1]][2]
[![Coverage Status][7]][8]
[![Sourcegraph][9]][10]
[](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield)

[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
@ -27,7 +28,7 @@ It is ready for production use. [It is used in several projects by Docker, Googl

### Latest release

[Release v0.3.6](https://github.com/imdario/mergo/releases/tag/v0.3.6).
[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7).

### Important note

@ -217,6 +218,21 @@ If I can help you, you have an idea or you are using Mergo in your projects, don

Written by [Dario Castañé](http://dario.im).

## Top Contributors

[](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
[](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)


## License

[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).

[](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
1 vendor/github.com/imdario/mergo/map.go generated vendored
@ -72,6 +72,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
    case reflect.Struct:
        srcMap := src.Interface().(map[string]interface{})
        for key := range srcMap {
            config.overwriteWithEmptyValue = true
            srcValue := srcMap[key]
            fieldName := changeInitialCase(key, unicode.ToUpper)
            dstElement := dst.FieldByName(fieldName)
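A hedged sketch of the code path this hunk touches: mergo.Map merging a map[string]interface{} into a struct, where keys get their initial letter upcased (via changeInitialCase above) before the FieldByName lookup. The struct and values here are illustrative only.

package main

import (
    "fmt"
    "log"

    "github.com/imdario/mergo"
)

type server struct {
    Name string
    Port int
}

func main() {
    dst := server{}
    src := map[string]interface{}{"name": "edge", "port": 443}
    if err := mergo.Map(&dst, src); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%+v\n", dst) // {Name:edge Port:443}
}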
57 vendor/github.com/imdario/mergo/merge.go generated vendored
@ -26,9 +26,12 @@ func hasExportedField(dst reflect.Value) (exported bool) {
}

type Config struct {
    Overwrite    bool
    AppendSlice  bool
    Transformers Transformers
    Overwrite                    bool
    AppendSlice                  bool
    TypeCheck                    bool
    Transformers                 Transformers
    overwriteWithEmptyValue      bool
    overwriteSliceWithEmptyValue bool
}

type Transformers interface {
@ -40,6 +43,10 @@ type Transformers interface {
// short circuiting on recursive types.
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
    overwrite := config.Overwrite
    typeCheck := config.TypeCheck
    overwriteWithEmptySrc := config.overwriteWithEmptyValue
    overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
    config.overwriteWithEmptyValue = false

    if !src.IsValid() {
        return
@ -74,7 +81,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
            }
        }
    } else {
        if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
        if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
            dst.Set(src)
        }
    }
@ -125,22 +132,25 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
    dstSlice = reflect.ValueOf(dstElement.Interface())
}

if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
    if typeCheck && srcSlice.Type() != dstSlice.Type() {
        return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
    }
    dstSlice = srcSlice
} else if config.AppendSlice {
    if srcSlice.Type() != dstSlice.Type() {
        return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
        return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
    }
    dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
}
dst.SetMapIndex(key, dstSlice)
}
}
if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map {
if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
    continue
}

if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) {
if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
    if dst.IsNil() {
        dst.Set(reflect.MakeMap(dst.Type()))
    }
@ -151,7 +161,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
    if !dst.CanSet() {
        break
    }
    if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
    if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
        dst.Set(src)
    } else if config.AppendSlice {
        if src.Type() != dst.Type() {
@ -165,11 +175,21 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
    if src.IsNil() {
        break
    }
    if src.Kind() != reflect.Interface {

    if dst.Kind() != reflect.Ptr && src.Type().AssignableTo(dst.Type()) {
        if dst.IsNil() || overwrite {
            if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
                dst.Set(src)
            }
        }
        break
    }

    if src.Kind() != reflect.Interface {
        if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
            if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
                dst.Set(src)
            }
        } else if src.Kind() == reflect.Ptr {
            if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
                return
@ -191,10 +211,11 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
        return
    }
    default:
        if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
        if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
            dst.Set(src)
        }
    }

    return
}

@ -206,7 +227,7 @@ func Merge(dst, src interface{}, opts ...func(*Config)) error {
    return merge(dst, src, opts...)
}

// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by
// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
// non-empty src attribute values.
// Deprecated: use Merge(…) with WithOverride
func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
@ -225,11 +246,21 @@ func WithOverride(config *Config) {
    config.Overwrite = true
}

// WithAppendSlice will make merge append slices instead of overwriting it
// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
func WithOverrideEmptySlice(config *Config) {
    config.overwriteSliceWithEmptyValue = true
}

// WithAppendSlice will make merge append slices instead of overwriting it.
func WithAppendSlice(config *Config) {
    config.AppendSlice = true
}

// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride).
func WithTypeCheck(config *Config) {
    config.TypeCheck = true
}

func merge(dst, src interface{}, opts ...func(*Config)) error {
    var (
        vDst, vSrc reflect.Value
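A hedged usage sketch of the options this hunk introduces: WithOverride lets non-empty src fields win, and the new WithTypeCheck rejects slice overrides whose types differ. The conf struct and values are illustrative only.

package main

import (
    "fmt"
    "log"

    "github.com/imdario/mergo"
)

type conf struct {
    Hosts []string
    Port  int
}

func main() {
    dst := conf{Hosts: []string{"localhost"}}
    src := conf{Hosts: []string{"example.com"}, Port: 8080}
    if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithTypeCheck); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%+v\n", dst) // {Hosts:[example.com] Port:8080}
}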
42 vendor/github.com/klauspost/compress/flate/crc32_amd64.go generated vendored
@ -1,42 +0,0 @@
//+build !noasm
//+build !appengine
//+build !gccgo

// Copyright 2015, Klaus Post, see LICENSE for details.

package flate

import (
    "github.com/klauspost/cpuid"
)

// crc32sse returns a hash for the first 4 bytes of the slice
// len(a) must be >= 4.
//go:noescape
func crc32sse(a []byte) uint32

// crc32sseAll calculates hashes for each 4-byte set in a.
// dst must be at least len(a) - 4 in size.
// The size is not checked by the assembly.
//go:noescape
func crc32sseAll(a []byte, dst []uint32)

// matchLenSSE4 returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
//
// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
//
//go:noescape
func matchLenSSE4(a, b []byte, max int) int

// histogram accumulates a histogram of b in h.
// h must be at least 256 entries in length,
// and must be cleared before calling this function.
//go:noescape
func histogram(b []byte, h []int32)

// Detect SSE 4.2 feature.
func init() {
    useSSE42 = cpuid.CPU.SSE42()
}
214 vendor/github.com/klauspost/compress/flate/crc32_amd64.s generated vendored
@ -1,214 +0,0 @@
//+build !noasm
//+build !appengine
//+build !gccgo

// Copyright 2015, Klaus Post, see LICENSE for details.

// func crc32sse(a []byte) uint32
TEXT ·crc32sse(SB), 4, $0
    MOVQ a+0(FP), R10
    XORQ BX, BX

    // CRC32 dword (R10), EBX
    BYTE $0xF2; BYTE $0x41; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0x1a

    MOVL BX, ret+24(FP)
    RET

// func crc32sseAll(a []byte, dst []uint32)
TEXT ·crc32sseAll(SB), 4, $0
    MOVQ  a+0(FP), R8      // R8: src
    MOVQ  a_len+8(FP), R10 // input length
    MOVQ  dst+24(FP), R9   // R9: dst
    SUBQ  $4, R10
    JS    end
    JZ    one_crc
    MOVQ  R10, R13
    SHRQ  $2, R10 // len/4
    ANDQ  $3, R13 // len&3
    XORQ  BX, BX
    ADDQ  $1, R13
    TESTQ R10, R10
    JZ    rem_loop

crc_loop:
    MOVQ (R8), R11
    XORQ BX, BX
    XORQ DX, DX
    XORQ DI, DI
    MOVQ R11, R12
    SHRQ $8, R11
    MOVQ R12, AX
    MOVQ R11, CX
    SHRQ $16, R12
    SHRQ $16, R11
    MOVQ R12, SI

    // CRC32 EAX, EBX
    BYTE $0xF2; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0xd8

    // CRC32 ECX, EDX
    BYTE $0xF2; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0xd1

    // CRC32 ESI, EDI
    BYTE $0xF2; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0xfe
    MOVL BX, (R9)
    MOVL DX, 4(R9)
    MOVL DI, 8(R9)

    XORQ BX, BX
    MOVL R11, AX

    // CRC32 EAX, EBX
    BYTE $0xF2; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0xd8
    MOVL BX, 12(R9)

    ADDQ $16, R9
    ADDQ $4, R8
    XORQ BX, BX
    SUBQ $1, R10
    JNZ  crc_loop

rem_loop:
    MOVL (R8), AX

    // CRC32 EAX, EBX
    BYTE $0xF2; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0xd8

    MOVL BX, (R9)
    ADDQ $4, R9
    ADDQ $1, R8
    XORQ BX, BX
    SUBQ $1, R13
    JNZ  rem_loop

end:
    RET

one_crc:
    MOVQ $1, R13
    XORQ BX, BX
    JMP  rem_loop

// func matchLenSSE4(a, b []byte, max int) int
TEXT ·matchLenSSE4(SB), 4, $0
    MOVQ a_base+0(FP), SI
    MOVQ b_base+24(FP), DI
    MOVQ DI, DX
    MOVQ max+48(FP), CX

cmp8:
    // As long as we are 8 or more bytes before the end of max, we can load and
    // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
    CMPQ CX, $8
    JLT  cmp1
    MOVQ (SI), AX
    MOVQ (DI), BX
    CMPQ AX, BX
    JNE  bsf
    ADDQ $8, SI
    ADDQ $8, DI
    SUBQ $8, CX
    JMP  cmp8

bsf:
    // If those 8 bytes were not equal, XOR the two 8 byte values, and return
    // the index of the first byte that differs. The BSF instruction finds the
    // least significant 1 bit, the amd64 architecture is little-endian, and
    // the shift by 3 converts a bit index to a byte index.
    XORQ AX, BX
    BSFQ BX, BX
    SHRQ $3, BX
    ADDQ BX, DI

    // Subtract off &b[0] to convert from &b[ret] to ret, and return.
    SUBQ DX, DI
    MOVQ DI, ret+56(FP)
    RET

cmp1:
    // In the slices' tail, compare 1 byte at a time.
    CMPQ CX, $0
    JEQ  matchLenEnd
    MOVB (SI), AX
    MOVB (DI), BX
    CMPB AX, BX
    JNE  matchLenEnd
    ADDQ $1, SI
    ADDQ $1, DI
    SUBQ $1, CX
    JMP  cmp1

matchLenEnd:
    // Subtract off &b[0] to convert from &b[ret] to ret, and return.
    SUBQ DX, DI
    MOVQ DI, ret+56(FP)
    RET

// func histogram(b []byte, h []int32)
TEXT ·histogram(SB), 4, $0
    MOVQ b+0(FP), SI     // SI: &b
    MOVQ b_len+8(FP), R9 // R9: len(b)
    MOVQ h+24(FP), DI    // DI: Histogram
    MOVQ R9, R8
    SHRQ $3, R8
    JZ   hist1
    XORQ R11, R11

loop_hist8:
    MOVQ (SI), R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    INCL (DI)(R10*4)

    ADDQ $8, SI
    DECQ R8
    JNZ  loop_hist8

hist1:
    ANDQ $7, R9
    JZ   end_hist
    XORQ R10, R10

loop_hist1:
    MOVB (SI), R10
    INCL (DI)(R10*4)
    INCQ SI
    DECQ R9
    JNZ  loop_hist1

end_hist:
    RET
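A self-contained Go sketch of the XOR plus find-first-set trick the deleted matchLenSSE4 assembly used for its 8-byte fast path (math/bits stands in for the raw BSF instruction); the function and sample strings here are illustrative, not part of the package.

package main

import (
    "encoding/binary"
    "fmt"
    "math/bits"
)

func matchLen8(a, b []byte) int {
    x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
    if x == 0 {
        return 8 // all 8 bytes equal; the assembly would loop again
    }
    return bits.TrailingZeros64(x) >> 3 // bit index of first difference -> byte index
}

func main() {
    fmt.Println(matchLen8([]byte("deflate!"), []byte("deflated"))) // 7
}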
35 vendor/github.com/klauspost/compress/flate/crc32_noasm.go generated vendored
@ -1,35 +0,0 @@
//+build !amd64 noasm appengine gccgo

// Copyright 2015, Klaus Post, see LICENSE for details.

package flate

func init() {
    useSSE42 = false
}

// crc32sse should never be called.
func crc32sse(a []byte) uint32 {
    panic("no assembler")
}

// crc32sseAll should never be called.
func crc32sseAll(a []byte, dst []uint32) {
    panic("no assembler")
}

// matchLenSSE4 should never be called.
func matchLenSSE4(a, b []byte, max int) int {
    panic("no assembler")
    return 0
}

// histogram accumulates a histogram of b in h.
//
// len(h) must be >= 256, and h's elements must be all zeroes.
func histogram(b []byte, h []int32) {
    h = h[:256]
    for _, t := range b {
        h[t]++
    }
}
678 vendor/github.com/klauspost/compress/flate/deflate.go generated vendored
@ -50,8 +50,6 @@ const (
    skipNever = math.MaxInt32
)

var useSSE42 bool

type compressionLevel struct {
    good, lazy, nice, chain, fastSkipHashing, level int
}
@ -97,9 +95,8 @@ type advancedState struct {
    hashOffset int

    // input window: unprocessed data is window[index:windowEnd]
    index int
    bulkHasher func([]byte, []uint32)
    hashMatch [maxMatchLength + minMatchLength]uint32
    index     int
    hashMatch [maxMatchLength + minMatchLength]uint32
}

type compressor struct {
@ -120,7 +117,7 @@ type compressor struct {

    // queued output tokens
    tokens tokens
    snap   fastEnc
    fast   fastEnc
    state  *advancedState
}

@ -164,14 +161,14 @@ func (d *compressor) fillDeflate(b []byte) int {
    return n
}

func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
    if index > 0 || eof {
        var window []byte
        if d.blockStart <= index {
            window = d.window[d.blockStart:index]
        }
        d.blockStart = index
        d.w.writeBlock(tok.tokens[:tok.n], eof, window)
        d.w.writeBlock(tok, eof, window)
        return d.w.err
    }
    return nil
@ -180,20 +177,20 @@ func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
// writeBlockSkip writes the current block and uses the number of tokens
// to determine if the block should be stored on no matches, or
// only huffman encoded.
func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
    if index > 0 || eof {
        if d.blockStart <= index {
            window := d.window[d.blockStart:index]
            // If we removed less than a 64th of all literals
            // we huffman compress the block.
            if int(tok.n) > len(window)-int(tok.n>>6) {
                d.w.writeBlockHuff(eof, window)
                d.w.writeBlockHuff(eof, window, d.sync)
            } else {
                // Write a dynamic huffman block.
                d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window)
                d.w.writeBlockDynamic(tok, eof, window, d.sync)
            }
        } else {
            d.w.writeBlock(tok.tokens[:tok.n], eof, nil)
            d.w.writeBlock(tok, eof, nil)
        }
        d.blockStart = index
        return d.w.err
@ -208,8 +205,16 @@ func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
func (d *compressor) fillWindow(b []byte) {
    // Do not fill window if we are in store-only mode,
    // use constant or Snappy compression.
    switch d.compressionLevel.level {
    case 0, 1, 2:
    if d.level == 0 {
        return
    }
    if d.fast != nil {
        // encode the last data, but discard the result
        if len(b) > maxMatchOffset {
            b = b[len(b)-maxMatchOffset:]
        }
        d.fast.Encode(&d.tokens, b)
        d.tokens.Reset()
        return
    }
    s := d.state
@ -236,7 +241,7 @@ func (d *compressor) fillWindow(b []byte) {
    }

    dst := s.hashMatch[:dstSize]
    s.bulkHasher(tocheck, dst)
    bulkHash4(tocheck, dst)
    var newH uint32
    for i, val := range dst {
        di := i + startindex
@ -284,62 +289,7 @@ func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead

    for i := prevHead; tries > 0; tries-- {
        if wEnd == win[i+length] {
            n := matchLen(win[i:], wPos, minMatchLook)

            if n > length && (n > minMatchLength || pos-i <= 4096) {
                length = n
                offset = pos - i
                ok = true
                if n >= nice {
                    // The match is good enough that we don't try to find a better one.
                    break
                }
                wEnd = win[pos+n]
            }
        }
        if i == minIndex {
            // hashPrev[i & windowMask] has already been overwritten, so stop now.
            break
        }
        i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
        if i < minIndex || i < 0 {
            break
        }
    }
    return
}

// Try to find a match starting at index whose length is greater than prevSize.
// We only look at chainCount possibilities before giving up.
// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
    minMatchLook := maxMatchLength
    if lookahead < minMatchLook {
        minMatchLook = lookahead
    }

    win := d.window[0 : pos+minMatchLook]

    // We quit when we get a match that's at least nice long
    nice := len(win) - pos
    if d.nice < nice {
        nice = d.nice
    }

    // If we've got a match that's good enough, only look in 1/4 the chain.
    tries := d.chain
    length = prevLength
    if length >= d.good {
        tries >>= 2
    }

    wEnd := win[pos+length]
    wPos := win[pos:]
    minIndex := pos - windowSize

    for i := prevHead; tries > 0; tries-- {
        if wEnd == win[i+length] {
            n := matchLenSSE4(win[i:], wPos, minMatchLook)
            n := matchLen(win[i:i+minMatchLook], wPos)

            if n > length && (n > minMatchLength || pos-i <= 4096) {
                length = n
@ -372,44 +322,29 @@ func (d *compressor) writeStoredBlock(buf []byte) error {
    return d.w.err
}

const hashmul = 0x1e35a7bd

// hash4 returns a hash representation of the first 4 bytes
// of the supplied slice.
// The caller must ensure that len(b) >= 4.
func hash4(b []byte) uint32 {
    return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits)
    b = b[:4]
    return hash4u(uint32(b[3])|uint32(b[2])<<8|uint32(b[1])<<16|uint32(b[0])<<24, hashBits)
}

// bulkHash4 will compute hashes using the same
// algorithm as hash4
func bulkHash4(b []byte, dst []uint32) {
    if len(b) < minMatchLength {
    if len(b) < 4 {
        return
    }
    hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
    dst[0] = (hb * hashmul) >> (32 - hashBits)
    end := len(b) - minMatchLength + 1
    dst[0] = hash4u(hb, hashBits)
    end := len(b) - 4 + 1
    for i := 1; i < end; i++ {
        hb = (hb << 8) | uint32(b[i+3])
        dst[i] = (hb * hashmul) >> (32 - hashBits)
        dst[i] = hash4u(hb, hashBits)
    }
}

// matchLen returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
func matchLen(a, b []byte, max int) int {
    a = a[:max]
    b = b[:len(a)]
    for i, av := range a {
        if b[i] != av {
            return i
        }
    }
    return max
}
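A self-contained sketch of the rolling 4-byte hash update that bulkHash4 performs: shift one new byte in and reuse the previous 32-bit window. hash4u itself is not shown in this diff, so the multiply-shift below uses the pre-change hashmul form; hashBits is assumed to be 17 here, which is illustrative rather than authoritative.

package main

import "fmt"

const hashBits = 17         // assumed value for illustration
const hashmul = 0x1e35a7bd  // multiplier from the removed hash4 above

func main() {
    b := []byte("abcdefgh")
    // Seed the 32-bit window with the first four bytes, big-endian style.
    hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
    fmt.Printf("%05x\n", (hb*hashmul)>>(32-hashBits))
    for i := 1; i+3 < len(b); i++ {
        hb = (hb << 8) | uint32(b[i+3]) // roll the window one byte forward
        fmt.Printf("%05x\n", (hb*hashmul)>>(32-hashBits))
    }
}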

func (d *compressor) initDeflate() {
    d.window = make([]byte, 2*windowSize)
    d.byteAvailable = false
@ -424,149 +359,6 @@ func (d *compressor) initDeflate() {
    s.offset = 0
    s.hash = 0
    s.chainHead = -1
    s.bulkHasher = bulkHash4
    if useSSE42 {
        s.bulkHasher = crc32sseAll
    }
}

// Assumes that d.fastSkipHashing != skipNever,
// otherwise use deflateLazy
func (d *compressor) deflate() {
    s := d.state
    // Sanity enables additional runtime tests.
    // It's intended to be used during development
    // to supplement the currently ad-hoc unit tests.
    const sanity = false

    if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
        return
    }

    s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
    if s.index < s.maxInsertIndex {
        s.hash = hash4(d.window[s.index : s.index+minMatchLength])
    }

    for {
        if sanity && s.index > d.windowEnd {
            panic("index > windowEnd")
        }
        lookahead := d.windowEnd - s.index
        if lookahead < minMatchLength+maxMatchLength {
            if !d.sync {
                return
            }
            if sanity && s.index > d.windowEnd {
                panic("index > windowEnd")
            }
            if lookahead == 0 {
                if d.tokens.n > 0 {
                    if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
                        return
                    }
                    d.tokens.n = 0
                }
                return
            }
        }
        if s.index < s.maxInsertIndex {
            // Update the hash
            s.hash = hash4(d.window[s.index : s.index+minMatchLength])
            ch := s.hashHead[s.hash&hashMask]
            s.chainHead = int(ch)
            s.hashPrev[s.index&windowMask] = ch
            s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset)
        }
        s.length = minMatchLength - 1
        s.offset = 0
        minIndex := s.index - windowSize
        if minIndex < 0 {
            minIndex = 0
        }

        if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 {
            if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
                s.length = newLength
                s.offset = newOffset
            }
        }
        if s.length >= minMatchLength {
            s.ii = 0
            // There was a match at the previous step, and the current match is
            // not better. Output the previous match.
            // "s.length-3" should NOT be "s.length-minMatchLength", since the format always assume 3
            d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize))
            d.tokens.n++
            // Insert in the hash table all strings up to the end of the match.
            // index and index-1 are already inserted. If there is not enough
            // lookahead, the last two strings are not inserted into the hash
            // table.
            if s.length <= d.fastSkipHashing {
                var newIndex int
                newIndex = s.index + s.length
                // Calculate missing hashes
                end := newIndex
                if end > s.maxInsertIndex {
                    end = s.maxInsertIndex
                }
                end += minMatchLength - 1
                startindex := s.index + 1
                if startindex > s.maxInsertIndex {
                    startindex = s.maxInsertIndex
                }
                tocheck := d.window[startindex:end]
                dstSize := len(tocheck) - minMatchLength + 1
                if dstSize > 0 {
                    dst := s.hashMatch[:dstSize]
                    bulkHash4(tocheck, dst)
                    var newH uint32
                    for i, val := range dst {
                        di := i + startindex
                        newH = val & hashMask
                        // Get previous value with the same hash.
                        // Our chain should point to the previous value.
                        s.hashPrev[di&windowMask] = s.hashHead[newH]
                        // Set the head of the hash chain to us.
                        s.hashHead[newH] = uint32(di + s.hashOffset)
                    }
                    s.hash = newH
                }
                s.index = newIndex
            } else {
                // For matches this long, we don't bother inserting each individual
                // item into the table.
                s.index += s.length
                if s.index < s.maxInsertIndex {
                    s.hash = hash4(d.window[s.index : s.index+minMatchLength])
                }
            }
            if d.tokens.n == maxFlateBlockTokens {
                // The block includes the current character
                if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
                    return
                }
                d.tokens.n = 0
            }
        } else {
            s.ii++
            end := s.index + int(s.ii>>uint(d.fastSkipHashing)) + 1
            if end > d.windowEnd {
                end = d.windowEnd
            }
            for i := s.index; i < end; i++ {
                d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
                d.tokens.n++
                if d.tokens.n == maxFlateBlockTokens {
                    if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
                        return
                    }
                    d.tokens.n = 0
                }
            }
            s.index = end
        }
    }
}

// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
@ -603,15 +395,14 @@ func (d *compressor) deflateLazy() {
    // Flush current output block if any.
    if d.byteAvailable {
        // There is still one pending token that needs to be flushed
        d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
        d.tokens.n++
        d.tokens.AddLiteral(d.window[s.index-1])
        d.byteAvailable = false
    }
    if d.tokens.n > 0 {
        if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
        if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
            return
        }
        d.tokens.n = 0
        d.tokens.Reset()
    }
    return
}
@ -642,8 +433,7 @@ func (d *compressor) deflateLazy() {
    if prevLength >= minMatchLength && s.length <= prevLength {
        // There was a match at the previous step, and the current match is
        // not better. Output the previous match.
        d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
        d.tokens.n++
        d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))

        // Insert in the hash table all strings up to the end of the match.
        // index and index-1 are already inserted. If there is not enough
@ -684,10 +474,10 @@ func (d *compressor) deflateLazy() {
    s.length = minMatchLength - 1
    if d.tokens.n == maxFlateBlockTokens {
        // The block includes the current character
        if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
        if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
            return
        }
        d.tokens.n = 0
        d.tokens.Reset()
    }
} else {
    // Reset, if we got a match this run.
@ -697,13 +487,12 @@ func (d *compressor) deflateLazy() {
    // We have a byte waiting. Emit it.
    if d.byteAvailable {
        s.ii++
        d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
        d.tokens.n++
        d.tokens.AddLiteral(d.window[s.index-1])
        if d.tokens.n == maxFlateBlockTokens {
            if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
            if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
                return
            }
            d.tokens.n = 0
            d.tokens.Reset()
        }
        s.index++

@ -716,343 +505,24 @@ func (d *compressor) deflateLazy() {
    break
}

d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
d.tokens.n++
d.tokens.AddLiteral(d.window[s.index-1])
if d.tokens.n == maxFlateBlockTokens {
    if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
    if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
        return
    }
    d.tokens.n = 0
    d.tokens.Reset()
}
s.index++
}
// Flush last byte
d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
d.tokens.n++
d.tokens.AddLiteral(d.window[s.index-1])
d.byteAvailable = false
// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
if d.tokens.n == maxFlateBlockTokens {
    if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
    if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
        return
    }
    d.tokens.n = 0
}
}
} else {
    s.index++
    d.byteAvailable = true
}
}
}
}

// Assumes that d.fastSkipHashing != skipNever,
// otherwise use deflateLazySSE
func (d *compressor) deflateSSE() {
    s := d.state
    // Sanity enables additional runtime tests.
    // It's intended to be used during development
    // to supplement the currently ad-hoc unit tests.
    const sanity = false

    if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
        return
    }

    s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
    if s.index < s.maxInsertIndex {
        s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
    }

    for {
        if sanity && s.index > d.windowEnd {
            panic("index > windowEnd")
        }
        lookahead := d.windowEnd - s.index
        if lookahead < minMatchLength+maxMatchLength {
            if !d.sync {
                return
            }
            if sanity && s.index > d.windowEnd {
                panic("index > windowEnd")
            }
            if lookahead == 0 {
                if d.tokens.n > 0 {
                    if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
                        return
                    }
                    d.tokens.n = 0
                }
                return
            }
        }
        if s.index < s.maxInsertIndex {
            // Update the hash
            s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
            ch := s.hashHead[s.hash]
            s.chainHead = int(ch)
            s.hashPrev[s.index&windowMask] = ch
            s.hashHead[s.hash] = uint32(s.index + s.hashOffset)
        }
        s.length = minMatchLength - 1
        s.offset = 0
        minIndex := s.index - windowSize
        if minIndex < 0 {
            minIndex = 0
        }

        if s.chainHead-s.hashOffset >= minIndex && lookahead > minMatchLength-1 {
            if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
                s.length = newLength
                s.offset = newOffset
            }
        }
        if s.length >= minMatchLength {
            s.ii = 0
            // There was a match at the previous step, and the current match is
            // not better. Output the previous match.
            // "s.length-3" should NOT be "s.length-minMatchLength", since the format always assume 3
            d.tokens.tokens[d.tokens.n] = matchToken(uint32(s.length-3), uint32(s.offset-minOffsetSize))
            d.tokens.n++
            // Insert in the hash table all strings up to the end of the match.
            // index and index-1 are already inserted. If there is not enough
            // lookahead, the last two strings are not inserted into the hash
            // table.
            if s.length <= d.fastSkipHashing {
                var newIndex int
                newIndex = s.index + s.length
                // Calculate missing hashes
                end := newIndex
                if end > s.maxInsertIndex {
                    end = s.maxInsertIndex
                }
                end += minMatchLength - 1
                startindex := s.index + 1
                if startindex > s.maxInsertIndex {
                    startindex = s.maxInsertIndex
                }
                tocheck := d.window[startindex:end]
                dstSize := len(tocheck) - minMatchLength + 1
                if dstSize > 0 {
                    dst := s.hashMatch[:dstSize]

                    crc32sseAll(tocheck, dst)
                    var newH uint32
                    for i, val := range dst {
                        di := i + startindex
                        newH = val & hashMask
                        // Get previous value with the same hash.
                        // Our chain should point to the previous value.
                        s.hashPrev[di&windowMask] = s.hashHead[newH]
                        // Set the head of the hash chain to us.
                        s.hashHead[newH] = uint32(di + s.hashOffset)
                    }
                    s.hash = newH
                }
                s.index = newIndex
            } else {
                // For matches this long, we don't bother inserting each individual
                // item into the table.
                s.index += s.length
                if s.index < s.maxInsertIndex {
                    s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
                }
            }
            if d.tokens.n == maxFlateBlockTokens {
                // The block includes the current character
                if d.err = d.writeBlockSkip(d.tokens, s.index, false); d.err != nil {
                    return
                }
                d.tokens.n = 0
            }
        } else {
            s.ii++
            end := s.index + int(s.ii>>5) + 1
            if end > d.windowEnd {
                end = d.windowEnd
            }
            for i := s.index; i < end; i++ {
                d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
                d.tokens.n++
                if d.tokens.n == maxFlateBlockTokens {
                    if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
                        return
                    }
                    d.tokens.n = 0
                }
            }
            s.index = end
        }
    }
}

// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
// meaning it always has lazy matching on.
func (d *compressor) deflateLazySSE() {
    s := d.state
    // Sanity enables additional runtime tests.
    // It's intended to be used during development
    // to supplement the currently ad-hoc unit tests.
    const sanity = false

    if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
        return
    }

    s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
    if s.index < s.maxInsertIndex {
        s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
    }

    for {
        if sanity && s.index > d.windowEnd {
            panic("index > windowEnd")
        }
        lookahead := d.windowEnd - s.index
        if lookahead < minMatchLength+maxMatchLength {
            if !d.sync {
                return
            }
            if sanity && s.index > d.windowEnd {
                panic("index > windowEnd")
            }
            if lookahead == 0 {
                // Flush current output block if any.
                if d.byteAvailable {
                    // There is still one pending token that needs to be flushed
                    d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
                    d.tokens.n++
                    d.byteAvailable = false
                }
                if d.tokens.n > 0 {
                    if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
                        return
                    }
                    d.tokens.n = 0
                }
                return
            }
        }
        if s.index < s.maxInsertIndex {
            // Update the hash
            s.hash = crc32sse(d.window[s.index:s.index+minMatchLength]) & hashMask
            ch := s.hashHead[s.hash]
            s.chainHead = int(ch)
            s.hashPrev[s.index&windowMask] = ch
            s.hashHead[s.hash] = uint32(s.index + s.hashOffset)
        }
        prevLength := s.length
        prevOffset := s.offset
        s.length = minMatchLength - 1
        s.offset = 0
        minIndex := s.index - windowSize
        if minIndex < 0 {
            minIndex = 0
        }

        if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
            if newLength, newOffset, ok := d.findMatchSSE(s.index, s.chainHead-s.hashOffset, minMatchLength-1, lookahead); ok {
                s.length = newLength
                s.offset = newOffset
            }
        }
        if prevLength >= minMatchLength && s.length <= prevLength {
            // There was a match at the previous step, and the current match is
            // not better. Output the previous match.
            d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
            d.tokens.n++

            // Insert in the hash table all strings up to the end of the match.
            // index and index-1 are already inserted. If there is not enough
            // lookahead, the last two strings are not inserted into the hash
            // table.
            var newIndex int
            newIndex = s.index + prevLength - 1
            // Calculate missing hashes
            end := newIndex
            if end > s.maxInsertIndex {
                end = s.maxInsertIndex
            }
            end += minMatchLength - 1
            startindex := s.index + 1
            if startindex > s.maxInsertIndex {
                startindex = s.maxInsertIndex
            }
            tocheck := d.window[startindex:end]
            dstSize := len(tocheck) - minMatchLength + 1
            if dstSize > 0 {
                dst := s.hashMatch[:dstSize]
                crc32sseAll(tocheck, dst)
                var newH uint32
                for i, val := range dst {
                    di := i + startindex
                    newH = val & hashMask
                    // Get previous value with the same hash.
                    // Our chain should point to the previous value.
                    s.hashPrev[di&windowMask] = s.hashHead[newH]
                    // Set the head of the hash chain to us.
                    s.hashHead[newH] = uint32(di + s.hashOffset)
                }
                s.hash = newH
            }

            s.index = newIndex
            d.byteAvailable = false
            s.length = minMatchLength - 1
            if d.tokens.n == maxFlateBlockTokens {
                // The block includes the current character
                if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
                    return
                }
                d.tokens.n = 0
            }
        } else {
            // Reset, if we got a match this run.
            if s.length >= minMatchLength {
                s.ii = 0
            }
            // We have a byte waiting. Emit it.
            if d.byteAvailable {
                s.ii++
                d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
                d.tokens.n++
                if d.tokens.n == maxFlateBlockTokens {
                    if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
                        return
                    }
                    d.tokens.n = 0
                }
                s.index++

                // If we have a long run of no matches, skip additional bytes
                // Resets when s.ii overflows after 64KB.
                if s.ii > 31 {
                    n := int(s.ii >> 6)
                    for j := 0; j < n; j++ {
                        if s.index >= d.windowEnd-1 {
                            break
                        }

                        d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
                        d.tokens.n++
                        if d.tokens.n == maxFlateBlockTokens {
                            if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
                                return
                            }
                            d.tokens.n = 0
                        }
                        s.index++
                    }
                    // Flush last byte
                    d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[s.index-1]))
                    d.tokens.n++
                    d.byteAvailable = false
                    // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
                    if d.tokens.n == maxFlateBlockTokens {
                        if d.err = d.writeBlock(d.tokens, s.index, false); d.err != nil {
                            return
                        }
                        d.tokens.n = 0
                        d.tokens.Reset()
                    }
                }
            } else {
@ -1085,17 +555,17 @@ func (d *compressor) storeHuff() {
    if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
        return
    }
    d.w.writeBlockHuff(false, d.window[:d.windowEnd])
    d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
    d.err = d.w.err
    d.windowEnd = 0
}

// storeHuff will compress and store the currently added data,
// storeFast will compress and store the currently added data,
// if enough has been accumulated or we at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeSnappy() {
func (d *compressor) storeFast() {
    // We only compress if we have maxStoreBlockSize.
    if d.windowEnd < maxStoreBlockSize {
    if d.windowEnd < len(d.window) {
        if !d.sync {
            return
        }
@ -1106,32 +576,30 @@ func (d *compressor) storeSnappy() {
    }
    if d.windowEnd <= 32 {
        d.err = d.writeStoredBlock(d.window[:d.windowEnd])
        d.tokens.n = 0
        d.windowEnd = 0
    } else {
        d.w.writeBlockHuff(false, d.window[:d.windowEnd])
        d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
        d.err = d.w.err
    }
    d.tokens.n = 0
    d.tokens.Reset()
    d.windowEnd = 0
    d.snap.Reset()
    d.fast.Reset()
    return
    }
}

d.snap.Encode(&d.tokens, d.window[:d.windowEnd])
d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
// If we made zero matches, store the block as is.
if int(d.tokens.n) == d.windowEnd {
if d.tokens.n == 0 {
    d.err = d.writeStoredBlock(d.window[:d.windowEnd])
    // If we removed less than 1/16th, huffman compress the block.
} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
    d.w.writeBlockHuff(false, d.window[:d.windowEnd])
    d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
    d.err = d.w.err
} else {
    d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd])
    d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
    d.err = d.w.err
}
d.tokens.n = 0
d.tokens.Reset()
d.windowEnd = 0
}

@ -1176,36 +644,26 @@ func (d *compressor) init(w io.Writer, level int) (err error) {
    d.fill = (*compressor).fillBlock
    d.step = (*compressor).store
case level == ConstantCompression:
    d.w.logReusePenalty = uint(4)
    d.window = make([]byte, maxStoreBlockSize)
    d.fill = (*compressor).fillBlock
    d.step = (*compressor).storeHuff
case level >= 1 && level <= 4:
    d.snap = newFastEnc(level)
    d.window = make([]byte, maxStoreBlockSize)
    d.fill = (*compressor).fillBlock
    d.step = (*compressor).storeSnappy
case level == DefaultCompression:
    level = 5
    fallthrough
case 5 <= level && level <= 9:
case level >= 1 && level <= 6:
    d.w.logReusePenalty = uint(level + 1)
    d.fast = newFastEnc(level)
    d.window = make([]byte, maxStoreBlockSize)
    d.fill = (*compressor).fillBlock
    d.step = (*compressor).storeFast
case 7 <= level && level <= 9:
    d.w.logReusePenalty = uint(level)
    d.state = &advancedState{}
    d.compressionLevel = levels[level]
    d.initDeflate()
    d.fill = (*compressor).fillDeflate
    if d.fastSkipHashing == skipNever {
        if useSSE42 {
            d.step = (*compressor).deflateLazySSE
        } else {
            d.step = (*compressor).deflateLazy
        }
    } else {
        if useSSE42 {
            d.step = (*compressor).deflateSSE
        } else {
            d.step = (*compressor).deflate

        }
    }
    d.step = (*compressor).deflateLazy
default:
    return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
}
@ -1218,10 +676,10 @@ func (d *compressor) reset(w io.Writer) {
    d.sync = false
    d.err = nil
    // We only need to reset a few things for Snappy.
    if d.snap != nil {
        d.snap.Reset()
    if d.fast != nil {
        d.fast.Reset()
        d.windowEnd = 0
        d.tokens.n = 0
        d.tokens.Reset()
        return
    }
    switch d.compressionLevel.chain {
@ -1240,7 +698,7 @@ func (d *compressor) reset(w io.Writer) {
    s.hashOffset = 1
    s.index, d.windowEnd = 0, 0
    d.blockStart, d.byteAvailable = 0, false
    d.tokens.n = 0
    d.tokens.Reset()
    s.length = minMatchLength - 1
    s.offset = 0
    s.hash = 0
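A hedged caller-side sketch of the level routing this hunk rewires: levels 1-6 now go through the fastEnc path (storeFast), while 7-9 use deflateLazy. The data and level here are illustrative; flate.NewWriter is the package's public entry point.

package main

import (
    "bytes"
    "fmt"
    "log"

    "github.com/klauspost/compress/flate"
)

func main() {
    var buf bytes.Buffer
    w, err := flate.NewWriter(&buf, 6) // level 6: last of the fastEnc levels after this change
    if err != nil {
        log.Fatal(err)
    }
    if _, err := w.Write(bytes.Repeat([]byte("skopeo "), 512)); err != nil {
        log.Fatal(err)
    }
    if err := w.Close(); err != nil {
        log.Fatal(err)
    }
    fmt.Printf("3584 bytes -> %d bytes\n", buf.Len())
}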
Some files were not shown because too many files have changed in this diff.