diff --git a/Dockerfile b/Dockerfile index 28d82980..6e067809 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ FROM fedora -RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-go-md2man \ +RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2man \ # storage deps btrfs-progs-devel \ device-mapper-devel \ diff --git a/systemtest/helpers.bash b/systemtest/helpers.bash index d30e0676..bc2c00a3 100644 --- a/systemtest/helpers.bash +++ b/systemtest/helpers.bash @@ -106,6 +106,23 @@ function run_skopeo() { fi } +################# +# log_and_run # log a command for later debugging, then run it +################# +# +# When diagnosing a test failure, it can be really nice to see the +# more important commands that have been run in test setup: openssl, +# podman registry, other complex commands that can give one a boost +# when trying to reproduce problems. This simple wrapper takes a +# command as its arg, echoes it to stdout (with a '$' prefix), +# then runs the command. BATS does not show stdout unless there's +# an error. Use this judiciously. +# +function log_and_run() { + echo "\$ $*" + "$@" +} + ######### # die # Abort with helpful message ######### @@ -276,7 +293,7 @@ start_registry() { fi if ! egrep -q "^$testuser:" $AUTHDIR/htpasswd; then - $PODMAN run --rm --entrypoint htpasswd registry:2 \ + log_and_run $PODMAN run --rm --entrypoint htpasswd registry:2 \ -Bbn $testuser $testpassword >> $AUTHDIR/htpasswd fi @@ -291,7 +308,7 @@ start_registry() { if [[ -n $create_cert ]]; then CERT=$AUTHDIR/domain.crt if [ ! -e $CERT ]; then - openssl req -newkey rsa:4096 -nodes -sha256 \ + log_and_run openssl req -newkey rsa:4096 -nodes -sha256 \ -keyout $AUTHDIR/domain.key -x509 -days 2 \ -out $CERT \ -subj "/C=US/ST=Foo/L=Bar/O=Red Hat, Inc./CN=localhost" @@ -306,15 +323,15 @@ start_registry() { # test the client. (If client sees a matching .key file, it fails) # Thanks to Miloslav Trmac for this hint. 
mkdir -p $TESTDIR/client-auth - cp $CERT $TESTDIR/client-auth/ + log_and_run cp $CERT $TESTDIR/client-auth/ fi - $PODMAN run -d --name $name "${reg_args[@]}" registry:2 + log_and_run $PODMAN run -d --name $name "${reg_args[@]}" registry:2 # Wait for registry to actually come up timeout=10 while [[ $timeout -ge 1 ]]; do - if curl localhost:$port/; then + if echo -n >/dev/tcp/127.0.0.1/$port; then return fi diff --git a/vendor.conf b/vendor.conf index 553a63fe..463680bc 100644 --- a/vendor.conf +++ b/vendor.conf @@ -11,7 +11,7 @@ golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7 github.com/containers/storage v1.12.10 github.com/sirupsen/logrus v1.0.0 -github.com/go-check/check v1 +github.com/go-check/check 788fd78401277ebd861206a03c884797c6ec5541 github.com/stretchr/testify v1.1.3 github.com/davecgh/go-spew v1.1.1 github.com/pmezard/go-difflib 5d4384ee4fb2527b0a1256a821ebfc92f91efefc @@ -47,7 +47,7 @@ github.com/xeipuuv/gojsonschema v1.1.0 go4.org ce4c26f7be8eb27dc77f996b08d286dd80bc4a01 https://github.com/camlistore/go4 github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce # -- end OCI image validation requirements -github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9 +github.com/mtrmac/gpgme v0.1.2 # openshift/origin' k8s dependencies as of OpenShift v1.1.5 k8s.io/client-go kubernetes-1.10.13-beta.0 github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee diff --git a/vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json b/vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json new file mode 100644 index 00000000..ccb4eda0 --- /dev/null +++ b/vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json @@ -0,0 +1,66 @@ +{ + "title": "JSON embedded in an atomic container signature", + "description": "This schema is a supplement to atomic-signature.md in this directory.\n\nConsumers of the JSON MUST use the processing rules documented in atomic-signature.md, especially the requirements for the 'critical' subjobject.\n\nWhenever this schema and atomic-signature.md, or the github.com/containers/image/signature implementation, differ,\nit is the atomic-signature.md document, or the github.com/containers/image/signature implementation, which governs.\n\nUsers are STRONGLY RECOMMENDED to use the github.com/containeres/image/signature implementation instead of writing\ntheir own, ESPECIALLY when consuming signatures, so that the policy.json format can be shared by all image consumers.\n", + "type": "object", + "required": [ + "critical", + "optional" + ], + "additionalProperties": false, + "properties": { + "critical": { + "type": "object", + "required": [ + "type", + "image", + "identity" + ], + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "atomic container signature" + ] + }, + "image": { + "type": "object", + "required": [ + "docker-manifest-digest" + ], + "additionalProperties": false, + "properties": { + "docker-manifest-digest": { + "type": "string" + } + } + }, + "identity": { + "type": "object", + "required": [ + "docker-reference" + ], + "additionalProperties": false, + "properties": { + "docker-reference": { + "type": "string" + } + } + } + } + }, + "optional": { + "type": "object", + "description": "All members are optional, but if they are included, they must be valid.", + "additionalProperties": true, + "properties": { + "creator": { + "type": "string" + }, + 
"timestamp": { + "type": "integer" + } + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/containers/image/docs/containers-certs.d.5.md b/vendor/github.com/containers/image/docs/containers-certs.d.5.md new file mode 100644 index 00000000..ffd7e4bf --- /dev/null +++ b/vendor/github.com/containers/image/docs/containers-certs.d.5.md @@ -0,0 +1,28 @@ +% containers-certs.d(5) + +# NAME +containers-certs.d - Directory for storing custom container-registry TLS configurations + +# DESCRIPTION +A custom TLS configuration for a container registry can be configured by creating a directory under `/etc/containers/certs.d`. +The name of the directory must correspond to the `host:port` of the registry (e.g., `my-registry.com:5000`). + +## Directory Structure +A certs directory can contain one or more files with the following extensions: + +* `*.crt` files with this extensions will be interpreted as CA certificates +* `*.cert` files with this extensions will be interpreted as client certificates +* `*.key` files with this extensions will be interpreted as client keys + +Note that the client certificate-key pair will be selected by the file name (e.g., `client.{cert,key}`). +An examplary setup for a registry running at `my-registry.com:5000` may look as follows: +``` +/etc/containers/certs.d/ <- Certificate directory +└── my-registry.com:5000 <- Hostname:port + ├── client.cert <- Client certificate + ├── client.key <- Client key + └── ca.crt <- Certificate authority that signed the registry certificate +``` + +# HISTORY +Feb 2019, Originally compiled by Valentin Rothberg diff --git a/vendor/github.com/containers/image/docs/containers-policy.json.5.md b/vendor/github.com/containers/image/docs/containers-policy.json.5.md new file mode 100644 index 00000000..2859d81b --- /dev/null +++ b/vendor/github.com/containers/image/docs/containers-policy.json.5.md @@ -0,0 +1,283 @@ +% CONTAINERS-POLICY.JSON(5) policy.json Man Page +% Miloslav Trmač +% September 2016 + +# NAME +containers-policy.json - syntax for the signature verification policy file + +## DESCRIPTION + +Signature verification policy files are used to specify policy, e.g. trusted keys, +applicable when deciding whether to accept an image, or individual signatures of that image, as valid. + +The default policy is stored (unless overridden at compile-time) at `/etc/containers/policy.json`; +applications performing verification may allow using a different policy instead. + +## FORMAT + +The signature verification policy file, usually called `policy.json`, +uses a JSON format. Unlike some other JSON files, its parsing is fairly strict: +unrecognized, duplicated or otherwise invalid fields cause the entire file, +and usually the entire operation, to be rejected. + +The purpose of the policy file is to define a set of *policy requirements* for a container image, +usually depending on its location (where it is being pulled from) or otherwise defined identity. + +Policy requirements can be defined for: + +- An individual *scope* in a *transport*. + The *transport* values are the same as the transport prefixes when pushing/pulling images (e.g. `docker:`, `atomic:`), + and *scope* values are defined by each transport; see below for more details. + + Usually, a scope can be defined to match a single image, and various prefixes of + such a most specific scope define namespaces of matching images. +- A default policy for a single transport, expressed using an empty string as a scope +- A global default policy. 
+ +If multiple policy requirements match a given image, only the requirements from the most specific match apply, +the more general policy requirements definitions are ignored. + +This is expressed in JSON using the top-level syntax +```js +{ + "default": [/* policy requirements: global default */] + "transports": { + transport_name: { + "": [/* policy requirements: default for transport $transport_name */], + scope_1: [/* policy requirements: default for $scope_1 in $transport_name */], + scope_2: [/*…*/] + /*…*/ + }, + transport_name_2: {/*…*/} + /*…*/ + } +} +``` + +The global `default` set of policy requirements is mandatory; all of the other fields +(`transports` itself, any specific transport, the transport-specific default, etc.) are optional. + + +## Supported transports and their scopes + +### `atomic:` + +The `atomic:` transport refers to images in an Atomic Registry. + +Supported scopes use the form _hostname_[`:`_port_][`/`_namespace_[`/`_imagestream_ [`:`_tag_]]], +i.e. either specifying a complete name of a tagged image, or prefix denoting +a host/namespace/image stream. + +*Note:* The _hostname_ and _port_ refer to the Docker registry host and port (the one used +e.g. for `docker pull`), _not_ to the OpenShift API host and port. + +### `dir:` + +The `dir:` transport refers to images stored in local directories. + +Supported scopes are paths of directories (either containing a single image or +subdirectories possibly containing images). + +*Note:* The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored. + +The top-level scope `"/"` is forbidden; use the transport default scope `""`, +for consistency with other transports. + +### `docker:` + +The `docker:` transport refers to images in a registry implementing the "Docker Registry HTTP API V2". + +Scopes matching individual images are named Docker references *in the fully expanded form*, either +using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`). + +More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), +a repository namespace, or a registry host (by only specifying the host name). + +### `oci:` + +The `oci:` transport refers to images in directories compliant with "Open Container Image Layout Specification". + +Supported scopes use the form _directory_`:`_tag_, and _directory_ referring to +a directory containing one or more tags, or any of the parent directories. + +*Note:* See `dir:` above for semantics and restrictions on the directory paths, they apply to `oci:` equivalently. + +### `tarball:` + +The `tarball:` transport refers to tarred up container root filesystems. + +Scopes are ignored. + +## Policy Requirements + +Using the mechanisms above, a set of policy requirements is looked up. The policy requirements +are represented as a JSON array of individual requirement objects. For an image to be accepted, +*all* of the requirements must be satisfied simulatenously. + +The policy requirements can also be used to decide whether an individual signature is accepted (= is signed by a recognized key of a known author); +in that case some requirements may apply only to some signatures, but each signature must be accepted by *at least one* requirement object. 
+ +The following requirement objects are supported: + +### `insecureAcceptAnything` + +A simple requirement with the following syntax + +```json +{"type":"insecureAcceptAnything"} +``` + +This requirement accepts any image (but note that other requirements in the array still apply). + +When deciding to accept an individual signature, this requirement does not have any effect; it does *not* cause the signature to be accepted, though. + +This is useful primarily for policy scopes where no signature verification is required; +because the array of policy requirements must not be empty, this requirement is used +to represent the lack of requirements explicitly. + +### `reject` + +A simple requirement with the following syntax: + +```json +{"type":"reject"} +``` + +This requirement rejects every image, and every signature. + +### `signedBy` + +This requirement requires an image to be signed with an expected identity, or accepts a signature if it is using an expected identity and key. + +```js +{ + "type": "signedBy", + "keyType": "GPGKeys", /* The only currently supported value */ + "keyPath": "/path/to/local/keyring/file", + "keyData": "base64-encoded-keyring-data", + "signedIdentity": identity_requirement +} +``` + + +Exactly one of `keyPath` and `keyData` must be present, containing a GPG keyring of one or more public keys. Only signatures made by these keys are accepted. + +The `signedIdentity` field, a JSON object, specifies what image identity the signature claims about the image. +One of the following alternatives are supported: + +- The identity in the signature must exactly match the image identity. Note that with this, referencing an image by digest (with a signature claiming a _repository_`:`_tag_ identity) will fail. + + ```json + {"type":"matchExact"} + ``` +- If the image identity carries a tag, the identity in the signature must exactly match; + if the image identity uses a digest reference, the identity in the signature must be in the same repository as the image identity (using any tag). + + (Note that with images identified using digest references, the digest from the reference is validated even before signature verification starts.) + + ```json + {"type":"matchRepoDigestOrExact"} + ``` +- The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifing an exact image version. + + ```json + {"type":"matchRepository"} + ``` +- The identity in the signature must exactly match a specified identity. + This is useful e.g. when locally mirroring images signed using their public identity. + + ```js + { + "type": "exactReference", + "dockerReference": docker_reference_value + } + ``` +- The identity in the signature must be in the same repository as a specified identity. + This combines the properties of `matchRepository` and `exactReference`. + + ```js + { + "type": "exactRepository", + "dockerRepository": docker_repository_value + } + ``` + +If the `signedIdentity` field is missing, it is treated as `matchRepoDigestOrExact`. + +*Note*: `matchExact`, `matchRepoDigestOrExact` and `matchRepository` can be only used if a Docker-like image identity is +provided by the transport. In particular, the `dir:` and `oci:` transports can be only +used with `exactReference` or `exactRepository`. + + + +## Examples + +It is *strongly* recommended to set the `default` policy to `reject`, and then +selectively allow individual transports and scopes as desired. 
+ +### A reasonably locked-down system + +(Note that the `/*`…`*/` comments are not valid in JSON, and must not be used in real policies.) + +```js +{ + "default": [{"type": "reject"}], /* Reject anything not explicitly allowed */ + "transports": { + "docker": { + /* Allow installing images from a specific repository namespace, without cryptographic verification. + This namespace includes images like openshift/hello-openshift and openshift/origin. */ + "docker.io/openshift": [{"type": "insecureAcceptAnything"}], + /* Similarly, allow installing the “official” busybox images. Note how the fully expanded + form, with the explicit /library/, must be used. */ + "docker.io/library/busybox": [{"type": "insecureAcceptAnything"}] + /* Other docker: images use the global default policy and are rejected */ + }, + "dir": { + "": [{"type": "insecureAcceptAnything"}] /* Allow any images originating in local directories */ + }, + "atomic": { + /* The common case: using a known key for a repository or set of repositories */ + "hostname:5000/myns/official": [ + { + "type": "signedBy", + "keyType": "GPGKeys", + "keyPath": "/path/to/official-pubkey.gpg" + } + ], + /* A more complex example, for a repository which contains a mirror of a third-party product, + which must be signed-off by local IT */ + "hostname:5000/vendor/product": [ + { /* Require the image to be signed by the original vendor, using the vendor's repository location. */ + "type": "signedBy", + "keyType": "GPGKeys", + "keyPath": "/path/to/vendor-pubkey.gpg", + "signedIdentity": { + "type": "exactRepository", + "dockerRepository": "vendor-hostname/product/repository" + } + }, + { /* Require the image to _also_ be signed by a local reviewer. */ + "type": "signedBy", + "keyType": "GPGKeys", + "keyPath": "/path/to/reviewer-pubkey.gpg" + } + ] + } + } +} +``` + +### Completely disable security, allow all images, do not trust any signatures + +```json +{ + "default": [{"type": "insecureAcceptAnything"}] +} +``` +## SEE ALSO + atomic(1) + +## HISTORY +August 2018, Rename to containers-policy.json(5) by Valentin Rothberg + +September 2016, Originally compiled by Miloslav Trmač diff --git a/vendor/github.com/containers/image/docs/containers-registries.conf.5.md b/vendor/github.com/containers/image/docs/containers-registries.conf.5.md new file mode 100644 index 00000000..6b9abff0 --- /dev/null +++ b/vendor/github.com/containers/image/docs/containers-registries.conf.5.md @@ -0,0 +1,177 @@ +% CONTAINERS-REGISTRIES.CONF(5) System-wide registry configuration file +% Brent Baude +% Aug 2017 + +# NAME +containers-registries.conf - Syntax of System Registry Configuration File + +# DESCRIPTION +The CONTAINERS-REGISTRIES configuration file is a system-wide configuration +file for container image registries. The file format is TOML. + +By default, the configuration file is located at `/etc/containers/registries.conf`. + +# FORMATS + +## VERSION 2 +VERSION 2 is the latest format of the `registries.conf` and is currently in +beta. This means in general VERSION 1 should be used in production environments +for now. + +### GLOBAL SETTINGS + +`unqualified-search-registries` +: An array of _host_[`:`_port_] registries to try when pulling an unqualified image, in order. + +### NAMESPACED `[[registry]]` SETTINGS + +The bulk of the configuration is represented as an array of `[[registry]]` +TOML tables; the settings may therefore differ among different registries +as well as among different namespaces/repositories within a registry. 
+
+#### Choosing a `[[registry]]` TOML table
+
+Given an image name, a single `[[registry]]` TOML table is chosen based on its `prefix` field.
+
+`prefix`
+: A prefix of the user-specified image name, i.e. using one of the following formats:
+  - _host_[`:`_port_]
+  - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]
+  - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]`/`_repo_
+  - _host_[`:`_port_]`/`_namespace_[`/`_namespace_…]`/`_repo_(`:`_tag_|`@`_digest_)
+
+  The user-specified image name must start with the specified `prefix` (and continue
+  with the appropriate separator) for a particular `[[registry]]` TOML table to be
+  considered; (only) the TOML table with the longest match is used.
+
+  As a special case, the `prefix` field can be missing; if so, it defaults to the value
+  of the `location` field (described below).
+
+#### Per-namespace settings
+
+`insecure`
+: `true` or `false`.
+  By default, container runtimes require TLS when retrieving images from a registry.
+  If `insecure` is set to `true`, unencrypted HTTP as well as TLS connections with untrusted
+  certificates are allowed.
+
+`blocked`
+: `true` or `false`.
+  If `true`, pulling images with matching names is forbidden.
+
+#### Remapping and mirroring registries
+
+The user-specified image reference is, primarily, a "logical" image name, always used for naming
+the image. By default, the image reference also directly specifies the registry and repository
+to use, but the following options can be used to redirect the underlying accesses
+to different registry servers or locations (e.g. to support configurations with no access to the
+internet without having to change `Dockerfile`s, or to add redundancy).
+
+`location`
+: Accepts the same format as the `prefix` field, and specifies the physical location
+  of the `prefix`-rooted namespace.
+
+  By default, this is equal to `prefix` (in which case `prefix` can be omitted and the
+  `[[registry]]` TOML table can only specify `location`).
+
+  Example: Given
+  ```
+  prefix = "example.com/foo"
+  location = "internal-registry-for-example.net/bar"
+  ```
+  requests for the image `example.com/foo/myimage:latest` will actually work with the
+  `internal-registry-for-example.net/bar/myimage:latest` image.
+
+`mirror`
+: An array of TOML tables specifying (possibly-partial) mirrors for the
+  `prefix`-rooted namespace.
+
+  The mirrors are attempted in the specified order; the first one that can be
+  contacted and contains the image will be used (and if none of the mirrors contains the image,
+  the primary location specified by the `registry.location` field, or using the unmodified
+  user-specified reference, is tried last).
+
+  Each TOML table in the `mirror` array can contain the following fields, with the same semantics
+  as if specified in the `[[registry]]` TOML table directly:
+  - `location`
+  - `insecure`
+
+`mirror-by-digest-only`
+: `true` or `false`.
+  If `true`, mirrors will only be used during pulling if the image reference includes a digest.
+  Referencing an image by digest ensures that the same image is always used
+  (whereas referencing an image by a tag may cause different registries to return
+  different images if the tag mapping is out of sync).
+
+  Note that if this is `true`, images referenced by a tag will only use the primary
+  registry, failing if that registry is not accessible.
+
+*Note*: Redirection and mirrors are currently processed only when reading images, not when pushing
+to a registry; that may change in the future.
+
+### EXAMPLE
+
+```
+unqualified-search-registries = ["example.com"]
+
+[[registry]]
+prefix = "example.com/foo"
+insecure = false
+blocked = false
+location = "internal-registry-for-example.com/bar"
+
+[[registry.mirror]]
+location = "example-mirror-0.local/mirror-for-foo"
+
+[[registry.mirror]]
+location = "example-mirror-1.local/mirrors/foo"
+insecure = true
+```
+Given the above, a pull of `example.com/foo/image:latest` will try:
+ 1. `example-mirror-0.local/mirror-for-foo/image:latest`
+ 2. `example-mirror-1.local/mirrors/foo/image:latest`
+ 3. `internal-registry-for-example.com/bar/image:latest`
+
+in order, and use the first one that exists.
+
+## VERSION 1
+VERSION 1 can be used as an alternative to VERSION 2, but it does not support
+using registry mirrors, longest-prefix matches, or location rewriting.
+
+The TOML format is used to build a simple list of registries under three
+categories: `registries.search`, `registries.insecure`, and `registries.block`.
+You can list multiple registries using a comma-separated list.
+
+Search registries are used when the caller of a container runtime does not fully specify the
+container image that they want to execute. These registries are prepended to the specified
+container image name until the named image is found at a registry.
+
+Note that the `registries.insecure` list can contain any registry, not just the registries listed
+under search.
+
+The `registries.insecure` and `registries.block` lists have the same meaning as the
+`insecure` and `blocked` fields in VERSION 2.
+
+### EXAMPLE
+The following example configuration defines two searchable registries, one
+insecure registry, and two blocked registries.
+
+```
+[registries.search]
+registries = ['registry1.com', 'registry2.com']
+
+[registries.insecure]
+registries = ['registry3.com']
+
+[registries.block]
+registries = ['registry.untrusted.com', 'registry.unsafe.com']
+```
+
+# HISTORY
+Mar 2019, Added additional configuration format by Sascha Grunert
+
+Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg
+
+Jun 2018, Updated by Tom Sweeney
+
+Aug 2017, Originally compiled by Brent Baude
diff --git a/vendor/github.com/containers/image/docs/containers-registries.d.5.md b/vendor/github.com/containers/image/docs/containers-registries.d.5.md
new file mode 100644
index 00000000..dffe3874
--- /dev/null
+++ b/vendor/github.com/containers/image/docs/containers-registries.d.5.md
@@ -0,0 +1,128 @@
+% CONTAINERS-REGISTRIES.D(5) Registries.d Man Page
+% Miloslav Trmač
+% August 2016
+
+# NAME
+containers-registries.d - Directory for various registry configurations
+
+# DESCRIPTION
+
+The registries configuration directory contains configuration for various registries
+(servers storing remote container images), and for content stored in them,
+so that the configuration does not have to be provided in command-line options over and over for every command,
+and so that it can be shared by all users of containers/image.
+
+By default (unless overridden at compile-time), the registries configuration directory is `/etc/containers/registries.d`;
+applications may allow using a different directory instead.
+
+## Directory Structure
+
+The directory may contain any number of files with the extension `.yaml`,
+each using the YAML format. Other than the mandatory extension, names of the files
+don’t matter.
+
+The contents of these files are merged together; to have a well-defined and easy-to-understand
+behavior, there can be only one configuration section describing a single namespace within a registry
+(in particular there can be at most one `default-docker` section across all files,
+and there can be at most one instance of any key under the `docker` section;
+these sections are documented later).
+
+Thus, it is forbidden to have two conflicting configurations for a single registry or scope,
+and it is also forbidden to split a configuration for a single registry or scope across
+more than one file (even if they are not semantically in conflict).
+
+## Registries, Scopes and Search Order
+
+Each YAML file must contain a “YAML mapping” (key-value pairs). Two top-level keys are defined:
+
+- `default-docker` is the _configuration section_ (as documented below)
+  for registries implementing "Docker Registry HTTP API V2".
+
+  This key is optional.
+
+- `docker` is a mapping, using individual registries implementing "Docker Registry HTTP API V2",
+  or namespaces and individual images within these registries, as keys;
+  the value assigned to any such key is a _configuration section_.
+
+  This key is optional.
+
+  Scopes matching individual images are named Docker references *in the fully expanded form*, either
+  using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
+
+  More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
+  a repository namespace, or a registry host (and a port if it differs from the default).
+
+  Note that if a registry is accessed using a hostname+port configuration, the port-less hostname
+  is _not_ used as parent scope.
+
+When searching for a configuration to apply for an individual container image, only
+the configuration for the most-precisely matching scope is used; configuration using
+more general scopes is ignored. For example, if _any_ configuration exists for
+`docker.io/library/busybox`, the configuration for `docker.io` is ignored
+(even if some element of the configuration is defined for `docker.io` and not for `docker.io/library/busybox`).
+
+## Individual Configuration Sections
+
+A single configuration section is selected for a container image using the process
+described above. The configuration section is a YAML mapping, with the following keys:
+
+- `sigstore-staging` defines a URL of the signature storage, used for editing it (adding or deleting signatures).
+
+  This key is optional; if it is missing, `sigstore` below is used.
+
+- `sigstore` defines a URL of the signature storage.
+  This URL is used for reading existing signatures,
+  and if `sigstore-staging` does not exist, also for adding or removing them.
+
+  This key is optional; if it is missing, no signature storage is defined (no signatures
+  are downloaded along with images; adding new signatures is possible only if `sigstore-staging` is defined).
+
+## Examples
+
+### Using Containers from Various Origins
+
+The following demonstrates how to consume and run images from various registries and namespaces:
+
+```yaml
+docker:
+  registry.database-supplier.com:
+    sigstore: https://sigstore.database-supplier.com
+  distribution.great-middleware.org:
+    sigstore: https://security-team.great-middleware.org/sigstore
+  docker.io/web-framework:
+    sigstore: https://sigstore.web-framework.io:8080
+```
+
+### Developing and Signing Containers, Staging Signatures
+
+For developers in `example.com`:
+
+- Consume most container images using the public servers also used by clients.
+- Use separate signature storage for container images in a namespace corresponding to the developers' department, with a staging storage used before publishing signatures.
+- Craft an individual exception for a single branch a specific developer is working on locally.
+
+```yaml
+docker:
+  registry.example.com:
+    sigstore: https://registry-sigstore.example.com
+  registry.example.com/mydepartment:
+    sigstore: https://sigstore.mydepartment.example.com
+    sigstore-staging: file:///mnt/mydepartment/sigstore-staging
+  registry.example.com/mydepartment/myproject:mybranch:
+    sigstore: http://localhost:4242/sigstore
+    sigstore-staging: file:///home/useraccount/webroot/sigstore
+```
+
+### A Global Default
+
+If a company publishes its products using a different domain, and a different registry hostname, for each of them, it is still possible to use a single signature storage server
+without listing each domain individually. This is expected to rarely happen, usually only for staging new signatures.
+
+```yaml
+default-docker:
+  sigstore-staging: file:///mnt/company/common-sigstore-staging
+```
+
+# AUTHORS
+
+Miloslav Trmač
diff --git a/vendor/github.com/containers/image/docs/containers-signature.5.md b/vendor/github.com/containers/image/docs/containers-signature.5.md
new file mode 100644
index 00000000..5b99e7c4
--- /dev/null
+++ b/vendor/github.com/containers/image/docs/containers-signature.5.md
@@ -0,0 +1,241 @@
+% container-signature(5) Container signature format
+% Miloslav Trmač
+% March 2017
+
+# Container signature format
+
+This document describes the format of container signatures,
+as implemented by the `github.com/containers/image/signature` package.
+
+Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package
+(preferably through the higher-level `signature.PolicyContext` interface)
+without having to care about the details of the format described below.
+This documentation exists primarily for maintainers of the package
+and to allow independent reimplementations.
+
+## High-level overview
+
+The signature provides an end-to-end authenticated claim that a container image
+has been approved by a specific party (e.g. the creator of the image as their work,
+an automated build system as a result of an automated build,
+a company IT department approving the image for production) under a specified _identity_
+(e.g. an OS base image / specific application, with a specific version).
+
+A container signature consists of a cryptographic signature which identifies
+and authenticates who signed the image, and carries as a signed payload a JSON document.
+The JSON document identifies the image being signed, claims a specific identity of the
+image and, if applicable, contains other information about the image.
+
+The signatures do not modify the container image (the layers, configuration, manifest, …);
+e.g.
their presence does not change the manifest digest used to identify the image in +docker/distribution servers; rather, the signatures are associated with an immutable image. +An image can have any number of signatures so signature distribution systems SHOULD support +associating more than one signature with an image. + +## The cryptographic signature + +As distributed, the container signature is a blob which contains a cryptographic signature +in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the +JSON document and a signature of the JSON document; it is not a “detached signature” with +independent blobs containing the JSON document and a cryptographic signature). + +Currently the only defined cryptographic signature format is an OpenPGP signature (RFC 4880), +but others may be added in the future. (The blob does not contain metadata identifying the +cryptographic signature format. It is expected that most formats are sufficiently self-describing +that this is not necessary and the configured expected public key provides another indication +of the expected cryptographic signature format. Such metadata may be added in the future for +newly added cryptographic signature formats, if necessary.) + +Consumers of container signatures SHOULD verify the cryptographic signature +against one or more trusted public keys +(e.g. defined in a [policy.json signature verification policy file](policy.json.md)) +before parsing or processing the JSON payload in _any_ way, +in particular they SHOULD stop processing the container signature +if the cryptographic signature verification fails, without even starting to process the JSON payload. + +(Consumers MAY extract identification of the signing key and other metadata from the cryptographic signature, +and the JSON payload, without verifying the signature, if the purpose is to allow managing the signature blobs, +e.g. to list the authors and image identities of signatures associated with a single container image; +if so, they SHOULD design the output of such processing to minimize the risk of users considering the output trusted +or in any way usable for making policy decisions about the image.) + +### OpenPGP signature verification + +When verifying a cryptographic signature in the OpenPGP format, +the consumer MUST verify at least the following aspects of the signature +(like the `github.com/containers/image/signature` package does): + +- The blob MUST be a “Signed Message” as defined RFC 4880 section 11.3. + (e.g. it MUST NOT be an unsigned “Literal Message”, or any other non-signature format). +- The signature MUST have been made by an expected key trusted for the purpose (and the specific container image). +- The signature MUST be correctly formed and pass the cryptographic validation. +- The signature MUST correctly authenticate the included JSON payload + (in particular, the parsing of the JSON payload MUST NOT start before the complete payload has been cryptographically authenticated). +- The signature MUST NOT be expired. + +The consumer SHOULD have tests for its verification code which verify that signatures failing any of the above are rejected. + +## JSON processing and forward compatibility + +The payload of the cryptographic signature is a JSON document (RFC 7159). +Consumers SHOULD parse it very strictly, +refusing any signature which violates the expected format (e.g. missing members, incorrect member types) +or can be interpreted ambiguously (e.g. a duplicated member in a JSON object). 
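+
+As an illustration only (this is not the actual `github.com/containers/image/signature` parser, and the type and
+function names below are made up), a Go sketch of such strict parsing, assuming the payload layout documented
+later in this file, could reject unrecognized members of the top-level and `critical` objects as follows; note
+that detecting duplicated members would additionally require a token-level scan, which `encoding/json` does not
+perform on its own:
+
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+)
+
+type criticalImage struct {
+	DockerManifestDigest string `json:"docker-manifest-digest"`
+}
+
+type criticalIdentity struct {
+	DockerReference string `json:"docker-reference"`
+}
+
+type critical struct {
+	Type     string           `json:"type"`
+	Image    criticalImage    `json:"image"`
+	Identity criticalIdentity `json:"identity"`
+}
+
+type signaturePayload struct {
+	Critical critical                   `json:"critical"`
+	Optional map[string]json.RawMessage `json:"optional"`
+}
+
+// parsePayload rejects unknown members in the top-level and critical objects;
+// unknown members of "optional" are tolerated, as this document requires.
+func parsePayload(data []byte) (*signaturePayload, error) {
+	dec := json.NewDecoder(bytes.NewReader(data))
+	dec.DisallowUnknownFields()
+	var p signaturePayload
+	if err := dec.Decode(&p); err != nil {
+		return nil, err
+	}
+	if p.Critical.Type != "atomic container signature" {
+		return nil, fmt.Errorf("unrecognized critical.type %q", p.Critical.Type)
+	}
+	return &p, nil
+}
+
+func main() {
+	// A payload with an unexpected member inside "critical" must be rejected.
+	bad := []byte(`{"critical":{"type":"atomic container signature","image":{"docker-manifest-digest":"sha256:aaaa"},"identity":{"docker-reference":"docker.io/library/busybox:latest"},"extra":true},"optional":{}}`)
+	if _, err := parsePayload(bad); err != nil {
+		fmt.Println("rejected:", err)
+	}
+}
+```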
+ +Any violations of the JSON format or of other requirements in this document MAY be accepted if the JSON document can be recognized +to have been created by a known-incorrect implementation (see [`optional.creator`](#optionalcreator) below) +and if the semantics of the invalid document, as created by such an implementation, is clear. + +The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`, +each a JSON object. + +The `critical` object MUST contain a `type` member identifying the document as a container signature +(as defined [below](#criticaltype)) +and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value. + +To ensure forward compatibility (allowing older signature consumers to correctly +accept or reject signatures created at a later date, with possible extensions to this format), +consumers MUST reject the signature if the `critical` object, or _any_ of its subobjects, +contain _any_ member or data value which is unrecognized, unsupported, invalid, or in any other way unexpected. +At a minimum, this includes unrecognized members in a JSON object, or incorrect types of expected members. + +For the same reason, consumers SHOULD accept any members with unrecognized names in the `optional` object, +and MAY accept signatures where the object member is recognized but unsupported, or the value of the member is unsupported. +Consumers still SHOULD reject signatures where a member of an `optional` object is supported but the value is recognized as invalid. + +## JSON data format + +An example of the full format follows, with detailed description below. +To reiterate, consumers of the signature SHOULD perform successful cryptographic verification, +and MUST reject unexpected data in the `critical` object, or in the top-level object, as described above. + +```json +{ + "critical": { + "type": "atomic container signature", + "image": { + "docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e" + }, + "identity": { + "docker-reference": "docker.io/library/busybox:latest" + } + }, + "optional": { + "creator": "some software package v1.0.1-35", + "timestamp": 1483228800, + } +} +``` + +### `critical` + +This MUST be a JSON object which contains data critical to correctly evaluating the validity of a signature. + +Consumers MUST reject any signature where the `critical` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data. + +### `critical.type` + +This MUST be a string with a string value exactly equal to `atomic container signature` (three words, including the spaces). + +Signature consumers MUST reject signatures which do not have this member or this member does not have exactly the expected value. + +(The consumers MAY support signatures with a different value of the `type` member, if any is defined in the future; +if so, the rest of the JSON document is interpreted according to rules defining that value of `critical.type`, +not by this document.) + +### `critical.image` + +This MUST be a JSON object which identifies the container image this signature applies to. + +Consumers MUST reject any signature where the `critical.image` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data. 
+ +(Currently only the `docker-manifest-digest` way of identifying a container image is defined; +alternatives to this may be defined in the future, +but existing consumers are required to reject signatures which use formats they do not support.) + +### `critical.image.docker-manifest-digest` + +This MUST be a JSON string, in the `github.com/opencontainers/go-digest.Digest` string format. + +The value of this member MUST match the manifest of the signed container image, as implemented in the docker/distribution manifest addressing system. + +The consumer of the signature SHOULD verify the manifest digest against a fully verified signature before processing the contents of the image manifest in any other way +(e.g. parsing the manifest further or downloading layers of the image). + +Implementation notes: +* A single container image manifest may have several valid manifest digest values, using different algorithms. +* For “signed” [docker/distribution schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) manifests, +the manifest digest applies to the payload of the JSON web signature, not to the raw manifest blob. + +### `critical.identity` + +This MUST be a JSON object which identifies the claimed identity of the image (usually the purpose of the image, or the application, along with a version information), +as asserted by the author of the signature. + +Consumers MUST reject any signature where the `critical.identity` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data. + +(Currently only the `docker-reference` way of claiming an image identity/purpose is defined; +alternatives to this may be defined in the future, +but existing consumers are required to reject signatures which use formats they do not support.) + +### `critical.identity.docker-reference` + +This MUST be a JSON string, in the `github.com/docker/distribution/reference` string format, +and using the same normalization semantics (where e.g. `busybox:latest` is equivalent to `docker.io/library/busybox:latest`). +If the normalization semantics allows multiple string representations of the claimed identity with equivalent meaning, +the `critical.identity.docker-reference` member SHOULD use the fully explicit form (including the full host name and namespaces). + +The value of this member MUST match the image identity/purpose expected by the consumer of the image signature and the image +(again, accounting for the `docker/distribution/reference` normalization semantics). + +In the most common case, this means that the `critical.identity.docker-reference` value must be equal to the docker/distribution reference used to refer to or download the image. +However, depending on the specific application, users or system administrators may accept less specific matches +(e.g. ignoring the tag value in the signature when pulling the `:latest` tag or when referencing an image by digest), +or they may require `critical.identity.docker-reference` values with a completely different namespace to the reference used to refer to/download the image +(e.g. requiring a `critical.identity.docker-reference` value which identifies the image as coming from a supplier when fetching it from a company-internal mirror of approved images). +The software performing this verification SHOULD allow the users to define such a policy using the [policy.json signature verification policy file format](policy.json.md). 
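+
+For example, the equivalence of the short and fully explicit forms mentioned above can be observed with the
+`github.com/docker/distribution/reference` package itself; the following Go fragment is an illustration only
+(error handling elided), assuming that package's `ParseNormalizedNamed` helper, and is not part of this
+library's policy evaluation code:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/docker/distribution/reference"
+)
+
+func main() {
+	// Both spellings normalize to the same fully explicit reference,
+	// which is the form critical.identity.docker-reference SHOULD use.
+	short, _ := reference.ParseNormalizedNamed("busybox:latest")
+	long, _ := reference.ParseNormalizedNamed("docker.io/library/busybox:latest")
+
+	fmt.Println(short.String())                  // docker.io/library/busybox:latest
+	fmt.Println(short.String() == long.String()) // true
+}
+```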
+ +The `critical.identity.docker-reference` value SHOULD contain either a tag or digest; +in most cases, it SHOULD use a tag rather than a digest. (See also the default [`matchRepoDigestOrExact` matching semantics in `policy.json`](policy.json.md#signedby).) + +### `optional` + +This MUST be a JSON object. + +Consumers SHOULD accept any members with unrecognized names in the `optional` object, +and MAY accept a signature where the object member is recognized but unsupported, or the value of the member is valid but unsupported. +Consumers still SHOULD reject any signature where a member of an `optional` object is supported but the value is recognized as invalid. + +### `optional.creator` + +If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature. + +The contents of this string is not defined in detail; however each implementation creating container signatures: + +- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number) +- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number) + changes whenever the format or semantics of the generated signature changes in any way; + it SHOULD not be possible for two implementations which use a different format or semantics to have the same `optional.creator` value +- SHOULD use a format which is reasonably easy to parse in software (perhaps using a regexp), + and which makes it easy enough to recognize a range of versions of a specific implementation + (e.g. the version of the implementation SHOULD NOT be only a git hash, because they don’t have an easily defined ordering; + the string should contain a version number, or at least a date of the commit). + +Consumers of container signatures MAY recognize specific values or sets of values of `optional.creator` +(perhaps augmented with `optional.timestamp`), +and MAY change their processing of the signature based on these values +(usually to acommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively), +as long as the semantics of the invalid document, as created by such an implementation, is clear. + +If consumers of signatures do change their behavior based on the `optional.creator` value, +they SHOULD take care that the way they process the signatures is not inconsistent with +strictly validating signature consumers. +(I.e. it is acceptable for a consumer to accept a signature based on a specific `optional.creator` value +if other implementations would completely reject the signature, +but it would be very undesirable for the two kinds of implementations to accept the signature in different +and inconsistent situations.) + +### `optional.timestamp` + +If present, this MUST be a JSON number, which is representable as a 64-bit integer, and identifies the time when the signature was created +as the number of seconds since the UNIX epoch (Jan 1 1970 00:00 UTC). 
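+
+As a trivial illustration (using only the Go standard library, not this package), the value `1483228800` used in
+the example payload above denotes:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+func main() {
+	// optional.timestamp counts seconds since the UNIX epoch.
+	fmt.Println(time.Unix(1483228800, 0).UTC()) // 2017-01-01 00:00:00 +0000 UTC
+}
+```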
diff --git a/vendor/github.com/containers/image/docs/containers-transports.5.md b/vendor/github.com/containers/image/docs/containers-transports.5.md new file mode 100644 index 00000000..e9d3b9c7 --- /dev/null +++ b/vendor/github.com/containers/image/docs/containers-transports.5.md @@ -0,0 +1,109 @@ +% CONTAINERS-TRANSPORTS(5) Containers Transports Man Page +% Valentin Rothberg +% April 2019 + +## NAME + +containers-transports - description of supported transports for copying and storing container images + +## DESCRIPTION + +Tools which use the containers/image library, including skopeo(1), buildah(1), podman(1), all share a common syntax for referring to container images in various locations. +The general form of the syntax is _transport:details_, where details are dependent on the specified transport, which are documented below. + +### **containers-storage:** [storage-specifier]{image-id|docker-reference[@image-id]} + +An image located in a local containers storage. +The format of _docker-reference_ is described in detail in the **docker** transport. + +The _storage-specifier_ allows for referencing storage locations on the file system and has the format `[[driver@]root[+run-root][:options]]` where the optional `driver` refers to the storage driver (e.g., overlay or btrfs) and where `root` is an absolute path to the storage's root directory. +The optional `run-root` can be used to specify the run directory of the storage where all temporary writable content is stored. +The optional `options` are a comma-separated list of driver-specific options. +Please refer to containers-storage.conf(5) for further information on the drivers and supported options. + +### **dir:**_path_ + +An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. +This is a non-standardized format, primarily useful for debugging or noninvasive container inspection. + +### **docker://**_docker-reference_ + +An image in a registry implementing the "Docker Registry HTTP API V2". +By default, uses the authorization state in `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using podman-login(1). +If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using docker-login(1). +The containers-registries.conf(5) further allows for configuring various settings of a registry. + +Note that a _docker-reference_ has the following format: `name[:tag|@digest]`. +While the docker transport does not support both a tag and a digest at the same time some formats like containers-storage do. +Digests can also be used in an image destination as long as the manifest matches the provided digest. +The digest of images can be explored with skopeo-inspect(1). +If `name` does not contain a slash, it is treated as `docker.io/library/name`. +Otherwise, the component before the first slash is checked if it is recognized as a `hostname[:port]` (i.e., it contains either a . or a :, or the component is exactly localhost). +If the first component of name is not recognized as a `hostname[:port]`, `name` is treated as `docker.io/name`. + +### **docker-archive:**_path[:docker-reference]_ + +An image is stored in the docker-save(1) formatted file. +_docker-reference_ is only used when creating such a file, and it must not contain a digest. +It is further possible to copy data to stdin by specifying `docker-archive:/dev/stdin` but note that the used file must be seekable. 
+
+### **docker-daemon:**_docker-reference|algo:digest_
+
+An image stored in the docker daemon's internal storage.
+The image must be specified as a _docker-reference_ or in an alternative _algo:digest_ format when being used as an image source.
+The _algo:digest_ refers to the image ID reported by docker-inspect(1).
+
+### **oci:**_path[:tag]_
+
+An image compliant with the "Open Container Image Layout Specification" at _path_.
+Using a _tag_ is optional and allows for storing multiple images at the same _path_.
+
+### **oci-archive:**_path[:tag]_
+
+An image compliant with the "Open Container Image Layout Specification" stored as a tar(1) archive at _path_.
+
+### **ostree:**_docker-reference[@/absolute/repo/path]_
+
+An image in the local ostree(1) repository.
+_/absolute/repo/path_ defaults to _/ostree/repo_.
+
+## Examples
+
+The following examples demonstrate how some of the containers transports can be used.
+The examples use skopeo-copy(1) for copying container images.
+
+**Copying an image from one registry to another**:
+```
+$ skopeo copy docker://docker.io/library/alpine:latest docker://localhost:5000/alpine:latest
+```
+
+**Copying an image from a running Docker daemon to a directory in the OCI layout**:
+```
+$ mkdir alpine-oci
+$ skopeo copy docker-daemon:alpine:latest oci:alpine-oci
+$ tree alpine-oci
+alpine-oci/
+├── blobs
+│   └── sha256
+│       ├── 83ef92b73cf4595aa7fe214ec6747228283d585f373d8f6bc08d66bebab531b7
+│       ├── 9a6259e911dcd0a53535a25a9760ad8f2eded3528e0ad5604c4488624795cecc
+│       └── ff8df268d29ccbe81cdf0a173076dcfbbea4bb2b6df1dd26766a73cb7b4ae6f7
+├── index.json
+└── oci-layout
+
+2 directories, 5 files
+```
+
+**Copying an image from a registry to the local storage**:
+```
+$ skopeo copy docker://docker.io/library/alpine:latest containers-storage:alpine:latest
+```
+
+## SEE ALSO
+
+docker-login(1), docker-save(1), ostree(1), podman-login(1), skopeo-copy(1), skopeo-inspect(1), tar(1), containers-registries.conf(5), containers-storage.conf(5)
+
+## AUTHORS
+
+Miloslav Trmač
+Valentin Rothberg
diff --git a/vendor/github.com/containers/image/docs/signature-protocols.md b/vendor/github.com/containers/image/docs/signature-protocols.md
new file mode 100644
index 00000000..ade23228
--- /dev/null
+++ b/vendor/github.com/containers/image/docs/signature-protocols.md
@@ -0,0 +1,136 @@
+# Signature access protocols
+
+The `github.com/containers/image` library supports signatures implemented as blobs “attached to” an image.
+Some image transports (local storage formats and remote protocols) implement these signatures natively
+or trivially; for others, the protocol extensions described below are necessary.
+
+## docker/distribution registries—separate storage
+
+### Usage
+
+Any existing docker/distribution registry, whether or not it natively supports signatures,
+can be augmented with separate signature storage by configuring a signature storage URL in [`registries.d`](registries.d.md).
+`registries.d` can be configured to use one storage URL for a whole docker/distribution server,
+or also separate URLs for smaller namespaces or individual repositories within the server
+(which e.g. allows image authors to manage their own signature storage while publishing
+the images on the public `docker.io` server).
+
+The signature storage URL defines a root of a path hierarchy.
+It can be either a `file:///…` URL, pointing to a local directory structure,
+or a `http`/`https` URL, pointing to a remote server.
+`file:///` signature storage can be both read and written, `http`/`https` only supports reading. + +The same path hierarchy is used in both cases, so the HTTP/HTTPS server can be +a simple static web server serving a directory structure created by writing to a `file:///` signature storage. +(This of course does not prevent other server implementations, +e.g. a HTTP server reading signatures from a database.) + +The usual workflow for producing and distributing images using the separate storage mechanism +is to configure the repository in `registries.d` with `sigstore-staging` URL pointing to a private +`file:///` staging area, and a `sigstore` URL pointing to a public web server. +To publish an image, the image author would sign the image as necessary (e.g. using `skopeo copy`), +and then copy the created directory structure from the `file:///` staging area +to a subdirectory of a webroot of the public web server so that they are accessible using the public `sigstore` URL. +The author would also instruct consumers of the image to, or provide a `registries.d` configuration file to, +set up a `sigstore` URL pointing to the public web server. + +### Path structure + +Given a _base_ signature storage URL configured in `registries.d` as mentioned above, +and a container image stored in a docker/distribution registry using the _fully-expanded_ name +_hostname_`/`_namespaces_`/`_name_{`@`_digest_,`:`_tag_} (e.g. for `docker.io/library/busybox:latest`, +_namespaces_ is `library`, even if the user refers to the image using the shorter syntax as `busybox:latest`), +signatures are accessed using URLs of the form +> _base_`/`_namespaces_`/`_name_`@`_digest-algo_`=`_digest-value_`/signature-`_index_ + +where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing the relevant image manifest +(i.e. even if the user referenced the image using a tag, +the signature storage is always disambiguated using digest references). +Note that in the URLs used for signatures, +_digest-algo_ and _digest-value_ are separated using the `=` character, +not `:` like when acessing the manifest using the docker/distribution API. + +Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1. +Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1, +and continue reading signatures and increasing _index_ as long as signatures with these _index_ values exist. +Similarly, to add one more signature to an image, find the first _index_ which does not exist, and +then store the new signature using that _index_ value. + +There is no way to list existing signatures other than iterating through the successive _index_ values, +and no way to download all of the signatures at once. 
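+
+The read side of this convention is small enough to sketch directly. The following Go fragment is an
+illustration only, not part of this library; the `fetchSignatures` helper and the URL in `main` are made up.
+It collects the signatures for one image from a static sigstore by increasing _index_ until a lookup returns 404:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+// fetchSignatures downloads base+"/signature-1", "/signature-2", … until a
+// request returns 404, following the index rule described above.
+func fetchSignatures(base string) ([][]byte, error) {
+	var sigs [][]byte
+	for i := 1; ; i++ {
+		resp, err := http.Get(fmt.Sprintf("%s/signature-%d", base, i))
+		if err != nil {
+			return nil, err
+		}
+		if resp.StatusCode == http.StatusNotFound {
+			resp.Body.Close()
+			return sigs, nil // no more signatures for this image
+		}
+		if resp.StatusCode != http.StatusOK {
+			resp.Body.Close()
+			return nil, fmt.Errorf("unexpected status %s for signature-%d", resp.Status, i)
+		}
+		blob, err := io.ReadAll(resp.Body)
+		resp.Body.Close()
+		if err != nil {
+			return nil, err
+		}
+		sigs = append(sigs, blob)
+	}
+}
+
+func main() {
+	// Hypothetical digest-based base URL of the form described in this section.
+	base := "https://example.com/sigstore/library/busybox@sha256=0000000000000000000000000000000000000000000000000000000000000000"
+	sigs, err := fetchSignatures(base)
+	if err != nil {
+		fmt.Println("error:", err)
+		return
+	}
+	fmt.Printf("found %d signature(s)\n", len(sigs))
+}
+```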
+
+### Examples
+
+For a docker/distribution image available as `busybox@sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e`
+(or as `busybox:latest` if the `latest` tag points to a manifest with the same digest),
+and with a `registries.d` configuration specifying a `sigstore` URL `https://example.com/sigstore` for the same image,
+the following URLs would be accessed to download all signatures:
+> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-1`
+> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-2`
+> - …
+
+For a docker/distribution image available as `example.com/ns1/ns2/ns3/repo@somedigest:digestvalue` and the same
+`sigstore` URL, the signatures would be available at
+> `https://example.com/sigstore/ns1/ns2/ns3/repo@somedigest=digestvalue/signature-1`
+
+and so on.
+
+## (OpenShift) docker/distribution API extension
+
+As of https://github.com/openshift/origin/pull/12504/ , the OpenShift-embedded registry also provides
+an extension of the docker/distribution API which allows simpler access to the signatures,
+using only the docker/distribution API endpoint.
+
+This API is not inherently OpenShift-specific (e.g. the client does not need to know the OpenShift API endpoint,
+and credentials sufficient to access the docker/distribution API server are sufficient to access signatures as well),
+and it is the preferred way to implement signature storage in registries.
+
+See https://github.com/openshift/openshift-docs/pull/3556 for the upstream documentation of the API.
+
+To read the signature, any user with access to an image can use the `/extensions/v2/…/signatures/…`
+path to read an array of signatures. Use only the signature objects
+which have `version` equal to `2`, `type` equal to `atomic`, and read the signature from `content`;
+ignore the other fields of the signature object.
+
+To add a single signature, `PUT` a new object with `version` set to `2`, `type` set to `atomic`,
+and `content` set to the signature. Also set `name` to a unique name with the form
+_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (also used in the URL),
+and _per-image-name_ is any unique identifier.
+
+To add more than one signature, add them one at a time. This API does not allow deleting signatures.
+
+Note that because signatures are stored within the cluster-wide image objects,
+i.e. different namespaces can not associate different sets of signatures to the same image,
+updating signatures requires cluster-wide access to the `imagesignatures` resource
+(by default available to the `system:image-signer` role).
+
+## OpenShift-embedded registries
+
+The OpenShift-embedded registry implements the ordinary docker/distribution API,
+and it also exposes images through the OpenShift REST API (available through the “API master” servers).
+
+Note: OpenShift versions 1.5 and later support the above-described [docker/distribution API extension](#openshift-dockerdistribution-api-extension),
+which is easier to set up and should usually be preferred.
+Continue reading for details on using older versions of OpenShift.
+
+As of https://github.com/openshift/origin/pull/9181,
+signatures are exposed through the OpenShift API
+(i.e. to access the complete image, it is necessary to use both APIs,
+in particular to know the URLs for both the docker/distribution and the OpenShift API master endpoints).
+
+To read the signature, any user with access to an image can use the `imagestreamimages` namespaced
+resource to read an `Image` object and its `Signatures` array. Use only the `ImageSignature` objects
+which have `Type` equal to `atomic`, and read the signature from `Content`; ignore the other fields of
+the `ImageSignature` object.
+
+To add or remove signatures, use the cluster-wide (non-namespaced) `imagesignatures` resource,
+with `Type` set to `atomic` and `Content` set to the signature. Signature names must have the form
+_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (OpenShift “image name”),
+and _per-image-name_ is any unique identifier.
+
+Note that because signatures are stored within the cluster-wide image objects,
+i.e. different namespaces can not associate different sets of signatures to the same image,
+updating signatures requires cluster-wide access to the `imagesignatures` resource
+(by default available to the `system:image-signer` role),
+and deleting signatures is strongly discouraged
+(it deletes the signature from all namespaces which contain the same image).
diff --git a/vendor/github.com/containers/image/registries.conf b/vendor/github.com/containers/image/registries.conf
new file mode 100644
index 00000000..f6735fb9
--- /dev/null
+++ b/vendor/github.com/containers/image/registries.conf
@@ -0,0 +1,82 @@
+# For more information on this configuration file, see containers-registries.conf(5).
+#
+# There are multiple versions of the configuration syntax available, where the
+# second iteration is backwards compatible with the first one. Mixing up both
+# formats will result in a runtime error.
+#
+# The initial configuration format looks like this:
+#
+# Registries to search for images that are not fully-qualified.
+# i.e. foobar.com/my_image:latest vs my_image:latest
[registries.search]
registries = []

+# Registries that do not use TLS when pulling images or use self-signed
+# certificates.
+[registries.insecure]
+registries = []
+
+# Blocked Registries: blocks the `docker daemon` from pulling from the blocked registry. If you specify
+# "*", then the docker daemon will only be allowed to pull from registries listed above in the search
+# registries. Blocked Registries is deprecated because other container runtimes and tools will not use it.
+# It is recommended that you use the trust policy file /etc/containers/policy.json to control which
+# registries you want to allow users to pull and push from. policy.json gives greater flexibility, and
+# supports all container runtimes and tools including the docker daemon, cri-o, buildah ...
+# The atomic CLI `atomic trust` can be used to easily configure the policy.json file.
+[registries.block]
+registries = []
+
+# The second version of the configuration format allows specifying registry
+# mirrors:
+#
+# # An array of host[:port] registries to try when pulling an unqualified image, in order.
+# unqualified-search-registries = ["example.com"]
+#
+# [[registry]]
+# # The "prefix" field is used to choose the relevant [[registry]] TOML table;
+# # (only) the TOML table with the longest match for the input image name
+# # (taking into account namespace/repo/tag/digest separators) is used.
+# #
+# # If the prefix field is missing, it defaults to the same value as the "location" field.
+# prefix = "example.com/foo"
+#
+# # If true, unencrypted HTTP as well as TLS connections with untrusted
+# # certificates are allowed.
+# insecure = false
+#
+# # If true, pulling images with matching names is forbidden.
+# blocked = false
+#
+# # The physical location of the "prefix"-rooted namespace.
+# #
+# # By default, this is equal to "prefix" (in which case "prefix" can be omitted
+# # and the [[registry]] TOML table can only specify "location").
+# #
+# # Example: Given
+# #   prefix = "example.com/foo"
+# #   location = "internal-registry-for-example.net/bar"
+# # requests for the image example.com/foo/myimage:latest will actually work with the
+# # internal-registry-for-example.net/bar/myimage:latest image.
+# location = "internal-registry-for-example.net/bar"
+#
+# # (Possibly-partial) mirrors for the "prefix"-rooted namespace.
+# #
+# # The mirrors are attempted in the specified order; the first one that can be
+# # contacted and contains the image will be used (and if none of the mirrors contains the image,
+# # the primary location specified by the "registry.location" field, or using the unmodified
+# # user-specified reference, is tried last).
+# #
+# # Each TOML table in the "mirror" array can contain the following fields, with the same semantics
+# # as if specified in the [[registry]] TOML table directly:
+# # - location
+# # - insecure
+# [[registry.mirror]]
+# location = "example-mirror-0.local/mirror-for-foo"
+# [[registry.mirror]]
+# location = "example-mirror-1.local/mirrors/foo"
+# insecure = true
+# # Given the above, a pull of example.com/foo/image:latest will try:
+# #     1. example-mirror-0.local/mirror-for-foo/image:latest
+# #     2. example-mirror-1.local/mirrors/foo/image:latest
+# #     3. internal-registry-for-example.net/bar/image:latest
+# # in order, and use the first one that exists.
diff --git a/vendor/github.com/mtrmac/gpgme/data.go b/vendor/github.com/mtrmac/gpgme/data.go
index eebc9726..eee32c03 100644
--- a/vendor/github.com/mtrmac/gpgme/data.go
+++ b/vendor/github.com/mtrmac/gpgme/data.go
@@ -50,25 +50,25 @@ func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
 }
 
 //export gogpgme_seekfunc
-func gogpgme_seekfunc(handle unsafe.Pointer, offset C.off_t, whence C.int) C.off_t {
+func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) C.gpgme_off_t {
 	d := callbackLookup(uintptr(handle)).(*Data)
 	n, err := d.s.Seek(int64(offset), int(whence))
 	if err != nil {
 		C.gpgme_err_set_errno(C.EIO)
 		return -1
 	}
-	return C.off_t(n)
+	return C.gpgme_off_t(n)
 }
 
 // The Data buffer used to communicate with GPGME
 type Data struct {
-	dh  C.gpgme_data_t
+	dh  C.gpgme_data_t // WARNING: Call runtime.KeepAlive(d) after ANY passing of d.dh to C
 	buf []byte
 	cbs C.struct_gpgme_data_cbs
 	r   io.Reader
 	w   io.Writer
 	s   io.Seeker
-	cbc uintptr
+	cbc uintptr // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh)
 }
 
 func newData() *Data {
@@ -154,12 +154,14 @@ func (d *Data) Close() error {
 		callbackDelete(d.cbc)
 	}
 	_, err := C.gpgme_data_release(d.dh)
+	runtime.KeepAlive(d)
 	d.dh = nil
 	return err
 }
 
 func (d *Data) Write(p []byte) (int, error) {
 	n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
+	runtime.KeepAlive(d)
 	if err != nil {
 		return 0, err
 	}
@@ -171,6 +173,7 @@ func (d *Data) Write(p []byte) (int, error) {
 
 func (d *Data) Read(p []byte) (int, error) {
 	n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
+	runtime.KeepAlive(d)
 	if err != nil {
 		return 0, err
 	}
@@ -181,11 +184,14 @@ func (d *Data) Read(p []byte) (int, error) {
 }
 
 func (d *Data) Seek(offset int64, whence int) (int64, error) {
-	n, err :=
C.gpgme_data_seek(d.dh, C.off_t(offset), C.int(whence)) + n, err := C.gogpgme_data_seek(d.dh, C.gpgme_off_t(offset), C.int(whence)) + runtime.KeepAlive(d) return int64(n), err } // Name returns the associated filename if any func (d *Data) Name() string { - return C.GoString(C.gpgme_data_get_file_name(d.dh)) + res := C.GoString(C.gpgme_data_get_file_name(d.dh)) + runtime.KeepAlive(d) + return res } diff --git a/vendor/github.com/mtrmac/gpgme/go.mod b/vendor/github.com/mtrmac/gpgme/go.mod new file mode 100644 index 00000000..3dd09c9f --- /dev/null +++ b/vendor/github.com/mtrmac/gpgme/go.mod @@ -0,0 +1,3 @@ +module github.com/mtrmac/gpgme + +go 1.11 diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/github.com/mtrmac/gpgme/go_gpgme.c index b887574e..00da3ab3 100644 --- a/vendor/github.com/mtrmac/gpgme/go_gpgme.c +++ b/vendor/github.com/mtrmac/gpgme/go_gpgme.c @@ -8,6 +8,28 @@ void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintpt gpgme_set_passphrase_cb(ctx, cb, (void *)handle); } +gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) { + return gpgme_data_seek(dh, offset, whence); +} + +gpgme_error_t gogpgme_op_assuan_transact_ext( + gpgme_ctx_t ctx, + char* cmd, + uintptr_t data_h, + uintptr_t inquiry_h, + uintptr_t status_h, + gpgme_error_t *operr + ){ + return gpgme_op_assuan_transact_ext( + ctx, + cmd, + (gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, (void *)data_h, + (gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, (void *)inquiry_h, + (gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, (void *)status_h, + operr + ); +} + unsigned int key_revoked(gpgme_key_t k) { return k->revoked; } diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/github.com/mtrmac/gpgme/go_gpgme.h index a3678b12..d4826ab3 100644 --- a/vendor/github.com/mtrmac/gpgme/go_gpgme.h +++ b/vendor/github.com/mtrmac/gpgme/go_gpgme.h @@ -6,12 +6,24 @@ #include +/* GPGME_VERSION_NUMBER was introduced in 1.4.0 */ +#if !defined(GPGME_VERSION_NUMBER) || GPGME_VERSION_NUMBER < 0x010402 +typedef off_t gpgme_off_t; /* Introduced in 1.4.2 */ +#endif + extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size); extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size); extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence); extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd); extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle); extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle); +extern gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence); + +extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, uintptr_t data_h, uintptr_t inquiry_h , uintptr_t status_h, gpgme_error_t *operr); + +extern gpgme_error_t gogpgme_assuan_data_callback(void *opaque, void* data, size_t datalen ); +extern gpgme_error_t gogpgme_assuan_inquiry_callback(void *opaque, char* name, char* args); +extern gpgme_error_t gogpgme_assuan_status_callback(void *opaque, char* status, char* args); extern unsigned int key_revoked(gpgme_key_t k); extern unsigned int key_expired(gpgme_key_t k); diff --git a/vendor/github.com/mtrmac/gpgme/gpgme.go b/vendor/github.com/mtrmac/gpgme/gpgme.go index 20aad737..c19b9aeb 100644 --- a/vendor/github.com/mtrmac/gpgme/gpgme.go +++ b/vendor/github.com/mtrmac/gpgme/gpgme.go @@ -7,7 +7,6 
@@ package gpgme // #include // #include "go_gpgme.h" import "C" - import ( "fmt" "io" @@ -48,9 +47,8 @@ const ( ProtocolAssuan Protocol = C.GPGME_PROTOCOL_ASSUAN ProtocolG13 Protocol = C.GPGME_PROTOCOL_G13 ProtocolUIServer Protocol = C.GPGME_PROTOCOL_UISERVER - // ProtocolSpawn Protocol = C.GPGME_PROTOCOL_SPAWN // Unavailable in 1.4.3 - ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT - ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN + ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT + ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN ) type PinEntryMode int @@ -70,7 +68,6 @@ const ( EncryptNoEncryptTo EncryptFlag = C.GPGME_ENCRYPT_NO_ENCRYPT_TO EncryptPrepare EncryptFlag = C.GPGME_ENCRYPT_PREPARE EncryptExceptSign EncryptFlag = C.GPGME_ENCRYPT_EXPECT_SIGN - // EncryptNoCompress EncryptFlag = C.GPGME_ENCRYPT_NO_COMPRESS // Unavailable in 1.4.3 ) type HashAlgo int @@ -84,7 +81,6 @@ const ( KeyListModeExtern KeyListMode = C.GPGME_KEYLIST_MODE_EXTERN KeyListModeSigs KeyListMode = C.GPGME_KEYLIST_MODE_SIGS KeyListModeSigNotations KeyListMode = C.GPGME_KEYLIST_MODE_SIG_NOTATIONS - // KeyListModeWithSecret KeyListMode = C.GPGME_KEYLIST_MODE_WITH_SECRET // Unavailable in 1.4.3 KeyListModeEphemeral KeyListMode = C.GPGME_KEYLIST_MODE_EPHEMERAL KeyListModeModeValidate KeyListMode = C.GPGME_KEYLIST_MODE_VALIDATE ) @@ -168,39 +164,60 @@ func EngineCheckVersion(p Protocol) error { } type EngineInfo struct { - info C.gpgme_engine_info_t + next *EngineInfo + protocol Protocol + fileName string + homeDir string + version string + requiredVersion string +} + +func copyEngineInfo(info C.gpgme_engine_info_t) *EngineInfo { + res := &EngineInfo{ + next: nil, + protocol: Protocol(info.protocol), + fileName: C.GoString(info.file_name), + homeDir: C.GoString(info.home_dir), + version: C.GoString(info.version), + requiredVersion: C.GoString(info.req_version), + } + if info.next != nil { + res.next = copyEngineInfo(info.next) + } + return res } func (e *EngineInfo) Next() *EngineInfo { - if e.info.next == nil { - return nil - } - return &EngineInfo{info: e.info.next} + return e.next } func (e *EngineInfo) Protocol() Protocol { - return Protocol(e.info.protocol) + return e.protocol } func (e *EngineInfo) FileName() string { - return C.GoString(e.info.file_name) + return e.fileName } func (e *EngineInfo) Version() string { - return C.GoString(e.info.version) + return e.version } func (e *EngineInfo) RequiredVersion() string { - return C.GoString(e.info.req_version) + return e.requiredVersion } func (e *EngineInfo) HomeDir() string { - return C.GoString(e.info.home_dir) + return e.homeDir } func GetEngineInfo() (*EngineInfo, error) { - info := &EngineInfo{} - return info, handleError(C.gpgme_get_engine_info(&info.info)) + var cInfo C.gpgme_engine_info_t + err := handleError(C.gpgme_get_engine_info(&cInfo)) + if err != nil { + return nil, err + } + return copyEngineInfo(cInfo), nil // It is up to the caller not to invalidate cInfo concurrently until this is done. 
} func SetEngineInfo(proto Protocol, fileName, homeDir string) error { @@ -261,9 +278,9 @@ type Context struct { KeyError error callback Callback - cbc uintptr + cbc uintptr // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx) - ctx C.gpgme_ctx_t + ctx C.gpgme_ctx_t // WARNING: Call runtime.KeepAlive(c) after ANY passing of c.ctx to C } func New() (*Context, error) { @@ -281,49 +298,68 @@ func (c *Context) Release() { callbackDelete(c.cbc) } C.gpgme_release(c.ctx) + runtime.KeepAlive(c) c.ctx = nil } func (c *Context) SetArmor(yes bool) { C.gpgme_set_armor(c.ctx, cbool(yes)) + runtime.KeepAlive(c) } func (c *Context) Armor() bool { - return C.gpgme_get_armor(c.ctx) != 0 + res := C.gpgme_get_armor(c.ctx) != 0 + runtime.KeepAlive(c) + return res } func (c *Context) SetTextMode(yes bool) { C.gpgme_set_textmode(c.ctx, cbool(yes)) + runtime.KeepAlive(c) } func (c *Context) TextMode() bool { - return C.gpgme_get_textmode(c.ctx) != 0 + res := C.gpgme_get_textmode(c.ctx) != 0 + runtime.KeepAlive(c) + return res } func (c *Context) SetProtocol(p Protocol) error { - return handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p))) + err := handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p))) + runtime.KeepAlive(c) + return err } func (c *Context) Protocol() Protocol { - return Protocol(C.gpgme_get_protocol(c.ctx)) + res := Protocol(C.gpgme_get_protocol(c.ctx)) + runtime.KeepAlive(c) + return res } func (c *Context) SetKeyListMode(m KeyListMode) error { - return handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m))) + err := handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m))) + runtime.KeepAlive(c) + return err } func (c *Context) KeyListMode() KeyListMode { - return KeyListMode(C.gpgme_get_keylist_mode(c.ctx)) + res := KeyListMode(C.gpgme_get_keylist_mode(c.ctx)) + runtime.KeepAlive(c) + return res } // Unavailable in 1.3.2: // func (c *Context) SetPinEntryMode(m PinEntryMode) error { -// return handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) +// err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) +// runtime.KeepAlive(c) +// return err // } // Unavailable in 1.3.2: // func (c *Context) PinEntryMode() PinEntryMode { -// return PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) +// res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) +// runtime.KeepAlive(c) +// return res // } func (c *Context) SetCallback(callback Callback) error { @@ -340,11 +376,17 @@ func (c *Context) SetCallback(callback Callback) error { c.cbc = 0 _, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0) } + runtime.KeepAlive(c) return err } func (c *Context) EngineInfo() *EngineInfo { - return &EngineInfo{info: C.gpgme_ctx_get_engine_info(c.ctx)} + cInfo := C.gpgme_ctx_get_engine_info(c.ctx) + runtime.KeepAlive(c) + // NOTE: c must be live as long as we are accessing cInfo. 
+ res := copyEngineInfo(cInfo) + runtime.KeepAlive(c) // for accesses to cInfo + return res } func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error { @@ -357,19 +399,23 @@ func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error chome = C.CString(homeDir) defer C.free(unsafe.Pointer(chome)) } - return handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome)) + err := handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome)) + runtime.KeepAlive(c) + return err } func (c *Context) KeyListStart(pattern string, secretOnly bool) error { cpattern := C.CString(pattern) defer C.free(unsafe.Pointer(cpattern)) - err := C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly)) - return handleError(err) + err := handleError(C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly))) + runtime.KeepAlive(c) + return err } func (c *Context) KeyListNext() bool { c.Key = newKey() err := handleError(C.gpgme_op_keylist_next(c.ctx, &c.Key.k)) + runtime.KeepAlive(c) // implies runtime.KeepAlive(c.Key) if err != nil { if e, ok := err.(Error); ok && e.Code() == ErrorEOF { c.KeyError = nil @@ -383,7 +429,9 @@ func (c *Context) KeyListNext() bool { } func (c *Context) KeyListEnd() error { - return handleError(C.gpgme_op_keylist_end(c.ctx)) + err := handleError(C.gpgme_op_keylist_end(c.ctx)) + runtime.KeepAlive(c) + return err } func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) { @@ -391,7 +439,11 @@ func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) { cfpr := C.CString(fingerprint) defer C.free(unsafe.Pointer(cfpr)) err := handleError(C.gpgme_get_key(c.ctx, cfpr, &key.k, cbool(secret))) - if e, ok := err.(Error); key.k == nil && ok && e.Code() == ErrorEOF { + runtime.KeepAlive(c) + runtime.KeepAlive(key) + keyKIsNil := key.k == nil + runtime.KeepAlive(key) + if e, ok := err.(Error); keyKIsNil && ok && e.Code() == ErrorEOF { return nil, fmt.Errorf("key %q not found", fingerprint) } if err != nil { @@ -401,11 +453,19 @@ func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) { } func (c *Context) Decrypt(ciphertext, plaintext *Data) error { - return handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh)) + err := handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh)) + runtime.KeepAlive(c) + runtime.KeepAlive(ciphertext) + runtime.KeepAlive(plaintext) + return err } func (c *Context) DecryptVerify(ciphertext, plaintext *Data) error { - return handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh)) + err := handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh)) + runtime.KeepAlive(c) + runtime.KeepAlive(ciphertext) + runtime.KeepAlive(plaintext) + return err } type Signature struct { @@ -432,10 +492,20 @@ func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, err plainPtr = plain.dh } err := handleError(C.gpgme_op_verify(c.ctx, sig.dh, signedTextPtr, plainPtr)) + runtime.KeepAlive(c) + runtime.KeepAlive(sig) + if signedText != nil { + runtime.KeepAlive(signedText) + } + if plain != nil { + runtime.KeepAlive(plain) + } if err != nil { return "", nil, err } res := C.gpgme_op_verify_result(c.ctx) + runtime.KeepAlive(c) + // NOTE: c must be live as long as we are accessing res. 
sigs := []Signature{} for s := res.signatures; s != nil; s = s.next { sig := Signature{ @@ -455,7 +525,9 @@ func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, err } sigs = append(sigs, sig) } - return C.GoString(res.file_name), sigs, nil + fileName := C.GoString(res.file_name) + runtime.KeepAlive(c) // for all accesses to res above + return fileName, sigs, nil } func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphertext *Data) error { @@ -467,18 +539,116 @@ func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphe *ptr = recipients[i].k } err := C.gpgme_op_encrypt(c.ctx, (*C.gpgme_key_t)(recp), C.gpgme_encrypt_flags_t(flags), plaintext.dh, ciphertext.dh) + runtime.KeepAlive(c) + runtime.KeepAlive(recipients) + runtime.KeepAlive(plaintext) + runtime.KeepAlive(ciphertext) return handleError(err) } func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error { C.gpgme_signers_clear(c.ctx) + runtime.KeepAlive(c) for _, k := range signers { - if err := handleError(C.gpgme_signers_add(c.ctx, k.k)); err != nil { + err := handleError(C.gpgme_signers_add(c.ctx, k.k)) + runtime.KeepAlive(c) + runtime.KeepAlive(k) + if err != nil { C.gpgme_signers_clear(c.ctx) + runtime.KeepAlive(c) return err } } - return handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode))) + err := handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode))) + runtime.KeepAlive(c) + runtime.KeepAlive(plain) + runtime.KeepAlive(sig) + return err +} + +type AssuanDataCallback func(data []byte) error +type AssuanInquireCallback func(name, args string) error +type AssuanStatusCallback func(status, args string) error + +// AssuanSend sends a raw Assuan command to gpg-agent +func (c *Context) AssuanSend( + cmd string, + data AssuanDataCallback, + inquiry AssuanInquireCallback, + status AssuanStatusCallback, +) error { + var operr C.gpgme_error_t + + dataPtr := callbackAdd(&data) + inquiryPtr := callbackAdd(&inquiry) + statusPtr := callbackAdd(&status) + cmdCStr := C.CString(cmd) + defer C.free(unsafe.Pointer(cmdCStr)) + err := C.gogpgme_op_assuan_transact_ext( + c.ctx, + cmdCStr, + C.uintptr_t(dataPtr), + C.uintptr_t(inquiryPtr), + C.uintptr_t(statusPtr), + &operr, + ) + runtime.KeepAlive(c) + + if handleError(operr) != nil { + return handleError(operr) + } + return handleError(err) +} + +//export gogpgme_assuan_data_callback +func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, datalen C.size_t) C.gpgme_error_t { + c := callbackLookup(uintptr(handle)).(*AssuanDataCallback) + if *c == nil { + return 0 + } + (*c)(C.GoBytes(data, C.int(datalen))) + return 0 +} + +//export gogpgme_assuan_inquiry_callback +func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs *C.char) C.gpgme_error_t { + name := C.GoString(cName) + args := C.GoString(cArgs) + c := callbackLookup(uintptr(handle)).(*AssuanInquireCallback) + if *c == nil { + return 0 + } + (*c)(name, args) + return 0 +} + +//export gogpgme_assuan_status_callback +func gogpgme_assuan_status_callback(handle unsafe.Pointer, cStatus *C.char, cArgs *C.char) C.gpgme_error_t { + status := C.GoString(cStatus) + args := C.GoString(cArgs) + c := callbackLookup(uintptr(handle)).(*AssuanStatusCallback) + if *c == nil { + return 0 + } + (*c)(status, args) + return 0 +} + +// ExportModeFlags defines how keys are exported from Export +type ExportModeFlags uint + +const ( + ExportModeExtern ExportModeFlags = 
C.GPGME_EXPORT_MODE_EXTERN + ExportModeMinimal ExportModeFlags = C.GPGME_EXPORT_MODE_MINIMAL +) + +func (c *Context) Export(pattern string, mode ExportModeFlags, data *Data) error { + pat := C.CString(pattern) + defer C.free(unsafe.Pointer(pat)) + err := handleError(C.gpgme_op_export(c.ctx, pat, C.gpgme_export_mode_t(mode), data.dh)) + runtime.KeepAlive(c) + runtime.KeepAlive(data) + return err } // ImportStatusFlags describes the type of ImportStatus.Status. The C API in gpgme.h simply uses "unsigned". @@ -517,10 +687,14 @@ type ImportResult struct { func (c *Context) Import(keyData *Data) (*ImportResult, error) { err := handleError(C.gpgme_op_import(c.ctx, keyData.dh)) + runtime.KeepAlive(c) + runtime.KeepAlive(keyData) if err != nil { return nil, err } res := C.gpgme_op_import_result(c.ctx) + runtime.KeepAlive(c) + // NOTE: c must be live as long as we are accessing res. imports := []ImportStatus{} for s := res.imports; s != nil; s = s.next { imports = append(imports, ImportStatus{ @@ -529,7 +703,7 @@ func (c *Context) Import(keyData *Data) (*ImportResult, error) { Status: ImportStatusFlags(s.status), }) } - return &ImportResult{ + importResult := &ImportResult{ Considered: int(res.considered), NoUserID: int(res.no_user_id), Imported: int(res.imported), @@ -544,11 +718,13 @@ func (c *Context) Import(keyData *Data) (*ImportResult, error) { SecretUnchanged: int(res.secret_unchanged), NotImported: int(res.not_imported), Imports: imports, - }, nil + } + runtime.KeepAlive(c) // for all accesses to res above + return importResult, nil } type Key struct { - k C.gpgme_key_t + k C.gpgme_key_t // WARNING: Call Runtime.KeepAlive(k) after ANY passing of k.k to C } func newKey() *Key { @@ -559,85 +735,122 @@ func newKey() *Key { func (k *Key) Release() { C.gpgme_key_release(k.k) + runtime.KeepAlive(k) k.k = nil } func (k *Key) Revoked() bool { - return C.key_revoked(k.k) != 0 + res := C.key_revoked(k.k) != 0 + runtime.KeepAlive(k) + return res } func (k *Key) Expired() bool { - return C.key_expired(k.k) != 0 + res := C.key_expired(k.k) != 0 + runtime.KeepAlive(k) + return res } func (k *Key) Disabled() bool { - return C.key_disabled(k.k) != 0 + res := C.key_disabled(k.k) != 0 + runtime.KeepAlive(k) + return res } func (k *Key) Invalid() bool { - return C.key_invalid(k.k) != 0 + res := C.key_invalid(k.k) != 0 + runtime.KeepAlive(k) + return res } func (k *Key) CanEncrypt() bool { - return C.key_can_encrypt(k.k) != 0 + res := C.key_can_encrypt(k.k) != 0 + runtime.KeepAlive(k) + return res } func (k *Key) CanSign() bool { - return C.key_can_sign(k.k) != 0 + res := C.key_can_sign(k.k) != 0 + runtime.KeepAlive(k) + return res } func (k *Key) CanCertify() bool { - return C.key_can_certify(k.k) != 0 + res := C.key_can_certify(k.k) != 0 + runtime.KeepAlive(k) + return res } func (k *Key) Secret() bool { - return C.key_secret(k.k) != 0 + res := C.key_secret(k.k) != 0 + runtime.KeepAlive(k) + return res } func (k *Key) CanAuthenticate() bool { - return C.key_can_authenticate(k.k) != 0 + res := C.key_can_authenticate(k.k) != 0 + runtime.KeepAlive(k) + return res } func (k *Key) IsQualified() bool { - return C.key_is_qualified(k.k) != 0 + res := C.key_is_qualified(k.k) != 0 + runtime.KeepAlive(k) + return res } func (k *Key) Protocol() Protocol { - return Protocol(k.k.protocol) + res := Protocol(k.k.protocol) + runtime.KeepAlive(k) + return res } func (k *Key) IssuerSerial() string { - return C.GoString(k.k.issuer_serial) + res := C.GoString(k.k.issuer_serial) + runtime.KeepAlive(k) + return res } func (k *Key) 
IssuerName() string { - return C.GoString(k.k.issuer_name) + res := C.GoString(k.k.issuer_name) + runtime.KeepAlive(k) + return res } func (k *Key) ChainID() string { - return C.GoString(k.k.chain_id) + res := C.GoString(k.k.chain_id) + runtime.KeepAlive(k) + return res } func (k *Key) OwnerTrust() Validity { - return Validity(k.k.owner_trust) + res := Validity(k.k.owner_trust) + runtime.KeepAlive(k) + return res } func (k *Key) SubKeys() *SubKey { - if k.k.subkeys == nil { + subKeys := k.k.subkeys + runtime.KeepAlive(k) + if subKeys == nil { return nil } - return &SubKey{k: k.k.subkeys, parent: k} + return &SubKey{k: subKeys, parent: k} // The parent: k reference ensures subKeys remains valid } func (k *Key) UserIDs() *UserID { - if k.k.uids == nil { + uids := k.k.uids + runtime.KeepAlive(k) + if uids == nil { return nil } - return &UserID{u: k.k.uids, parent: k} + return &UserID{u: uids, parent: k} // The parent: k reference ensures uids remains valid } func (k *Key) KeyListMode() KeyListMode { - return KeyListMode(k.k.keylist_mode) + res := KeyListMode(k.k.keylist_mode) + runtime.KeepAlive(k) + return res } type SubKey struct { @@ -737,12 +950,3 @@ func (u *UserID) Comment() string { func (u *UserID) Email() string { return C.GoString(u.u.email) } - -// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG. -// os.Unsetenv should be enough, but that only calls the underlying C library (which gpgme uses) if cgo is involved -// - and cgo can't be used in tests. So, provide this helper for test initialization. -func unsetenvGPGAgentInfo() { - v := C.CString("GPG_AGENT_INFO") - defer C.free(unsafe.Pointer(v)) - C.unsetenv(v) -} diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info.go b/vendor/github.com/mtrmac/gpgme/unset_agent_info.go new file mode 100644 index 00000000..986aca59 --- /dev/null +++ b/vendor/github.com/mtrmac/gpgme/unset_agent_info.go @@ -0,0 +1,18 @@ +// +build !windows + +package gpgme + +// #include +import "C" +import ( + "unsafe" +) + +// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG. +// os.Unsetenv should be enough, but that only calls the underlying C library (which gpgme uses) if cgo is involved +// - and cgo can't be used in tests. So, provide this helper for test initialization. +func unsetenvGPGAgentInfo() { + v := C.CString("GPG_AGENT_INFO") + defer C.free(unsafe.Pointer(v)) + C.unsetenv(v) +} diff --git a/vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go b/vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go new file mode 100644 index 00000000..431ec86d --- /dev/null +++ b/vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go @@ -0,0 +1,14 @@ +package gpgme + +// #include +import "C" +import ( + "unsafe" +) + +// unsetenv is not available in mingw +func unsetenvGPGAgentInfo() { + v := C.CString("GPG_AGENT_INFO=") + defer C.free(unsafe.Pointer(v)) + C.putenv(v) +}
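A note on the pattern running through the gpgme changes above: `runtime.KeepAlive(x)` is added after essentially every cgo call that uses a C handle read out of a Go wrapper (see the WARNING comments on `Data.dh`, `Context.ctx`, and `Key.k`). The sketch below is a generic, pure-Go illustration of the hazard this guards against, following the pattern from the `runtime.KeepAlive` documentation rather than the gpgme code itself; the wrapper type, handle value, and helper functions are all made up for the example.

```go
package main

import (
	"fmt"
	"runtime"
	"time"
)

type wrapper struct {
	handle uintptr // stands in for a C handle such as gpgme_ctx_t or gpgme_data_t
}

func release(h uintptr) { fmt.Printf("handle %#x released\n", h) }

// use stands in for a C call that only receives the raw handle, not the wrapper.
func use(h uintptr) {
	time.Sleep(10 * time.Millisecond) // pretend the "C call" takes a while
	fmt.Printf("handle %#x used\n", h)
}

func main() {
	w := &wrapper{handle: 0x1234}
	runtime.SetFinalizer(w, func(w *wrapper) { release(w.handle) })

	h := w.handle // after this read, w itself has no further uses the compiler can see
	runtime.GC()  // without the KeepAlive below, the finalizer is free to run from here on,
	use(h)        // possibly releasing the handle while it is still being used

	// Keeping w reachable until the call above has returned closes the race.
	runtime.KeepAlive(w)
}
```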