Mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 13:58:48 +00:00)

Compare commits: 5 commits, v1.7.0 ... release-0.

| Author | SHA1 | Date |
|---|---|---|
| | e5c9d5790c | |
| | 25629ee972 | |
| | 77d5469d2b | |
| | 29835bb7ee | |
| | fd338a6a8c | |
@@ -1,6 +1,6 @@
 FROM fedora

-RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-go-md2man \
+RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-md2man \
 # storage deps
 btrfs-progs-devel \
 device-mapper-devel \
@@ -14,7 +14,7 @@ import (
 "github.com/containers/image/manifest"
 "github.com/containers/image/signature"
 "github.com/go-check/check"
-"github.com/opencontainers/go-digest"
+digest "github.com/opencontainers/go-digest"
 imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 "github.com/opencontainers/image-tools/image"
 )

@@ -64,7 +64,7 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
 os.Setenv("GNUPGHOME", s.gpgHome)

 for _, key := range []string{"personal", "official"} {
-batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%commit\n",
+batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%no-protection\n%%commit\n",
 key, key)
 runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")
@@ -44,7 +44,7 @@ func (s *SigningSuite) SetUpSuite(c *check.C) {
 c.Assert(err, check.IsNil)
 os.Setenv("GNUPGHOME", s.gpgHome)

-runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")
+runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")

 lines, err := exec.Command(gpgBinary, "--homedir", s.gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
 c.Assert(err, check.IsNil)
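Both test hunks above add `%no-protection` to the GnuPG batch key-generation input so the test keys are created without a passphrase (GnuPG 2.1+ would otherwise try to prompt for one even in batch mode). A minimal standalone sketch of the same pattern, assuming `gpg` is on `PATH`; the helper below is hypothetical and not part of the diff:

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// generateTestKey creates an unprotected RSA test key in gpgHome using GnuPG
// batch mode; the %no-protection directive must appear before %commit.
func generateTestKey(gpgHome, name, email string) error {
	batchInput := fmt.Sprintf(
		"Key-Type: RSA\nName-Real: %s\nName-Email: %s\n%%no-protection\n%%commit\n",
		name, email)
	cmd := exec.Command("gpg", "--homedir", gpgHome, "--batch", "--gen-key")
	cmd.Stdin = strings.NewReader(batchInput)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}

func main() {
	home, err := os.MkdirTemp("", "gpg-home-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(home)
	if err := generateTestKey(home, "Test key - personal", "personal@example.com"); err != nil {
		panic(err)
	}
	fmt.Println("generated test key in", home)
}
```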
@@ -1,7 +1,7 @@
 github.com/urfave/cli v1.17.0
 github.com/kr/pretty v0.1.0
 github.com/kr/text v0.1.0
-github.com/containers/image master
+github.com/containers/image skopeo-0.1.32-rhel
 github.com/opencontainers/go-digest master
 gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
 github.com/containers/storage master

@@ -49,7 +49,7 @@ github.com/xeipuuv/gojsonpointer master
 go4.org master https://github.com/camlistore/go4
 github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
 # -- end OCI image validation requirements
-github.com/mtrmac/gpgme master
+github.com/mtrmac/gpgme v0.1.2
 # openshift/origin' k8s dependencies as of OpenShift v1.1.5
 k8s.io/client-go master
 github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
vendor/github.com/containers/image/docker/docker_client.go (generated, vendored, 7 lines changed)

@@ -6,7 +6,6 @@ import (
 "encoding/json"
 "fmt"
 "io"
-"io/ioutil"
 "net/http"
 "net/url"
 "os"

@@ -16,6 +15,7 @@ import (
 "time"

 "github.com/containers/image/docker/reference"
+"github.com/containers/image/internal/iolimits"
 "github.com/containers/image/pkg/docker/config"
 "github.com/containers/image/pkg/tlsclientconfig"
 "github.com/containers/image/types"

@@ -497,7 +497,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
 default:
 return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL)
 }
-tokenBlob, err := ioutil.ReadAll(res.Body)
+tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
 if err != nil {
 return nil, err
 }

@@ -576,7 +576,8 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
 if res.StatusCode != http.StatusOK {
 return nil, errors.Wrapf(client.HandleErrorResponse(res), "Error downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
 }
-body, err := ioutil.ReadAll(res.Body)
+
+body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
 if err != nil {
 return nil, err
 }
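The recurring change in these vendored files replaces unbounded `ioutil.ReadAll` calls on registry HTTP responses with `iolimits.ReadAtMost`, which caps how many bytes are read from a potentially hostile server. A minimal sketch of such a bounded-read helper, for illustration only; the real `iolimits` package in containers/image defines the actual size constants and may differ in detail:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readAtMost reads from r and returns the contents, failing if the stream is
// longer than max bytes. This bounds memory use when reading untrusted HTTP
// bodies such as auth tokens, manifests, signatures, and error messages.
func readAtMost(r io.Reader, max int) ([]byte, error) {
	// Read one extra byte so "exactly max" can be told apart from "more than max".
	data, err := io.ReadAll(io.LimitReader(r, int64(max)+1))
	if err != nil {
		return nil, err
	}
	if len(data) > max {
		return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", max)
	}
	return data, nil
}

func main() {
	body := strings.NewReader(`{"token":"…"}`)
	data, err := readAtMost(body, 1024)
	fmt.Println(string(data), err)
}
```

Reading one byte past the limit is what lets the helper distinguish a body that is exactly `max` bytes long from one that is too large.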
vendor/github.com/containers/image/docker/docker_image_dest.go (generated, vendored, 3 lines changed)

@@ -14,6 +14,7 @@ import (
 "path/filepath"

 "github.com/containers/image/docker/reference"
+"github.com/containers/image/internal/iolimits"
 "github.com/containers/image/manifest"
 "github.com/containers/image/types"
 "github.com/docker/distribution/registry/api/errcode"

@@ -448,7 +449,7 @@ sigExists:
 }
 defer res.Body.Close()
 if res.StatusCode != http.StatusCreated {
-body, err := ioutil.ReadAll(res.Body)
+body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxErrorBodySize)
 if err == nil {
 logrus.Debugf("Error body %s", string(body))
 }
vendor/github.com/containers/image/docker/docker_image_src.go (generated, vendored, 10 lines changed)

@@ -12,6 +12,7 @@ import (
 "strconv"

 "github.com/containers/image/docker/reference"
+"github.com/containers/image/internal/iolimits"
 "github.com/containers/image/manifest"
 "github.com/containers/image/types"
 "github.com/docker/distribution/registry/client"

@@ -97,7 +98,8 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
 if res.StatusCode != http.StatusOK {
 return nil, "", errors.Wrapf(client.HandleErrorResponse(res), "Error reading manifest %s in %s", tagOrDigest, s.ref.ref.Name())
 }
-manblob, err := ioutil.ReadAll(res.Body)
+
+manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)
 if err != nil {
 return nil, "", err
 }

@@ -276,7 +278,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
 } else if res.StatusCode != http.StatusOK {
 return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode))
 }
-sig, err := ioutil.ReadAll(res.Body)
+sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
 if err != nil {
 return nil, false, err
 }

@@ -337,7 +339,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
 return err
 }
 defer get.Body.Close()
-manifestBody, err := ioutil.ReadAll(get.Body)
+manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize)
 if err != nil {
 return err
 }

@@ -360,7 +362,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
 }
 defer delete.Body.Close()

-body, err := ioutil.ReadAll(delete.Body)
+body, err := iolimits.ReadAtMost(delete.Body, iolimits.MaxErrorBodySize)
 if err != nil {
 return err
 }
vendor/github.com/containers/image/docker/tarfile/dest.go (generated, vendored, 3 lines changed)

@@ -13,6 +13,7 @@ import (
 "time"

 "github.com/containers/image/docker/reference"
+"github.com/containers/image/internal/iolimits"
 "github.com/containers/image/internal/tmpdir"
 "github.com/containers/image/manifest"
 "github.com/containers/image/types"

@@ -129,7 +130,7 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
 }

 if isConfig {
-buf, err := ioutil.ReadAll(stream)
+buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
 if err != nil {
 return types.BlobInfo{}, errors.Wrap(err, "Error reading Config file stream")
 }
vendor/github.com/containers/image/docker/tarfile/src.go (generated, vendored, 10 lines changed)

@@ -11,6 +11,7 @@ import (
 "path"

 "github.com/containers/image/internal/tmpdir"
+"github.com/containers/image/internal/iolimits"
 "github.com/containers/image/manifest"
 "github.com/containers/image/pkg/compression"
 "github.com/containers/image/types"

@@ -184,13 +185,13 @@ func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Heade
 }

 // readTarComponent returns full contents of componentPath.
-func (s *Source) readTarComponent(path string) ([]byte, error) {
+func (s *Source) readTarComponent(path string, limit int) ([]byte, error) {
 file, err := s.openTarComponent(path)
 if err != nil {
 return nil, errors.Wrapf(err, "Error loading tar component %s", path)
 }
 defer file.Close()
-bytes, err := ioutil.ReadAll(file)
+bytes, err := iolimits.ReadAtMost(file, limit)
 if err != nil {
 return nil, err
 }

@@ -213,9 +214,8 @@ func (s *Source) ensureCachedDataIsPresent() error {
 if len(tarManifest) != 1 {
 return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
 }
-
 // Read and parse config.
-configBytes, err := s.readTarComponent(tarManifest[0].Config)
+configBytes, err := s.readTarComponent(tarManifest[0].Config, iolimits.MaxConfigBodySize)
 if err != nil {
 return err
 }

@@ -241,7 +241,7 @@ func (s *Source) ensureCachedDataIsPresent() error {
 // loadTarManifest loads and decodes the manifest.json.
 func (s *Source) loadTarManifest() ([]ManifestItem, error) {
 // FIXME? Do we need to deal with the legacy format?
-bytes, err := s.readTarComponent(manifestFileName)
+bytes, err := s.readTarComponent(manifestFileName, iolimits.MaxTarFileManifestSize)
 if err != nil {
 return nil, err
 }
vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json (generated, vendored, 66 lines removed)

@@ -1,66 +0,0 @@
{
    "title": "JSON embedded in an atomic container signature",
    "description": "This schema is a supplement to atomic-signature.md in this directory.\n\nConsumers of the JSON MUST use the processing rules documented in atomic-signature.md, especially the requirements for the 'critical' subjobject.\n\nWhenever this schema and atomic-signature.md, or the github.com/containers/image/signature implementation, differ,\nit is the atomic-signature.md document, or the github.com/containers/image/signature implementation, which governs.\n\nUsers are STRONGLY RECOMMENDED to use the github.com/containeres/image/signature implementation instead of writing\ntheir own, ESPECIALLY when consuming signatures, so that the policy.json format can be shared by all image consumers.\n",
    "type": "object",
    "required": [
        "critical",
        "optional"
    ],
    "additionalProperties": false,
    "properties": {
        "critical": {
            "type": "object",
            "required": [
                "type",
                "image",
                "identity"
            ],
            "additionalProperties": false,
            "properties": {
                "type": {
                    "type": "string",
                    "enum": [
                        "atomic container signature"
                    ]
                },
                "image": {
                    "type": "object",
                    "required": [
                        "docker-manifest-digest"
                    ],
                    "additionalProperties": false,
                    "properties": {
                        "docker-manifest-digest": {
                            "type": "string"
                        }
                    }
                },
                "identity": {
                    "type": "object",
                    "required": [
                        "docker-reference"
                    ],
                    "additionalProperties": false,
                    "properties": {
                        "docker-reference": {
                            "type": "string"
                        }
                    }
                }
            }
        },
        "optional": {
            "type": "object",
            "description": "All members are optional, but if they are included, they must be valid.",
            "additionalProperties": true,
            "properties": {
                "creator": {
                    "type": "string"
                },
                "timestamp": {
                    "type": "integer"
                }
            }
        }
    }
}
vendor/github.com/containers/image/docs/atomic-signature.md (generated, vendored, 241 lines removed)

@@ -1,241 +0,0 @@
% atomic-signature(5) Atomic signature format
% Miloslav Trmač
% March 2017

# Atomic signature format

This document describes the format of “atomic” container signatures,
as implemented by the `github.com/containers/image/signature` package.

Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package
(preferably through the higher-level `signature.PolicyContext` interface)
without having to care about the details of the format described below.
This documentation exists primarily for maintainers of the package
and to allow independent reimplementations.

## High-level overview

The signature provides an end-to-end authenticated claim that a container image
has been approved by a specific party (e.g. the creator of the image as their work,
an automated build system as a result of an automated build,
a company IT department approving the image for production) under a specified _identity_
(e.g. an OS base image / specific application, with a specific version).

An atomic container signature consists of a cryptographic signature which identifies
and authenticates who signed the image, and carries as a signed payload a JSON document.
The JSON document identifies the image being signed, claims a specific identity of the
image and, if applicable, contains other information about the image.

The signatures do not modify the container image (the layers, configuration, manifest, …);
e.g. their presence does not change the manifest digest used to identify the image in
docker/distribution servers; rather, the signatures are associated with an immutable image.
An image can have any number of signatures, so signature distribution systems SHOULD support
associating more than one signature with an image.

## The cryptographic signature

As distributed, the atomic container signature is a blob which contains a cryptographic signature
in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the
JSON document and a signature of the JSON document; it is not a “detached signature” with
independent blobs containing the JSON document and a cryptographic signature).

Currently the only defined cryptographic signature format is an OpenPGP signature (RFC 4880),
but others may be added in the future. (The blob does not contain metadata identifying the
cryptographic signature format. It is expected that most formats are sufficiently self-describing
that this is not necessary and the configured expected public key provides another indication
of the expected cryptographic signature format. Such metadata may be added in the future for
newly added cryptographic signature formats, if necessary.)

Consumers of atomic container signatures SHOULD verify the cryptographic signature
against one or more trusted public keys
(e.g. defined in a [policy.json signature verification policy file](policy.json.md))
before parsing or processing the JSON payload in _any_ way,
in particular they SHOULD stop processing the container signature
if the cryptographic signature verification fails, without even starting to process the JSON payload.

(Consumers MAY extract identification of the signing key and other metadata from the cryptographic signature,
and the JSON payload, without verifying the signature, if the purpose is to allow managing the signature blobs,
e.g. to list the authors and image identities of signatures associated with a single container image;
if so, they SHOULD design the output of such processing to minimize the risk of users considering the output trusted
or in any way usable for making policy decisions about the image.)

### OpenPGP signature verification

When verifying a cryptographic signature in the OpenPGP format,
the consumer MUST verify at least the following aspects of the signature
(like the `github.com/containers/image/signature` package does):

- The blob MUST be a “Signed Message” as defined in RFC 4880 section 11.3.
  (e.g. it MUST NOT be an unsigned “Literal Message”, or any other non-signature format).
- The signature MUST have been made by an expected key trusted for the purpose (and the specific container image).
- The signature MUST be correctly formed and pass the cryptographic validation.
- The signature MUST correctly authenticate the included JSON payload
  (in particular, the parsing of the JSON payload MUST NOT start before the complete payload has been cryptographically authenticated).
- The signature MUST NOT be expired.

The consumer SHOULD have tests for its verification code which verify that signatures failing any of the above are rejected.

## JSON processing and forward compatibility

The payload of the cryptographic signature is a JSON document (RFC 7159).
Consumers SHOULD parse it very strictly,
refusing any signature which violates the expected format (e.g. missing members, incorrect member types)
or can be interpreted ambiguously (e.g. a duplicated member in a JSON object).

Any violations of the JSON format or of other requirements in this document MAY be accepted if the JSON document can be recognized
to have been created by a known-incorrect implementation (see [`optional.creator`](#optionalcreator) below)
and if the semantics of the invalid document, as created by such an implementation, is clear.

The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`,
each a JSON object.

The `critical` object MUST contain a `type` member identifying the document as an atomic container signature
(as defined [below](#criticaltype))
and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value.

To ensure forward compatibility (allowing older signature consumers to correctly
accept or reject signatures created at a later date, with possible extensions to this format),
consumers MUST reject the signature if the `critical` object, or _any_ of its subobjects,
contain _any_ member or data value which is unrecognized, unsupported, invalid, or in any other way unexpected.
At a minimum, this includes unrecognized members in a JSON object, or incorrect types of expected members.

For the same reason, consumers SHOULD accept any members with unrecognized names in the `optional` object,
and MAY accept signatures where the object member is recognized but unsupported, or the value of the member is unsupported.
Consumers still SHOULD reject signatures where a member of an `optional` object is supported but the value is recognized as invalid.
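A short Go sketch of the strict-parsing rule above (an illustration only, not part of the deleted document; the real `github.com/containers/image/signature` parser is stricter, e.g. it also rejects duplicated members, which `encoding/json` alone does not detect):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// criticalSection lists only the members the format defines; anything else in
// "critical" is rejected by DisallowUnknownFields below.
type criticalSection struct {
	Type     string          `json:"type"`
	Image    json.RawMessage `json:"image"`
	Identity json.RawMessage `json:"identity"`
}

type signaturePayload struct {
	Critical criticalSection        `json:"critical"`
	Optional map[string]interface{} `json:"optional"` // unrecognized optional members are accepted
}

func parseStrict(blob []byte) (*signaturePayload, error) {
	dec := json.NewDecoder(bytes.NewReader(blob))
	dec.DisallowUnknownFields()
	var payload signaturePayload
	if err := dec.Decode(&payload); err != nil {
		return nil, err
	}
	if payload.Critical.Type != "atomic container signature" {
		return nil, fmt.Errorf("unrecognized critical.type %q", payload.Critical.Type)
	}
	return &payload, nil
}

func main() {
	_, err := parseStrict([]byte(`{"critical":{"type":"bogus"},"optional":{}}`))
	fmt.Println(err) // unrecognized critical.type "bogus"
}
```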
## JSON data format

An example of the full format follows, with detailed description below.
To reiterate, consumers of the signature SHOULD perform successful cryptographic verification,
and MUST reject unexpected data in the `critical` object, or in the top-level object, as described above.

```json
{
    "critical": {
        "type": "atomic container signature",
        "image": {
            "docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"
        },
        "identity": {
            "docker-reference": "docker.io/library/busybox:latest"
        }
    },
    "optional": {
        "creator": "some software package v1.0.1-35",
        "timestamp": 1483228800,
    }
}
```

### `critical`

This MUST be a JSON object which contains data critical to correctly evaluating the validity of a signature.

Consumers MUST reject any signature where the `critical` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.

### `critical.type`

This MUST be a string with a string value exactly equal to `atomic container signature` (three words, including the spaces).

Signature consumers MUST reject signatures which do not have this member or this member does not have exactly the expected value.

(The consumers MAY support signatures with a different value of the `type` member, if any is defined in the future;
if so, the rest of the JSON document is interpreted according to rules defining that value of `critical.type`,
not by this document.)

### `critical.image`

This MUST be a JSON object which identifies the container image this signature applies to.

Consumers MUST reject any signature where the `critical.image` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.

(Currently only the `docker-manifest-digest` way of identifying a container image is defined;
alternatives to this may be defined in the future,
but existing consumers are required to reject signatures which use formats they do not support.)

### `critical.image.docker-manifest-digest`

This MUST be a JSON string, in the `github.com/opencontainers/go-digest.Digest` string format.

The value of this member MUST match the manifest of the signed container image, as implemented in the docker/distribution manifest addressing system.

The consumer of the signature SHOULD verify the manifest digest against a fully verified signature before processing the contents of the image manifest in any other way
(e.g. parsing the manifest further or downloading layers of the image).

Implementation notes:
* A single container image manifest may have several valid manifest digest values, using different algorithms.
* For “signed” [docker/distribution schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) manifests,
  the manifest digest applies to the payload of the JSON web signature, not to the raw manifest blob.

### `critical.identity`

This MUST be a JSON object which identifies the claimed identity of the image (usually the purpose of the image, or the application, along with version information),
as asserted by the author of the signature.

Consumers MUST reject any signature where the `critical.identity` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.

(Currently only the `docker-reference` way of claiming an image identity/purpose is defined;
alternatives to this may be defined in the future,
but existing consumers are required to reject signatures which use formats they do not support.)

### `critical.identity.docker-reference`

This MUST be a JSON string, in the `github.com/docker/distribution/reference` string format,
and using the same normalization semantics (where e.g. `busybox:latest` is equivalent to `docker.io/library/busybox:latest`).
If the normalization semantics allows multiple string representations of the claimed identity with equivalent meaning,
the `critical.identity.docker-reference` member SHOULD use the fully explicit form (including the full host name and namespaces).

The value of this member MUST match the image identity/purpose expected by the consumer of the image signature and the image
(again, accounting for the `docker/distribution/reference` normalization semantics).

In the most common case, this means that the `critical.identity.docker-reference` value must be equal to the docker/distribution reference used to refer to or download the image.
However, depending on the specific application, users or system administrators may accept less specific matches
(e.g. ignoring the tag value in the signature when pulling the `:latest` tag or when referencing an image by digest),
or they may require `critical.identity.docker-reference` values with a completely different namespace to the reference used to refer to/download the image
(e.g. requiring a `critical.identity.docker-reference` value which identifies the image as coming from a supplier when fetching it from a company-internal mirror of approved images).
The software performing this verification SHOULD allow the users to define such a policy using the [policy.json signature verification policy file format](policy.json.md).

The `critical.identity.docker-reference` value SHOULD contain either a tag or digest;
in most cases, it SHOULD use a tag rather than a digest. (See also the default [`matchRepoDigestOrExact` matching semantics in `policy.json`](policy.json.md#signedby).)

### `optional`

This MUST be a JSON object.

Consumers SHOULD accept any members with unrecognized names in the `optional` object,
and MAY accept a signature where the object member is recognized but unsupported, or the value of the member is valid but unsupported.
Consumers still SHOULD reject any signature where a member of an `optional` object is supported but the value is recognized as invalid.

### `optional.creator`

If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature.

The contents of this string is not defined in detail; however each implementation creating atomic container signatures:

- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number)
- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number)
  changes whenever the format or semantics of the generated signature changes in any way;
  it SHOULD not be possible for two implementations which use a different format or semantics to have the same `optional.creator` value
- SHOULD use a format which is reasonably easy to parse in software (perhaps using a regexp),
  and which makes it easy enough to recognize a range of versions of a specific implementation
  (e.g. the version of the implementation SHOULD NOT be only a git hash, because they don’t have an easily defined ordering;
  the string should contain a version number, or at least a date of the commit).

Consumers of atomic container signatures MAY recognize specific values or sets of values of `optional.creator`
(perhaps augmented with `optional.timestamp`),
and MAY change their processing of the signature based on these values
(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
as long as the semantics of the invalid document, as created by such an implementation, is clear.

If consumers of signatures do change their behavior based on the `optional.creator` value,
they SHOULD take care that the way they process the signatures is not inconsistent with
strictly validating signature consumers.
(I.e. it is acceptable for a consumer to accept a signature based on a specific `optional.creator` value
if other implementations would completely reject the signature,
but it would be very undesirable for the two kinds of implementations to accept the signature in different
and inconsistent situations.)

### `optional.timestamp`

If present, this MUST be a JSON number, which is representable as a 64-bit integer, and identifies the time when the signature was created
as the number of seconds since the UNIX epoch (Jan 1 1970 00:00 UTC).
vendor/github.com/containers/image/docs/containers-policy.json.md (generated, vendored, 283 lines removed)

@@ -1,283 +0,0 @@
% CONTAINERS-POLICY.JSON(5) policy.json Man Page
% Miloslav Trmač
% September 2016

# NAME
containers-policy.json - syntax for the signature verification policy file

## DESCRIPTION

Signature verification policy files are used to specify policy, e.g. trusted keys,
applicable when deciding whether to accept an image, or individual signatures of that image, as valid.

The default policy is stored (unless overridden at compile-time) at `/etc/containers/policy.json`;
applications performing verification may allow using a different policy instead.

## FORMAT

The signature verification policy file, usually called `policy.json`,
uses a JSON format. Unlike some other JSON files, its parsing is fairly strict:
unrecognized, duplicated or otherwise invalid fields cause the entire file,
and usually the entire operation, to be rejected.

The purpose of the policy file is to define a set of *policy requirements* for a container image,
usually depending on its location (where it is being pulled from) or otherwise defined identity.

Policy requirements can be defined for:

- An individual *scope* in a *transport*.
  The *transport* values are the same as the transport prefixes when pushing/pulling images (e.g. `docker:`, `atomic:`),
  and *scope* values are defined by each transport; see below for more details.

  Usually, a scope can be defined to match a single image, and various prefixes of
  such a most specific scope define namespaces of matching images.
- A default policy for a single transport, expressed using an empty string as a scope
- A global default policy.

If multiple policy requirements match a given image, only the requirements from the most specific match apply;
the more general policy requirements definitions are ignored.

This is expressed in JSON using the top-level syntax
```js
{
    "default": [/* policy requirements: global default */]
    "transports": {
        transport_name: {
            "": [/* policy requirements: default for transport $transport_name */],
            scope_1: [/* policy requirements: default for $scope_1 in $transport_name */],
            scope_2: [/*…*/]
            /*…*/
        },
        transport_name_2: {/*…*/}
        /*…*/
    }
}
```

The global `default` set of policy requirements is mandatory; all of the other fields
(`transports` itself, any specific transport, the transport-specific default, etc.) are optional.

<!-- NOTE: Keep this in sync with transports/transports.go! -->
## Supported transports and their scopes

### `atomic:`

The `atomic:` transport refers to images in an Atomic Registry.

Supported scopes use the form _hostname_[`:`_port_][`/`_namespace_[`/`_imagestream_ [`:`_tag_]]],
i.e. either specifying a complete name of a tagged image, or a prefix denoting
a host/namespace/image stream.

*Note:* The _hostname_ and _port_ refer to the Docker registry host and port (the one used
e.g. for `docker pull`), _not_ to the OpenShift API host and port.

### `dir:`

The `dir:` transport refers to images stored in local directories.

Supported scopes are paths of directories (either containing a single image or
subdirectories possibly containing images).

*Note:* The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.

The top-level scope `"/"` is forbidden; use the transport default scope `""`,
for consistency with other transports.

### `docker:`

The `docker:` transport refers to images in a registry implementing the "Docker Registry HTTP API V2".

Scopes matching individual images are named Docker references *in the fully expanded form*, either
using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).

More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
a repository namespace, or a registry host (by only specifying the host name).

### `oci:`

The `oci:` transport refers to images in directories compliant with the "Open Container Image Layout Specification".

Supported scopes use the form _directory_`:`_tag_, with _directory_ referring to
a directory containing one or more tags, or any of the parent directories.

*Note:* See `dir:` above for semantics and restrictions on the directory paths; they apply to `oci:` equivalently.

### `tarball:`

The `tarball:` transport refers to tarred up container root filesystems.

Scopes are ignored.

## Policy Requirements

Using the mechanisms above, a set of policy requirements is looked up. The policy requirements
are represented as a JSON array of individual requirement objects. For an image to be accepted,
*all* of the requirements must be satisfied simultaneously.

The policy requirements can also be used to decide whether an individual signature is accepted (= is signed by a recognized key of a known author);
in that case some requirements may apply only to some signatures, but each signature must be accepted by *at least one* requirement object.

The following requirement objects are supported:

### `insecureAcceptAnything`

A simple requirement with the following syntax

```json
{"type":"insecureAcceptAnything"}
```

This requirement accepts any image (but note that other requirements in the array still apply).

When deciding to accept an individual signature, this requirement does not have any effect; it does *not* cause the signature to be accepted, though.

This is useful primarily for policy scopes where no signature verification is required;
because the array of policy requirements must not be empty, this requirement is used
to represent the lack of requirements explicitly.

### `reject`

A simple requirement with the following syntax:

```json
{"type":"reject"}
```

This requirement rejects every image, and every signature.

### `signedBy`

This requirement requires an image to be signed with an expected identity, or accepts a signature if it is using an expected identity and key.

```js
{
    "type": "signedBy",
    "keyType": "GPGKeys", /* The only currently supported value */
    "keyPath": "/path/to/local/keyring/file",
    "keyData": "base64-encoded-keyring-data",
    "signedIdentity": identity_requirement
}
```
<!-- Later: other keyType values -->

Exactly one of `keyPath` and `keyData` must be present, containing a GPG keyring of one or more public keys. Only signatures made by these keys are accepted.

The `signedIdentity` field, a JSON object, specifies what image identity the signature claims about the image.
One of the following alternatives is supported:

- The identity in the signature must exactly match the image identity. Note that with this, referencing an image by digest (with a signature claiming a _repository_`:`_tag_ identity) will fail.

  ```json
  {"type":"matchExact"}
  ```
- If the image identity carries a tag, the identity in the signature must exactly match;
  if the image identity uses a digest reference, the identity in the signature must be in the same repository as the image identity (using any tag).

  (Note that with images identified using digest references, the digest from the reference is validated even before signature verification starts.)

  ```json
  {"type":"matchRepoDigestOrExact"}
  ```
- The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifying an exact image version.

  ```json
  {"type":"matchRepository"}
  ```
- The identity in the signature must exactly match a specified identity.
  This is useful e.g. when locally mirroring images signed using their public identity.

  ```js
  {
      "type": "exactReference",
      "dockerReference": docker_reference_value
  }
  ```
- The identity in the signature must be in the same repository as a specified identity.
  This combines the properties of `matchRepository` and `exactReference`.

  ```js
  {
      "type": "exactRepository",
      "dockerRepository": docker_repository_value
  }
  ```

If the `signedIdentity` field is missing, it is treated as `matchRepoDigestOrExact`.

*Note*: `matchExact`, `matchRepoDigestOrExact` and `matchRepository` can be only used if a Docker-like image identity is
provided by the transport. In particular, the `dir:` and `oci:` transports can be only
used with `exactReference` or `exactRepository`.

<!-- ### `signedBaseLayer` -->

## Examples

It is *strongly* recommended to set the `default` policy to `reject`, and then
selectively allow individual transports and scopes as desired.

### A reasonably locked-down system

(Note that the `/*`…`*/` comments are not valid in JSON, and must not be used in real policies.)

```js
{
    "default": [{"type": "reject"}], /* Reject anything not explicitly allowed */
    "transports": {
        "docker": {
            /* Allow installing images from a specific repository namespace, without cryptographic verification.
               This namespace includes images like openshift/hello-openshift and openshift/origin. */
            "docker.io/openshift": [{"type": "insecureAcceptAnything"}],
            /* Similarly, allow installing the “official” busybox images. Note how the fully expanded
               form, with the explicit /library/, must be used. */
            "docker.io/library/busybox": [{"type": "insecureAcceptAnything"}]
            /* Other docker: images use the global default policy and are rejected */
        },
        "dir": {
            "": [{"type": "insecureAcceptAnything"}] /* Allow any images originating in local directories */
        },
        "atomic": {
            /* The common case: using a known key for a repository or set of repositories */
            "hostname:5000/myns/official": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "/path/to/official-pubkey.gpg"
                }
            ],
            /* A more complex example, for a repository which contains a mirror of a third-party product,
               which must be signed-off by local IT */
            "hostname:5000/vendor/product": [
                { /* Require the image to be signed by the original vendor, using the vendor's repository location. */
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "/path/to/vendor-pubkey.gpg",
                    "signedIdentity": {
                        "type": "exactRepository",
                        "dockerRepository": "vendor-hostname/product/repository"
                    }
                },
                { /* Require the image to _also_ be signed by a local reviewer. */
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "/path/to/reviewer-pubkey.gpg"
                }
            ]
        }
    }
}
```

### Completely disable security, allow all images, do not trust any signatures

```json
{
    "default": [{"type": "insecureAcceptAnything"}]
}
```

## SEE ALSO
  atomic(1)

## HISTORY
August 2018, Rename to containers-policy.json(5) by Valentin Rothberg <vrothberg@suse.com>

September 2016, Originally compiled by Miloslav Trmač <mitr@redhat.com>
vendor/github.com/containers/image/docs/containers-registries.conf.5.md (generated, vendored, 56 lines removed)

@@ -1,56 +0,0 @@
% CONTAINERS-REGISTRIES.CONF(5) System-wide registry configuration file
% Brent Baude
% Aug 2017

# NAME
containers-registries.conf - Syntax of System Registry Configuration File

# DESCRIPTION
The CONTAINERS-REGISTRIES configuration file is a system-wide configuration
file for container image registries. The file format is TOML. The valid
categories are: 'registries.search', 'registries.insecure', and
'registries.block'.

By default, the configuration file is located at `/etc/containers/registries.conf`.

# FORMAT
The TOML format is used to build a simple list format for registries under three
categories: `registries.search`, `registries.insecure`, and `registries.block`.
You can list multiple registries using a comma-separated list.

Search registries are used when the caller of a container runtime does not fully specify the
container image that they want to execute. These registries are prepended onto the front
of the specified container image until the named image is found at a registry.

Insecure registries. By default container runtimes use TLS when retrieving images
from a registry. If the registry is not set up with TLS, then the container runtime
will fail to pull images from the registry. If you add the registry to the list of
insecure registries then the container runtime will attempt to use standard web protocols to
pull the image. It also allows you to pull from a registry with self-signed certificates.
Note that insecure registries can be used for any registry, not just the registries listed
under search.

Block registries. The registries in this category are not pulled from when
retrieving images.

# EXAMPLE
The following example configuration defines two searchable registries, one
insecure registry, and two blocked registries.

```
[registries.search]
registries = ['registry1.com', 'registry2.com']

[registries.insecure]
registries = ['registry3.com']

[registries.block]
registries = ['registry.untrusted.com', 'registry.unsafe.com']
```

# HISTORY
Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg <vrothberg@suse.com>

Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com>

Aug 2017, Originally compiled by Brent Baude <bbaude@redhat.com>
vendor/github.com/containers/image/docs/containers-registries.d.md (generated, vendored, 128 lines removed)

@@ -1,128 +0,0 @@
% CONTAINERS-REGISTRIES.D(5) Registries.d Man Page
% Miloslav Trmač
% August 2016

# NAME
containers-registries.d - Directory for various registries configurations

# DESCRIPTION

The registries configuration directory contains configuration for various registries
(servers storing remote container images), and for content stored in them,
so that the configuration does not have to be provided in command-line options over and over for every command,
and so that it can be shared by all users of containers/image.

By default (unless overridden at compile-time), the registries configuration directory is `/etc/containers/registries.d`;
applications may allow using a different directory instead.

## Directory Structure

The directory may contain any number of files with the extension `.yaml`,
each using the YAML format. Other than the mandatory extension, names of the files
don’t matter.

The contents of these files are merged together; to have a well-defined and easy to understand
behavior, there can be only one configuration section describing a single namespace within a registry
(in particular there can be at most one `default-docker` section across all files,
and there can be at most one instance of any key under the `docker` section;
these sections are documented later).

Thus, it is forbidden to have two conflicting configurations for a single registry or scope,
and it is also forbidden to split a configuration for a single registry or scope across
more than one file (even if they are not semantically in conflict).

## Registries, Scopes and Search Order

Each YAML file must contain a “YAML mapping” (key-value pairs). Two top-level keys are defined:

- `default-docker` is the _configuration section_ (as documented below)
  for registries implementing "Docker Registry HTTP API V2".

  This key is optional.

- `docker` is a mapping, using individual registries implementing "Docker Registry HTTP API V2",
  or namespaces and individual images within these registries, as keys;
  the value assigned to any such key is a _configuration section_.

  This key is optional.

  Scopes matching individual images are named Docker references *in the fully expanded form*, either
  using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).

  More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
  a repository namespace, or a registry host (and a port if it differs from the default).

  Note that if a registry is accessed using a hostname+port configuration, the port-less hostname
  is _not_ used as a parent scope.

When searching for a configuration to apply for an individual container image, only
the configuration for the most-precisely matching scope is used; configuration using
more general scopes is ignored. For example, if _any_ configuration exists for
`docker.io/library/busybox`, the configuration for `docker.io` is ignored
(even if some element of the configuration is defined for `docker.io` and not for `docker.io/library/busybox`).

## Individual Configuration Sections

A single configuration section is selected for a container image using the process
described above. The configuration section is a YAML mapping, with the following keys:

- `sigstore-staging` defines a URL of the signature storage, used for editing it (adding or deleting signatures).

  This key is optional; if it is missing, `sigstore` below is used.

- `sigstore` defines a URL of the signature storage.
  This URL is used for reading existing signatures,
  and if `sigstore-staging` does not exist, also for adding or removing them.

  This key is optional; if it is missing, no signature storage is defined (no signatures
  are downloaded along with images, and adding new signatures is possible only if `sigstore-staging` is defined).

## Examples

### Using Containers from Various Origins

The following demonstrates how to consume and run images from various registries and namespaces:

```yaml
docker:
  registry.database-supplier.com:
    sigstore: https://sigstore.database-supplier.com
  distribution.great-middleware.org:
    sigstore: https://security-team.great-middleware.org/sigstore
  docker.io/web-framework:
    sigstore: https://sigstore.web-framework.io:8080
```

### Developing and Signing Containers, Staging Signatures

For developers in `example.com`:

- Consume most container images using the public servers also used by clients.
- Use a separate signature storage for container images in a namespace corresponding to the developers' department, with a staging storage used before publishing signatures.
- Craft an individual exception for a single branch a specific developer is working on locally.

```yaml
docker:
  registry.example.com:
    sigstore: https://registry-sigstore.example.com
  registry.example.com/mydepartment:
    sigstore: https://sigstore.mydepartment.example.com
    sigstore-staging: file:///mnt/mydepartment/sigstore-staging
  registry.example.com/mydepartment/myproject:mybranch:
    sigstore: http://localhost:4242/sigstore
    sigstore-staging: file:///home/useraccount/webroot/sigstore
```

### A Global Default

If a company publishes its products using a different domain, and a different registry hostname for each of them, it is still possible to use a single signature storage server
without listing each domain individually. This is expected to rarely happen, usually only for staging new signatures.

```yaml
default-docker:
  sigstore-staging: file:///mnt/company/common-sigstore-staging
```

# AUTHORS

Miloslav Trmač <mitr@redhat.com>
vendor/github.com/containers/image/docs/signature-protocols.md (generated, vendored, 136 lines removed)

@@ -1,136 +0,0 @@
# Signature access protocols

The `github.com/containers/image` library supports signatures implemented as blobs “attached to” an image.
Some image transports (local storage formats and remote protocols) implement these signatures natively
or trivially; for others, the protocol extensions described below are necessary.

## docker/distribution registries—separate storage

### Usage

Any existing docker/distribution registry, whether or not it natively supports signatures,
can be augmented with separate signature storage by configuring a signature storage URL in [`registries.d`](registries.d.md).
`registries.d` can be configured to use one storage URL for a whole docker/distribution server,
or also separate URLs for smaller namespaces or individual repositories within the server
(which e.g. allows image authors to manage their own signature storage while publishing
the images on the public `docker.io` server).

The signature storage URL defines a root of a path hierarchy.
It can be either a `file:///…` URL, pointing to a local directory structure,
or a `http`/`https` URL, pointing to a remote server.
`file:///` signature storage can be both read and written; `http`/`https` only supports reading.

The same path hierarchy is used in both cases, so the HTTP/HTTPS server can be
a simple static web server serving a directory structure created by writing to a `file:///` signature storage.
(This of course does not prevent other server implementations,
e.g. a HTTP server reading signatures from a database.)

The usual workflow for producing and distributing images using the separate storage mechanism
is to configure the repository in `registries.d` with a `sigstore-staging` URL pointing to a private
`file:///` staging area, and a `sigstore` URL pointing to a public web server.
To publish an image, the image author would sign the image as necessary (e.g. using `skopeo copy`),
and then copy the created directory structure from the `file:///` staging area
to a subdirectory of a webroot of the public web server so that they are accessible using the public `sigstore` URL.
The author would also instruct consumers of the image to, or provide a `registries.d` configuration file to,
set up a `sigstore` URL pointing to the public web server.

### Path structure

Given a _base_ signature storage URL configured in `registries.d` as mentioned above,
and a container image stored in a docker/distribution registry using the _fully-expanded_ name
_hostname_`/`_namespaces_`/`_name_{`@`_digest_,`:`_tag_} (e.g. for `docker.io/library/busybox:latest`,
_namespaces_ is `library`, even if the user refers to the image using the shorter syntax as `busybox:latest`),
signatures are accessed using URLs of the form
> _base_`/`_namespaces_`/`_name_`@`_digest-algo_`=`_digest-value_`/signature-`_index_

where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing the relevant image manifest
(i.e. even if the user referenced the image using a tag,
the signature storage is always disambiguated using digest references).
Note that in the URLs used for signatures,
_digest-algo_ and _digest-value_ are separated using the `=` character,
not `:` like when accessing the manifest using the docker/distribution API.

Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1.
Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1,
and continue reading signatures and increasing _index_ as long as signatures with these _index_ values exist.
Similarly, to add one more signature to an image, find the first _index_ which does not exist, and
then store the new signature using that _index_ value.

There is no way to list existing signatures other than iterating through the successive _index_ values,
and no way to download all of the signatures at once.

### Examples

For a docker/distribution image available as `busybox@sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e`
(or as `busybox:latest` if the `latest` tag points to a manifest with the same digest),
and with a `registries.d` configuration specifying a `sigstore` URL `https://example.com/sigstore` for the same image,
the following URLs would be accessed to download all signatures:
> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-1`
> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-2`
> - …

For a docker/distribution image available as `example.com/ns1/ns2/ns3/repo@somedigest:digestvalue` and the same
`sigstore` URL, the signatures would be available at
> `https://example.com/sigstore/ns1/ns2/ns3/repo@somedigest=digestvalue/signature-1`

and so on.
## (OpenShift) docker/distribution API extension
|
||||
|
||||
As of https://github.com/openshift/origin/pull/12504/ , the OpenShift-embedded registry also provides
|
||||
an extension of the docker/distribution API which allows simpler access to the signatures,
|
||||
using only the docker/distribution API endpoint.
|
||||
|
||||
This API is not inherently OpenShift-specific (e.g. the client does not need to know the OpenShift API endpoint,
|
||||
and credentials sufficient to access the docker/distribution API server are sufficient to access signatures as well),
|
||||
and it is the preferred way implement signature storage in registries.
|
||||
|
||||
See https://github.com/openshift/openshift-docs/pull/3556 for the upstream documentation of the API.
|
||||
|
||||
To read the signature, any user with access to an image can use the `/extensions/v2/…/signatures/…`
|
||||
path to read an array of signatures. Use only the signature objects
|
||||
which have `version` equal to `2`, `type` equal to `atomic`, and read the signature from `content`;
|
||||
ignore the other fields of the signature object.
|
||||
|
||||
To add a single signature, `PUT` a new object with `version` set to `2`, `type` set to `atomic`,
|
||||
and `content` set to the signature. Also set `name` to an unique name with the form
|
||||
_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (also used in the URL),
|
||||
and _per-image-name_ is any unique identifier.
|
||||
|
||||
To add more than one signature, add them one at a time. This API does not allow deleting signatures.
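
A hypothetical Go sketch of building such a signature object follows; the struct name and the exact wire encoding of `content` are illustrative assumptions (the upstream API documentation linked above is authoritative), and only the field names `version`, `type`, `name`, and `content` come from the description in this document:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// signatureObject mirrors the fields described above for the extension API.
type signatureObject struct {
	Version int    `json:"version"` // must be 2
	Type    string `json:"type"`    // must be "atomic"
	Name    string `json:"name"`    // <manifest digest>@<per-image-name>
	Content []byte `json:"content"` // the signature blob (encoding/json emits it base64-encoded)
}

func main() {
	sig := signatureObject{
		Version: 2,
		Type:    "atomic",
		Name:    "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e@mysignature-1",
		Content: []byte("<binary signature omitted>"),
	}
	body, err := json.Marshal(sig)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // this body would be PUT to the signatures endpoint
}
```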

Note that because signatures are stored within the cluster-wide image objects,
i.e. different namespaces can not associate different sets of signatures to the same image,
updating signatures requires a cluster-wide access to the `imagesignatures` resource
(by default available to the `system:image-signer` role).

## OpenShift-embedded registries

The OpenShift-embedded registry implements the ordinary docker/distribution API,
and it also exposes images through the OpenShift REST API (available through the “API master” servers).

Note: OpenShift versions 1.5 and later support the above-described [docker/distribution API extension](#openshift-dockerdistribution-api-extension),
which is easier to set up and should usually be preferred.
Continue reading for details on using older versions of OpenShift.

As of https://github.com/openshift/origin/pull/9181,
signatures are exposed through the OpenShift API
(i.e. to access the complete image, it is necessary to use both APIs,
in particular to know the URLs for both the docker/distribution and the OpenShift API master endpoints).

To read the signature, any user with access to an image can use the `imagestreamimages` namespaced
resource to read an `Image` object and its `Signatures` array. Use only the `ImageSignature` objects
which have `Type` equal to `atomic`, and read the signature from `Content`; ignore the other fields of
the `ImageSignature` object.

To add or remove signatures, use the cluster-wide (non-namespaced) `imagesignatures` resource,
with `Type` set to `atomic` and `Content` set to the signature. Signature names must have the form
_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (OpenShift “image name”),
and _per-image-name_ is any unique identifier.

Note that because signatures are stored within the cluster-wide image objects,
i.e. different namespaces can not associate different sets of signatures to the same image,
updating signatures requires a cluster-wide access to the `imagesignatures` resource
(by default available to the `system:image-signer` role),
and deleting signatures is strongly discouraged
(it deletes the signature from all namespaces which contain the same image).

4
vendor/github.com/containers/image/image/docker_schema2.go
generated
vendored
4
vendor/github.com/containers/image/image/docker_schema2.go
generated
vendored
@@ -6,10 +6,10 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"io/ioutil"
"strings"

"github.com/containers/image/docker/reference"
"github.com/containers/image/internal/iolimits"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
@@ -100,7 +100,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
return nil, err
}
defer stream.Close()
blob, err := ioutil.ReadAll(stream)
blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return nil, err
}

4
vendor/github.com/containers/image/image/oci.go
generated
vendored
4
vendor/github.com/containers/image/image/oci.go
generated
vendored
@@ -3,9 +3,9 @@ package image
import (
"context"
"encoding/json"
"io/ioutil"

"github.com/containers/image/docker/reference"
"github.com/containers/image/internal/iolimits"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
@@ -65,7 +65,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
return nil, err
}
defer stream.Close()
blob, err := ioutil.ReadAll(stream)
blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return nil, err
}

60
vendor/github.com/containers/image/internal/iolimits/iolimits.go
generated
vendored
Normal file
60
vendor/github.com/containers/image/internal/iolimits/iolimits.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
package iolimits

import (
"io"
"io/ioutil"

"github.com/pkg/errors"
)

// All constants below are intended to be used as limits for `ReadAtMost`. The
// immediate use-case for limiting the size of in-memory copied data is to
// protect against OOM DOS attacks as described in CVE-2020-1702. Instead of
// copying data until running out of memory, we error out after hitting the
// specified limit.
const (
// megaByte denotes one megabyte and is intended to be used as a limit in
// `ReadAtMost`.
megaByte = 1 << 20
// MaxManifestBodySize is the maximum allowed size of a manifest. The limit
// of 4 MB aligns with the one of a Docker registry:
// https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30
MaxManifestBodySize = 4 * megaByte
// MaxAuthTokenBodySize is the maximum allowed size of an auth token.
// The limit of 1 MB is considered to be greatly sufficient.
MaxAuthTokenBodySize = megaByte
// MaxSignatureListBodySize is the maximum allowed size of a signature list.
// The limit of 4 MB is considered to be greatly sufficient.
MaxSignatureListBodySize = 4 * megaByte
// MaxSignatureBodySize is the maximum allowed size of a signature.
// The limit of 4 MB is considered to be greatly sufficient.
MaxSignatureBodySize = 4 * megaByte
// MaxErrorBodySize is the maximum allowed size of an error-response body.
// The limit of 1 MB is considered to be greatly sufficient.
MaxErrorBodySize = megaByte
// MaxConfigBodySize is the maximum allowed size of a config blob.
// The limit of 4 MB is considered to be greatly sufficient.
MaxConfigBodySize = 4 * megaByte
// MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body.
// The limit of 4 MB is considered to be greatly sufficient.
MaxOpenShiftStatusBody = 4 * megaByte
// MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images)
// The limit of 1 MB is considered to be greatly sufficient.
MaxTarFileManifestSize = megaByte
)

// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded.
func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
limitedReader := io.LimitReader(reader, int64(limit+1))

res, err := ioutil.ReadAll(limitedReader)
if err != nil {
return nil, err
}

if len(res) > limit {
return nil, errors.Errorf("exceeded maximum allowed size of %d bytes", limit)
}

return res, nil
}
4
vendor/github.com/containers/image/openshift/openshift.go
generated
vendored
4
vendor/github.com/containers/image/openshift/openshift.go
generated
vendored
@@ -7,13 +7,13 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"

"github.com/containers/image/docker"
"github.com/containers/image/docker/reference"
"github.com/containers/image/internal/iolimits"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/containers/image/version"
@@ -102,7 +102,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
return nil, err
}
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxOpenShiftStatusBody)
if err != nil {
return nil, err
}

18
vendor/github.com/mtrmac/gpgme/data.go
generated
vendored
18
vendor/github.com/mtrmac/gpgme/data.go
generated
vendored
@@ -50,25 +50,25 @@ func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
}

//export gogpgme_seekfunc
func gogpgme_seekfunc(handle unsafe.Pointer, offset C.off_t, whence C.int) C.off_t {
func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) C.gpgme_off_t {
d := callbackLookup(uintptr(handle)).(*Data)
n, err := d.s.Seek(int64(offset), int(whence))
if err != nil {
C.gpgme_err_set_errno(C.EIO)
return -1
}
return C.off_t(n)
return C.gpgme_off_t(n)
}

// The Data buffer used to communicate with GPGME
type Data struct {
dh C.gpgme_data_t
dh C.gpgme_data_t // WARNING: Call runtime.KeepAlive(d) after ANY passing of d.dh to C
buf []byte
cbs C.struct_gpgme_data_cbs
r io.Reader
w io.Writer
s io.Seeker
cbc uintptr
cbc uintptr // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh)
}

func newData() *Data {
@@ -154,12 +154,14 @@ func (d *Data) Close() error {
callbackDelete(d.cbc)
}
_, err := C.gpgme_data_release(d.dh)
runtime.KeepAlive(d)
d.dh = nil
return err
}

func (d *Data) Write(p []byte) (int, error) {
n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
runtime.KeepAlive(d)
if err != nil {
return 0, err
}
@@ -171,6 +173,7 @@ func (d *Data) Write(p []byte) (int, error) {

func (d *Data) Read(p []byte) (int, error) {
n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
runtime.KeepAlive(d)
if err != nil {
return 0, err
}
@@ -181,11 +184,14 @@ func (d *Data) Read(p []byte) (int, error) {
}

func (d *Data) Seek(offset int64, whence int) (int64, error) {
n, err := C.gpgme_data_seek(d.dh, C.off_t(offset), C.int(whence))
n, err := C.gogpgme_data_seek(d.dh, C.gpgme_off_t(offset), C.int(whence))
runtime.KeepAlive(d)
return int64(n), err
}

// Name returns the associated filename if any
func (d *Data) Name() string {
return C.GoString(C.gpgme_data_get_file_name(d.dh))
res := C.GoString(C.gpgme_data_get_file_name(d.dh))
runtime.KeepAlive(d)
return res
}

3
vendor/github.com/mtrmac/gpgme/go.mod
generated
vendored
Normal file
3
vendor/github.com/mtrmac/gpgme/go.mod
generated
vendored
Normal file
@@ -0,0 +1,3 @@
module github.com/mtrmac/gpgme

go 1.11
22
vendor/github.com/mtrmac/gpgme/go_gpgme.c
generated
vendored
22
vendor/github.com/mtrmac/gpgme/go_gpgme.c
generated
vendored
@@ -8,6 +8,28 @@ void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintpt
gpgme_set_passphrase_cb(ctx, cb, (void *)handle);
}

gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) {
return gpgme_data_seek(dh, offset, whence);
}

gpgme_error_t gogpgme_op_assuan_transact_ext(
gpgme_ctx_t ctx,
char* cmd,
uintptr_t data_h,
uintptr_t inquiry_h,
uintptr_t status_h,
gpgme_error_t *operr
){
return gpgme_op_assuan_transact_ext(
ctx,
cmd,
(gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, (void *)data_h,
(gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, (void *)inquiry_h,
(gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, (void *)status_h,
operr
);
}

unsigned int key_revoked(gpgme_key_t k) {
return k->revoked;
}

12
vendor/github.com/mtrmac/gpgme/go_gpgme.h
generated
vendored
12
vendor/github.com/mtrmac/gpgme/go_gpgme.h
generated
vendored
@@ -6,12 +6,24 @@

#include <gpgme.h>

/* GPGME_VERSION_NUMBER was introduced in 1.4.0 */
#if !defined(GPGME_VERSION_NUMBER) || GPGME_VERSION_NUMBER < 0x010402
typedef off_t gpgme_off_t; /* Introduced in 1.4.2 */
#endif

extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size);
extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size);
extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence);
extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd);
extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle);
extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle);
extern gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence);

extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, uintptr_t data_h, uintptr_t inquiry_h , uintptr_t status_h, gpgme_error_t *operr);

extern gpgme_error_t gogpgme_assuan_data_callback(void *opaque, void* data, size_t datalen );
extern gpgme_error_t gogpgme_assuan_inquiry_callback(void *opaque, char* name, char* args);
extern gpgme_error_t gogpgme_assuan_status_callback(void *opaque, char* status, char* args);

extern unsigned int key_revoked(gpgme_key_t k);
extern unsigned int key_expired(gpgme_key_t k);

346
vendor/github.com/mtrmac/gpgme/gpgme.go
generated
vendored
346
vendor/github.com/mtrmac/gpgme/gpgme.go
generated
vendored
@@ -7,7 +7,6 @@ package gpgme
// #include <gpgme.h>
// #include "go_gpgme.h"
import "C"

import (
"fmt"
"io"
@@ -48,9 +47,8 @@ const (
ProtocolAssuan Protocol = C.GPGME_PROTOCOL_ASSUAN
ProtocolG13 Protocol = C.GPGME_PROTOCOL_G13
ProtocolUIServer Protocol = C.GPGME_PROTOCOL_UISERVER
// ProtocolSpawn Protocol = C.GPGME_PROTOCOL_SPAWN // Unavailable in 1.4.3
ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT
ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN
ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT
ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN
)

type PinEntryMode int
@@ -70,7 +68,6 @@ const (
EncryptNoEncryptTo EncryptFlag = C.GPGME_ENCRYPT_NO_ENCRYPT_TO
EncryptPrepare EncryptFlag = C.GPGME_ENCRYPT_PREPARE
EncryptExceptSign EncryptFlag = C.GPGME_ENCRYPT_EXPECT_SIGN
// EncryptNoCompress EncryptFlag = C.GPGME_ENCRYPT_NO_COMPRESS // Unavailable in 1.4.3
)

type HashAlgo int
@@ -84,7 +81,6 @@ const (
KeyListModeExtern KeyListMode = C.GPGME_KEYLIST_MODE_EXTERN
KeyListModeSigs KeyListMode = C.GPGME_KEYLIST_MODE_SIGS
KeyListModeSigNotations KeyListMode = C.GPGME_KEYLIST_MODE_SIG_NOTATIONS
// KeyListModeWithSecret KeyListMode = C.GPGME_KEYLIST_MODE_WITH_SECRET // Unavailable in 1.4.3
KeyListModeEphemeral KeyListMode = C.GPGME_KEYLIST_MODE_EPHEMERAL
KeyListModeModeValidate KeyListMode = C.GPGME_KEYLIST_MODE_VALIDATE
)
@@ -168,39 +164,60 @@ func EngineCheckVersion(p Protocol) error {
}

type EngineInfo struct {
info C.gpgme_engine_info_t
next *EngineInfo
protocol Protocol
fileName string
homeDir string
version string
requiredVersion string
}

func copyEngineInfo(info C.gpgme_engine_info_t) *EngineInfo {
res := &EngineInfo{
next: nil,
protocol: Protocol(info.protocol),
fileName: C.GoString(info.file_name),
homeDir: C.GoString(info.home_dir),
version: C.GoString(info.version),
requiredVersion: C.GoString(info.req_version),
}
if info.next != nil {
res.next = copyEngineInfo(info.next)
}
return res
}

func (e *EngineInfo) Next() *EngineInfo {
if e.info.next == nil {
return nil
}
return &EngineInfo{info: e.info.next}
return e.next
}

func (e *EngineInfo) Protocol() Protocol {
return Protocol(e.info.protocol)
return e.protocol
}

func (e *EngineInfo) FileName() string {
return C.GoString(e.info.file_name)
return e.fileName
}

func (e *EngineInfo) Version() string {
return C.GoString(e.info.version)
return e.version
}

func (e *EngineInfo) RequiredVersion() string {
return C.GoString(e.info.req_version)
return e.requiredVersion
}

func (e *EngineInfo) HomeDir() string {
return C.GoString(e.info.home_dir)
return e.homeDir
}

func GetEngineInfo() (*EngineInfo, error) {
info := &EngineInfo{}
return info, handleError(C.gpgme_get_engine_info(&info.info))
var cInfo C.gpgme_engine_info_t
err := handleError(C.gpgme_get_engine_info(&cInfo))
if err != nil {
return nil, err
}
return copyEngineInfo(cInfo), nil // It is up to the caller not to invalidate cInfo concurrently until this is done.
}

func SetEngineInfo(proto Protocol, fileName, homeDir string) error {
@@ -261,9 +278,9 @@ type Context struct {
KeyError error

callback Callback
cbc uintptr
cbc uintptr // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx)

ctx C.gpgme_ctx_t
ctx C.gpgme_ctx_t // WARNING: Call runtime.KeepAlive(c) after ANY passing of c.ctx to C
}

func New() (*Context, error) {
@@ -281,49 +298,68 @@ func (c *Context) Release() {
callbackDelete(c.cbc)
}
C.gpgme_release(c.ctx)
runtime.KeepAlive(c)
c.ctx = nil
}

func (c *Context) SetArmor(yes bool) {
C.gpgme_set_armor(c.ctx, cbool(yes))
runtime.KeepAlive(c)
}

func (c *Context) Armor() bool {
return C.gpgme_get_armor(c.ctx) != 0
res := C.gpgme_get_armor(c.ctx) != 0
runtime.KeepAlive(c)
return res
}

func (c *Context) SetTextMode(yes bool) {
C.gpgme_set_textmode(c.ctx, cbool(yes))
runtime.KeepAlive(c)
}

func (c *Context) TextMode() bool {
return C.gpgme_get_textmode(c.ctx) != 0
res := C.gpgme_get_textmode(c.ctx) != 0
runtime.KeepAlive(c)
return res
}

func (c *Context) SetProtocol(p Protocol) error {
return handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p)))
err := handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p)))
runtime.KeepAlive(c)
return err
}

func (c *Context) Protocol() Protocol {
return Protocol(C.gpgme_get_protocol(c.ctx))
res := Protocol(C.gpgme_get_protocol(c.ctx))
runtime.KeepAlive(c)
return res
}

func (c *Context) SetKeyListMode(m KeyListMode) error {
return handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m)))
err := handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m)))
runtime.KeepAlive(c)
return err
}

func (c *Context) KeyListMode() KeyListMode {
return KeyListMode(C.gpgme_get_keylist_mode(c.ctx))
res := KeyListMode(C.gpgme_get_keylist_mode(c.ctx))
runtime.KeepAlive(c)
return res
}

// Unavailable in 1.3.2:
// func (c *Context) SetPinEntryMode(m PinEntryMode) error {
// return handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
// err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
// runtime.KeepAlive(c)
// return err
// }

// Unavailable in 1.3.2:
// func (c *Context) PinEntryMode() PinEntryMode {
// return PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
// res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
// runtime.KeepAlive(c)
// return res
// }

func (c *Context) SetCallback(callback Callback) error {
@@ -340,11 +376,17 @@ func (c *Context) SetCallback(callback Callback) error {
c.cbc = 0
_, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0)
}
runtime.KeepAlive(c)
return err
}

func (c *Context) EngineInfo() *EngineInfo {
return &EngineInfo{info: C.gpgme_ctx_get_engine_info(c.ctx)}
cInfo := C.gpgme_ctx_get_engine_info(c.ctx)
runtime.KeepAlive(c)
// NOTE: c must be live as long as we are accessing cInfo.
res := copyEngineInfo(cInfo)
runtime.KeepAlive(c) // for accesses to cInfo
return res
}

func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error {
@@ -357,19 +399,23 @@ func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error
chome = C.CString(homeDir)
defer C.free(unsafe.Pointer(chome))
}
return handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome))
err := handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome))
runtime.KeepAlive(c)
return err
}

func (c *Context) KeyListStart(pattern string, secretOnly bool) error {
cpattern := C.CString(pattern)
defer C.free(unsafe.Pointer(cpattern))
err := C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly))
return handleError(err)
err := handleError(C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly)))
runtime.KeepAlive(c)
return err
}

func (c *Context) KeyListNext() bool {
c.Key = newKey()
err := handleError(C.gpgme_op_keylist_next(c.ctx, &c.Key.k))
runtime.KeepAlive(c) // implies runtime.KeepAlive(c.Key)
if err != nil {
if e, ok := err.(Error); ok && e.Code() == ErrorEOF {
c.KeyError = nil
@@ -383,7 +429,9 @@ func (c *Context) KeyListNext() bool {
}

func (c *Context) KeyListEnd() error {
return handleError(C.gpgme_op_keylist_end(c.ctx))
err := handleError(C.gpgme_op_keylist_end(c.ctx))
runtime.KeepAlive(c)
return err
}

func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) {
@@ -391,7 +439,11 @@ func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) {
cfpr := C.CString(fingerprint)
defer C.free(unsafe.Pointer(cfpr))
err := handleError(C.gpgme_get_key(c.ctx, cfpr, &key.k, cbool(secret)))
if e, ok := err.(Error); key.k == nil && ok && e.Code() == ErrorEOF {
runtime.KeepAlive(c)
runtime.KeepAlive(key)
keyKIsNil := key.k == nil
runtime.KeepAlive(key)
if e, ok := err.(Error); keyKIsNil && ok && e.Code() == ErrorEOF {
return nil, fmt.Errorf("key %q not found", fingerprint)
}
if err != nil {
@@ -401,11 +453,19 @@ func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) {
}

func (c *Context) Decrypt(ciphertext, plaintext *Data) error {
return handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh))
err := handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh))
runtime.KeepAlive(c)
runtime.KeepAlive(ciphertext)
runtime.KeepAlive(plaintext)
return err
}

func (c *Context) DecryptVerify(ciphertext, plaintext *Data) error {
return handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh))
err := handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh))
runtime.KeepAlive(c)
runtime.KeepAlive(ciphertext)
runtime.KeepAlive(plaintext)
return err
}

type Signature struct {
@@ -432,10 +492,20 @@ func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, err
plainPtr = plain.dh
}
err := handleError(C.gpgme_op_verify(c.ctx, sig.dh, signedTextPtr, plainPtr))
runtime.KeepAlive(c)
runtime.KeepAlive(sig)
if signedText != nil {
runtime.KeepAlive(signedText)
}
if plain != nil {
runtime.KeepAlive(plain)
}
if err != nil {
return "", nil, err
}
res := C.gpgme_op_verify_result(c.ctx)
runtime.KeepAlive(c)
// NOTE: c must be live as long as we are accessing res.
sigs := []Signature{}
for s := res.signatures; s != nil; s = s.next {
sig := Signature{
@@ -455,7 +525,9 @@ func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, err
}
sigs = append(sigs, sig)
}
return C.GoString(res.file_name), sigs, nil
fileName := C.GoString(res.file_name)
runtime.KeepAlive(c) // for all accesses to res above
return fileName, sigs, nil
}

func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphertext *Data) error {
@@ -467,18 +539,116 @@ func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphe
*ptr = recipients[i].k
}
err := C.gpgme_op_encrypt(c.ctx, (*C.gpgme_key_t)(recp), C.gpgme_encrypt_flags_t(flags), plaintext.dh, ciphertext.dh)
runtime.KeepAlive(c)
runtime.KeepAlive(recipients)
runtime.KeepAlive(plaintext)
runtime.KeepAlive(ciphertext)
return handleError(err)
}

func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error {
C.gpgme_signers_clear(c.ctx)
runtime.KeepAlive(c)
for _, k := range signers {
if err := handleError(C.gpgme_signers_add(c.ctx, k.k)); err != nil {
err := handleError(C.gpgme_signers_add(c.ctx, k.k))
runtime.KeepAlive(c)
runtime.KeepAlive(k)
if err != nil {
C.gpgme_signers_clear(c.ctx)
runtime.KeepAlive(c)
return err
}
}
return handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode)))
err := handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode)))
runtime.KeepAlive(c)
runtime.KeepAlive(plain)
runtime.KeepAlive(sig)
return err
}

type AssuanDataCallback func(data []byte) error
type AssuanInquireCallback func(name, args string) error
type AssuanStatusCallback func(status, args string) error

// AssuanSend sends a raw Assuan command to gpg-agent
func (c *Context) AssuanSend(
cmd string,
data AssuanDataCallback,
inquiry AssuanInquireCallback,
status AssuanStatusCallback,
) error {
var operr C.gpgme_error_t

dataPtr := callbackAdd(&data)
inquiryPtr := callbackAdd(&inquiry)
statusPtr := callbackAdd(&status)
cmdCStr := C.CString(cmd)
defer C.free(unsafe.Pointer(cmdCStr))
err := C.gogpgme_op_assuan_transact_ext(
c.ctx,
cmdCStr,
C.uintptr_t(dataPtr),
C.uintptr_t(inquiryPtr),
C.uintptr_t(statusPtr),
&operr,
)
runtime.KeepAlive(c)

if handleError(operr) != nil {
return handleError(operr)
}
return handleError(err)
}

//export gogpgme_assuan_data_callback
func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, datalen C.size_t) C.gpgme_error_t {
c := callbackLookup(uintptr(handle)).(*AssuanDataCallback)
if *c == nil {
return 0
}
(*c)(C.GoBytes(data, C.int(datalen)))
return 0
}

//export gogpgme_assuan_inquiry_callback
func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs *C.char) C.gpgme_error_t {
name := C.GoString(cName)
args := C.GoString(cArgs)
c := callbackLookup(uintptr(handle)).(*AssuanInquireCallback)
if *c == nil {
return 0
}
(*c)(name, args)
return 0
}

//export gogpgme_assuan_status_callback
func gogpgme_assuan_status_callback(handle unsafe.Pointer, cStatus *C.char, cArgs *C.char) C.gpgme_error_t {
status := C.GoString(cStatus)
args := C.GoString(cArgs)
c := callbackLookup(uintptr(handle)).(*AssuanStatusCallback)
if *c == nil {
return 0
}
(*c)(status, args)
return 0
}

// ExportModeFlags defines how keys are exported from Export
type ExportModeFlags uint

const (
ExportModeExtern ExportModeFlags = C.GPGME_EXPORT_MODE_EXTERN
ExportModeMinimal ExportModeFlags = C.GPGME_EXPORT_MODE_MINIMAL
)

func (c *Context) Export(pattern string, mode ExportModeFlags, data *Data) error {
pat := C.CString(pattern)
defer C.free(unsafe.Pointer(pat))
err := handleError(C.gpgme_op_export(c.ctx, pat, C.gpgme_export_mode_t(mode), data.dh))
runtime.KeepAlive(c)
runtime.KeepAlive(data)
return err
}

// ImportStatusFlags describes the type of ImportStatus.Status. The C API in gpgme.h simply uses "unsigned".
@@ -517,10 +687,14 @@ type ImportResult struct {

func (c *Context) Import(keyData *Data) (*ImportResult, error) {
err := handleError(C.gpgme_op_import(c.ctx, keyData.dh))
runtime.KeepAlive(c)
runtime.KeepAlive(keyData)
if err != nil {
return nil, err
}
res := C.gpgme_op_import_result(c.ctx)
runtime.KeepAlive(c)
// NOTE: c must be live as long as we are accessing res.
imports := []ImportStatus{}
for s := res.imports; s != nil; s = s.next {
imports = append(imports, ImportStatus{
@@ -529,7 +703,7 @@ func (c *Context) Import(keyData *Data) (*ImportResult, error) {
Status: ImportStatusFlags(s.status),
})
}
return &ImportResult{
importResult := &ImportResult{
Considered: int(res.considered),
NoUserID: int(res.no_user_id),
Imported: int(res.imported),
@@ -544,11 +718,13 @@ func (c *Context) Import(keyData *Data) (*ImportResult, error) {
SecretUnchanged: int(res.secret_unchanged),
NotImported: int(res.not_imported),
Imports: imports,
}, nil
}
runtime.KeepAlive(c) // for all accesses to res above
return importResult, nil
}

type Key struct {
k C.gpgme_key_t
k C.gpgme_key_t // WARNING: Call Runtime.KeepAlive(k) after ANY passing of k.k to C
}

func newKey() *Key {
@@ -559,85 +735,122 @@ func newKey() *Key {

func (k *Key) Release() {
C.gpgme_key_release(k.k)
runtime.KeepAlive(k)
k.k = nil
}

func (k *Key) Revoked() bool {
return C.key_revoked(k.k) != 0
res := C.key_revoked(k.k) != 0
runtime.KeepAlive(k)
return res
}

func (k *Key) Expired() bool {
return C.key_expired(k.k) != 0
res := C.key_expired(k.k) != 0
runtime.KeepAlive(k)
return res
}

func (k *Key) Disabled() bool {
return C.key_disabled(k.k) != 0
res := C.key_disabled(k.k) != 0
runtime.KeepAlive(k)
return res
}

func (k *Key) Invalid() bool {
return C.key_invalid(k.k) != 0
res := C.key_invalid(k.k) != 0
runtime.KeepAlive(k)
return res
}

func (k *Key) CanEncrypt() bool {
return C.key_can_encrypt(k.k) != 0
res := C.key_can_encrypt(k.k) != 0
runtime.KeepAlive(k)
return res
}

func (k *Key) CanSign() bool {
return C.key_can_sign(k.k) != 0
res := C.key_can_sign(k.k) != 0
runtime.KeepAlive(k)
return res
}

func (k *Key) CanCertify() bool {
return C.key_can_certify(k.k) != 0
res := C.key_can_certify(k.k) != 0
runtime.KeepAlive(k)
return res
}

func (k *Key) Secret() bool {
return C.key_secret(k.k) != 0
res := C.key_secret(k.k) != 0
runtime.KeepAlive(k)
return res
}

func (k *Key) CanAuthenticate() bool {
return C.key_can_authenticate(k.k) != 0
res := C.key_can_authenticate(k.k) != 0
runtime.KeepAlive(k)
return res
}

func (k *Key) IsQualified() bool {
return C.key_is_qualified(k.k) != 0
res := C.key_is_qualified(k.k) != 0
runtime.KeepAlive(k)
return res
}

func (k *Key) Protocol() Protocol {
return Protocol(k.k.protocol)
res := Protocol(k.k.protocol)
runtime.KeepAlive(k)
return res
}

func (k *Key) IssuerSerial() string {
return C.GoString(k.k.issuer_serial)
res := C.GoString(k.k.issuer_serial)
runtime.KeepAlive(k)
return res
}

func (k *Key) IssuerName() string {
return C.GoString(k.k.issuer_name)
res := C.GoString(k.k.issuer_name)
runtime.KeepAlive(k)
return res
}

func (k *Key) ChainID() string {
return C.GoString(k.k.chain_id)
res := C.GoString(k.k.chain_id)
runtime.KeepAlive(k)
return res
}

func (k *Key) OwnerTrust() Validity {
return Validity(k.k.owner_trust)
res := Validity(k.k.owner_trust)
runtime.KeepAlive(k)
return res
}

func (k *Key) SubKeys() *SubKey {
if k.k.subkeys == nil {
subKeys := k.k.subkeys
runtime.KeepAlive(k)
if subKeys == nil {
return nil
}
return &SubKey{k: k.k.subkeys, parent: k}
return &SubKey{k: subKeys, parent: k} // The parent: k reference ensures subKeys remains valid
}

func (k *Key) UserIDs() *UserID {
if k.k.uids == nil {
uids := k.k.uids
runtime.KeepAlive(k)
if uids == nil {
return nil
}
return &UserID{u: k.k.uids, parent: k}
return &UserID{u: uids, parent: k} // The parent: k reference ensures uids remains valid
}

func (k *Key) KeyListMode() KeyListMode {
return KeyListMode(k.k.keylist_mode)
res := KeyListMode(k.k.keylist_mode)
runtime.KeepAlive(k)
return res
}

type SubKey struct {
@@ -737,12 +950,3 @@ func (u *UserID) Comment() string {
func (u *UserID) Email() string {
return C.GoString(u.u.email)
}

// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG.
// os.Unsetenv should be enough, but that only calls the underlying C library (which gpgme uses) if cgo is involved
// - and cgo can't be used in tests. So, provide this helper for test initialization.
func unsetenvGPGAgentInfo() {
v := C.CString("GPG_AGENT_INFO")
defer C.free(unsafe.Pointer(v))
C.unsetenv(v)
}

18
vendor/github.com/mtrmac/gpgme/unset_agent_info.go
generated
vendored
Normal file
18
vendor/github.com/mtrmac/gpgme/unset_agent_info.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
// +build !windows

package gpgme

// #include <stdlib.h>
import "C"
import (
"unsafe"
)

// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG.
// os.Unsetenv should be enough, but that only calls the underlying C library (which gpgme uses) if cgo is involved
// - and cgo can't be used in tests. So, provide this helper for test initialization.
func unsetenvGPGAgentInfo() {
v := C.CString("GPG_AGENT_INFO")
defer C.free(unsafe.Pointer(v))
C.unsetenv(v)
}
14
vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go
generated
vendored
Normal file
14
vendor/github.com/mtrmac/gpgme/unset_agent_info_windows.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
package gpgme

// #include <stdlib.h>
import "C"
import (
"unsafe"
)

// unsetenv is not available in mingw
func unsetenvGPGAgentInfo() {
v := C.CString("GPG_AGENT_INFO=")
defer C.free(unsafe.Pointer(v))
C.putenv(v)
}
Block a user