Vendor after merging mtrmac/image:openshift-native-signatures and update API use
Update copy.go for the signature implementation change: now we need to push the manifest first, and only afterwards the signatures.
This commit is contained in: parent ecc745d124, commit c9fbb6c1ab
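The ordering change described above (push blobs, then the manifest, then the signatures) appears to be needed because the OpenShift destination records the manifest digest in PutManifest and needs it before it can attach signatures. A minimal, self-contained Go sketch of that ordering follows; the pushImage helper and the local destination interface are illustrative stand-ins for this note only, not code from this repository or from containers/image:

package copysketch

import (
    "fmt"
    "io"
)

// destination mirrors the small subset of the containers/image ImageDestination
// API exercised by copy.go in this commit; it is a local stand-in for illustration.
type destination interface {
    PutBlob(digest string, stream io.Reader) error
    PutManifest(manifest []byte) error
    PutSignatures(signatures [][]byte) error
}

// pushImage is a hypothetical helper showing the order this commit establishes:
// blobs first, then the manifest, and only then the signatures.
func pushImage(dest destination, layers map[string]io.Reader, manifest []byte, sigs [][]byte) error {
    for digest, stream := range layers {
        if err := dest.PutBlob(digest, stream); err != nil {
            return fmt.Errorf("Error writing blob %s: %v", digest, err)
        }
    }
    if err := dest.PutManifest(manifest); err != nil {
        return fmt.Errorf("Error writing manifest: %v", err)
    }
    // Signatures last: the OpenShift destination needs the manifest digest
    // recorded in PutManifest before it can attach signatures.
    if err := dest.PutSignatures(sigs); err != nil {
        return fmt.Errorf("Error writing signatures: %v", err)
    }
    return nil
}

The actual diffs follow.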
@@ -71,14 +71,14 @@ func copyHandler(context *cli.Context) error {
         sigs = append(sigs, newSig)
     }
 
-    if err := dest.PutSignatures(sigs); err != nil {
-        return fmt.Errorf("Error writing signatures: %v", err)
-    }
-
-    // FIXME: We need to call PutManifest after PutBlob and PutSignatures. This seems ugly; move to a "set properties" + "commit" model?
+    // FIXME: We need to call PutManifest after PutBlob and before PutSignatures. This seems ugly; move to a "set properties" + "commit" model?
     if err := dest.PutManifest(manifest); err != nil {
         return fmt.Errorf("Error writing manifest: %v", err)
     }
+
+    if err := dest.PutSignatures(sigs); err != nil {
+        return fmt.Errorf("Error writing signatures: %v", err)
+    }
     return nil
 }
 
23 vendor/github.com/containers/image/directory/directory.go (generated, vendored; file deleted)
@@ -1,23 +0,0 @@
-package directory
-
-import (
-    "fmt"
-    "path/filepath"
-    "strings"
-)
-
-// manifestPath returns a path for the manifest within a directory using our conventions.
-func manifestPath(dir string) string {
-    return filepath.Join(dir, "manifest.json")
-}
-
-// layerPath returns a path for a layer tarball within a directory using our conventions.
-func layerPath(dir string, digest string) string {
-    // FIXME: Should we keep the digest identification?
-    return filepath.Join(dir, strings.TrimPrefix(digest, "sha256:")+".tar")
-}
-
-// signaturePath returns a path for a signature within a directory using our conventions.
-func signaturePath(dir string, index int) string {
-    return filepath.Join(dir, fmt.Sprintf("signature-%d", index+1))
-}
6 vendor/github.com/containers/image/directory/directory_dest.go (generated, vendored)
@@ -28,11 +28,11 @@ func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
 }
 
 func (d *dirImageDestination) PutManifest(manifest []byte) error {
-    return ioutil.WriteFile(manifestPath(d.ref.path), manifest, 0644)
+    return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
 }
 
 func (d *dirImageDestination) PutBlob(digest string, stream io.Reader) error {
-    layerFile, err := os.Create(layerPath(d.ref.path, digest))
+    layerFile, err := os.Create(d.ref.layerPath(digest))
     if err != nil {
         return err
     }
@@ -48,7 +48,7 @@ func (d *dirImageDestination) PutBlob(digest string, stream io.Reader) error {
 
 func (d *dirImageDestination) PutSignatures(signatures [][]byte) error {
     for i, sig := range signatures {
-        if err := ioutil.WriteFile(signaturePath(d.ref.path, i), sig, 0644); err != nil {
+        if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil {
             return err
         }
     }
8 vendor/github.com/containers/image/directory/directory_src.go (generated, vendored)
@@ -26,7 +26,7 @@ func (s *dirImageSource) Reference() types.ImageReference {
 
 // it's up to the caller to determine the MIME type of the returned manifest's bytes
 func (s *dirImageSource) GetManifest(_ []string) ([]byte, string, error) {
-    m, err := ioutil.ReadFile(manifestPath(s.ref.path))
+    m, err := ioutil.ReadFile(s.ref.manifestPath())
     if err != nil {
         return nil, "", err
     }
@@ -34,11 +34,11 @@ func (s *dirImageSource) GetManifest(_ []string) ([]byte, string, error) {
 }
 
 func (s *dirImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
-    r, err := os.Open(layerPath(s.ref.path, digest))
+    r, err := os.Open(s.ref.layerPath(digest))
     if err != nil {
         return nil, 0, nil
     }
-    fi, err := os.Stat(layerPath(s.ref.path, digest))
+    fi, err := r.Stat()
     if err != nil {
         return nil, 0, nil
     }
@@ -48,7 +48,7 @@ func (s *dirImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
 func (s *dirImageSource) GetSignatures() ([][]byte, error) {
     signatures := [][]byte{}
     for i := 0; ; i++ {
-        signature, err := ioutil.ReadFile(signaturePath(s.ref.path, i))
+        signature, err := ioutil.ReadFile(s.ref.signaturePath(i))
         if err != nil {
             if os.IsNotExist(err) {
                 break
17 vendor/github.com/containers/image/directory/directory_transport.go (generated, vendored)
@@ -3,6 +3,7 @@ package directory
 import (
     "errors"
     "fmt"
     "path/filepath"
     "strings"
 
     "github.com/containers/image/directory/explicitfilepath"
@@ -137,3 +138,19 @@ func (ref dirReference) NewImageSource(certPath string, tlsVerify bool) (types.I
 func (ref dirReference) NewImageDestination(certPath string, tlsVerify bool) (types.ImageDestination, error) {
     return newImageDestination(ref), nil
 }
+
+// manifestPath returns a path for the manifest within a directory using our conventions.
+func (ref dirReference) manifestPath() string {
+    return filepath.Join(ref.path, "manifest.json")
+}
+
+// layerPath returns a path for a layer tarball within a directory using our conventions.
+func (ref dirReference) layerPath(digest string) string {
+    // FIXME: Should we keep the digest identification?
+    return filepath.Join(ref.path, strings.TrimPrefix(digest, "sha256:")+".tar")
+}
+
+// signaturePath returns a path for a signature within a directory using our conventions.
+func (ref dirReference) signaturePath(index int) string {
+    return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+}
53 vendor/github.com/containers/image/docker/docker_client.go (generated, vendored)
@@ -137,41 +137,38 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
         req.SetBasicAuth(c.username, c.password)
         return nil
     case "Bearer":
-        res, err := c.client.Do(req)
+        // FIXME? This gets a new token for every API request;
+        // we may be easily able to reuse a previous token, e.g.
+        // for OpenShift the token only identifies the user and does not vary
+        // across operations. Should we just try the request first, and
+        // only get a new token on failure?
+        // OTOH what to do with the single-use body stream in that case?
+
+        // Try performing the request, expecting it to fail.
+        testReq := *req
+        // Do not use the body stream, or we couldn't reuse it for the "real" call later.
+        testReq.Body = nil
+        testReq.ContentLength = 0
+        res, err := c.client.Do(&testReq)
         if err != nil {
             return err
         }
-        hdr := res.Header.Get("WWW-Authenticate")
-        if hdr == "" || res.StatusCode != http.StatusUnauthorized {
+        chs := parseAuthHeader(res.Header)
+        if res.StatusCode != http.StatusUnauthorized || chs == nil || len(chs) == 0 {
             // no need for bearer? wtf?
             return nil
         }
-        tokens = strings.Split(hdr, " ")
-        tokens = strings.Split(tokens[1], ",")
-        var realm, service, scope string
-        for _, token := range tokens {
-            if strings.HasPrefix(token, "realm") {
-                realm = strings.Trim(token[len("realm="):], "\"")
-            }
-            if strings.HasPrefix(token, "service") {
-                service = strings.Trim(token[len("service="):], "\"")
-            }
-            if strings.HasPrefix(token, "scope") {
-                scope = strings.Trim(token[len("scope="):], "\"")
-            }
+        // Arbitrarily use the first challenge, there is no reason to expect more than one.
+        challenge := chs[0]
+        if challenge.Scheme != "bearer" { // Another artifact of trying to handle WWW-Authenticate before it actually happens.
+            return fmt.Errorf("Unimplemented: WWW-Authenticate Bearer replaced by %#v", challenge.Scheme)
         }
 
-        if realm == "" {
+        realm, ok := challenge.Parameters["realm"]
+        if !ok {
             return fmt.Errorf("missing realm in bearer auth challenge")
         }
-        if service == "" {
-            return fmt.Errorf("missing service in bearer auth challenge")
-        }
-        // The scope can be empty if we're not getting a token for a specific repo
-        //if scope == "" && repo != "" {
-        if scope == "" {
-            return fmt.Errorf("missing scope in bearer auth challenge")
-        }
+        service, _ := challenge.Parameters["service"] // Will be "" if not present
+        scope, _ := challenge.Parameters["scope"]     // Will be "" if not present
         token, err := c.getBearerToken(realm, service, scope)
         if err != nil {
             return err
@@ -189,7 +186,9 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (string, err
         return "", err
     }
     getParams := authReq.URL.Query()
-    getParams.Add("service", service)
+    if service != "" {
+        getParams.Add("service", service)
+    }
     if scope != "" {
         getParams.Add("scope", scope)
     }
4 vendor/github.com/containers/image/docker/docker_image_src.go (generated, vendored)
@@ -59,7 +59,7 @@ func simplifyContentType(contentType string) string {
 }
 
 func (s *dockerImageSource) GetManifest(mimetypes []string) ([]byte, string, error) {
-    reference, err := tagOrDigest(s.ref.ref)
+    reference, err := s.ref.tagOrDigest()
     if err != nil {
         return nil, "", err
     }
@@ -114,7 +114,7 @@ func (s *dockerImageSource) Delete() error {
     headers := make(map[string][]string)
     headers["Accept"] = []string{manifest.DockerV2Schema2MIMEType}
 
-    reference, err := tagOrDigest(s.ref.ref)
+    reference, err := s.ref.tagOrDigest()
     if err != nil {
         return err
     }
12 vendor/github.com/containers/image/docker/docker_transport.go (generated, vendored)
@@ -129,3 +129,15 @@ func (ref dockerReference) NewImageSource(certPath string, tlsVerify bool) (type
 func (ref dockerReference) NewImageDestination(certPath string, tlsVerify bool) (types.ImageDestination, error) {
     return newImageDestination(ref, certPath, tlsVerify)
 }
+
+// tagOrDigest returns a tag or digest from the reference.
+func (ref dockerReference) tagOrDigest() (string, error) {
+    if ref, ok := ref.ref.(reference.Canonical); ok {
+        return ref.Digest().String(), nil
+    }
+    if ref, ok := ref.ref.(reference.NamedTagged); ok {
+        return ref.Tag(), nil
+    }
+    // This should not happen, NewReference above refuses reference.IsNameOnly values.
+    return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", ref.ref.String())
+}
18 vendor/github.com/containers/image/docker/docker_utils.go (generated, vendored; file deleted)
@@ -1,18 +0,0 @@
-package docker
-
-import (
-    "fmt"
-
-    "github.com/docker/docker/reference"
-)
-
-// tagOrDigest returns a tag or digest from a reference for which !reference.IsNameOnly.
-func tagOrDigest(ref reference.Named) (string, error) {
-    if ref, ok := ref.(reference.Canonical); ok {
-        return ref.Digest().String(), nil
-    }
-    if ref, ok := ref.(reference.NamedTagged); ok {
-        return ref.Tag(), nil
-    }
-    return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", ref.String())
-}
159 vendor/github.com/containers/image/docker/wwwauthenticate.go (generated, vendored; new file)
@@ -0,0 +1,159 @@
+package docker
+
+// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
+
+import (
+    "net/http"
+    "strings"
+)
+
+// challenge carries information from a WWW-Authenticate response header.
+// See RFC 7235.
+type challenge struct {
+    // Scheme is the auth-scheme according to RFC 7235
+    Scheme string
+
+    // Parameters are the auth-params according to RFC 7235
+    Parameters map[string]string
+}
+
+// Octet types from RFC 7230.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+    isToken octetType = 1 << iota
+    isSpace
+)
+
+func init() {
+    // OCTET      = <any 8-bit sequence of data>
+    // CHAR       = <any US-ASCII character (octets 0 - 127)>
+    // CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+    // CR         = <US-ASCII CR, carriage return (13)>
+    // LF         = <US-ASCII LF, linefeed (10)>
+    // SP         = <US-ASCII SP, space (32)>
+    // HT         = <US-ASCII HT, horizontal-tab (9)>
+    // <">        = <US-ASCII double-quote mark (34)>
+    // CRLF       = CR LF
+    // LWS        = [CRLF] 1*( SP | HT )
+    // TEXT       = <any OCTET except CTLs, but including LWS>
+    // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+    //              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+    // token      = 1*<any CHAR except CTLs or separators>
+    // qdtext     = <any TEXT except <">>
+
+    for c := 0; c < 256; c++ {
+        var t octetType
+        isCtl := c <= 31 || c == 127
+        isChar := 0 <= c && c <= 127
+        isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+        if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+            t |= isSpace
+        }
+        if isChar && !isCtl && !isSeparator {
+            t |= isToken
+        }
+        octetTypes[c] = t
+    }
+}
+
+func parseAuthHeader(header http.Header) []challenge {
+    challenges := []challenge{}
+    for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+        v, p := parseValueAndParams(h)
+        if v != "" {
+            challenges = append(challenges, challenge{Scheme: v, Parameters: p})
+        }
+    }
+    return challenges
+}
+
+// NOTE: This is not a fully compliant parser per RFC 7235:
+// Most notably it does not support more than one challenge within a single header
+// Some of the whitespace parsing also seems noncompliant.
+// But it is clearly better than what we used to have…
+func parseValueAndParams(header string) (value string, params map[string]string) {
+    params = make(map[string]string)
+    value, s := expectToken(header)
+    if value == "" {
+        return
+    }
+    value = strings.ToLower(value)
+    s = "," + skipSpace(s)
+    for strings.HasPrefix(s, ",") {
+        var pkey string
+        pkey, s = expectToken(skipSpace(s[1:]))
+        if pkey == "" {
+            return
+        }
+        if !strings.HasPrefix(s, "=") {
+            return
+        }
+        var pvalue string
+        pvalue, s = expectTokenOrQuoted(s[1:])
+        if pvalue == "" {
+            return
+        }
+        pkey = strings.ToLower(pkey)
+        params[pkey] = pvalue
+        s = skipSpace(s)
+    }
+    return
+}
+
+func skipSpace(s string) (rest string) {
+    i := 0
+    for ; i < len(s); i++ {
+        if octetTypes[s[i]]&isSpace == 0 {
+            break
+        }
+    }
+    return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+    i := 0
+    for ; i < len(s); i++ {
+        if octetTypes[s[i]]&isToken == 0 {
+            break
+        }
+    }
+    return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+    if !strings.HasPrefix(s, "\"") {
+        return expectToken(s)
+    }
+    s = s[1:]
+    for i := 0; i < len(s); i++ {
+        switch s[i] {
+        case '"':
+            return s[:i], s[i+1:]
+        case '\\':
+            p := make([]byte, len(s)-1)
+            j := copy(p, s[:i])
+            escape := true
+            for i = i + 1; i < len(s); i++ {
+                b := s[i]
+                switch {
+                case escape:
+                    escape = false
+                    p[j] = b
+                    j++
+                case b == '\\':
+                    escape = true
+                case b == '"':
+                    return string(p[:j]), s[i+1:]
+                default:
+                    p[j] = b
+                    j++
+                }
+            }
+            return "", ""
+        }
+    }
+    return "", ""
+}
43 vendor/github.com/containers/image/oci/oci_dest.go (generated, vendored)
@@ -7,7 +7,6 @@ import (
     "io/ioutil"
     "os"
     "path/filepath"
-    "strings"
 
     "github.com/containers/image/manifest"
     "github.com/containers/image/types"
@@ -77,9 +76,6 @@ func createManifest(m []byte) ([]byte, string, error) {
 }
 
 func (d *ociImageDestination) PutManifest(m []byte) error {
-    if err := d.ensureParentDirectoryExists("refs"); err != nil {
-        return err
-    }
     // TODO(mitr, runcom): this breaks signatures entirely since at this point we're creating a new manifest
     // and signatures don't apply anymore. Will fix.
     ociMan, mt, err := createManifest(m)
@@ -100,21 +96,26 @@ func (d *ociImageDestination) PutManifest(m []byte) error {
         return err
     }
 
-    if err := ioutil.WriteFile(blobPath(d.ref.dir, digest), ociMan, 0644); err != nil {
+    if err := ioutil.WriteFile(d.ref.blobPath(digest), ociMan, 0644); err != nil {
         return err
     }
     // TODO(runcom): ugly here?
-    if err := ioutil.WriteFile(ociLayoutPath(d.ref.dir), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
+    if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
         return err
     }
-    return ioutil.WriteFile(descriptorPath(d.ref.dir, d.ref.tag), data, 0644)
+    descriptorPath := d.ref.descriptorPath(d.ref.tag)
+    if err := ensureParentDirectoryExists(descriptorPath); err != nil {
+        return err
+    }
+    return ioutil.WriteFile(descriptorPath, data, 0644)
 }
 
 func (d *ociImageDestination) PutBlob(digest string, stream io.Reader) error {
-    if err := d.ensureParentDirectoryExists("blobs"); err != nil {
+    blobPath := d.ref.blobPath(digest)
+    if err := ensureParentDirectoryExists(blobPath); err != nil {
         return err
     }
-    blob, err := os.Create(blobPath(d.ref.dir, digest))
+    blob, err := os.Create(blobPath)
     if err != nil {
         return err
     }
@@ -128,10 +129,11 @@ func (d *ociImageDestination) PutBlob(digest string, stream io.Reader) error {
     return nil
 }
 
-func (d *ociImageDestination) ensureParentDirectoryExists(parent string) error {
-    path := filepath.Join(d.ref.dir, parent)
-    if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
-        if err := os.MkdirAll(path, 0755); err != nil {
+// ensureParentDirectoryExists ensures the parent of the supplied path exists.
+func ensureParentDirectoryExists(path string) error {
+    parent := filepath.Dir(path)
+    if _, err := os.Stat(parent); err != nil && os.IsNotExist(err) {
+        if err := os.MkdirAll(parent, 0755); err != nil {
             return err
         }
     }
@@ -151,18 +153,3 @@ func (d *ociImageDestination) PutSignatures(signatures [][]byte) error {
     }
     return nil
 }
-
-// ociLayoutPathPath returns a path for the oci-layout within a directory using OCI conventions.
-func ociLayoutPath(dir string) string {
-    return filepath.Join(dir, "oci-layout")
-}
-
-// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
-func blobPath(dir string, digest string) string {
-    return filepath.Join(dir, "blobs", strings.Replace(digest, ":", "-", -1))
-}
-
-// descriptorPath returns a path for the manifest within a directory using OCI conventions.
-func descriptorPath(dir string, digest string) string {
-    return filepath.Join(dir, "refs", digest)
-}
22 vendor/github.com/containers/image/oci/oci_transport.go (generated, vendored)
@@ -3,6 +3,7 @@ package oci
 import (
     "errors"
     "fmt"
     "path/filepath"
     "regexp"
     "strings"
 
@@ -83,9 +84,6 @@ func ParseReference(reference string) (types.ImageReference, error) {
     } else {
         dir = reference[:sep]
         tag = reference[sep+1:]
-        if !refRegexp.MatchString(tag) {
-            return nil, fmt.Errorf("Invalid tag %s", tag)
-        }
     }
     return NewReference(dir, tag)
 }
@@ -104,6 +102,9 @@ func NewReference(dir, tag string) (types.ImageReference, error) {
     if strings.Contains(resolved, ":") {
         return nil, fmt.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, tag, resolved)
     }
+    if !refRegexp.MatchString(tag) {
+        return nil, fmt.Errorf("Invalid tag %s", tag)
+    }
     return ociReference{dir: dir, resolvedDir: resolved, tag: tag}, nil
 }
 
@@ -173,3 +174,18 @@ func (ref ociReference) NewImageSource(certPath string, tlsVerify bool) (types.I
 func (ref ociReference) NewImageDestination(certPath string, tlsVerify bool) (types.ImageDestination, error) {
     return newImageDestination(ref), nil
 }
+
+// ociLayoutPathPath returns a path for the oci-layout within a directory using OCI conventions.
+func (ref ociReference) ociLayoutPath() string {
+    return filepath.Join(ref.dir, "oci-layout")
+}
+
+// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
+func (ref ociReference) blobPath(digest string) string {
+    return filepath.Join(ref.dir, "blobs", strings.Replace(digest, ":", "-", -1))
+}
+
+// descriptorPath returns a path for the manifest within a directory using OCI conventions.
+func (ref ociReference) descriptorPath(digest string) string {
+    return filepath.Join(ref.dir, "refs", digest)
+}
117 vendor/github.com/containers/image/openshift/openshift.go (generated, vendored)
@@ -2,6 +2,7 @@ package openshift
 
 import (
     "bytes"
+    "crypto/rand"
     "encoding/json"
     "errors"
     "fmt"
@@ -126,6 +127,22 @@ func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]
     return body, nil
 }
 
+// getImage loads the specified image object.
+func (c *openshiftClient) getImage(imageStreamImageName string) (*image, error) {
+    // FIXME: validate components per validation.IsValidPathSegmentName?
+    path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName)
+    body, err := c.doRequest("GET", path, nil)
+    if err != nil {
+        return nil, err
+    }
+    // Note: This does absolutely no kind/version checking or conversions.
+    var isi imageStreamImage
+    if err := json.Unmarshal(body, &isi); err != nil {
+        return nil, err
+    }
+    return &isi.Image, nil
+}
+
 // convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use;
 // currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside.
 func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
@@ -193,7 +210,21 @@ func (s *openshiftImageSource) GetBlob(digest string) (io.ReadCloser, int64, err
 }
 
 func (s *openshiftImageSource) GetSignatures() ([][]byte, error) {
-    return nil, nil
+    if err := s.ensureImageIsResolved(); err != nil {
+        return nil, err
+    }
+
+    image, err := s.client.getImage(s.imageStreamImageName)
+    if err != nil {
+        return nil, err
+    }
+    var sigs [][]byte
+    for _, sig := range image.Signatures {
+        if sig.Type == imageSignatureTypeAtomic {
+            sigs = append(sigs, sig.Content)
+        }
+    }
+    return sigs, nil
 }
 
 // ensureImageIsResolved sets up s.docker and s.imageStreamImageName
@@ -248,6 +279,8 @@ func (s *openshiftImageSource) ensureImageIsResolved() error {
 type openshiftImageDestination struct {
     client *openshiftClient
     docker types.ImageDestination // The Docker Registry endpoint
+    // State
+    imageStreamImageName string // "" if not yet known
 }
 
 // newImageDestination creates a new ImageDestination for the specified reference and connection specification.
@@ -290,11 +323,16 @@ func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
 }
 
 func (d *openshiftImageDestination) PutManifest(m []byte) error {
+    // FIXME? Can this eventually just call d.docker.PutManifest()?
+    // Right now we need this as a skeleton to attach signatures to, and
+    // to workaround our inability to change tags when uploading v2s1 manifests.
+
     // Note: This does absolutely no kind/version checking or conversions.
     manifestDigest, err := manifest.Digest(m)
     if err != nil {
         return err
     }
+    d.imageStreamImageName = manifestDigest
     // FIXME: We can't do what respositorymiddleware.go does because we don't know the internal address. Does any of this matter?
    dockerImageReference := fmt.Sprintf("%s/%s/%s@%s", d.client.dockerRegistryHostPart(), d.client.ref.namespace, d.client.ref.stream, manifestDigest)
     ism := imageStreamMapping{
@@ -327,7 +365,7 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error {
         return err
     }
 
-    return d.docker.PutManifest(m)
+    return nil
 }
 
 func (d *openshiftImageDestination) PutBlob(digest string, stream io.Reader) error {
@@ -335,9 +373,64 @@ func (d *openshiftImageDestination) PutBlob(digest string, stream io.Reader) err
 }
 
 func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error {
-    if len(signatures) != 0 {
-        return fmt.Errorf("Pushing signatures to an Atomic Registry is not supported")
+    // FIXME: This assumption that signatures are stored after the manifest rather breaks the model.
+    if d.imageStreamImageName == "" {
+        return fmt.Errorf("Unknown manifest digest, can't add signatures")
     }
+    // Because image signatures are a shared resource in Atomic Registry, the default upload
+    // always adds signatures. Eventually we should also allow removing signatures.
+
+    if len(signatures) == 0 {
+        return nil // No need to even read the old state.
+    }
+
+    image, err := d.client.getImage(d.imageStreamImageName)
+    if err != nil {
+        return err
+    }
+    existingSigNames := map[string]struct{}{}
+    for _, sig := range image.Signatures {
+        existingSigNames[sig.objectMeta.Name] = struct{}{}
+    }
+
+sigExists:
+    for _, newSig := range signatures {
+        for _, existingSig := range image.Signatures {
+            if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+                continue sigExists
+            }
+        }
+
+        // The API expect us to invent a new unique name. This is racy, but hopefully good enough.
+        var signatureName string
+        for {
+            randBytes := make([]byte, 16)
+            n, err := rand.Read(randBytes)
+            if err != nil || n != 16 {
+                return fmt.Errorf("Error generating random signature ID: %v, len %d", err, n)
+            }
+            signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
+            if _, ok := existingSigNames[signatureName]; !ok {
+                break
+            }
+        }
+        // Note: This does absolutely no kind/version checking or conversions.
+        sig := imageSignature{
+            typeMeta: typeMeta{
+                Kind:       "ImageSignature",
+                APIVersion: "v1",
+            },
+            objectMeta: objectMeta{Name: signatureName},
+            Type:       imageSignatureTypeAtomic,
+            Content:    newSig,
+        }
+        body, err := json.Marshal(sig)
+        _, err = d.client.doRequest("POST", "/oapi/v1/imagesignatures", body)
+        if err != nil {
+            return err
+        }
+    }
+
     return nil
 }
 
@@ -367,6 +460,22 @@ type image struct {
     DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"`
     DockerImageManifest        string `json:"dockerImageManifest,omitempty"`
     // DockerImageLayers []ImageLayer `json:"dockerImageLayers"`
+    Signatures []imageSignature `json:"signatures,omitempty"`
 }
 
+const imageSignatureTypeAtomic string = "atomic"
+
+type imageSignature struct {
+    typeMeta   `json:",inline"`
+    objectMeta `json:"metadata,omitempty"`
+    Type       string `json:"type"`
+    Content    []byte `json:"content"`
+    // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+    // ImageIdentity string `json:"imageIdentity,omitempty"`
+    // SignedClaims map[string]string `json:"signedClaims,omitempty"`
+    // Created *unversioned.Time `json:"created,omitempty"`
+    // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"`
+    // IssuedTo SignatureSubject `json:"issuedTo,omitempty"`
+}
 type imageStreamMapping struct {
     typeMeta `json:",inline"`