Mirror of https://github.com/containers/skopeo.git

Compare commits (85 commits)
Commits in this range, by abbreviated SHA1:

37727a45f9, 75d94e790c, 1fe8da63a9, 737ed9c2a4, 39a4475cf3, 3c286dd1d1, b8b0e9937b, 437d33ec9a, d9035db615, 198842bbec, 916a395d82, 89acf46019, 8960ab3ce7, 145304b7cf, e534472e7d, d9d3ceca45, 23a4605742, 4811c07d71, 15b38112b1, 4ef35a385a, 38ae81fa03, e4297e3b30, 9b09b6eb87, 45ed92ce0c, c233a6dcb1, e0f0869151, e6802c4df4, 2b910649b9, 808717862b, f45ae950aa, 3bc062423e, d0d7d97f9c, 89cd19519f, 3e973e1aa2, 7f6b0e39d0, cc2445de81, f6bf57460d, a9cc9b9133, 91cd3510eb, ac7edc7d10, 92b1eec64c, c819bc1754, 6a2f38d66c, 2019b79c7f, f79cc8aeda, 0bfe297fc1, ac4c291f76, d2837c9e56, 5aaf3a9e4c, 0c4a9cc684, bd524670b1, 693de29e37, f44ee2f80a, a71900996f, a26578178b, 7ba56f3f7a, 0f701726bd, 91ad8c39c6, ad3e8f407d, 0703ec6ce8, 3e2defd6d3, 5200272846, 43eab90b36, 0ad25b2d33, 22d187181b, 8cbfcc820a, 8539d21152, 370be7e777, 95e17ed1e0, 73edfb8216, 49084d2cd8, 8b904e908e, 23183072fb, 3be97ce281, b46506c077, 49d9fa9faf, 77363128e1, 59a452276b, 0f363498c2, a2dccca2e6, 27b77f2bde, 6eda759dd2, de71408294, 13cd098079, 697ef59525
```diff
@@ -23,12 +23,12 @@ env:
     ####
     #### Cache-image names to test with (double-quotes around names are critical)
     ####
-    FEDORA_NAME: "fedora-35"
-    PRIOR_FEDORA_NAME: "fedora-34"
+    FEDORA_NAME: "fedora-36"
+    PRIOR_FEDORA_NAME: "fedora-35"
     UBUNTU_NAME: "ubuntu-2110"

     # Google-cloud VM Images
-    IMAGE_SUFFIX: "c6226133906620416"
+    IMAGE_SUFFIX: "c4955393725038592"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
     UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
@@ -90,7 +90,7 @@ osx_task:
         export PATH=$GOPATH/bin:$PATH
         brew update
         brew install gpgme go go-md2man
-        go get -u golang.org/x/lint/golint
+        go install golang.org/x/lint/golint@latest
     test_script: |
         export PATH=$GOPATH/bin:$PATH
         go version
```
```diff
@@ -4,7 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
+	"os"
 	"strings"

 	commonFlag "github.com/containers/common/pkg/flag"
@@ -30,6 +30,7 @@ type copyOptions struct {
 	removeSignatures   bool                      // Do not copy signatures from the source image
 	signByFingerprint  string                    // Sign the image using a GPG key with the specified fingerprint
 	signPassphraseFile string                    // Path pointing to a passphrase file when signing
+	signIdentity       string                    // Identity of the signed image, must be a fully specified docker reference
 	digestFile         string                    // Write digest to this file
 	format             commonFlag.OptionalString // Force conversion of the image to a specified format
 	quiet              bool                      // Suppress output information when copying images
@@ -81,6 +82,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
 	flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
 	flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
 	flags.StringVar(&opts.signPassphraseFile, "sign-passphrase-file", "", "File that contains a passphrase for the --sign-by key")
+	flags.StringVar(&opts.signIdentity, "sign-identity", "", "Identity of signed image, must be a fully specified docker reference. Defaults to the target docker reference.")
 	flags.StringVar(&opts.digestFile, "digestfile", "", "Write the digest of the pushed image to the specified file")
 	flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)`)
 	flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", []string{}, "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
@@ -110,7 +112,7 @@ func parseMultiArch(multiArch string) (copy.ImageListSelection, error) {
 	}
 }

-func (opts *copyOptions) run(args []string, stdout io.Writer) error {
+func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
 	if len(args) != 2 {
 		return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
 	}
@@ -125,7 +127,11 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
 	if err != nil {
 		return fmt.Errorf("Error loading trust policy: %v", err)
 	}
-	defer policyContext.Destroy()
+	defer func() {
+		if err := policyContext.Destroy(); err != nil {
+			retErr = fmt.Errorf("(error tearing down policy context: %v): %w", err, retErr)
+		}
+	}()

 	srcRef, err := alltransports.ParseImageName(imageNames[0])
 	if err != nil {
```
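The `run` signature change above (a named `retErr` return plus a closure in the `defer`) is the standard Go idiom for propagating cleanup failures that the bare `defer policyContext.Destroy()` used to swallow. A minimal, self-contained sketch of the pattern; the `closer` type here is illustrative, not skopeo code:

```go
package main

import (
	"errors"
	"fmt"
)

// closer stands in for any resource whose cleanup can fail, like
// signature.PolicyContext.Destroy in the hunk above.
type closer struct{}

func (c *closer) Destroy() error { return errors.New("teardown failed") }

// run names its return value so the deferred cleanup can wrap the
// error the body produced instead of silently discarding its own.
func run() (retErr error) {
	res := &closer{}
	defer func() {
		if err := res.Destroy(); err != nil {
			retErr = fmt.Errorf("(error tearing down: %v): %w", err, retErr)
		}
	}()
	// ... body work; any `return err` here is still visible to the defer ...
	return nil
}

func main() {
	fmt.Println(run()) // prints the wrapped teardown error
}
```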
```diff
@@ -227,11 +233,20 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
 		return err
 	}

+	var signIdentity reference.Named = nil
+	if opts.signIdentity != "" {
+		signIdentity, err = reference.ParseNamed(opts.signIdentity)
+		if err != nil {
+			return fmt.Errorf("Could not parse --sign-identity: %v", err)
+		}
+	}
+
 	return retry.RetryIfNecessary(ctx, func() error {
 		manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
 			RemoveSignatures: opts.removeSignatures,
 			SignBy:           opts.signByFingerprint,
 			SignPassphrase:   passphrase,
+			SignIdentity:     signIdentity,
 			ReportWriter:     stdout,
 			SourceCtx:        sourceCtx,
 			DestinationCtx:   destinationCtx,
@@ -250,7 +265,7 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
 		if err != nil {
 			return err
 		}
-		if err = ioutil.WriteFile(opts.digestFile, []byte(manifestDigest.String()), 0644); err != nil {
+		if err = os.WriteFile(opts.digestFile, []byte(manifestDigest.String()), 0644); err != nil {
 			return fmt.Errorf("Failed to write digest to file %q: %w", opts.digestFile, err)
 		}
 	}
```
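Many hunks in this compare are the same mechanical migration: Go 1.16 deprecated `io/ioutil` and moved its helpers into `os`. A sketch of the mapping used throughout; the file names in it are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// Go 1.16 moved io/ioutil's helpers into os:
//   ioutil.ReadFile  -> os.ReadFile
//   ioutil.WriteFile -> os.WriteFile
//   ioutil.TempDir   -> os.MkdirTemp
//   ioutil.TempFile  -> os.CreateTemp
func main() {
	dir, err := os.MkdirTemp("", "skopeo-demo-") // was ioutil.TempDir
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "digest.txt")
	if err := os.WriteFile(path, []byte("sha256:example"), 0644); err != nil { // was ioutil.WriteFile
		log.Fatal(err)
	}
	data, err := os.ReadFile(path) // was ioutil.ReadFile
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", data)
}
```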
```diff
@@ -3,7 +3,6 @@ package main
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"strings"
@@ -122,7 +121,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
 		}
 	}

-	tmpDir, err := ioutil.TempDir(".", "layers-")
+	tmpDir, err := os.MkdirTemp(".", "layers-")
 	if err != nil {
 		return err
 	}
```
```diff
@@ -5,10 +5,12 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"sort"
 	"strings"

 	"github.com/containers/common/pkg/retry"
 	"github.com/containers/image/v5/docker"
+	"github.com/containers/image/v5/docker/archive"
 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/transports/alltransports"
 	"github.com/containers/image/v5/types"
@@ -18,7 +20,7 @@ import (

 // tagListOutput is the output format of (skopeo list-tags), primarily so that we can format it with a simple json.MarshalIndent.
 type tagListOutput struct {
-	Repository string
+	Repository string `json:",omitempty"`
 	Tags       []string
 }
@@ -28,6 +30,21 @@ type tagsOptions struct {
 	retryOpts *retry.RetryOptions
 }

+var transportHandlers = map[string]func(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error){
+	docker.Transport.Name():  listDockerRepoTags,
+	archive.Transport.Name(): listDockerArchiveTags,
+}
+
+// supportedTransports returns all the supported transports
+func supportedTransports(joinStr string) string {
+	res := make([]string, 0, len(transportHandlers))
+	for handlerName := range transportHandlers {
+		res = append(res, handlerName)
+	}
+	sort.Strings(res)
+	return strings.Join(res, joinStr)
+}
+
 func tagsCmd(global *globalOptions) *cobra.Command {
 	sharedFlags, sharedOpts := sharedImageFlags()
 	imageFlags, imageOpts := dockerImageFlags(global, sharedOpts, nil, "", "")
```
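The `transportHandlers` table above replaces a hard-coded docker-only check with map-based dispatch, and `supportedTransports` derives the user-facing help and error text from the same map, so the two cannot drift apart. A condensed, self-contained sketch of that shape; the signatures are simplified, since the real handlers also take a `context.Context` and a `*types.SystemContext`:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// handler is a per-transport tag-listing function (simplified).
type handler func(userInput string) (repo string, tags []string, err error)

var handlers = map[string]handler{
	"docker":         func(in string) (string, []string, error) { return in, []string{"latest"}, nil },
	"docker-archive": func(in string) (string, []string, error) { return in, []string{"@0"}, nil },
}

// supported derives the user-facing list from the dispatch table itself.
func supported(sep string) string {
	names := make([]string, 0, len(handlers))
	for n := range handlers {
		names = append(names, n)
	}
	sort.Strings(names)
	return strings.Join(names, sep)
}

func list(transport, input string) error {
	h, ok := handlers[transport]
	if !ok {
		return fmt.Errorf("unsupported transport %q; supported: %s", transport, supported(", "))
	}
	repo, tags, err := h(input)
	if err != nil {
		return err
	}
	fmt.Println(repo, tags)
	return nil
}

func main() {
	_ = list("docker", "docker.io/library/busybox")
	fmt.Println(list("oci", "oci:/tmp/img")) // falls into the error branch
}
```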
```diff
@@ -38,13 +55,14 @@ func tagsCmd(global *globalOptions) *cobra.Command {
 		image:     imageOpts,
 		retryOpts: retryOpts,
 	}

 	cmd := &cobra.Command{
-		Use:   "list-tags [command options] REPOSITORY-NAME",
-		Short: "List tags in the transport/repository specified by the REPOSITORY-NAME",
-		Long: `Return the list of tags from the transport/repository "REPOSITORY-NAME"
+		Use:   "list-tags [command options] SOURCE-IMAGE",
+		Short: "List tags in the transport/repository specified by the SOURCE-IMAGE",
+		Long: `Return the list of tags from the transport/repository "SOURCE-IMAGE"

 Supported transports:
-docker
+` + supportedTransports(" ") + `

 See skopeo-list-tags(1) section "REPOSITORY NAMES" for the expected format
 `,
@@ -95,6 +113,58 @@ func listDockerTags(ctx context.Context, sys *types.SystemContext, imgRef types.ImageReference)
 	return repositoryName, tags, nil
 }

+// return the tagLists from a docker repo
+func listDockerRepoTags(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error) {
+	// Do transport-specific parsing and validation to get an image reference
+	imgRef, err := parseDockerRepositoryReference(userInput)
+	if err != nil {
+		return
+	}
+	if err = retry.RetryIfNecessary(ctx, func() error {
+		repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
+		return err
+	}, opts.retryOpts); err != nil {
+		return
+	}
+	return
+}
+
+// return the tagLists from a docker archive file
+func listDockerArchiveTags(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error) {
+	ref, err := alltransports.ParseImageName(userInput)
+	if err != nil {
+		return
+	}
+
+	tarReader, _, err := archive.NewReaderForReference(sys, ref)
+	if err != nil {
+		return
+	}
+	defer tarReader.Close()
+
+	imageRefs, err := tarReader.List()
+	if err != nil {
+		return
+	}
+
+	var repoTags []string
+	for imageIndex, items := range imageRefs {
+		for _, ref := range items {
+			repoTags, err = tarReader.ManifestTagsForReference(ref)
+			if err != nil {
+				return
+			}
+			// handle for each untagged image
+			if len(repoTags) == 0 {
+				repoTags = []string{fmt.Sprintf("@%d", imageIndex)}
+			}
+			tagListing = append(tagListing, repoTags...)
+		}
+	}
+
+	return
+}
+
 func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
 	ctx, cancel := opts.global.commandTimeoutContext()
 	defer cancel()
@@ -113,23 +183,17 @@ func (opts *tagsOptions) run(args []string, stdout io.Writer) (retErr error) {
 		return fmt.Errorf("Invalid %q: does not specify a transport", args[0])
 	}

-	if transport.Name() != docker.Transport.Name() {
-		return fmt.Errorf("Unsupported transport '%v' for tag listing. Only '%v' currently supported", transport.Name(), docker.Transport.Name())
-	}
-
-	// Do transport-specific parsing and validation to get an image reference
-	imgRef, err := parseDockerRepositoryReference(args[0])
-	if err != nil {
-		return err
-	}
-
 	var repositoryName string
 	var tagListing []string
-	if err = retry.RetryIfNecessary(ctx, func() error {
-		repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
-		return err
-	}, opts.retryOpts); err != nil {
-		return err
+
+	if val, ok := transportHandlers[transport.Name()]; ok {
+		repositoryName, tagListing, err = val(ctx, sys, opts, args[0])
+		if err != nil {
+			return err
+		}
+	} else {
+		return fmt.Errorf("Unsupported transport '%s' for tag listing. Only supported: %s",
+			transport.Name(), supportedTransports(", "))
 	}

 	outputData := tagListOutput{
```
```diff
@@ -4,7 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
+	"os"

 	"github.com/containers/image/v5/manifest"
 	"github.com/spf13/cobra"
@@ -31,7 +31,7 @@ func (opts *manifestDigestOptions) run(args []string, stdout io.Writer) error {
 	}
 	manifestPath := args[0]

-	man, err := ioutil.ReadFile(manifestPath)
+	man, err := os.ReadFile(manifestPath)
 	if err != nil {
 		return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
 	}
```
```diff
@@ -98,7 +98,7 @@ const maxMsgSize = 32 * 1024
 // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
 // We hard error if the input JSON numbers we expect to be
 // integers are above this.
-const maxJSONFloat = float64(1<<53 - 1)
+const maxJSONFloat = float64(uint64(1)<<53 - 1)

 // request is the JSON serialization of a function call
 type request struct {
```
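2^53 - 1 here is JavaScript's `Number.MAX_SAFE_INTEGER`: the largest integer a float64 can hold exactly, which matters because `encoding/json` decodes untyped JSON numbers into float64. A sketch of the kind of bounds check this constant supports; `asExactUint` is a hypothetical helper, not the proxy's actual code:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

// maxJSONFloat mirrors the constant in the diff: 2^53-1 is the
// largest integer that a float64 can represent without rounding.
const maxJSONFloat = float64(uint64(1)<<53 - 1)

// asExactUint rejects values that may already have lost precision:
// encoding/json gives us float64 for any JSON number decoded into
// an interface{} target.
func asExactUint(v interface{}) (uint64, error) {
	f, ok := v.(float64)
	if !ok {
		return 0, errors.New("not a number")
	}
	if f < 0 || f > maxJSONFloat || f != math.Trunc(f) {
		return 0, fmt.Errorf("%v is not a safely-representable integer", f)
	}
	return uint64(f), nil
}

func main() {
	fmt.Println(asExactUint(float64(42)))      // 42 <nil>
	fmt.Println(asExactUint(float64(1 << 60))) // error: beyond 2^53-1
}
```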
```diff
@@ -664,7 +664,14 @@ func proxyCmd(global *globalOptions) *cobra.Command {
 // processRequest dispatches a remote request.
 // replyBuf is the result of the invocation.
 // terminate should be true if processing of requests should halt.
-func (h *proxyHandler) processRequest(req request) (rb replyBuf, terminate bool, err error) {
+func (h *proxyHandler) processRequest(readBytes []byte) (rb replyBuf, terminate bool, err error) {
+	var req request
+
+	// Parse the request JSON
+	if err = json.Unmarshal(readBytes, &req); err != nil {
+		err = fmt.Errorf("invalid request: %v", err)
+		return
+	}
 	// Dispatch on the method
 	switch req.Method {
 	case "Initialize":
@@ -717,18 +724,15 @@ func (opts *proxyOptions) run(args []string, stdout io.Writer) error {
 			}
 			return fmt.Errorf("reading socket: %v", err)
 		}
-		// Parse the request JSON
 		readbuf := buf[0:n]
-		var req request
-		if err := json.Unmarshal(readbuf, &req); err != nil {
-			rb := replyBuf{}
-			rb.send(conn, fmt.Errorf("invalid request: %v", err))
-		}

-		rb, terminate, err := handler.processRequest(req)
+		rb, terminate, err := handler.processRequest(readbuf)
 		if terminate {
 			return nil
 		}
-		rb.send(conn, err)
+
+		if err := rb.send(conn, err); err != nil {
+			return fmt.Errorf("writing to socket: %w", err)
+		}
 	}
 }
```
```diff
@@ -5,7 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
+	"os"

 	"github.com/containers/image/v5/pkg/cli"
 	"github.com/containers/image/v5/signature"
@@ -39,7 +39,7 @@ func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
 	dockerReference := args[1]
 	fingerprint := args[2]

-	manifest, err := ioutil.ReadFile(manifestPath)
+	manifest, err := os.ReadFile(manifestPath)
 	if err != nil {
 		return fmt.Errorf("Error reading %s: %v", manifestPath, err)
 	}
@@ -60,7 +60,7 @@ func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
 		return fmt.Errorf("Error creating signature: %v", err)
 	}

-	if err := ioutil.WriteFile(opts.output, signature, 0644); err != nil {
+	if err := os.WriteFile(opts.output, signature, 0644); err != nil {
 		return fmt.Errorf("Error writing signature to %s: %v", opts.output, err)
 	}
 	return nil
@@ -89,11 +89,11 @@ func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error
 	expectedFingerprint := args[2]
 	signaturePath := args[3]

-	unverifiedManifest, err := ioutil.ReadFile(manifestPath)
+	unverifiedManifest, err := os.ReadFile(manifestPath)
 	if err != nil {
 		return fmt.Errorf("Error reading manifest from %s: %v", manifestPath, err)
 	}
-	unverifiedSignature, err := ioutil.ReadFile(signaturePath)
+	unverifiedSignature, err := os.ReadFile(signaturePath)
 	if err != nil {
 		return fmt.Errorf("Error reading signature from %s: %v", signaturePath, err)
 	}
@@ -139,7 +139,7 @@ func (opts *untrustedSignatureDumpOptions) run(args []string, stdout io.Writer)
 	}
 	untrustedSignaturePath := args[0]

-	untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
+	untrustedSignature, err := os.ReadFile(untrustedSignaturePath)
 	if err != nil {
 		return fmt.Errorf("Error reading untrusted signature from %s: %v", untrustedSignaturePath, err)
 	}
```
```diff
@@ -2,7 +2,6 @@ package main

 import (
 	"encoding/json"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
@@ -25,9 +24,8 @@ const (
 // Test that results of runSkopeo failed with nothing on stdout, and substring
 // within the error message.
 func assertTestFailed(t *testing.T, stdout string, err error, substring string) {
-	assert.Error(t, err)
+	assert.ErrorContains(t, err, substring)
 	assert.Empty(t, stdout)
-	assert.Contains(t, err.Error(), substring)
 }

 func TestStandaloneSign(t *testing.T) {
```
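`assert.ErrorContains`, available once go.mod moves to testify v1.7.1 (see below), folds the former `assert.Error` plus `assert.Contains(t, err.Error(), …)` pair into one call, and unlike calling `err.Error()` directly it fails cleanly rather than panicking when `err` is unexpectedly nil. A minimal usage sketch:

```go
package demo

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func doWork() error {
	return fmt.Errorf("opening config: %w", errors.New("permission denied"))
}

func TestDoWork(t *testing.T) {
	err := doWork()
	// One assertion replaces assert.Error + assert.Contains(err.Error(), ...),
	// and it is nil-safe: a nil err produces a test failure, not a panic.
	assert.ErrorContains(t, err, "permission denied")
}
```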
```diff
@@ -78,7 +76,7 @@ func TestStandaloneSign(t *testing.T) {
 	assertTestFailed(t, out, err, "/dev/full")

 	// Success
-	sigOutput, err := ioutil.TempFile("", "sig")
+	sigOutput, err := os.CreateTemp("", "sig")
 	require.NoError(t, err)
 	defer os.Remove(sigOutput.Name())
 	out, err = runSkopeo("standalone-sign", "-o", sigOutput.Name(),
@@ -86,9 +84,9 @@ func TestStandaloneSign(t *testing.T) {
 	require.NoError(t, err)
 	assert.Empty(t, out)

-	sig, err := ioutil.ReadFile(sigOutput.Name())
+	sig, err := os.ReadFile(sigOutput.Name())
 	require.NoError(t, err)
-	manifest, err := ioutil.ReadFile(manifestPath)
+	manifest, err := os.ReadFile(manifestPath)
 	require.NoError(t, err)
 	mech, err = signature.NewGPGSigningMechanism()
 	require.NoError(t, err)
```
```diff
@@ -4,7 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
+	"io/fs"
 	"os"
 	"path"
 	"path/filepath"
@@ -42,6 +42,7 @@ type syncOptions struct {
 	destination     string // Destination registry name
 	scoped          bool   // When true, namespace copied images at destination using the source repository name
 	all             bool   // Copy all of the images if an image in the source is a list
+	dryRun          bool   // Don't actually copy anything, just output what it would have done
 	preserveDigests bool   // Preserve digests during sync
 	keepGoing       bool   // Whether or not to abort the sync if there are any errors during syncing the images
 }
@@ -110,6 +111,7 @@ See skopeo-sync(1) for details.
 	flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
 	flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefix using the full source image path as scope")
 	flags.BoolVarP(&opts.all, "all", "a", false, "Copy all images if SOURCE-IMAGE is a list")
+	flags.BoolVar(&opts.dryRun, "dry-run", false, "Run without actually copying data")
 	flags.BoolVar(&opts.preserveDigests, "preserve-digests", false, "Preserve digests of images and lists")
 	flags.BoolVarP(&opts.keepGoing, "keep-going", "", false, "Do not abort the sync if any image copy fails")
 	flags.AddFlagSet(&sharedFlags)
@@ -138,7 +140,7 @@ func (tls *tlsVerifyConfig) UnmarshalYAML(unmarshal func(interface{}) error) error {
 // It returns a new unmarshaled sourceConfig object and any error encountered.
 func newSourceConfig(yamlFile string) (sourceConfig, error) {
 	var cfg sourceConfig
-	source, err := ioutil.ReadFile(yamlFile)
+	source, err := os.ReadFile(yamlFile)
 	if err != nil {
 		return cfg, err
 	}
@@ -257,11 +259,11 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
 // and any error encountered.
 func imagesToCopyFromDir(dirPath string) ([]types.ImageReference, error) {
 	var sourceReferences []types.ImageReference
-	err := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {
+	err := filepath.WalkDir(dirPath, func(path string, d fs.DirEntry, err error) error {
 		if err != nil {
 			return err
 		}
-		if !info.IsDir() && info.Name() == "manifest.json" {
+		if !d.IsDir() && d.Name() == "manifest.json" {
 			dirname := filepath.Dir(path)
 			ref, err := directory.Transport.ParseReference(dirname)
 			if err != nil {
```
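`filepath.WalkDir` (Go 1.16) hands the callback an `fs.DirEntry` taken straight from the directory read, avoiding the extra per-entry `os.Lstat` that `filepath.Walk` performs to build an `os.FileInfo`. A self-contained sketch mirroring the traversal above:

```go
package main

import (
	"fmt"
	"io/fs"
	"log"
	"path/filepath"
)

// findManifests walks root and collects every directory containing
// a manifest.json, the same shape as imagesToCopyFromDir above.
func findManifests(root string) ([]string, error) {
	var dirs []string
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err // propagate errors reading a directory
		}
		// d comes from the directory listing itself, so name and
		// type checks need no additional stat call per file.
		if !d.IsDir() && d.Name() == "manifest.json" {
			dirs = append(dirs, filepath.Dir(path))
		}
		return nil
	})
	return dirs, err
}

func main() {
	dirs, err := findManifests(".")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dirs)
}
```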
```diff
@@ -500,7 +502,7 @@ func imagesToCopy(source string, transport string, sourceCtx *types.SystemContext)
 	return descriptors, nil
 }

-func (opts *syncOptions) run(args []string, stdout io.Writer) error {
+func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
 	if len(args) != 2 {
 		return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
 	}
@@ -510,7 +512,11 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
 	if err != nil {
 		return errors.Wrapf(err, "Error loading trust policy")
 	}
-	defer policyContext.Destroy()
+	defer func() {
+		if err := policyContext.Destroy(); err != nil {
+			retErr = fmt.Errorf("(error tearing down policy context: %v): %w", err, retErr)
+		}
+	}()

 	// validate source and destination options
 	contains := func(val string, list []string) (_ bool) {
@@ -593,6 +599,10 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
 	}
 	errorsPresent := false
 	imagesNumber := 0
+	if opts.dryRun {
+		logrus.Warn("Running in dry-run mode")
+	}
+
 	for _, srcRepo := range srcRepoList {
 		options.SourceCtx = srcRepo.Context
 		for counter, ref := range srcRepo.ImageRefs {
@@ -619,29 +629,37 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
 				return err
 			}

-			logrus.WithFields(logrus.Fields{
+			fromToFields := logrus.Fields{
 				"from": transports.ImageName(ref),
 				"to":   transports.ImageName(destRef),
-			}).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
-
-			if err = retry.RetryIfNecessary(ctx, func() error {
-				_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
-				return err
-			}, opts.retryOpts); err != nil {
-				if !opts.keepGoing {
-					return errors.Wrapf(err, "Error copying ref %q", transports.ImageName(ref))
+			}
+			if opts.dryRun {
+				logrus.WithFields(fromToFields).Infof("Would have copied image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
+			} else {
+				logrus.WithFields(fromToFields).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
+				if err = retry.RetryIfNecessary(ctx, func() error {
+					_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
+					return err
+				}, opts.retryOpts); err != nil {
+					if !opts.keepGoing {
+						return errors.Wrapf(err, "Error copying ref %q", transports.ImageName(ref))
+					}
+					// log the error, keep a note that there was a failure and move on to the next
+					// image ref
+					errorsPresent = true
+					logrus.WithError(err).Errorf("Error copying ref %q", transports.ImageName(ref))
+					continue
 				}
-				// log the error, keep a note that there was a failure and move on to the next
-				// image ref
-				errorsPresent = true
-				logrus.WithError(err).Errorf("Error copying ref %q", transports.ImageName(ref))
-				continue
 			}
 			imagesNumber++
 		}
 	}

-	logrus.Infof("Synced %d images from %d sources", imagesNumber, len(srcRepoList))
+	if opts.dryRun {
+		logrus.Infof("Would have synced %d images from %d sources", imagesNumber, len(srcRepoList))
+	} else {
+		logrus.Infof("Synced %d images from %d sources", imagesNumber, len(srcRepoList))
+	}
 	if !errorsPresent {
 		return nil
 	}
```
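The dry-run support above keeps one discipline: the work plan (destination refs, counters, log fields) is computed exactly as in a real run, and only the side-effecting `copy.Image` call is gated, so a dry run reports precisely what a real run would attempt. A condensed sketch of that shape, with hypothetical names:

```go
package main

import "log"

// syncOne stands in for the copy.Image call in the diff above.
func syncOne(from, to string) error {
	log.Printf("copied %s -> %s", from, to)
	return nil
}

// syncAll builds the plan identically in both modes and gates only
// the side effect; the returned count is comparable across modes.
func syncAll(pairs [][2]string, dryRun bool) (int, error) {
	n := 0
	for _, p := range pairs {
		if dryRun {
			log.Printf("would have copied %s -> %s", p[0], p[1])
		} else if err := syncOne(p[0], p[1]); err != nil {
			return n, err
		}
		n++
	}
	return n, nil
}

func main() {
	pairs := [][2]string{{"docker://example.com/a:1", "dir:/tmp/a"}}
	if _, err := syncAll(pairs, true); err != nil {
		log.Fatal(err)
	}
}
```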
```diff
@@ -34,7 +34,7 @@ func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd *cobra.Command, args []string) error {
 	return func(c *cobra.Command, args []string) error {
 		err := handler(args, c.OutOrStdout())
 		if _, ok := err.(errorShouldDisplayUsage); ok {
-			c.Help()
+			return c.Help()
 		}
 		return err
 	}
```
```diff
@@ -43,6 +43,7 @@ _skopeo_copy() {
      --multi-arch
      --sign-by
      --sign-passphrase-file
+     --sign-identity
      --src-creds --screds
      --src-cert-dir
      --src-tls-verify
@@ -106,6 +107,7 @@ _skopeo_sync() {
      --all
      --dest-no-creds
      --dest-tls-verify
+     --dry-run
      --remove-signatures
      --scoped
      --src-no-creds
```
```diff
@@ -30,12 +30,6 @@ else
     ) > /dev/stderr
 fi

-OS_RELEASE_ID="$(source /etc/os-release; echo $ID)"
-# GCE image-name compatible string representation of distribution _major_ version
-OS_RELEASE_VER="$(source /etc/os-release; echo $VERSION_ID | tr -d '.')"
-# Combined to ease some usage
-OS_REL_VER="${OS_RELEASE_ID}-${OS_RELEASE_VER}"
-
 # This is the magic interpreted by the tests to allow modifying local config/services.
 SKOPEO_CONTAINER_TESTS=1
@@ -64,6 +58,10 @@ _run_setup() {
     # Required for testing the SIF transport
    dnf install -y fakeroot squashfs-tools

+    msg "Removing systemd-resolved from nsswitch.conf"
+    # /etc/resolv.conf is already set to bypass systemd-resolvd
+    sed -i -r -e 's/^(hosts.+)resolve.+dns/\1dns/' /etc/nsswitch.conf
+
     # A slew of compiled binaries are pre-built and distributed
     # within the CI/Dev container image, but we want to run
     # things directly on the host VM. Fortunately they're all
```
```diff
@@ -18,7 +18,7 @@ default to `/`.

 The container images are:

-  * `quay.io/containers/skopeo:<version>` and `quay.io/skopeo/stable:<version>` -
+  * `quay.io/containers/skopeo:v<version>` and `quay.io/skopeo/stable:v<version>` -
     These images are built when a new Skopeo version becomes available in
     Fedora. These images are intended to be unchanging and stable, they will
     never be updated by automation once they've been pushed. For build details,
```
```diff
@@ -70,7 +70,7 @@ MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source)

 Print usage statement

-**--multi-arch**
+**--multi-arch** _option_

 Control what is copied if _source-image_ refers to a multi-architecture image. Default is system.

@@ -89,14 +89,18 @@ Suppress output information when copying images.

 Do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

-**--sign-by**=_key-id_
+**--sign-by** _key-id_

 Add a signature using that key ID for an image name corresponding to _destination-image_

-**--sign-passphrase-file**=_path_
+**--sign-passphrase-file** _path_

 The passphrase to use when signing with the key ID from `--sign-by`. Only the first line will be read. A passphrase stored in a file is of questionable security if other users can read this file. Do not use this option if at all avoidable.

+**--sign-identity** _reference_
+
+The identity to use when signing the image. The identity must be a fully specified docker reference. If the identity is not specified, the target docker reference will be used.
+
 **--src-shared-blob-dir** _directory_

 Directory to use to share blobs across OCI repositories.
```
````diff
@@ -6,17 +6,27 @@ skopeo\-delete - Mark the _image-name_ for later deletion by the registry's garbage collector.
 ## SYNOPSIS
 **skopeo delete** [*options*] _image-name_

-Mark _image-name_ for deletion. To release the allocated disk space, you must login to the container registry server and execute the container registry garbage collector. E.g.,
+## DESCRIPTION
+
+Mark _image-name_ for deletion.
+The effect of this is registry-specific; many registries don’t support this operation, or don’t allow it in some circumstances / configurations.
+
+**WARNING**: If _image-name_ contains a digest, this affects the referenced manifest, and may delete all tags (within the current repository?) pointing to that manifest.
+
+**WARNING**: If _image-name_ contains a tag (but not a digest), in the current version of Skopeo this resolves the tag into a digest, and then deletes the manifest by digest, as described above (possibly deleting all tags pointing to that manifest, not just the provided tag). This behavior may change in the future.
+
+When using the github.com/distribution/distribution registry server:
+To release the allocated disk space, you must login to the container registry server and execute the container registry garbage collector. E.g.,

 ```
 /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
 ```
 Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml

 If you are running the container registry inside of a container you would execute something like:

 ```
 $ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
 ```

 ## OPTIONS
@@ -76,7 +86,7 @@ The password to access the registry.

 Mark image example/pause for deletion from the registry.example.com registry:
 ```sh
-$ skopeo delete --force docker://registry.example.com/example/pause:latest
+$ skopeo delete docker://registry.example.com/example/pause:latest
 ```
 See above for additional details on using the command **delete**.
````
```diff
@@ -1,14 +1,14 @@
 % skopeo-list-tags(1)

 ## NAME
-skopeo\-list\-tags - List tags in the transport-specific image repository.
+skopeo\-list\-tags - List image names in a transport-specific collection of images.

 ## SYNOPSIS
-**skopeo list-tags** [*options*] _repository-name_
+**skopeo list-tags** [*options*] _source-image_

-Return a list of tags from _repository-name_ in a registry.
+Return a list of tags from _source-image_ in a registry or a local docker-archive file.

-_repository-name_ name of repository to retrieve tag listing from
+_source-image_ name of the repository to retrieve a tag listing from, or a local docker-archive file.

 ## OPTIONS

@@ -53,7 +53,7 @@ The password to access the registry.

 ## REPOSITORY NAMES

-Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags". Currently, only the Docker transport is supported.
+Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags".

 This command refers to repositories using a _transport_`:`_details_ format. The following formats are supported:

@@ -72,6 +72,8 @@ This command refers to repositories using a _transport_`:`_details_ format. The following formats are supported:
   "docker.io/myuser/myimage:v1.0"
   "docker.io/myuser/myimage@sha256:f48c4cc192f4c3c6a069cb5cca6d0a9e34d6076ba7c214fd0cc3ca60e0af76bb"

+**docker-archive:path[:docker-reference]**
+  More than one image may be stored in a docker save-formatted file.
+
 ## EXAMPLES
```
````diff
@@ -121,8 +123,48 @@ $ skopeo list-tags docker://localhost:5000/fedora

 ```

+### Docker-archive Transport
+
+To list the tags in a local docker-archive file:
+
+```sh
+$ skopeo list-tags docker-archive:/tmp/busybox.tar.gz
+{
+    "Tags": [
+        "busybox:1.28.3"
+    ]
+}
+```
+
+Also supports more than one tag in an archive:
+
+```sh
+$ skopeo list-tags docker-archive:/tmp/docker-two-images.tar.gz
+{
+    "Tags": [
+        "example.com/empty:latest",
+        "example.com/empty/but:different"
+    ]
+}
+```
+
+Will include a source-index entry for each untagged image:
+
+```sh
+$ skopeo list-tags docker-archive:/tmp/four-tags-with-an-untag.tar
+{
+    "Tags": [
+        "image1:tag1",
+        "image2:tag2",
+        "@2",
+        "image4:tag4"
+    ]
+}
+```
+
 # SEE ALSO
-skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5)
+skopeo(1), skopeo-login(1), docker-login(1), containers-auth.json(5), containers-transports(1)

 ## AUTHORS
````
```diff
@@ -50,6 +50,10 @@ Path of the authentication file for the source registry. Uses path given by `--authfile`, if not provided.

 Path of the authentication file for the destination registry. Uses path given by `--authfile`, if not provided.

+**--dry-run**
+
+Run the sync without actually copying data to the destination.
+
 **--src**, **-s** _transport_ Transport for the source repository.

 **--dest**, **-d** _transport_ Destination transport.
```
```diff
@@ -102,7 +102,7 @@ Print the version number
 | [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
 | [skopeo-delete(1)](skopeo-delete.1.md) | Mark the _image-name_ for later deletion by the registry's garbage collector. |
 | [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about _image-name_ in a registry. |
-| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List tags in the transport-specific image repository. |
+| [skopeo-list-tags(1)](skopeo-list-tags.1.md) | List image names in a transport-specific collection of images. |
 | [skopeo-login(1)](skopeo-login.1.md) | Login to a container registry. |
 | [skopeo-logout(1)](skopeo-logout.1.md) | Logout of a container registry. |
 | [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
```
go.mod (16 lines changed):

```diff
@@ -1,13 +1,13 @@
 module github.com/containers/skopeo

-go 1.12
+go 1.16

 require (
-	github.com/containers/common v0.47.3
-	github.com/containers/image/v5 v5.19.1
-	github.com/containers/ocicrypt v1.1.2
-	github.com/containers/storage v1.38.2
-	github.com/docker/docker v20.10.12+incompatible
+	github.com/containers/common v0.48.0
+	github.com/containers/image/v5 v5.21.1
+	github.com/containers/ocicrypt v1.1.4
+	github.com/containers/storage v1.40.2
+	github.com/docker/docker v20.10.14+incompatible
 	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84
@@ -15,9 +15,9 @@ require (
 	github.com/pkg/errors v0.9.1
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
 	github.com/sirupsen/logrus v1.8.1
-	github.com/spf13/cobra v1.3.0
+	github.com/spf13/cobra v1.4.0
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.7.0
+	github.com/stretchr/testify v1.7.1
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/yaml.v2 v2.4.0
```
```diff
@@ -1,14 +1,21 @@
 #!/bin/bash
 set -e

-# Before running podman for the first time, make sure
-# to set storage to vfs (not overlay): podman-in-podman
-# doesn't work with overlay. And, disable mountopt,
-# which causes error with vfs.
-sed -i \
-    -e 's/^driver\s*=.*/driver = "vfs"/' \
-    -e 's/^mountopt/#mountopt/' \
-    /etc/containers/storage.conf
+# These tests can run in/outside of a container. However,
+# not all storage drivers are supported in a container
+# environment. Detect this and setup storage when
+# running in a container.
+if ((SKOPEO_CONTAINER_TESTS)) && [[ -r /etc/containers/storage.conf ]]; then
+    sed -i \
+        -e 's/^driver\s*=.*/driver = "vfs"/' \
+        -e 's/^mountopt/#mountopt/' \
+        /etc/containers/storage.conf
+elif ((SKOPEO_CONTAINER_TESTS)); then
+    cat >> /etc/containers/storage.conf << EOF
+[storage]
+driver = "vfs"
+EOF
+fi

 # Build skopeo, install into /usr/bin
 make PREFIX=/usr install
```
install.md (42 lines changed):

````diff
@@ -1,7 +1,8 @@
 # Installing Skopeo

 ## Distribution Packages
-`skopeo` may already be packaged in your distribution.
+`skopeo` may already be packaged in your distribution. This document lists the
+installation steps for many distros, along with their information and support links.

 ### Fedora

@@ -9,30 +10,51 @@
 sudo dnf -y install skopeo
 ```

-### RHEL/CentOS ≥ 8 and CentOS Stream
+[Package Info](https://src.fedoraproject.org/rpms/skopeo) and
+[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Fedora&component=skopeo&product=Fedora)
+
+Fedora bugs can be reported on the Skopeo GitHub [Issues](https://github.com/containers/skopeo/issues) page.
+
+### RHEL / CentOS Stream ≥ 8

 ```sh
 sudo dnf -y install skopeo
 ```

+If you are a RHEL customer, please reach out through the official RHEL support
+channels for any issues.
+
+CentOS Stream 9: [Package Info](https://gitlab.com/redhat/centos-stream/rpms/skopeo/-/tree/c9s) and
+[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Red%20Hat&component=skopeo&product=Red%20Hat%20Enterprise%20Linux%209&version=CentOS%20Stream)
+
+CentOS Stream 8: [Package Info](https://git.centos.org/rpms/skopeo/tree/c8s-stream-rhel8) and
+[Bugzilla](https://bugzilla.redhat.com/buglist.cgi?bug_status=__open__&classification=Red%20Hat&component=skopeo&product=Red%20Hat%20Enterprise%20Linux%208&version=CentOS%20Stream)
+
 ### RHEL/CentOS ≤ 7.x

 ```sh
 sudo yum -y install skopeo
 ```

+CentOS 7: [Package Repo](https://git.centos.org/rpms/skopeo/tree/c7-extras)
+
 ### openSUSE

 ```sh
 sudo zypper install skopeo
 ```

+[Package Info](https://software.opensuse.org/package/skopeo)
+
 ### Alpine

 ```sh
 sudo apk add skopeo
 ```

+[Package Info](https://pkgs.alpinelinux.org/packages?name=skopeo)
+
 ### macOS

 ```sh
@@ -44,6 +66,8 @@ brew install skopeo
 $ nix-env -i skopeo
 ```

+[Package Info](https://search.nixos.org/packages?&show=skopeo&query=skopeo)
+
 ### Debian

 The skopeo package is available on [Bullseye](https://packages.debian.org/bullseye/skopeo),
@@ -55,6 +79,8 @@ sudo apt-get update
 sudo apt-get -y install skopeo
 ```

+[Package Info](https://packages.debian.org/stable/skopeo)
+
 ### Raspberry Pi OS arm64 (beta)

 Raspberry Pi OS uses the standard Debian's repositories,
@@ -73,17 +99,7 @@ sudo apt-get -y update
 sudo apt-get -y install skopeo
 ```

-The [Kubic project](https://build.opensuse.org/package/show/devel:kubic:libcontainers:stable/skopeo)
-provides packages for Ubuntu 20.04 (it should also work with direct derivatives like Pop!\_OS).
-
-```bash
-. /etc/os-release
-echo "deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/ /" | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
-curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add -
-sudo apt-get update
-sudo apt-get -y upgrade
-sudo apt-get -y install skopeo
-```
+[Package Info](https://packages.ubuntu.com/jammy/skopeo)

 ### Windows
 Skopeo has not yet been packaged for Windows. There is an [open feature
````
```diff
@@ -36,12 +36,12 @@ func (s *SkopeoSuite) SetUpSuite(c *check.C) {

 func (s *SkopeoSuite) TearDownSuite(c *check.C) {
 	if s.regV2 != nil {
-		s.regV2.Close()
+		s.regV2.tearDown(c)
 	}
 	if s.regV2WithAuth != nil {
 		//cmd := exec.Command("docker", "logout", s.regV2WithAuth)
 		//c.Assert(cmd.Run(), check.IsNil)
-		s.regV2WithAuth.Close()
+		s.regV2WithAuth.tearDown(c)
 	}
 }
```
```diff
@@ -6,7 +6,7 @@ import (
 	"crypto/x509"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io/fs"
 	"log"
 	"net/http"
 	"net/http/httptest"
```
```diff
@@ -64,9 +64,7 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
 	s.registry = setupRegistryV2At(c, v2DockerRegistryURL, false, false)
 	s.s1Registry = setupRegistryV2At(c, v2s1DockerRegistryURL, false, true)

-	gpgHome, err := ioutil.TempDir("", "skopeo-gpg")
-	c.Assert(err, check.IsNil)
-	s.gpgHome = gpgHome
+	s.gpgHome = c.MkDir()
 	os.Setenv("GNUPGHOME", s.gpgHome)

 	for _, key := range []string{"personal", "official"} {
```
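`c.MkDir()` is gocheck's built-in scratch directory: the framework creates a fresh directory per call and removes it automatically once the suite finishes, which is why the `ioutil.TempDir` / `c.Assert` / `defer os.RemoveAll` triplets disappear throughout the remaining test hunks. A minimal sketch, assuming the usual `gopkg.in/check.v1` wiring these suites already have:

```go
package demo

import (
	"os"
	"path/filepath"
	"testing"

	"gopkg.in/check.v1"
)

func Test(t *testing.T) { check.TestingT(t) }

type DemoSuite struct{}

var _ = check.Suite(&DemoSuite{})

func (s *DemoSuite) TestScratchDir(c *check.C) {
	// MkDir returns a fresh temporary directory; gocheck removes it
	// automatically after the run, so no defer os.RemoveAll is needed.
	dir := c.MkDir()
	err := os.WriteFile(filepath.Join(dir, "manifest.json"), []byte("{}"), 0600)
	c.Assert(err, check.IsNil)
}
```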
```diff
@@ -75,21 +73,18 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
 		runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")

 		out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key))
-		err := ioutil.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
+		err := os.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
 			[]byte(out), 0600)
 		c.Assert(err, check.IsNil)
 	}
 }

 func (s *CopySuite) TearDownSuite(c *check.C) {
-	if s.gpgHome != "" {
-		os.RemoveAll(s.gpgHome)
-	}
 	if s.registry != nil {
-		s.registry.Close()
+		s.registry.tearDown(c)
 	}
 	if s.s1Registry != nil {
-		s.s1Registry.Close()
+		s.s1Registry.tearDown(c)
 	}
 	if s.cluster != nil {
 		s.cluster.tearDown(c)
```
@@ -97,32 +92,20 @@ func (s *CopySuite) TearDownSuite(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyWithManifestList(c *check.C) {
|
||||
dir, err := ioutil.TempDir("", "copy-manifest-list")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir)
|
||||
dir := c.MkDir()
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir)
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyAllWithManifestList(c *check.C) {
|
||||
dir, err := ioutil.TempDir("", "copy-all-manifest-list")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir)
|
||||
dir := c.MkDir()
|
||||
assertSkopeoSucceeds(c, "", "copy", "--all", knownListImage, "dir:"+dir)
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyAllWithManifestListRoundTrip(c *check.C) {
|
||||
oci1, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(oci1)
|
||||
oci2, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(oci2)
|
||||
dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
oci1 := c.MkDir()
|
||||
oci2 := c.MkDir()
|
||||
dir1 := c.MkDir()
|
||||
dir2 := c.MkDir()
|
||||
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage, "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "dir:"+dir1, "oci:"+oci2)
|
||||
@@ -133,18 +116,10 @@ func (s *CopySuite) TestCopyAllWithManifestListRoundTrip(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyAllWithManifestListConverge(c *check.C) {
|
||||
oci1, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(oci1)
|
||||
oci2, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(oci2)
|
||||
dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
oci1 := c.MkDir()
|
||||
oci2 := c.MkDir()
|
||||
dir1 := c.MkDir()
|
||||
dir2 := c.MkDir()
|
||||
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", knownListImage, "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "--format", "oci", knownListImage, "dir:"+dir2)
|
||||
@@ -155,13 +130,11 @@ func (s *CopySuite) TestCopyAllWithManifestListConverge(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyNoneWithManifestList(c *check.C) {
|
||||
dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
dir1 := c.MkDir()
|
||||
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=index-only", knownListImage, "dir:"+dir1)
|
||||
|
||||
manifestPath := filepath.Join(dir1, "manifest.json")
|
||||
readManifest, err := ioutil.ReadFile(manifestPath)
|
||||
readManifest, err := os.ReadFile(manifestPath)
|
||||
c.Assert(err, check.IsNil)
|
||||
mimeType := manifest.GuessMIMEType(readManifest)
|
||||
c.Assert(mimeType, check.Equals, "application/vnd.docker.distribution.manifest.list.v2+json")
|
||||
@@ -170,18 +143,10 @@ func (s *CopySuite) TestCopyNoneWithManifestList(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyWithManifestListConverge(c *check.C) {
|
||||
oci1, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(oci1)
|
||||
oci2, err := ioutil.TempDir("", "copy-all-manifest-list-oci")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(oci2)
|
||||
dir1, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
dir2, err := ioutil.TempDir("", "copy-all-manifest-list-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
oci1 := c.MkDir()
|
||||
oci2 := c.MkDir()
|
||||
dir1 := c.MkDir()
|
||||
dir2 := c.MkDir()
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage, "oci:"+oci1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--multi-arch=all", "oci:"+oci1, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "--format", "oci", knownListImage, "dir:"+dir2)
|
||||
@@ -192,24 +157,16 @@ func (s *CopySuite) TestCopyWithManifestListConverge(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyAllWithManifestListStorageFails(c *check.C) {
|
||||
storage, err := ioutil.TempDir("", "copy-storage")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage := c.MkDir()
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
assertSkopeoFails(c, `.*destination transport .* does not support copying multiple images as a group.*`, "copy", "--multi-arch=all", knownListImage, "containers-storage:"+storage+"test")
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) {
|
||||
storage, err := ioutil.TempDir("", "copy-manifest-list-storage")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage := c.MkDir()
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
dir1 := c.MkDir()
|
||||
dir2 := c.MkDir()
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage, "containers-storage:"+storage+"test")
|
||||
assertSkopeoSucceeds(c, "", "copy", knownListImage, "dir:"+dir1)
|
||||
assertSkopeoSucceeds(c, "", "copy", "containers-storage:"+storage+"test", "dir:"+dir2)
|
||||
@@ -218,16 +175,10 @@ func (s *CopySuite) TestCopyWithManifestListStorage(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyWithManifestListStorageMultiple(c *check.C) {
|
||||
storage, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage := c.MkDir()
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-multiple-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
dir1 := c.MkDir()
|
||||
dir2 := c.MkDir()
|
||||
assertSkopeoSucceeds(c, "", "--override-arch", "amd64", "copy", knownListImage, "containers-storage:"+storage+"test")
|
||||
assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "containers-storage:"+storage+"test")
|
||||
assertSkopeoSucceeds(c, "", "--override-arch", "arm64", "copy", knownListImage, "dir:"+dir1)
|
||||
@@ -237,18 +188,10 @@ func (s *CopySuite) TestCopyWithManifestListStorageMultiple(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
|
||||
dir1, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
dir2, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
oci1, err := ioutil.TempDir("", "copy-manifest-list-digest-oci")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(oci1)
|
||||
oci2, err := ioutil.TempDir("", "copy-manifest-list-digest-oci")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(oci2)
|
||||
dir1 := c.MkDir()
|
||||
dir2 := c.MkDir()
|
||||
oci1 := c.MkDir()
|
||||
oci2 := c.MkDir()
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
@@ -262,31 +205,21 @@ func (s *CopySuite) TestCopyWithManifestListDigest(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyWithDigestfileOutput(c *check.C) {
|
||||
tempdir, err := ioutil.TempDir("", "tempdir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(tempdir)
|
||||
dir1, err := ioutil.TempDir("", "copy-manifest-list-digest-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
tempdir := c.MkDir()
|
||||
dir1 := c.MkDir()
|
||||
digestOutPath := filepath.Join(tempdir, "digest.txt")
|
||||
assertSkopeoSucceeds(c, "", "copy", "--digestfile="+digestOutPath, knownListImage, "dir:"+dir1)
|
||||
readDigest, err := ioutil.ReadFile(digestOutPath)
|
||||
readDigest, err := os.ReadFile(digestOutPath)
|
||||
c.Assert(err, check.IsNil)
|
||||
_, err = digest.Parse(string(readDigest))
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
|
||||
func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
|
||||
storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(storage)
|
||||
storage := c.MkDir()
|
||||
storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
|
||||
dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir1)
|
||||
dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
|
||||
c.Assert(err, check.IsNil)
|
||||
defer os.RemoveAll(dir2)
|
||||
dir1 := c.MkDir()
|
||||
dir2 := c.MkDir()
|
||||
m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
|
||||
manifestDigest, err := manifest.Digest([]byte(m))
|
||||
c.Assert(err, check.IsNil)
|
||||
@@ -299,16 +232,10 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigest(c *check.C) {
|
||||
}

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArches(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
-	dir1, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-dir")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
 	c.Assert(err, check.IsNil)
@@ -321,9 +248,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArches(c *check
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseListDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-both")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
@@ -343,9 +268,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesBothUseLi
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUsesListDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-first")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
@@ -379,9 +302,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesFirstUses
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUsesListDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-second")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
@@ -415,9 +336,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesSecondUse
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUsesListDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-third")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
@@ -451,9 +370,7 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesThirdUses
 }

 func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDigest(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-manifest-list-storage-digest-multiple-arches-tag-digest")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	m := combinedOutputOfCommand(c, skopeoBinary, "inspect", "--raw", knownListImage)
 	manifestDigest, err := manifest.Digest([]byte(m))
@@ -496,28 +413,20 @@ func (s *CopySuite) TestCopyWithManifestListStorageDigestMultipleArchesTagAndDig
 }

 func (s *CopySuite) TestCopyFailsWhenImageOSDoesNotMatchRuntimeOS(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-fails-image-does-not-match-runtime")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	assertSkopeoFails(c, `.*no image found in manifest list for architecture .*, variant .*, OS .*`, "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test")
 }

 func (s *CopySuite) TestCopySucceedsWhenImageDoesNotMatchRuntimeButWeOverride(c *check.C) {
-	storage, err := ioutil.TempDir("", "copy-succeeds-image-does-not-match-runtime-but-override")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(storage)
+	storage := c.MkDir()
 	storage = fmt.Sprintf("[vfs@%s/root+%s/runroot]", storage, storage)
 	assertSkopeoSucceeds(c, "", "--override-os=windows", "--override-arch=amd64", "copy", knownWindowsOnlyImage, "containers-storage:"+storage+"test")
 }

 func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
-	dir1, err := ioutil.TempDir("", "copy-1")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-2")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()

 	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
 	// "pull": docker: → dir:
@@ -533,12 +442,8 @@ func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
 func (s *CopySuite) TestCopySimple(c *check.C) {
 	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

-	dir1, err := ioutil.TempDir("", "copy-1")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "copy-2")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()

 	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
 	// "pull": docker: → dir:
@@ -557,7 +462,7 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
 	ociImgName := "pause"
 	defer os.RemoveAll(ociDest)
 	assertSkopeoSucceeds(c, "", "copy", "docker://k8s.gcr.io/pause:latest", "oci:"+ociDest+":"+ociImgName)
-	_, err = os.Stat(ociDest)
+	_, err := os.Stat(ociDest)
 	c.Assert(err, check.IsNil)

 	// docker v2s2 -> OCI image layout without image name
@@ -569,31 +474,14 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
 }

 func (s *CopySuite) TestCopyEncryption(c *check.C) {
-
-	originalImageDir, err := ioutil.TempDir("", "copy-1")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(originalImageDir)
-	encryptedImgDir, err := ioutil.TempDir("", "copy-2")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(encryptedImgDir)
-	decryptedImgDir, err := ioutil.TempDir("", "copy-3")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(decryptedImgDir)
-	keysDir, err := ioutil.TempDir("", "copy-4")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(keysDir)
-	undecryptedImgDir, err := ioutil.TempDir("", "copy-5")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(undecryptedImgDir)
-	multiLayerImageDir, err := ioutil.TempDir("", "copy-6")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(multiLayerImageDir)
-	partiallyEncryptedImgDir, err := ioutil.TempDir("", "copy-7")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(partiallyEncryptedImgDir)
-	partiallyDecryptedImgDir, err := ioutil.TempDir("", "copy-8")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(partiallyDecryptedImgDir)
+	originalImageDir := c.MkDir()
+	encryptedImgDir := c.MkDir()
+	decryptedImgDir := c.MkDir()
+	keysDir := c.MkDir()
+	undecryptedImgDir := c.MkDir()
+	multiLayerImageDir := c.MkDir()
+	partiallyEncryptedImgDir := c.MkDir()
+	partiallyDecryptedImgDir := c.MkDir()

 	// Create RSA key pair
 	privateKey, err := rsa.GenerateKey(rand.Reader, 4096)
@@ -602,9 +490,9 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
 	privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
 	publicKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey)
 	c.Assert(err, check.IsNil)
-	err = ioutil.WriteFile(keysDir+"/private.key", privateKeyBytes, 0644)
+	err = os.WriteFile(keysDir+"/private.key", privateKeyBytes, 0644)
 	c.Assert(err, check.IsNil)
-	err = ioutil.WriteFile(keysDir+"/public.key", publicKeyBytes, 0644)
+	err = os.WriteFile(keysDir+"/public.key", publicKeyBytes, 0644)
 	c.Assert(err, check.IsNil)

 	// We can either perform encryption or decryption on the image.
@@ -628,7 +516,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
 	invalidPrivateKey, err := rsa.GenerateKey(rand.Reader, 4096)
 	c.Assert(err, check.IsNil)
 	invalidPrivateKeyBytes := x509.MarshalPKCS1PrivateKey(invalidPrivateKey)
-	err = ioutil.WriteFile(keysDir+"/invalid_private.key", invalidPrivateKeyBytes, 0644)
+	err = os.WriteFile(keysDir+"/invalid_private.key", invalidPrivateKeyBytes, 0644)
 	c.Assert(err, check.IsNil)
 	assertSkopeoFails(c, ".*no suitable key unwrapper found or none of the private keys could be used for decryption.*",
 		"copy", "--decryption-key", keysDir+"/invalid_private.key",
@@ -668,7 +556,7 @@ func (s *CopySuite) TestCopyEncryption(c *check.C) {
 }
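TestCopyEncryption exercises skopeo's layer-encryption flags: --encryption-key takes a protocol-prefixed recipient (jwe:, pgp:, or pkcs7:), --decryption-key takes the matching private key, and a wrong key fails with the "no suitable key unwrapper" error asserted above. A sketch of the flag usage (key and image paths illustrative):

	# Encrypt all layers for the RSA public key, then decrypt them again.
	skopeo copy --encryption-key jwe:./public.key oci:plain:img oci:encrypted:img
	skopeo copy --decryption-key ./private.key oci:encrypted:img oci:decrypted:img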

 func matchLayerBlobBinaryType(c *check.C, ociImageDirPath string, contentType string, matchCount int) {
-	files, err := ioutil.ReadDir(ociImageDirPath)
+	files, err := os.ReadDir(ociImageDirPath)
 	c.Assert(err, check.IsNil)

 	foundCount := 0
@@ -704,7 +592,7 @@ func assertDirImagesAreEqual(c *check.C, dir1, dir2 string) {
 	digests := []digest.Digest{}
 	for _, dir := range []string{dir1, dir2} {
 		manifestPath := filepath.Join(dir, "manifest.json")
-		m, err := ioutil.ReadFile(manifestPath)
+		m, err := os.ReadFile(manifestPath)
 		c.Assert(err, check.IsNil)
 		digest, err := manifest.Digest(m)
 		c.Assert(err, check.IsNil)
@@ -722,7 +610,7 @@ func assertSchema1DirImagesAreEqualExceptNames(c *check.C, dir1, ref1, dir2, ref
 	manifests := []map[string]interface{}{}
 	for dir, ref := range map[string]string{dir1: ref1, dir2: ref2} {
 		manifestPath := filepath.Join(dir, "manifest.json")
-		m, err := ioutil.ReadFile(manifestPath)
+		m, err := os.ReadFile(manifestPath)
 		c.Assert(err, check.IsNil)
 		data := map[string]interface{}{}
 		err = json.Unmarshal(m, &data)
@@ -745,12 +633,8 @@ func assertSchema1DirImagesAreEqualExceptNames(c *check.C, dir1, ref1, dir2, ref

 // Streaming (skopeo copy)
 func (s *CopySuite) TestCopyStreaming(c *check.C) {
-	dir1, err := ioutil.TempDir("", "streaming-1")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "streaming-2")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
+	dir1 := c.MkDir()
+	dir2 := c.MkDir()

 	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
 	// streaming: docker: → atomic:
@@ -770,12 +654,8 @@ func (s *CopySuite) TestCopyStreaming(c *check.C) {
 func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
 	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

-	oci1, err := ioutil.TempDir("", "oci-1")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(oci1)
-	oci2, err := ioutil.TempDir("", "oci-2")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(oci2)
+	oci1 := c.MkDir()
+	oci2 := c.MkDir()

 	// Docker -> OCI
 	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", testFQIN, "oci:"+oci1+":latest")
@@ -798,7 +678,7 @@ func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
 	// Verify using the upstream OCI image validator, this should catch most
 	// non-compliance errors. DO NOT REMOVE THIS TEST UNLESS IT'S ABSOLUTELY
 	// NECESSARY.
-	err = image.ValidateLayout(oci1, nil, logger)
+	err := image.ValidateLayout(oci1, nil, logger)
 	c.Assert(err, check.IsNil)
 	err = image.ValidateLayout(oci2, nil, logger)
 	c.Assert(err, check.IsNil)
@@ -820,9 +700,7 @@ func (s *CopySuite) TestCopySignatures(c *check.C) {
 		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
 	}

-	dir, err := ioutil.TempDir("", "signatures-dest")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir)
+	dir := c.MkDir()
 	dirDest := "dir:" + dir

 	policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
@@ -876,9 +754,7 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
 		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
 	}

-	topDir, err := ioutil.TempDir("", "dir-signatures-top")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(topDir)
+	topDir := c.MkDir()
 	topDirDest := "dir:" + topDir

 	for _, suffix := range []string{"/dir1", "/dir2", "/restricted/personal", "/restricted/official", "/restricted/badidentity", "/dest"} {
@@ -921,9 +797,7 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
 func (s *CopySuite) TestCopyCompression(c *check.C) {
 	const uncompresssedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"

-	topDir, err := ioutil.TempDir("", "compression-top")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(topDir)
+	topDir := c.MkDir()

 	for i, t := range []struct{ fixture, remote string }{
 		{"uncompressed-image-s1", "docker://" + v2DockerRegistryURL + "/compression/compression:s1"},
@@ -958,15 +832,15 @@ func (s *CopySuite) TestCopyCompression(c *check.C) {

 func findRegularFiles(c *check.C, root string) []string {
 	result := []string{}
-	err := filepath.Walk(root, filepath.WalkFunc(func(path string, info os.FileInfo, err error) error {
+	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
 		if err != nil {
 			return err
 		}
-		if info.Mode().IsRegular() {
+		if d.Type().IsRegular() {
 			result = append(result, path)
 		}
 		return nil
-	}))
+	})
 	c.Assert(err, check.IsNil)
 	return result
 }
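findRegularFiles now uses filepath.WalkDir (Go 1.16+) instead of filepath.Walk: the callback receives a lightweight fs.DirEntry, so the walk no longer pays for a full lstat on every entry, and d.Type() still exposes the mode bits needed to spot regular files. A standalone sketch of the same idiom:

	package main

	import (
		"fmt"
		"io/fs"
		"path/filepath"
	)

	func main() {
		_ = filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error {
			if err != nil {
				return err // propagate errors such as unreadable directories
			}
			if d.Type().IsRegular() { // type bits only; no extra stat call
				fmt.Println(path)
			}
			return nil
		})
	}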
@@ -982,9 +856,7 @@ func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {

 	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

-	tmpDir, err := ioutil.TempDir("", "signatures-sigstore")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()
 	copyDest := filepath.Join(tmpDir, "dest")
 	err = os.Mkdir(copyDest, 0755)
 	c.Assert(err, check.IsNil)
@@ -1050,9 +922,7 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
 		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
 	}

-	topDir, err := ioutil.TempDir("", "atomic-extension")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(topDir)
+	topDir := c.MkDir()
 	for _, subdir := range []string{"dirAA", "dirAD", "dirDA", "dirDD", "registries.d"} {
 		err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
 		c.Assert(err, check.IsNil)
@@ -1099,22 +969,6 @@ func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
 	assertDirImagesAreEqual(c, filepath.Join(topDir, "dirDA"), filepath.Join(topDir, "dirDD"))
 }

-// copyWithSignedIdentity creates a copy of an unsigned image, adding a signature for an unrelated identity
-// This should be easier than using standalone-sign.
-func copyWithSignedIdentity(c *check.C, src, dest, signedIdentity, signBy, registriesDir string) {
-	topDir, err := ioutil.TempDir("", "copyWithSignedIdentity")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(topDir)
-
-	signingDir := filepath.Join(topDir, "signing-temp")
-	assertSkopeoSucceeds(c, "", "copy", "--src-tls-verify=false", src, "dir:"+signingDir)
-	c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
-	assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", filepath.Join(signingDir, "signature-1"),
-		filepath.Join(signingDir, "manifest.json"), signedIdentity, signBy)
-	c.Logf("%s", combinedOutputOfCommand(c, "ls", "-laR", signingDir))
-	assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--dest-tls-verify=false", "dir:"+signingDir, dest)
-}
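The helper removed above staged an image in a dir:, signed its manifest with standalone-sign, and pushed the result; the --sign-identity flag introduced by this changeset collapses that into a single copy, as the rewritten call sites below show. A sketch of the flag (registry names illustrative; the identity must be a fully specified docker reference, or skopeo fails with "Could not parse --sign-identity: repository name must be canonical"):

	skopeo copy --dest-tls-verify=false \
	    --sign-by=personal@example.com \
	    --sign-identity=registry.example.com/ns/app:v1 \
	    docker://src.example.com/ns/app:v1 \
	    docker://mirror.example.com/ns/app:v1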

 // Both mirroring support in registries.conf, and mirrored remapIdentity support in policy.json
 func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
 	const regPrefix = "docker://localhost:5006/myns/mirroring-"
@@ -1126,9 +980,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
 		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
 	}

-	topDir, err := ioutil.TempDir("", "mirrored-signatures")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(topDir)
+	topDir := c.MkDir()
 	registriesDir := filepath.Join(topDir, "registries.d") // An empty directory to disable sigstore use
 	dirDest := "dir:" + filepath.Join(topDir, "unused-dest")

@@ -1160,10 +1012,12 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
 	assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted.*",
 		"--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:mirror-signed", dirDest)

+	// Fail if we specify an unqualified identity
+	assertSkopeoFails(c, ".*Could not parse --sign-identity: repository name must be canonical.*",
+		"--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=this-is-not-fully-specified", regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed")
+
 	// Create a signature for mirroring-primary:primary-signed without pushing there.
-	copyWithSignedIdentity(c, regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed",
-		"localhost:5006/myns/mirroring-primary:primary-signed", "personal@example.com",
-		registriesDir)
+	assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=localhost:5006/myns/mirroring-primary:primary-signed", regPrefix+"primary:unsigned", regPrefix+"mirror:primary-signed")
 	// Verify that a correctly signed image for the primary is accessible using the primary's reference
 	assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"primary:primary-signed", dirDest)
 	// … but verify that while it is accessible using the mirror location
@@ -1178,9 +1032,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {
 	// … it is NOT accessible when requiring a signature …
 	assertSkopeoFails(c, ".*Source image rejected: None of the signatures were accepted, reasons: Signature for identity localhost:5006/myns/mirroring-primary:direct is not accepted; Signature for identity localhost:5006/myns/mirroring-mirror:mirror-signed is not accepted; Signature for identity localhost:5006/myns/mirroring-primary:primary-signed is not accepted.*", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
 	// … until signed.
-	copyWithSignedIdentity(c, regPrefix+"remap:remapped", regPrefix+"remap:remapped",
-		"localhost:5006/myns/mirroring-primary:remapped", "personal@example.com",
-		registriesDir)
+	assertSkopeoSucceeds(c, "", "--registries.d", registriesDir, "copy", "--src-tls-verify=false", "--dest-tls-verify=false", "--sign-by=personal@example.com", "--sign-identity=localhost:5006/myns/mirroring-primary:remapped", regPrefix+"remap:remapped", regPrefix+"remap:remapped")
 	assertSkopeoSucceeds(c, "", "--policy", policy, "--registries.d", registriesDir, "--registries-conf", "fixtures/registries.conf", "copy", "--src-tls-verify=false", regPrefix+"remap:remapped", dirDest)
 	// To be extra clear about the semantics, verify that the signedPrefix (primary) location never exists
 	// and only the remapped prefix (mirror) is accessed.
@@ -1189,9 +1041,7 @@ func (s *CopySuite) TestCopyVerifyingMirroredSignatures(c *check.C) {

 func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
 	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", testFQIN, fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
-	dir1, err := ioutil.TempDir("", "copy-1")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
+	dir1 := c.MkDir()
 	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "dir:"+dir1)
 }

@@ -1205,9 +1055,7 @@ func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) {
 }

 func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWithoutTLSVerifyFalse(c *check.C) {
-	topDir, err := ioutil.TempDir("", "no-panic-on-https-response-without-tls-verify-false")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(topDir)
+	topDir := c.MkDir()

 	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"

@@ -1223,9 +1071,7 @@ func (s *CopySuite) TestCopySchemaConversion(c *check.C) {
 }

 func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
-	topDir, err := ioutil.TempDir("", "manifest-conversion")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(topDir)
+	topDir := c.MkDir()
 	srcDir := filepath.Join(topDir, "source")
 	destDir1 := filepath.Join(topDir, "dest1")
 	destDir2 := filepath.Join(topDir, "dest2")
@@ -1249,18 +1095,14 @@ func (s *CopySuite) TestCopyManifestConversion(c *check.C) {
 }

 func (s *CopySuite) TestCopyPreserveDigests(c *check.C) {
-	topDir, err := ioutil.TempDir("", "preserve-digests")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(topDir)
+	topDir := c.MkDir()

 	assertSkopeoSucceeds(c, "", "copy", knownListImage, "--multi-arch=all", "--preserve-digests", "dir:"+topDir)
 	assertSkopeoFails(c, ".*Instructed to preserve digests.*", "copy", knownListImage, "--multi-arch=all", "--preserve-digests", "--format=oci", "dir:"+topDir)
 }

 func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Registry, schema2Registry string) {
-	topDir, err := ioutil.TempDir("", "schema-conversion")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(topDir)
+	topDir := c.MkDir()
 	for _, subdir := range []string{"input1", "input2", "dest2"} {
 		err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
 		c.Assert(err, check.IsNil)
@@ -1294,35 +1136,35 @@ func (s *CopySuite) testCopySchemaConversionRegistries(c *check.C, schema1Regist
 const regConfFixture = "./fixtures/registries.conf"

 func (s *SkopeoSuite) TestSuccessCopySrcWithMirror(c *check.C) {
-	dir, err := ioutil.TempDir("", "copy-mirror")
-	c.Assert(err, check.IsNil)
+	dir := c.MkDir()

 	assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
 		"docker://mirror.invalid/busybox", "dir:"+dir)
 }

 func (s *SkopeoSuite) TestFailureCopySrcWithMirrorsUnavailable(c *check.C) {
-	dir, err := ioutil.TempDir("", "copy-mirror")
-	c.Assert(err, check.IsNil)
+	dir := c.MkDir()

-	assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
-		"docker://invalid.invalid/busybox", "dir:"+dir)
+	// .invalid domains are, per RFC 6761, supposed to result in NXDOMAIN.
+	// With systemd-resolved (used only via NSS?), we instead seem to get “Temporary failure in name resolution”
+	assertSkopeoFails(c, ".*(no such host|Temporary failure in name resolution).*",
+		"--registries-conf="+regConfFixture, "copy", "docker://invalid.invalid/busybox", "dir:"+dir)
 }

 func (s *SkopeoSuite) TestSuccessCopySrcWithMirrorAndPrefix(c *check.C) {
-	dir, err := ioutil.TempDir("", "copy-mirror")
-	c.Assert(err, check.IsNil)
+	dir := c.MkDir()

 	assertSkopeoSucceeds(c, "", "--registries-conf="+regConfFixture, "copy",
 		"docker://gcr.invalid/foo/bar/busybox", "dir:"+dir)
 }

 func (s *SkopeoSuite) TestFailureCopySrcWithMirrorAndPrefixUnavailable(c *check.C) {
-	dir, err := ioutil.TempDir("", "copy-mirror")
-	c.Assert(err, check.IsNil)
+	dir := c.MkDir()

-	assertSkopeoFails(c, ".*no such host.*", "--registries-conf="+regConfFixture, "copy",
-		"docker://gcr.invalid/wrong/prefix/busybox", "dir:"+dir)
+	// .invalid domains are, per RFC 6761, supposed to result in NXDOMAIN.
+	// With systemd-resolved (used only via NSS?), we instead seem to get “Temporary failure in name resolution”
+	assertSkopeoFails(c, ".*(no such host|Temporary failure in name resolution).*",
+		"--registries-conf="+regConfFixture, "copy", "docker://gcr.invalid/wrong/prefix/busybox", "dir:"+dir)
 }

 func (s *CopySuite) TestCopyFailsWhenReferenceIsInvalid(c *check.C) {

@@ -5,7 +5,6 @@ import (
 	"context"
 	"encoding/base64"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -33,10 +32,7 @@ type openshiftCluster struct {
 // in isolated test environment.
 func startOpenshiftCluster(c *check.C) *openshiftCluster {
 	cluster := &openshiftCluster{}

-	dir, err := ioutil.TempDir("", "openshift-cluster")
-	c.Assert(err, check.IsNil)
-	cluster.workingDir = dir
+	cluster.workingDir = c.MkDir()

 	cluster.startMaster(c)
 	cluster.prepareRegistryConfig(c)
@@ -196,7 +192,7 @@ func (cluster *openshiftCluster) startRegistry(c *check.C) {
 		// The default configuration currently already contains acceptschema2: false
 	})
 	// Make sure the configuration contains "acceptschema2: false", because eventually it will be enabled upstream and this function will need to be updated.
-	configContents, err := ioutil.ReadFile(schema1Config)
+	configContents, err := os.ReadFile(schema1Config)
 	c.Assert(err, check.IsNil)
 	c.Assert(string(configContents), check.Matches, "(?s).*acceptschema2: false.*")
 	cluster.processes = append(cluster.processes, cluster.startRegistryProcess(c, 5005, schema1Config))
@@ -240,7 +236,7 @@ func (cluster *openshiftCluster) dockerLogin(c *check.C) {
 		}`, port, authValue))
 	}
 	configJSON := `{"auths": {` + strings.Join(auths, ",") + `}}`
-	err = ioutil.WriteFile(filepath.Join(cluster.dockerDir, "config.json"), []byte(configJSON), 0600)
+	err = os.WriteFile(filepath.Join(cluster.dockerDir, "config.json"), []byte(configJSON), 0600)
 	c.Assert(err, check.IsNil)
 }

@@ -258,12 +254,12 @@ func (cluster *openshiftCluster) relaxImageSignerPermissions(c *check.C) {
 // tearDown stops the cluster services and deletes (only some!) of the state.
 func (cluster *openshiftCluster) tearDown(c *check.C) {
 	for i := len(cluster.processes) - 1; i >= 0; i-- {
-		cluster.processes[i].Process.Kill()
-	}
-	if cluster.workingDir != "" {
-		os.RemoveAll(cluster.workingDir)
+		// It’s undocumented what Kill() returns if the process has terminated,
+		// so we couldn’t check just for that. This is running in a container anyway…
+		_ = cluster.processes[i].Process.Kill()
 	}
 	if cluster.dockerDir != "" {
-		os.RemoveAll(cluster.dockerDir)
+		err := os.RemoveAll(cluster.dockerDir)
+		c.Assert(err, check.IsNil)
 	}
 }

@@ -3,7 +3,7 @@ package main
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"os"
 	"os/exec"
@@ -57,7 +57,7 @@ type pipefd struct {
 	fd *os.File
 }

-func (self *proxy) call(method string, args []interface{}) (rval interface{}, fd *pipefd, err error) {
+func (p *proxy) call(method string, args []interface{}) (rval interface{}, fd *pipefd, err error) {
 	req := request{
 		Method: method,
 		Args:   args,
@@ -66,7 +66,7 @@ func (self *proxy) call(method string, args []interface{}) (rval interface{}, fd
 	if err != nil {
 		return
 	}
-	n, err := self.c.Write(reqbuf)
+	n, err := p.c.Write(reqbuf)
 	if err != nil {
 		return
 	}
@@ -76,7 +76,7 @@ func (self *proxy) call(method string, args []interface{}) (rval interface{}, fd
 	}
 	oob := make([]byte, syscall.CmsgSpace(1))
 	replybuf := make([]byte, maxMsgSize)
-	n, oobn, _, _, err := self.c.ReadMsgUnix(replybuf, oob)
+	n, oobn, _, _, err := p.c.ReadMsgUnix(replybuf, oob)
 	if err != nil {
 		err = fmt.Errorf("reading reply: %v", err)
 		return
@@ -119,9 +119,9 @@ func (self *proxy) call(method string, args []interface{}) (rval interface{}, fd
 	return
 }

-func (self *proxy) callNoFd(method string, args []interface{}) (rval interface{}, err error) {
+func (p *proxy) callNoFd(method string, args []interface{}) (rval interface{}, err error) {
 	var fd *pipefd
-	rval, fd, err = self.call(method, args)
+	rval, fd, err = p.call(method, args)
 	if err != nil {
 		return
 	}
@@ -132,9 +132,9 @@ func (self *proxy) callNoFd(method string, args []interface{}) (rval interface{}
 	return rval, nil
 }

-func (self *proxy) callReadAllBytes(method string, args []interface{}) (rval interface{}, buf []byte, err error) {
+func (p *proxy) callReadAllBytes(method string, args []interface{}) (rval interface{}, buf []byte, err error) {
 	var fd *pipefd
-	rval, fd, err = self.call(method, args)
+	rval, fd, err = p.call(method, args)
 	if err != nil {
 		return
 	}
@@ -144,13 +144,13 @@ func (self *proxy) callReadAllBytes(method string, args []interface{}) (rval int
 	}
 	fetchchan := make(chan byteFetch)
 	go func() {
-		manifestBytes, err := ioutil.ReadAll(fd.fd)
+		manifestBytes, err := io.ReadAll(fd.fd)
 		fetchchan <- byteFetch{
 			content: manifestBytes,
 			err:     err,
 		}
 	}()
-	_, err = self.callNoFd("FinishPipe", []interface{}{fd.id})
+	_, err = p.callNoFd("FinishPipe", []interface{}{fd.id})
 	if err != nil {
 		return
 	}
@@ -241,7 +241,7 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
 	}
 	imgid := uint32(imgidv)

-	v, manifestBytes, err := p.callReadAllBytes("GetManifest", []interface{}{imgid})
+	_, manifestBytes, err := p.callReadAllBytes("GetManifest", []interface{}{imgid})
 	if err != nil {
 		return err
 	}
@@ -250,7 +250,7 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
 		return err
 	}

-	v, configBytes, err := p.callReadAllBytes("GetFullConfig", []interface{}{imgid})
+	_, configBytes, err := p.callReadAllBytes("GetFullConfig", []interface{}{imgid})
 	if err != nil {
 		return err
 	}
@@ -269,7 +269,7 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
 	}

 	// Also test this legacy interface
-	v, ctrconfigBytes, err := p.callReadAllBytes("GetConfig", []interface{}{imgid})
+	_, ctrconfigBytes, err := p.callReadAllBytes("GetConfig", []interface{}{imgid})
 	if err != nil {
 		return err
 	}
@@ -285,6 +285,9 @@ func runTestGetManifestAndConfig(p *proxy, img string) error {
 	}

+	_, err = p.callNoFd("CloseImage", []interface{}{imgid})
+	if err != nil {
+		return err
+	}

 	return nil
 }

@@ -2,7 +2,6 @@ package main

 import (
 	"fmt"
-	"io/ioutil"
 	"net/http"
 	"os"
 	"os/exec"
@@ -20,7 +19,6 @@ const (
 type testRegistryV2 struct {
 	cmd      *exec.Cmd
 	url      string
-	dir      string
 	username string
 	password string
 	email    string
@@ -45,10 +43,7 @@ func setupRegistryV2At(c *check.C, url string, auth, schema1 bool) *testRegistry
 }

 func newTestRegistryV2At(c *check.C, url string, auth, schema1 bool) (*testRegistryV2, error) {
-	tmp, err := ioutil.TempDir("", "registry-test-")
-	if err != nil {
-		return nil, err
-	}
+	tmp := c.MkDir()
 	template := `version: 0.1
 loglevel: debug
 storage:
@@ -71,7 +66,7 @@ http:
 		username = "testuser"
 		password = "testpassword"
 		email = "test@test.org"
-		if err := ioutil.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)); err != nil {
+		if err := os.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)); err != nil {
 			return nil, err
 		}
 		htpasswd = fmt.Sprintf(`auth:
@@ -86,7 +81,6 @@ http:
 		return nil, err
 	}
 	if _, err := fmt.Fprintf(config, template, tmp, url, htpasswd); err != nil {
-		os.RemoveAll(tmp)
 		return nil, err
 	}

@@ -98,7 +92,6 @@ http:
 	cmd := exec.Command(binary, confPath)
 	consumeAndLogOutputs(c, fmt.Sprintf("registry-%s", url), cmd)
 	if err := cmd.Start(); err != nil {
-		os.RemoveAll(tmp)
 		if os.IsNotExist(err) {
 			c.Skip(err.Error())
 		}
@@ -107,7 +100,6 @@ http:
 	return &testRegistryV2{
 		cmd:      cmd,
 		url:      url,
-		dir:      tmp,
 		username: username,
 		password: password,
 		email:    email,
@@ -126,7 +118,8 @@ func (t *testRegistryV2) Ping() error {
 	return nil
 }

-func (t *testRegistryV2) Close() {
-	t.cmd.Process.Kill()
-	os.RemoveAll(t.dir)
+func (t *testRegistryV2) tearDown(c *check.C) {
+	// It’s undocumented what Kill() returns if the process has terminated,
+	// so we couldn’t check just for that. This is running in a container anyway…
+	_ = t.cmd.Process.Kill()
 }

@@ -3,7 +3,6 @@ package main
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"strings"
@@ -21,7 +20,6 @@ func init() {
 }

 type SigningSuite struct {
-	gpgHome     string
 	fingerprint string
 }

@@ -40,25 +38,18 @@ func (s *SigningSuite) SetUpSuite(c *check.C) {
 	_, err := exec.LookPath(skopeoBinary)
 	c.Assert(err, check.IsNil)

-	s.gpgHome, err = ioutil.TempDir("", "skopeo-gpg")
-	c.Assert(err, check.IsNil)
-	os.Setenv("GNUPGHOME", s.gpgHome)
+	gpgHome := c.MkDir()
+	os.Setenv("GNUPGHOME", gpgHome)

-	runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", s.gpgHome, "--batch", "--gen-key")
+	runCommandWithInput(c, "Key-Type: RSA\nName-Real: Testing user\n%no-protection\n%commit\n", gpgBinary, "--homedir", gpgHome, "--batch", "--gen-key")

-	lines, err := exec.Command(gpgBinary, "--homedir", s.gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
+	lines, err := exec.Command(gpgBinary, "--homedir", gpgHome, "--with-colons", "--no-permission-warning", "--fingerprint").Output()
 	c.Assert(err, check.IsNil)
 	s.fingerprint, err = findFingerprint(lines)
 	c.Assert(err, check.IsNil)
 }

 func (s *SigningSuite) TearDownSuite(c *check.C) {
-	if s.gpgHome != "" {
-		err := os.RemoveAll(s.gpgHome)
-		c.Assert(err, check.IsNil)
-	}
-	s.gpgHome = ""
-
 	os.Unsetenv("GNUPGHOME")
 }

@@ -73,7 +64,7 @@ func (s *SigningSuite) TestSignVerifySmoke(c *check.C) {
 	manifestPath := "fixtures/image.manifest.json"
 	dockerReference := "testing/smoketest"

-	sigOutput, err := ioutil.TempFile("", "sig")
+	sigOutput, err := os.CreateTemp("", "sig")
 	c.Assert(err, check.IsNil)
 	defer os.Remove(sigOutput.Name())
 	assertSkopeoSucceeds(c, "^$", "standalone-sign", "-o", sigOutput.Name(),

@@ -3,7 +3,7 @@ package main
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"io/fs"
 	"os"
 	"path"
 	"path/filepath"
@@ -40,7 +40,6 @@ func init() {
 type SyncSuite struct {
 	cluster  *openshiftCluster
 	registry *testRegistryV2
-	gpgHome  string
 }

 func (s *SyncSuite) SetUpSuite(c *check.C) {
@@ -74,10 +73,8 @@ func (s *SyncSuite) SetUpSuite(c *check.C) {
 	// FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.
 	s.registry = setupRegistryV2At(c, v2DockerRegistryURL, registryAuth, registrySchema1)

-	gpgHome, err := ioutil.TempDir("", "skopeo-gpg")
-	c.Assert(err, check.IsNil)
-	s.gpgHome = gpgHome
-	os.Setenv("GNUPGHOME", s.gpgHome)
+	gpgHome := c.MkDir()
+	os.Setenv("GNUPGHOME", gpgHome)

 	for _, key := range []string{"personal", "official"} {
 		batchInput := fmt.Sprintf("Key-Type: RSA\nName-Real: Test key - %s\nName-email: %s@example.com\n%%no-protection\n%%commit\n",
@@ -85,7 +82,7 @@ func (s *SyncSuite) SetUpSuite(c *check.C) {
 		runCommandWithInput(c, batchInput, gpgBinary, "--batch", "--gen-key")

 		out := combinedOutputOfCommand(c, gpgBinary, "--armor", "--export", fmt.Sprintf("%s@example.com", key))
-		err := ioutil.WriteFile(filepath.Join(s.gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
+		err := os.WriteFile(filepath.Join(gpgHome, fmt.Sprintf("%s-pubkey.gpg", key)),
 			[]byte(out), 0600)
 		c.Assert(err, check.IsNil)
 	}
@@ -96,21 +93,32 @@ func (s *SyncSuite) TearDownSuite(c *check.C) {
 		return
 	}

-	if s.gpgHome != "" {
-		os.RemoveAll(s.gpgHome)
-	}
 	if s.registry != nil {
-		s.registry.Close()
+		s.registry.tearDown(c)
 	}
 	if s.cluster != nil {
 		s.cluster.tearDown(c)
 	}
 }

-func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
+func assertNumberOfManifestsInSubdirs(c *check.C, dir string, expectedCount int) {
+	nManifests := 0
+	err := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+		if !d.IsDir() && d.Name() == "manifest.json" {
+			nManifests++
+			return filepath.SkipDir
+		}
+		return nil
+	})
 	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	c.Assert(nManifests, check.Equals, expectedCount)
+}
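The new assertNumberOfManifestsInSubdirs helper replaces several copies of the same filepath.Walk counting loop in the sync tests below; returning filepath.SkipDir once a manifest.json is seen means each image directory is counted exactly once, however many other files it contains. Typical call site, as used later in this diff:

	assertNumberOfManifestsInSubdirs(c, dir1, len(tags))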

+func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
+	tmpDir := c.MkDir()

 	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
 	image := pullableTaggedImage
@@ -136,9 +144,7 @@ func (s *SyncSuite) TestDocker2DirTagged(c *check.C) {
 }

 func (s *SyncSuite) TestDocker2DirTaggedAll(c *check.C) {
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
 	image := pullableTaggedManifestList
@@ -164,16 +170,14 @@ func (s *SyncSuite) TestDocker2DirTaggedAll(c *check.C) {
 }

 func (s *SyncSuite) TestPreserveDigests(c *check.C) {
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
 	image := pullableTaggedManifestList

 	// copy docker => dir
 	assertSkopeoSucceeds(c, "", "copy", "--all", "--preserve-digests", "docker://"+image, "dir:"+tmpDir)
-	_, err = os.Stat(path.Join(tmpDir, "manifest.json"))
+	_, err := os.Stat(path.Join(tmpDir, "manifest.json"))
 	c.Assert(err, check.IsNil)

 	assertSkopeoFails(c, ".*Instructed to preserve digests.*", "copy", "--all", "--preserve-digests", "--format=oci", "docker://"+image, "dir:"+tmpDir)
@@ -186,8 +190,7 @@ func (s *SyncSuite) TestScoped(c *check.C) {
 	c.Assert(err, check.IsNil)
 	imagePath := imageRef.DockerReference().String()

-	dir1, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
+	dir1 := c.MkDir()
 	assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
 	_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
 	c.Assert(err, check.IsNil)
@@ -195,8 +198,6 @@ func (s *SyncSuite) TestScoped(c *check.C) {
 	assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "docker", "--dest", "dir", image, dir1)
 	_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
 	c.Assert(err, check.IsNil)
-
-	os.RemoveAll(dir1)
 }

 func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
@@ -210,8 +211,7 @@ func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
 	assertSkopeoSucceeds(c, "", "copy", "--dest-tls-verify=false", "docker://"+image, "docker://"+path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())))

 	//sync upstream image to dir, not scoped
-	dir1, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
+	dir1 := c.MkDir()
 	assertSkopeoSucceeds(c, "", "sync", "--src", "docker", "--dest", "dir", image, dir1)
 	_, err = os.Stat(path.Join(dir1, path.Base(imagePath), "manifest.json"))
 	c.Assert(err, check.IsNil)
@@ -226,14 +226,10 @@ func (s *SyncSuite) TestDirIsNotOverwritten(c *check.C) {
 	assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src-tls-verify=false", "--src", "docker", "--dest", "dir", path.Join(v2DockerRegistryURL, reference.Path(imageRef.DockerReference())), dir1)
 	_, err = os.Stat(path.Join(dir1, imagePath, "manifest.json"))
 	c.Assert(err, check.IsNil)
-	os.RemoveAll(dir1)
 }

 func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {
-
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
 	image := pullableRepo
@@ -255,9 +251,7 @@ func (s *SyncSuite) TestDocker2DirUntagged(c *check.C) {
 }

 func (s *SyncSuite) TestYamlUntagged(c *check.C) {
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()
 	dir1 := path.Join(tmpDir, "dir1")

 	image := pullableRepo
@@ -278,7 +272,8 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {

 	// sync to the local registry
 	yamlFile := path.Join(tmpDir, "registries.yaml")
-	ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+	err = os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+	c.Assert(err, check.IsNil)
 	assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "docker", "--dest-tls-verify=false", yamlFile, v2DockerRegistryURL)
 	// sync back from local registry to a folder
 	os.Remove(yamlFile)
@@ -289,7 +284,8 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
 %s: []
 `, v2DockerRegistryURL, imagePath)

-	ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+	err = os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+	c.Assert(err, check.IsNil)
 	assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)

 	sysCtx = types.SystemContext{
@@ -301,27 +297,11 @@ func (s *SyncSuite) TestYamlUntagged(c *check.C) {
 	c.Assert(err, check.IsNil)
 	c.Check(len(localTags), check.Not(check.Equals), 0)
 	c.Assert(len(localTags), check.Equals, len(tags))

-	nManifests := 0
-	//count the number of manifest.json in dir1
-	err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if !info.IsDir() && info.Name() == "manifest.json" {
-			nManifests++
-			return filepath.SkipDir
-		}
-		return nil
-	})
-	c.Assert(err, check.IsNil)
-	c.Assert(nManifests, check.Equals, len(tags))
+	assertNumberOfManifestsInSubdirs(c, dir1, len(tags))
 }

 func (s *SyncSuite) TestYamlRegex2Dir(c *check.C) {
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()
 	dir1 := path.Join(tmpDir, "dir1")

 	yamlConfig := `
@@ -334,28 +314,14 @@ k8s.gcr.io:
 	c.Assert(nTags, check.Not(check.Equals), 0)

 	yamlFile := path.Join(tmpDir, "registries.yaml")
-	ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
-	assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
-
-	nManifests := 0
-	err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if !info.IsDir() && info.Name() == "manifest.json" {
-			nManifests++
-			return filepath.SkipDir
-		}
-		return nil
-	})
+	err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
 	c.Assert(err, check.IsNil)
-	c.Assert(nManifests, check.Equals, nTags)
+	assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
+	assertNumberOfManifestsInSubdirs(c, dir1, nTags)
 }

 func (s *SyncSuite) TestYamlDigest2Dir(c *check.C) {
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()
 	dir1 := path.Join(tmpDir, "dir1")

 	yamlConfig := `
@@ -365,28 +331,14 @@ k8s.gcr.io:
 	- sha256:59eec8837a4d942cc19a52b8c09ea75121acc38114a2c68b98983ce9356b8610
 `
 	yamlFile := path.Join(tmpDir, "registries.yaml")
-	ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
-	assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
-
-	nManifests := 0
-	err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if !info.IsDir() && info.Name() == "manifest.json" {
-			nManifests++
-			return filepath.SkipDir
-		}
-		return nil
-	})
+	err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
 	c.Assert(err, check.IsNil)
-	c.Assert(nManifests, check.Equals, 1)
+	assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
+	assertNumberOfManifestsInSubdirs(c, dir1, 1)
 }

 func (s *SyncSuite) TestYaml2Dir(c *check.C) {
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()
 	dir1 := path.Join(tmpDir, "dir1")

 	yamlConfig := `
@@ -417,29 +369,15 @@ quay.io:
 	c.Assert(nTags, check.Not(check.Equals), 0)

 	yamlFile := path.Join(tmpDir, "registries.yaml")
-	ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
-	assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
-
-	nManifests := 0
-	err = filepath.Walk(dir1, func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if !info.IsDir() && info.Name() == "manifest.json" {
-			nManifests++
-			return filepath.SkipDir
-		}
-		return nil
-	})
+	err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
 	c.Assert(err, check.IsNil)
-	c.Assert(nManifests, check.Equals, nTags)
+	assertSkopeoSucceeds(c, "", "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
+	assertNumberOfManifestsInSubdirs(c, dir1, nTags)
 }

 func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
 	const localRegURL = "docker://" + v2DockerRegistryURL + "/"
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()
 	dir1 := path.Join(tmpDir, "dir1")
 	image := pullableRepoWithLatestTag
 	tag := "latest"
@@ -481,7 +419,8 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
 	for _, cfg := range testCfg {
 		yamlConfig := fmt.Sprintf(yamlTemplate, v2DockerRegistryURL, cfg.tlsVerify, image, tag)
 		yamlFile := path.Join(tmpDir, "registries.yaml")
-		ioutil.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+		err := os.WriteFile(yamlFile, []byte(yamlConfig), 0644)
+		c.Assert(err, check.IsNil)

 		cfg.checker(c, cfg.msg, "sync", "--scoped", "--src", "yaml", "--dest", "dir", yamlFile, dir1)
 		os.Remove(yamlFile)
@@ -491,9 +430,7 @@ func (s *SyncSuite) TestYamlTLSVerify(c *check.C) {
 }

 func (s *SyncSuite) TestSyncManifestOutput(c *check.C) {
-	tmpDir, err := ioutil.TempDir("", "sync-manifest-output")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	destDir1 := filepath.Join(tmpDir, "dest1")
 	destDir2 := filepath.Join(tmpDir, "dest2")
@@ -513,9 +450,7 @@ func (s *SyncSuite) TestSyncManifestOutput(c *check.C) {
 func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
 	const localRegURL = "docker://" + v2DockerRegistryURL + "/"

-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
 	image := pullableTaggedImage
@@ -546,15 +481,13 @@ func (s *SyncSuite) TestDocker2DockerTagged(c *check.C) {
 func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
 	const localRegURL = "docker://" + v2DockerRegistryURL + "/"

-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	// FIXME: It would be nice to use one of the local Docker registries instead of needing an Internet connection.
 	image := pullableRepoWithLatestTag

 	dir1 := path.Join(tmpDir, "dir1")
-	err = os.Mkdir(dir1, 0755)
+	err := os.Mkdir(dir1, 0755)
 	c.Assert(err, check.IsNil)
 	dir2 := path.Join(tmpDir, "dir2")
 	err = os.Mkdir(dir2, 0755)
@@ -586,9 +519,7 @@ func (s *SyncSuite) TestDir2DockerTagged(c *check.C) {
 }

 func (s *SyncSuite) TestFailsWithDir2Dir(c *check.C) {
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	dir1 := path.Join(tmpDir, "dir1")
 	dir2 := path.Join(tmpDir, "dir2")
@@ -598,9 +529,7 @@ func (s *SyncSuite) TestFailsWithDir2Dir(c *check.C) {
 }

 func (s *SyncSuite) TestFailsNoSourceImages(c *check.C) {
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	assertSkopeoFails(c, ".*No images to sync found in .*",
 		"sync", "--scoped", "--dest-tls-verify=false", "--src", "dir", "--dest", "docker", tmpDir, v2DockerRegistryURL)
@@ -612,9 +541,7 @@ func (s *SyncSuite) TestFailsNoSourceImages(c *check.C) {
 func (s *SyncSuite) TestFailsWithDockerSourceNoRegistry(c *check.C) {
 	const regURL = "google.com/namespace/imagename"

-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	//untagged
 	assertSkopeoFails(c, ".*invalid status code from registry 404.*",
@@ -627,9 +554,7 @@ func (s *SyncSuite) TestFailsWithDockerSourceNoRegistry(c *check.C) {

 func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {
 	const repo = "privateimagenamethatshouldnotbepublic"
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	//untagged
 	assertSkopeoFails(c, ".*Registry disallows tag list retrieval.*",
@@ -642,9 +567,7 @@ func (s *SyncSuite) TestFailsWithDockerSourceUnauthorized(c *check.C) {

 func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {
 	repo := path.Join(v2DockerRegistryURL, "imagedoesnotexist")
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()

 	//untagged
 	assertSkopeoFails(c, ".*invalid status code from registry 404.*",
@@ -657,9 +580,9 @@ func (s *SyncSuite) TestFailsWithDockerSourceNotExisting(c *check.C) {

 func (s *SyncSuite) TestFailsWithDirSourceNotExisting(c *check.C) {
 	// Make sure the dir does not exist!
-	tmpDir, err := ioutil.TempDir("", "skopeo-sync-test")
-	c.Assert(err, check.IsNil)
-	err = os.RemoveAll(tmpDir)
+	tmpDir := c.MkDir()
+	tmpDir = filepath.Join(tmpDir, "this-does-not-exist")
+	err := os.RemoveAll(tmpDir)
 	c.Assert(err, check.IsNil)
 	_, err = os.Stat(path.Join(tmpDir))
 	c.Check(os.IsNotExist(err), check.Equals, true)

@@ -3,8 +3,8 @@ package main
 import (
 	"bytes"
+	"io"
-	"io/ioutil"
 	"net"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"strings"
@@ -163,7 +163,7 @@ func modifyEnviron(env []string, name, value string) []string {
 // fileFromFixture applies edits to inputPath and returns a path to the temporary file.
 // Callers should defer os.Remove(the_returned_path)
 func fileFromFixture(c *check.C, inputPath string, edits map[string]string) string {
-	contents, err := ioutil.ReadFile(inputPath)
+	contents, err := os.ReadFile(inputPath)
 	c.Assert(err, check.IsNil)
 	for template, value := range edits {
 		updated := bytes.Replace(contents, []byte(template), []byte(value), -1)
@@ -171,7 +171,7 @@ func fileFromFixture(c *check.C, inputPath string, edits map[string]string) stri
 		contents = updated
 	}

-	file, err := ioutil.TempFile("", "policy.json")
+	file, err := os.CreateTemp("", "policy.json")
 	c.Assert(err, check.IsNil)
 	path := file.Name()

@@ -187,7 +187,7 @@ func fileFromFixture(c *check.C, inputPath string, edits map[string]string) stri
 func runDecompressDirs(c *check.C, regexp string, args ...string) {
 	c.Logf("Running %s %s", decompressDirsBinary, strings.Join(args, " "))
 	for i, dir := range args {
-		m, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
+		m, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
 		c.Assert(err, check.IsNil)
 		c.Logf("manifest %d before: %s", i+1, string(m))
 	}
@@ -197,7 +197,7 @@ func runDecompressDirs(c *check.C, regexp string, args ...string) {
 		if len(out) > 0 {
 			c.Logf("output: %s", out)
 		}
-		m, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
+		m, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
 		c.Assert(err, check.IsNil)
 		c.Logf("manifest %d after: %s", i+1, string(m))
 	}
@@ -208,7 +208,7 @@ func runDecompressDirs(c *check.C, regexp string, args ...string) {

 // Verify manifest in a dir: image at dir is expectedMIMEType.
 func verifyManifestMIMEType(c *check.C, dir string, expectedMIMEType string) {
-	manifestBlob, err := ioutil.ReadFile(filepath.Join(dir, "manifest.json"))
+	manifestBlob, err := os.ReadFile(filepath.Join(dir, "manifest.json"))
 	c.Assert(err, check.IsNil)
 	mimeType := manifest.GuessMIMEType(manifestBlob)
 	c.Assert(mimeType, check.Equals, expectedMIMEType)

@@ -27,11 +27,20 @@ load helpers
     # Now run inspect locally
     run_skopeo inspect dir:$workdir
     inspect_local=$output
+    run_skopeo inspect --raw dir:$workdir
+    inspect_local_raw=$output
+    config_digest=$(jq -r '.config.digest' <<<"$inspect_local_raw")

-    # Each SHA-named file must be listed in the output of 'inspect'
+    # Each SHA-named layer file (but not the config) must be listed in the output of 'inspect'.
+    # In all existing versions of Skopeo (with 1.6 being the current as of this comment),
+    # the output of 'inspect' lists layer digests,
+    # but not the digest of the config blob ($config_digest), if any.
+    layers=$(jq -r '.Layers' <<<"$inspect_local")
     for sha in $(find $workdir -type f | xargs -l1 basename | egrep '^[0-9a-f]{64}$'); do
-        expect_output --from="$inspect_local" --substring "sha256:$sha" \
-                      "Locally-extracted SHA file is present in 'inspect'"
+        if [ "sha256:$sha" != "$config_digest" ]; then
+            expect_output --from="$layers" --substring "sha256:$sha" \
+                          "Locally-extracted SHA file is present in 'inspect'"
+        fi
     done

     # Simple sanity check on 'inspect' output.
|
||||
|
||||
@@ -125,6 +125,10 @@ function setup() {
|
||||
run podman --root $TESTDIR/podmanroot images
|
||||
expect_output --substring "mine"
|
||||
|
||||
# rootless cleanup needs to be done with unshare due to subuids
|
||||
if [[ "$(id -u)" != "0" ]]; then
|
||||
run podman unshare rm -rf $TESTDIR/podmanroot
|
||||
fi
|
||||
}
|
||||
|
||||
# shared blob directory
|
||||
@@ -145,6 +149,8 @@ function setup() {
|
||||
}
|
||||
|
||||
@test "copy: sif image" {
|
||||
type -path fakeroot || skip "'fakeroot' tool not available"
|
||||
|
||||
local localimg=dir:$TESTDIR/dir
|
||||
|
||||
run_skopeo copy sif:${TEST_SOURCE_DIR}/testdata/busybox_latest.sif $localimg
|
||||
|
||||
28
systemtest/070-list-tags.bats
Normal file
@@ -0,0 +1,28 @@
#!/usr/bin/env bats
#
# list-tags tests
#

load helpers

# list from registry
@test "list-tags: remote repository on a registry" {
local remote_image=quay.io/libpod/alpine_labels

run_skopeo list-tags "docker://${remote_image}"
expect_output --substring "quay.io/libpod/alpine_labels"
expect_output --substring "latest"
}

# list from a local docker-archive file
@test "list-tags: from a docker-archive file" {
local file_name=${TEST_SOURCE_DIR}/testdata/docker-two-images.tar.xz

run_skopeo list-tags docker-archive:$file_name
expect_output --substring "example.com/empty:latest"
expect_output --substring "example.com/empty/but:different"

}


# vim: filetype=sh
26
systemtest/080-sync.bats
Normal file
@@ -0,0 +1,26 @@
#!/usr/bin/env bats
#
# Sync tests
#

load helpers

function setup() {
standard_setup
}

@test "sync: --dry-run" {
local remote_image=quay.io/libpod/busybox:latest
local dir=$TESTDIR/dir

run_skopeo sync --dry-run --src docker --dest dir --scoped $remote_image $dir
expect_output --substring "Would have copied image"
expect_output --substring "from=\"docker://${remote_image}\" to=\"dir:${dir}/${remote_image}\""
expect_output --substring "Would have synced 1 images from 1 sources"
}

teardown() {
standard_teardown
}

# vim: filetype=sh
BIN
systemtest/testdata/docker-two-images.tar.xz
vendored
Normal file
Binary file not shown.
2
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
@@ -56,7 +56,7 @@ And then decoded with:

```go
var conf Config
err := toml.Decode(tomlData, &conf)
_, err := toml.Decode(tomlData, &conf)
// handle error
```
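For context on the README change above: BurntSushi/toml's Decode returns (MetaData, error), so callers must bind or discard the metadata. A small sketch under that assumption; the Config struct and TOML input are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Config is a stand-in for whatever struct the TOML maps onto.
type Config struct {
	Name string
	Age  int
}

func main() {
	tomlData := "name = \"skopeo\"\nage = 7"

	var conf Config
	// Decode returns MetaData alongside the error; use the blank
	// identifier if only the decoded struct is needed.
	md, err := toml.Decode(tomlData, &conf)
	if err != nil {
		panic(err)
	}
	fmt.Println(conf.Name, md.Undecoded())
}
```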
73
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
@@ -1,6 +1,7 @@
package toml

import (
"bytes"
"encoding"
"fmt"
"io"
@@ -18,11 +19,29 @@ type Unmarshaler interface {
}

// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
func Unmarshal(data []byte, v interface{}) error {
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err
}

// Decode the TOML data in to the pointer v.
//
// See the documentation on Decoder for a description of the decoding process.
func Decode(data string, v interface{}) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}

// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at path and decode it for you.
func DecodeFile(path string, v interface{}) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
}

// Primitive is a TOML value that hasn't been decoded into a Go value.
//
// This type can be used for any value, which will cause decoding to be delayed.
@@ -42,27 +61,10 @@ type Primitive struct {
// The significand precision for float32 and float64 is 24 and 53 bits; this is
// the range a natural number can be stored in a float without loss of data.
const (
maxSafeFloat32Int = 16777215 // 2^24-1
maxSafeFloat64Int = 9007199254740991 // 2^53-1
maxSafeFloat32Int = 16777215 // 2^24-1
maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
)

// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}

// Decoder decodes TOML data.
//
// TOML tables correspond to Go structs or maps (dealer's choice – they can be
@@ -158,22 +160,21 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
return md, md.unify(p.mapping, rv)
}

// Decode the TOML data in to the pointer v.
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// See the documentation on Decoder for a description of the decoding process.
func Decode(data string, v interface{}) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}

// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at path and decode it for you.
func DecodeFile(path string, v interface{}) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
}
defer fp.Close()
return NewDecoder(fp).Decode(v)
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}

// unify performs a sort of type unification based on the structure of `rv`,
6
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
@@ -212,7 +212,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
if err != nil {
encPanic(err)
}
enc.writeQuoted(string(s))
enc.w.Write(s)
return
case encoding.TextMarshaler:
s, err := v.MarshalText()
@@ -398,6 +398,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
continue
}
opts := getOptions(f.Tag)
if opts.skip {
continue
}

frv := rv.Field(i)
2
vendor/github.com/BurntSushi/toml/error.go
generated
vendored
@@ -10,7 +10,7 @@ import (
// For example invalid syntax, duplicate keys, etc.
//
// In addition to the error message itself, you can also print detailed location
// information with context by using ErrorWithLocation():
// information with context by using ErrorWithPosition():
//
// toml: error: Key 'fruit' was already created and cannot be used as an array.
//
5
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
@@ -128,6 +128,11 @@ func (lx lexer) getPos() Position {
}

func (lx *lexer) emit(typ itemType) {
// Needed for multiline strings ending with an incomplete UTF-8 sequence.
if lx.start > lx.pos {
lx.error(errLexUTF8{lx.input[lx.pos]})
return
}
lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
lx.start = lx.pos
}
8
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
@@ -220,7 +220,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
case itemString:
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
case itemMultilineString:
return p.replaceEscapes(it, stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
@@ -647,7 +647,7 @@ func stripFirstNewline(s string) string {
}

// Remove newlines inside triple-quoted strings if a line ends with "\".
func stripEscapedNewlines(s string) string {
func (p *parser) stripEscapedNewlines(s string) string {
split := strings.Split(s, "\n")
if len(split) < 1 {
return s
@@ -679,6 +679,10 @@ func stripEscapedNewlines(s string) string {
continue
}

if i == len(split)-1 {
p.panicf("invalid escape: '\\ '")
}

split[i] = line[:len(line)-1] // Remove \
if len(split)-1 > i {
split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
115
vendor/github.com/Microsoft/go-winio/backuptar/tar.go
generated
vendored
@@ -113,6 +113,69 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta
return hdr
}

// SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file
// from the tar header and returns the security descriptor into a byte slice.
func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) {
// Maintaining old SDDL-based behavior for backward
// compatibility. All new tar headers written by this library
// will have raw binary for the security descriptor.
var sd []byte
var err error
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
sd, err = winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
}
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
sd, err = base64.StdEncoding.DecodeString(sdraw)
if err != nil {
return nil, err
}
}
return sd, nil
}

// ExtendedAttributesFromTarHeader reads the EAs associated with the header of the
// current file from the tar header and returns it as a byte slice.
func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) {
var eas []winio.ExtendedAttribute
var eadata []byte
var err error
for k, v := range hdr.PAXRecords {
if !strings.HasPrefix(k, hdrEaPrefix) {
continue
}
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return nil, err
}
eas = append(eas, winio.ExtendedAttribute{
Name: k[len(hdrEaPrefix):],
Value: data,
})
}
if len(eas) != 0 {
eadata, err = winio.EncodeExtendedAttributes(eas)
if err != nil {
return nil, err
}
}
return eadata, nil
}

// EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header
// and encodes it into a byte slice. The file for which this function is called must be a
// symlink.
func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte {
_, isMountPoint := hdr.PAXRecords[hdrMountPoint]
rp := winio.ReparsePoint{
Target: filepath.FromSlash(hdr.Linkname),
IsMountPoint: isMountPoint,
}
return winio.EncodeReparsePoint(&rp)
}

// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream.
//
// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS.
@@ -358,21 +421,10 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win
// tar file that was not processed, or io.EOF is there are no more.
func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
bw := winio.NewBackupStreamWriter(w)
var sd []byte
var err error
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
// by this library will have raw binary for the security descriptor.
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
sd, err = winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
}
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
sd, err = base64.StdEncoding.DecodeString(sdraw)
if err != nil {
return nil, err
}

sd, err := SecurityDescriptorFromTarHeader(hdr)
if err != nil {
return nil, err
}
if len(sd) != 0 {
bhdr := winio.BackupHeader{
@@ -388,25 +440,12 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
return nil, err
}
}
var eas []winio.ExtendedAttribute
for k, v := range hdr.PAXRecords {
if !strings.HasPrefix(k, hdrEaPrefix) {
continue
}
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return nil, err
}
eas = append(eas, winio.ExtendedAttribute{
Name: k[len(hdrEaPrefix):],
Value: data,
})

eadata, err := ExtendedAttributesFromTarHeader(hdr)
if err != nil {
return nil, err
}
if len(eas) != 0 {
eadata, err := winio.EncodeExtendedAttributes(eas)
if err != nil {
return nil, err
}
if len(eadata) != 0 {
bhdr := winio.BackupHeader{
Id: winio.BackupEaData,
Size: int64(len(eadata)),
@@ -420,13 +459,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
return nil, err
}
}

if hdr.Typeflag == tar.TypeSymlink {
_, isMountPoint := hdr.PAXRecords[hdrMountPoint]
rp := winio.ReparsePoint{
Target: filepath.FromSlash(hdr.Linkname),
IsMountPoint: isMountPoint,
}
reparse := winio.EncodeReparsePoint(&rp)
reparse := EncodeReparsePointFromTarHeader(hdr)
bhdr := winio.BackupHeader{
Id: winio.BackupReparseData,
Size: int64(len(reparse)),
@@ -439,7 +474,9 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
if err != nil {
return nil, err
}

}

if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
bhdr := winio.BackupHeader{
Id: winio.BackupData,
6
vendor/github.com/Microsoft/go-winio/file.go
generated
vendored
@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package winio
@@ -143,6 +144,11 @@ func (f *win32File) Close() error {
return nil
}

// IsClosed checks if the file has been closed
func (f *win32File) IsClosed() bool {
return f.closing.isSet()
}

// prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) {
3
vendor/github.com/Microsoft/go-winio/go.mod
generated
vendored
@@ -1,9 +1,8 @@
module github.com/Microsoft/go-winio

go 1.12
go 1.13

require (
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.7.0
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)
3
vendor/github.com/Microsoft/go-winio/go.sum
generated
vendored
@@ -1,14 +1,11 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
17
vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package winio
@@ -252,15 +253,23 @@ func (conn *HvsockConn) Close() error {
return conn.sock.Close()
}

func (conn *HvsockConn) IsClosed() bool {
return conn.sock.IsClosed()
}

func (conn *HvsockConn) shutdown(how int) error {
err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD)
if conn.IsClosed() {
return ErrFileClosed
}

err := syscall.Shutdown(conn.sock.handle, how)
if err != nil {
return os.NewSyscallError("shutdown", err)
}
return nil
}

// CloseRead shuts down the read end of the socket.
// CloseRead shuts down the read end of the socket, preventing future read operations.
func (conn *HvsockConn) CloseRead() error {
err := conn.shutdown(syscall.SHUT_RD)
if err != nil {
@@ -269,8 +278,8 @@ func (conn *HvsockConn) CloseRead() error {
return nil
}

// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
// no more data will be written.
// CloseWrite shuts down the write end of the socket, preventing future write operations and
// notifying the other endpoint that no more data will be written.
func (conn *HvsockConn) CloseWrite() error {
err := conn.shutdown(syscall.SHUT_WR)
if err != nil {
9
vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
generated
vendored
@@ -14,8 +14,6 @@ import (
"encoding/binary"
"fmt"
"strconv"

"golang.org/x/sys/windows"
)

// Variant specifies which GUID variant (or "type") of the GUID. It determines
@@ -41,13 +39,6 @@ type Version uint8
var _ = (encoding.TextMarshaler)(GUID{})
var _ = (encoding.TextUnmarshaler)(&GUID{})

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID

// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
func NewV4() (GUID, error) {
var b [16]byte
15
vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
generated
vendored
Normal file
@@ -0,0 +1,15 @@
// +build !windows

package guid

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type as that is only available to builds
// targeted at `windows`. The representation matches that used by native Windows
// code.
type GUID struct {
Data1 uint32
Data2 uint16
Data3 uint16
Data4 [8]byte
}
10
vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
package guid

import "golang.org/x/sys/windows"

// GUID represents a GUID/UUID. It has the same structure as
// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
// that type. It is defined as its own type so that stringification and
// marshaling can be supported. The representation matches that used by native
// Windows code.
type GUID windows.GUID
15
vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go
generated
vendored
@@ -3,11 +3,10 @@
package security

import (
"fmt"
"os"
"syscall"
"unsafe"

"github.com/pkg/errors"
)

type (
@@ -72,7 +71,7 @@ func GrantVmGroupAccess(name string) error {
// Stat (to determine if `name` is a directory).
s, err := os.Stat(name)
if err != nil {
return errors.Wrapf(err, "%s os.Stat %s", gvmga, name)
return fmt.Errorf("%s os.Stat %s: %w", gvmga, name, err)
}

// Get a handle to the file/directory. Must defer Close on success.
@@ -88,7 +87,7 @@ func GrantVmGroupAccess(name string) error {
sd := uintptr(0)
origDACL := uintptr(0)
if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil {
return errors.Wrapf(err, "%s GetSecurityInfo %s", gvmga, name)
return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err)
}
defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd)))

@@ -102,7 +101,7 @@ func GrantVmGroupAccess(name string) error {

// And finally use SetSecurityInfo to apply the updated DACL.
if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil {
return errors.Wrapf(err, "%s SetSecurityInfo %s", gvmga, name)
return fmt.Errorf("%s SetSecurityInfo %s: %w", gvmga, name, err)
}

return nil
@@ -120,7 +119,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) {
}
fd, err := syscall.CreateFile(&namep[0], da, sm, nil, syscall.OPEN_EXISTING, fa, 0)
if err != nil {
return 0, errors.Wrapf(err, "%s syscall.CreateFile %s", gvmga, name)
return 0, fmt.Errorf("%s syscall.CreateFile %s: %w", gvmga, name, err)
}
return fd, nil
}
@@ -131,7 +130,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp
// Generate pointers to the SIDs based on the string SIDs
sid, err := syscall.StringToSid(sidVmGroup)
if err != nil {
return 0, errors.Wrapf(err, "%s syscall.StringToSid %s %s", gvmga, name, sidVmGroup)
return 0, fmt.Errorf("%s syscall.StringToSid %s %s: %w", gvmga, name, sidVmGroup, err)
}

inheritance := inheritModeNoInheritance
@@ -154,7 +153,7 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp

modifiedDACL := uintptr(0)
if err := setEntriesInAcl(uintptr(uint32(1)), uintptr(unsafe.Pointer(&eaArray[0])), origDACL, &modifiedDACL); err != nil {
return 0, errors.Wrapf(err, "%s SetEntriesInAcl %s", gvmga, name)
return 0, fmt.Errorf("%s SetEntriesInAcl %s: %w", gvmga, name, err)
}

return modifiedDACL, nil
59
vendor/github.com/Microsoft/go-winio/vhd/vhd.go
generated
vendored
@@ -1,3 +1,4 @@
//go:build windows
// +build windows

package vhd
@@ -7,14 +8,13 @@ import (
"syscall"

"github.com/Microsoft/go-winio/pkg/guid"
"github.com/pkg/errors"
"golang.org/x/sys/windows"
)

//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go

//sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
//sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk
//sys attachVirtualDisk(handle syscall.Handle, securityDescriptor *uintptr, attachVirtualDiskFlag uint32, providerSpecificFlags uint32, parameters *AttachVirtualDiskParameters, overlapped *syscall.Overlapped) (win32err error) = virtdisk.AttachVirtualDisk
//sys detachVirtualDisk(handle syscall.Handle, detachVirtualDiskFlags uint32, providerSpecificFlags uint32) (win32err error) = virtdisk.DetachVirtualDisk
//sys getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint32, buffer *uint16) (win32err error) = virtdisk.GetVirtualDiskPhysicalPath
@@ -62,13 +62,27 @@ type OpenVirtualDiskParameters struct {
Version2 OpenVersion2
}

// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However,
// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating
// `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods.
type openVersion2 struct {
getInfoOnly int32
readOnly int32
resiliencyGUID guid.GUID
}

type openVirtualDiskParameters struct {
version uint32
version2 openVersion2
}

type AttachVersion2 struct {
RestrictedOffset uint64
RestrictedLength uint64
}

type AttachVirtualDiskParameters struct {
Version uint32 // Must always be set to 2
Version uint32
Version2 AttachVersion2
}

@@ -146,16 +160,13 @@ func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error {
return err
}

if err := syscall.CloseHandle(handle); err != nil {
return err
}
return nil
return syscall.CloseHandle(handle)
}

// DetachVirtualDisk detaches a virtual hard disk by handle.
func DetachVirtualDisk(handle syscall.Handle) (err error) {
if err := detachVirtualDisk(handle, 0, 0); err != nil {
return errors.Wrap(err, "failed to detach virtual disk")
return fmt.Errorf("failed to detach virtual disk: %w", err)
}
return nil
}
@@ -185,7 +196,7 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua
parameters,
nil,
); err != nil {
return errors.Wrap(err, "failed to attach virtual disk")
return fmt.Errorf("failed to attach virtual disk: %w", err)
}
return nil
}
@@ -209,7 +220,7 @@ func AttachVhd(path string) (err error) {
AttachVirtualDiskFlagNone,
&params,
); err != nil {
return errors.Wrap(err, "failed to attach virtual disk")
return fmt.Errorf("failed to attach virtual disk: %w", err)
}
return nil
}
@@ -234,19 +245,35 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual
var (
handle syscall.Handle
defaultType VirtualStorageType
getInfoOnly int32
readOnly int32
)
if parameters.Version != 2 {
return handle, fmt.Errorf("only version 2 VHDs are supported, found version: %d", parameters.Version)
}
if parameters.Version2.GetInfoOnly {
getInfoOnly = 1
}
if parameters.Version2.ReadOnly {
readOnly = 1
}
params := &openVirtualDiskParameters{
version: parameters.Version,
version2: openVersion2{
getInfoOnly,
readOnly,
parameters.Version2.ResiliencyGUID,
},
}
if err := openVirtualDisk(
&defaultType,
vhdPath,
uint32(virtualDiskAccessMask),
uint32(openVirtualDiskFlags),
parameters,
params,
&handle,
); err != nil {
return 0, errors.Wrap(err, "failed to open virtual disk")
return 0, fmt.Errorf("failed to open virtual disk: %w", err)
}
return handle, nil
}
@@ -272,7 +299,7 @@ func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask,
nil,
&handle,
); err != nil {
return handle, errors.Wrap(err, "failed to create virtual disk")
return handle, fmt.Errorf("failed to create virtual disk: %w", err)
}
return handle, nil
}
@@ -290,7 +317,7 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) {
&diskPathSizeInBytes,
&diskPhysicalPathBuf[0],
); err != nil {
return "", errors.Wrap(err, "failed to get disk physical path")
return "", fmt.Errorf("failed to get disk physical path: %w", err)
}
return windows.UTF16ToString(diskPhysicalPathBuf[:]), nil
}
@@ -314,10 +341,10 @@ func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error
createParams,
)
if err != nil {
return fmt.Errorf("failed to create differencing vhd: %s", err)
return fmt.Errorf("failed to create differencing vhd: %w", err)
}
if err := syscall.CloseHandle(vhdHandle); err != nil {
return fmt.Errorf("failed to close differencing vhd handle: %s", err)
return fmt.Errorf("failed to close differencing vhd handle: %w", err)
}
return nil
}
4
vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go
generated
vendored
@@ -88,7 +88,7 @@ func getVirtualDiskPhysicalPath(handle syscall.Handle, diskPathSizeInBytes *uint
return
}

func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
var _p0 *uint16
_p0, win32err = syscall.UTF16PtrFromString(path)
if win32err != nil {
@@ -97,7 +97,7 @@ func openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtua
return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, openVirtualDiskFlags, parameters, handle)
}

func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *OpenVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
func _openVirtualDisk(virtualStorageType *VirtualStorageType, path *uint16, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) {
r0, _, _ := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(openVirtualDiskFlags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle)))
if r0 != 0 {
win32err = syscall.Errno(r0)
5
vendor/github.com/containerd/containerd/errdefs/errors.go
generated
vendored
@@ -17,7 +17,7 @@
// Package errdefs defines the common errors used throughout containerd
// packages.
//
// Use with errors.Wrap and error.Wrapf to add context to an error.
// Use with fmt.Errorf to add context to an error.
//
// To detect an error class, use the IsXXX functions to tell whether an error
// is of a certain type.
@@ -28,8 +28,7 @@ package errdefs

import (
"context"

"github.com/pkg/errors"
"errors"
)

// Definitions of common error types used throughout containerd. All containerd
10
vendor/github.com/containerd/containerd/errdefs/grpc.go
generated
vendored
@@ -18,9 +18,9 @@ package errdefs

import (
"context"
"fmt"
"strings"

"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
@@ -68,9 +68,9 @@ func ToGRPC(err error) error {
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
func ToGRPCf(err error, format string, args ...interface{}) error {
return ToGRPC(errors.Wrapf(err, format, args...))
return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
}

// FromGRPC returns the underlying error from a grpc service based on the grpc error code
@@ -104,9 +104,9 @@ func FromGRPC(err error) error {

msg := rebaseMessage(cls, err)
if msg != "" {
err = errors.Wrap(cls, msg)
err = fmt.Errorf("%s: %w", msg, cls)
} else {
err = errors.WithStack(cls)
err = cls
}

return err
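For context, the containerd changes above replace github.com/pkg/errors with Go 1.13 error wrapping. A minimal standard-library sketch of the pattern, showing that errors.Is still matches the wrapped sentinel; the names here are illustrative, not from the vendored code:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrNotFound = errors.New("not found")

func lookup(name string) error {
	// fmt.Errorf with %w replaces errors.Wrapf: the sentinel error
	// stays reachable through the wrap chain.
	return fmt.Errorf("lookup %s: %w", name, ErrNotFound)
}

func main() {
	err := lookup("busybox")
	fmt.Println(errors.Is(err, ErrNotFound)) // true
}
```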
5
vendor/github.com/containerd/containerd/log/context.go
generated
vendored
@@ -52,7 +52,8 @@ const (
// WithLogger returns a new context with the provided logger. Use in
// combination with logger.WithField(s) for great effect.
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
return context.WithValue(ctx, loggerKey{}, logger)
e := logger.WithContext(ctx)
return context.WithValue(ctx, loggerKey{}, e)
}

// GetLogger retrieves the current logger from the context. If no logger is
@@ -61,7 +62,7 @@ func GetLogger(ctx context.Context) *logrus.Entry {
logger := ctx.Value(loggerKey{})

if logger == nil {
return L
return L.WithContext(ctx)
}

return logger.(*logrus.Entry)
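A short sketch of how a caller might exercise the changed behavior above: after this patch, both the stored entry and the fallback logger are bound to the context, so logrus hooks can read it. The "module" field is a made-up example:

```go
package main

import (
	"context"

	"github.com/containerd/containerd/log"
	"github.com/sirupsen/logrus"
)

func main() {
	ctx := context.Background()
	// WithLogger now stores an entry bound to ctx via WithContext;
	// GetLogger (aliased as log.G) falls back to the default logger,
	// likewise bound to ctx.
	ctx = log.WithLogger(ctx, logrus.NewEntry(logrus.StandardLogger()).WithField("module", "copy"))
	log.G(ctx).Info("hello")
}
```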
12
vendor/github.com/containerd/containerd/platforms/compare.go
generated
vendored
@@ -38,12 +38,22 @@ func platformVector(platform specs.Platform) []specs.Platform {

switch platform.Architecture {
case "amd64":
if amd64Version, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && amd64Version > 1 {
for amd64Version--; amd64Version >= 1; amd64Version-- {
vector = append(vector, specs.Platform{
Architecture: platform.Architecture,
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: "v" + strconv.Itoa(amd64Version),
})
}
}
vector = append(vector, specs.Platform{
Architecture: "386",
OS: platform.OS,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
Variant: platform.Variant,
})
case "arm":
if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 {
6
vendor/github.com/containerd/containerd/platforms/cpuinfo.go
generated
vendored
@@ -18,6 +18,7 @@ package platforms

import (
"bufio"
"fmt"
"os"
"runtime"
"strings"
@@ -25,7 +26,6 @@ import (

"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/pkg/errors"
)

// Present the ARM instruction set architecture, eg: v7, v8
@@ -48,7 +48,7 @@ func cpuVariant() string {
// by ourselves. We can just parse these information from /proc/cpuinfo
func getCPUInfo(pattern string) (info string, err error) {
if !isLinuxOS(runtime.GOOS) {
return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS)
return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented)
}

cpuinfo, err := os.Open("/proc/cpuinfo")
@@ -75,7 +75,7 @@ func getCPUInfo(pattern string) (info string, err error) {
return "", err
}

return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern)
return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound)
}

func getCPUVariant() string {
10
vendor/github.com/containerd/containerd/platforms/database.go
generated
vendored
@@ -38,7 +38,7 @@ func isLinuxOS(os string) bool {
// The OS value should be normalized before calling this function.
func isKnownOS(os string) bool {
switch os {
case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
return true
}
return false
@@ -60,7 +60,7 @@ func isArmArch(arch string) bool {
// The arch value should be normalized before being passed to this function.
func isKnownArch(arch string) bool {
switch arch {
case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
return true
}
return false
@@ -86,9 +86,11 @@ func normalizeArch(arch, variant string) (string, string) {
case "i386":
arch = "386"
variant = ""
case "x86_64", "x86-64":
case "x86_64", "x86-64", "amd64":
arch = "amd64"
variant = ""
if variant == "v1" {
variant = ""
}
case "aarch64", "arm64":
arch = "arm64"
switch variant {
16
vendor/github.com/containerd/containerd/platforms/defaults.go
generated
vendored
@@ -16,27 +16,11 @@

package platforms

import (
"runtime"

specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// DefaultString returns the default string specifier for the platform.
func DefaultString() string {
return Format(DefaultSpec())
}

// DefaultSpec returns the current platform's default platform specification.
func DefaultSpec() specs.Platform {
return specs.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
// The Variant field will be empty if arch != ARM.
Variant: cpuVariant(),
}
}

// DefaultStrict returns strict form of Default.
func DefaultStrict() MatchComparer {
return OnlyStrict(DefaultSpec())
45
vendor/github.com/containerd/containerd/platforms/defaults_darwin.go
generated
vendored
Normal file
@@ -0,0 +1,45 @@
//go:build darwin
// +build darwin

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package platforms

import (
"runtime"

specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// DefaultSpec returns the current platform's default platform specification.
func DefaultSpec() specs.Platform {
return specs.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
// The Variant field will be empty if arch != ARM.
Variant: cpuVariant(),
}
}

// Default returns the default matcher for the platform.
func Default() MatchComparer {
return Ordered(DefaultSpec(), specs.Platform{
// darwin runtime also supports Linux binary via runu/LKL
OS: "linux",
Architecture: runtime.GOARCH,
})
}
19
vendor/github.com/containerd/containerd/platforms/defaults_unix.go
generated
vendored
@@ -1,4 +1,5 @@
// +build !windows
//go:build !windows && !darwin
// +build !windows,!darwin

/*
Copyright The containerd Authors.
@@ -18,6 +19,22 @@

package platforms

import (
"runtime"

specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// DefaultSpec returns the current platform's default platform specification.
func DefaultSpec() specs.Platform {
return specs.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
// The Variant field will be empty if arch != ARM.
Variant: cpuVariant(),
}
}

// Default returns the default matcher for the platform.
func Default() MatchComparer {
return Only(DefaultSpec())
14
vendor/github.com/containerd/containerd/platforms/defaults_windows.go
generated
vendored
@@ -1,5 +1,3 @@
// +build windows

/*
Copyright The containerd Authors.

@@ -29,6 +27,18 @@ import (
"golang.org/x/sys/windows"
)

// DefaultSpec returns the current platform's default platform specification.
func DefaultSpec() specs.Platform {
major, minor, build := windows.RtlGetNtVersionNumbers()
return specs.Platform{
OS: runtime.GOOS,
Architecture: runtime.GOARCH,
OSVersion: fmt.Sprintf("%d.%d.%d", major, minor, build),
// The Variant field will be empty if arch != ARM.
Variant: cpuVariant(),
}
}

type matchComparer struct {
defaults Matcher
osVersionPrefix string
31
vendor/github.com/containerd/containerd/platforms/platforms.go
generated
vendored
@@ -107,6 +107,8 @@
package platforms

import (
"fmt"
"path"
"regexp"
"runtime"
"strconv"
@@ -114,7 +116,6 @@ import (

"github.com/containerd/containerd/errdefs"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)

var (
@@ -166,14 +167,14 @@ func (m *matcher) String() string {
func Parse(specifier string) (specs.Platform, error) {
if strings.Contains(specifier, "*") {
// TODO(stevvooe): need to work out exact wildcard handling
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument)
}

parts := strings.Split(specifier, "/")

for _, part := range parts {
if !specifierRe.MatchString(part) {
return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument)
}
}

@@ -205,7 +206,7 @@ func Parse(specifier string) (specs.Platform, error) {
return p, nil
}

return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument)
case 2:
// In this case, we treat as a regular os/arch pair. We don't care
// about whether or not we know of the platform.
@@ -227,7 +228,7 @@ func Parse(specifier string) (specs.Platform, error) {
return p, nil
}

return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument)
}

// MustParse is like Parses but panics if the specifier cannot be parsed.
@@ -246,20 +247,7 @@ func Format(platform specs.Platform) string {
return "unknown"
}

return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
}

func joinNotEmpty(s ...string) string {
var ss []string
for _, s := range s {
if s == "" {
continue
}

ss = append(ss, s)
}

return strings.Join(ss, "/")
return path.Join(platform.OS, platform.Architecture, platform.Variant)
}

// Normalize validates and translate the platform to the canonical value.
@@ -269,10 +257,5 @@ func joinNotEmpty(s ...string) string {
func Normalize(platform specs.Platform) specs.Platform {
platform.OS = normalizeOS(platform.OS)
platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)

// these fields are deprecated, remove them
platform.OSFeatures = nil
platform.OSVersion = ""

return platform
}
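A short sketch of the public API touched above, assuming the vendored containerd/platforms package is importable as shown:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// Parse validates a specifier; after this change, failures wrap
	// errdefs.ErrInvalidArgument via %w instead of pkg/errors.
	p, err := platforms.Parse("linux/arm/v7")
	if err != nil {
		panic(err)
	}
	// Format now joins os/arch/variant with path.Join, dropping
	// empty components the same way joinNotEmpty used to.
	fmt.Println(platforms.Format(platforms.Normalize(p))) // linux/arm/v7
}
```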
58
vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
generated
vendored
58
vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
generated
vendored
@@ -26,9 +26,10 @@ import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
@@ -38,7 +39,6 @@ import (
|
||||
"github.com/containerd/stargz-snapshotter/estargz/errorutil"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
@@ -48,6 +48,7 @@ type options struct {
|
||||
prioritizedFiles []string
|
||||
missedPrioritizedFiles *[]string
|
||||
compression Compression
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
type Option func(o *options) error
|
||||
@@ -104,6 +105,14 @@ func WithCompression(compression Compression) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithContext specifies a context that can be used for clean canceleration.
|
||||
func WithContext(ctx context.Context) Option {
|
||||
return func(o *options) error {
|
||||
o.ctx = ctx
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Blob is an eStargz blob.
|
||||
type Blob struct {
|
||||
io.ReadCloser
|
||||
@@ -139,12 +148,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
|
||||
opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
|
||||
}
|
||||
layerFiles := newTempFiles()
|
||||
ctx := opts.ctx
|
||||
if ctx == nil {
|
||||
ctx = context.Background()
|
||||
}
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
go func() {
|
||||
select {
|
||||
case <-done:
|
||||
// nop
|
||||
case <-ctx.Done():
|
||||
layerFiles.CleanupAll()
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
if rErr != nil {
|
||||
if err := layerFiles.CleanupAll(); err != nil {
|
||||
rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err)
|
||||
rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr)
|
||||
}
|
||||
}
|
||||
if cErr := ctx.Err(); cErr != nil {
|
||||
rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
|
||||
}
|
||||
}()
|
||||
tarBlob, err := decompressBlob(tarBlob, layerFiles)
|
||||
if err != nil {
|
||||
@@ -307,7 +333,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
|
||||
// Import tar file.
|
||||
intar, err := importTar(in)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to sort")
|
||||
return nil, fmt.Errorf("failed to sort: %w", err)
|
||||
}
|
||||
|
||||
// Sort the tar file respecting to the prioritized files list.
|
||||
@@ -318,7 +344,7 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
|
||||
*missedPrioritized = append(*missedPrioritized, l)
|
||||
continue // allow not found
|
||||
}
|
||||
return nil, errors.Wrap(err, "failed to sort tar entries")
|
||||
return nil, fmt.Errorf("failed to sort tar entries: %w", err)
|
||||
}
|
||||
}
|
||||
if len(prioritized) == 0 {
|
||||
@@ -371,7 +397,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
|
||||
tf := &tarFile{}
|
||||
pw, err := newCountReader(in)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make position watcher")
|
||||
return nil, fmt.Errorf("failed to make position watcher: %w", err)
|
||||
}
|
||||
tr := tar.NewReader(pw)
|
||||
|
||||
@@ -383,7 +409,7 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
|
||||
if err == io.EOF {
|
||||
break
|
||||
} else {
|
||||
return nil, errors.Wrap(err, "failed to parse tar file")
|
||||
return nil, fmt.Errorf("failed to parse tar file, %w", err)
|
||||
}
|
||||
}
|
||||
switch cleanEntryName(h.Name) {
|
||||
@@ -420,7 +446,7 @@ func moveRec(name string, in *tarFile, out *tarFile) error {
|
||||
_, okIn := in.get(name)
|
||||
_, okOut := out.get(name)
|
||||
if !okIn && !okOut {
|
||||
return errors.Wrapf(errNotFound, "file: %q", name)
|
||||
return fmt.Errorf("file: %q: %w", name, errNotFound)
|
||||
}
|
||||
|
||||
parent, _ := path.Split(strings.TrimSuffix(name, "/"))
|
@@ -506,12 +532,13 @@ func newTempFiles() *tempFiles {
}

type tempFiles struct {
	files   []*os.File
	filesMu sync.Mutex
	files       []*os.File
	filesMu     sync.Mutex
	cleanupOnce sync.Once
}

func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
	f, err := ioutil.TempFile(dir, pattern)
	f, err := os.CreateTemp(dir, pattern)
	if err != nil {
		return nil, err
	}
@@ -521,7 +548,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) {
	return f, nil
}

func (tf *tempFiles) CleanupAll() error {
func (tf *tempFiles) CleanupAll() (err error) {
	tf.cleanupOnce.Do(func() {
		err = tf.cleanupAll()
	})
	return
}

func (tf *tempFiles) cleanupAll() error {
	tf.filesMu.Lock()
	defer tf.filesMu.Unlock()
	var allErr []error

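CleanupAll is now guarded by sync.Once because it can legitimately be called twice: once by the cancellation watcher and once by Build's own defer. A standalone sketch of the same once-guarded cleanup shape (toy names, not the estargz types):

```go
package main

import (
	"fmt"
	"sync"
)

type resources struct {
	once sync.Once
}

// Close may be called from several goroutines; only the first call
// actually releases anything. Later calls are no-ops returning nil,
// which matches the behavior of the CleanupAll above.
func (r *resources) Close() (err error) {
	r.once.Do(func() {
		err = r.closeAll()
	})
	return
}

func (r *resources) closeAll() error {
	fmt.Println("releasing")
	return nil
}

func main() {
	r := &resources{}
	_ = r.Close() // prints "releasing"
	_ = r.Close() // no-op
}
```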
15 vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go generated vendored
@@ -27,10 +27,10 @@ import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"errors"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"os"
	"path"
	"sort"
@@ -40,7 +40,6 @@ import (

	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
	"github.com/vbatts/tar-split/archive/tar"
)

@@ -385,8 +384,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
		if e.Digest != "" {
			d, err := digest.Parse(e.Digest)
			if err != nil {
				return nil, errors.Wrapf(err,
					"failed to parse regular file digest %q", e.Digest)
				return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err)
			}
			regDigestMap[e.Offset] = d
		} else {
@@ -401,8 +399,7 @@ func (r *Reader) Verifiers() (TOCEntryVerifier, error) {
		if e.ChunkDigest != "" {
			d, err := digest.Parse(e.ChunkDigest)
			if err != nil {
				return nil, errors.Wrapf(err,
					"failed to parse chunk digest %q", e.ChunkDigest)
				return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err)
			}
			chunkDigestMap[e.Offset] = d
		} else {
@@ -581,7 +578,7 @@ func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
		return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err)
	}
	defer dr.Close()
	if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil {
	if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil {
		return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
	}
	return io.ReadFull(dr, p)
@@ -647,7 +644,7 @@ func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) {
	}
	blobPayloadSize, _, _, err := c.ParseFooter(footer)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to parse footer")
		return nil, fmt.Errorf("failed to parse footer: %w", err)
	}
	return c.Reader(io.LimitReader(sr, blobPayloadSize))
}
@@ -935,7 +932,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error {
			}
		}
	}
	remainDest := ioutil.Discard
	remainDest := io.Discard
	if lossless {
		remainDest = dst // Preserve the remaining bytes in lossless mode
	}

3 vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod generated vendored
@@ -3,9 +3,8 @@ module github.com/containerd/stargz-snapshotter/estargz
go 1.16

require (
	github.com/klauspost/compress v1.14.2
	github.com/klauspost/compress v1.15.1
	github.com/opencontainers/go-digest v1.0.0
	github.com/pkg/errors v0.9.1
	github.com/vbatts/tar-split v0.11.2
	golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
)

6 vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum generated vendored
@@ -1,12 +1,10 @@
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=

7 vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go generated vendored
@@ -34,7 +34,6 @@ import (
	"strconv"

	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

type gzipCompression struct {
@@ -150,7 +149,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t
	}
	tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
	if err != nil {
		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
	}
	return tocOffset, tocOffset, 0, nil
}
@@ -179,7 +178,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff
	}
	zr, err := gzip.NewReader(bytes.NewReader(p))
	if err != nil {
		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
		return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
	}
	defer zr.Close()
	extra := zr.Header.Extra
@@ -191,7 +190,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff
	}
	tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
	if err != nil {
		return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
		return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err)
	}
	return tocOffset, tocOffset, 0, nil
}

13 vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go generated vendored
@@ -28,9 +28,9 @@ import (
	"compress/gzip"
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"reflect"
	"sort"
@@ -41,7 +41,6 @@ import (
	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
	"github.com/klauspost/compress/zstd"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// TestingController is Compression with some helper methods necessary for testing.
@@ -287,11 +286,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
			return false

		}
		aFile, err := ioutil.ReadAll(aTar)
		aFile, err := io.ReadAll(aTar)
		if err != nil {
			t.Fatal("failed to read tar payload of A")
		}
		bFile, err := ioutil.ReadAll(bTar)
		bFile, err := io.ReadAll(bTar)
		if err != nil {
			t.Fatal("failed to read tar payload of B")
		}
@@ -1062,18 +1061,18 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
	fSize := controller.FooterSize()
	footer := make([]byte, fSize)
	if _, err := sgz.ReadAt(footer, sgz.Size()-fSize); err != nil {
		return nil, 0, errors.Wrap(err, "error reading footer")
		return nil, 0, fmt.Errorf("error reading footer: %w", err)
	}
	_, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
	if err != nil {
		return nil, 0, errors.Wrapf(err, "failed to parse footer")
		return nil, 0, fmt.Errorf("failed to parse footer: %w", err)
	}

	// Decode the TOC JSON
	tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
	decodedJTOC, _, err = controller.ParseTOC(tocReader)
	if err != nil {
		return nil, 0, errors.Wrap(err, "failed to parse TOC")
		return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
	}
	return decodedJTOC, tocOffset, nil
}

54 vendor/github.com/containers/common/pkg/auth/auth.go generated vendored
@@ -4,6 +4,7 @@ import (
	"bufio"
	"context"
	"fmt"
	"net/url"
	"os"
	"path/filepath"
	"strings"
@@ -165,20 +166,21 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
// parseCredentialsKey turns the provided argument into a valid credential key
// and computes the registry part.
func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry string, err error) {
	// URL arguments are replaced with their host[:port] parts.
	key, err = replaceURLByHostPort(arg)
	if err != nil {
		return "", "", err
	}

	split := strings.Split(key, "/")
	registry = split[0]

	if !acceptRepositories {
		registry = getRegistryName(arg)
		key = registry
		return key, registry, nil
		return registry, registry, nil
	}

	key = trimScheme(arg)
	if key != arg {
		return "", "", errors.New("credentials key has https[s]:// prefix")
	}

	registry = getRegistryName(key)
	// Return early if the key isn't namespaced or uses an http{s} prefix.
	if registry == key {
		// The key is not namespaced
		return key, registry, nil
	}
@@ -202,24 +204,18 @@ func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry str
	return key, registry, nil
}

// getRegistryName scrubs and parses the input to get the server name
func getRegistryName(server string) string {
	// removes 'http://' or 'https://' from the front of the
	// server/registry string if either is there. This will be mostly used
	// for user input from 'Buildah login' and 'Buildah logout'.
	server = trimScheme(server)
	// gets the registry from the input. If the input is of the form
	// quay.io/myuser/myimage, it will parse it and just return quay.io
	split := strings.Split(server, "/")
	return split[0]
}

// trimScheme removes the HTTP(s) scheme from the provided repository.
func trimScheme(repository string) string {
	// removes 'http://' or 'https://' from the front of the
	// server/registry string if either is there. This will be mostly used
	// for user input from 'Buildah login' and 'Buildah logout'.
	return strings.TrimPrefix(strings.TrimPrefix(repository, "https://"), "http://")
// If the specified string starts with http{s} it is replaced with it's
// host[:port] parts; everything else is stripped. Otherwise, the string is
// returned as is.
func replaceURLByHostPort(repository string) (string, error) {
	if !strings.HasPrefix(repository, "https://") && !strings.HasPrefix(repository, "http://") {
		return repository, nil
	}
	u, err := url.Parse(repository)
	if err != nil {
		return "", fmt.Errorf("trimming http{s} prefix: %v", err)
	}
	return u.Host, nil
}

// getUserAndPass gets the username and password from STDIN if not given
@@ -248,7 +244,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user
	}
	if password == "" {
		fmt.Fprint(opts.Stdout, "Password: ")
		pass, err := terminal.ReadPassword(0)
		pass, err := terminal.ReadPassword(int(os.Stdin.Fd()))
		if err != nil {
			return "", "", errors.Wrap(err, "reading password")
		}

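The new replaceURLByHostPort delegates scheme stripping to net/url, so a port (or userinfo) in the argument is preserved correctly instead of being handled by naive prefix trimming. A standalone sketch of the same idea, with my own illustrative names:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// hostPort mimics the shape of replaceURLByHostPort: URL arguments collapse
// to host[:port], plain registry names pass through untouched (splitting on
// "/" happens later, in the caller).
func hostPort(s string) (string, error) {
	if !strings.HasPrefix(s, "https://") && !strings.HasPrefix(s, "http://") {
		return s, nil
	}
	u, err := url.Parse(s)
	if err != nil {
		return "", err
	}
	return u.Host, nil
}

func main() {
	for _, in := range []string{"quay.io/user/image", "https://quay.io:8443/user/image"} {
		out, _ := hostPort(in)
		fmt.Printf("%-35s -> %s\n", in, out) // first stays as-is; second -> quay.io:8443
	}
}
```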
4 vendor/github.com/containers/common/pkg/report/camelcase/README.md generated vendored
@@ -27,9 +27,9 @@ go get github.com/fatih/camelcase
## Usage and examples

```go
splitted := camelcase.Split("GolangPackage")
split := camelcase.Split("GolangPackage")

fmt.Println(splitted[0], splitted[1]) // prints: "Golang", "Package"
fmt.Println(split[0], split[1]) // prints: "Golang", "Package"
```

Both lower camel case and upper camel case are supported. For more info please

4 vendor/github.com/containers/common/pkg/report/template.go generated vendored
@@ -40,14 +40,14 @@ var DefaultFuncs = FuncMap{
		buf := new(bytes.Buffer)
		enc := json.NewEncoder(buf)
		enc.SetEscapeHTML(false)
		enc.Encode(v)
		_ = enc.Encode(v)
		// Remove the trailing new line added by the encoder
		return strings.TrimSpace(buf.String())
	},
	"lower":    strings.ToLower,
	"pad":      padWithSpace,
	"split":    strings.Split,
	"title":    strings.Title,
	"title":    strings.Title, //nolint:staticcheck
	"truncate": truncateWithLength,
	"upper":    strings.ToUpper,
}

1 vendor/github.com/containers/common/pkg/retry/retry_unsupported.go generated vendored
@@ -1,3 +1,4 @@
//go:build !linux
// +build !linux

package retry

306 vendor/github.com/containers/image/v5/copy/copy.go generated vendored
@@ -5,7 +5,6 @@ import (
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"reflect"
	"strings"
@@ -15,8 +14,10 @@ import (
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/image"
	internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
	"github.com/containers/image/v5/internal/imagedestination"
	"github.com/containers/image/v5/internal/imagesource"
	"github.com/containers/image/v5/internal/pkg/platform"
	internalTypes "github.com/containers/image/v5/internal/types"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/blobinfocache"
	"github.com/containers/image/v5/pkg/compression"
@@ -31,7 +32,6 @@ import (
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
	"golang.org/x/sync/semaphore"
	"golang.org/x/term"
)
@@ -63,8 +63,8 @@ var expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
// copier allows us to keep track of diffID values for blobs, and other
// data shared across one or more images in a possible manifest list.
type copier struct {
	dest             types.ImageDestination
	rawSource        types.ImageSource
	dest             private.ImageDestination
	rawSource        private.ImageSource
	reportWriter     io.Writer
	progressOutput   io.Writer
	progressInterval time.Duration
@@ -122,9 +122,10 @@ type ImageListSelection int

// Options allows supplying non-default configuration modifying the behavior of CopyImage.
type Options struct {
	RemoveSignatures bool   // Remove any pre-existing signatures. SignBy will still add a new signature.
	SignBy           string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
	SignPassphrase   string // Passphare to use when signing with the key ID from `SignBy`.
	RemoveSignatures bool            // Remove any pre-existing signatures. SignBy will still add a new signature.
	SignBy           string          // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
	SignPassphrase   string          // Passphare to use when signing with the key ID from `SignBy`.
	SignIdentity     reference.Named // Identify to use when signing, defaults to the docker reference of the destination
	ReportWriter     io.Writer
	SourceCtx        *types.SystemContext
	DestinationCtx   *types.SystemContext
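SignIdentity lets a copy be signed for a name other than the destination reference, backing skopeo's new --sign-identity flag. A sketch of how a caller might populate it; the surrounding copy setup (policy context, source/destination references) is elided, and the registry name and fingerprint are illustrative:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/docker/reference"
)

func main() {
	// The identity must be fully specified (tag or digest), mirroring the
	// reference.IsNameOnly check in createSignature (see the sign.go hunks below).
	identity, err := reference.ParseNormalizedNamed("registry.example.com/prod/app:v1")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.IsNameOnly(identity)) // false: it carries a tag

	opts := &copy.Options{
		SignBy:       "0123456789ABCDEF", // GPG key fingerprint, illustrative
		SignIdentity: identity,
	}
	_ = opts // would be passed to copy.Image(...)
}
```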
@@ -196,26 +197,28 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
		return nil, err
	}

	reportWriter := ioutil.Discard
	reportWriter := io.Discard

	if options.ReportWriter != nil {
		reportWriter = options.ReportWriter
	}

	dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
	publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
	if err != nil {
		return nil, errors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef))
	}
	dest := imagedestination.FromPublic(publicDest)
	defer func() {
		if err := dest.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (dest: %v)", err)
		}
	}()

	rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
	publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
	if err != nil {
		return nil, errors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef))
	}
	rawSource := imagesource.FromPublic(publicRawSource)
	defer func() {
		if err := rawSource.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (src: %v)", err)
@@ -227,7 +230,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
	// createProgressBar() will print a single line instead.
	progressOutput := reportWriter
	if !isTTY(reportWriter) {
		progressOutput = ioutil.Discard
		progressOutput = io.Discard
	}

	c := &copier{
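imagedestination.FromPublic and imagesource.FromPublic adapt a transport's public types.ImageDestination/types.ImageSource into the richer private interfaces, supplying stub implementations of the optional methods so the copy pipeline can call them unconditionally instead of type-asserting at each call site. A toy reproduction of that wrapper idea, under made-up interfaces rather than the c/image ones:

```go
package main

import "fmt"

// Public is the minimal interface every implementation provides.
type Public interface{ Get() string }

// Private adds an optional fast path.
type Private interface {
	Public
	SupportsFast() bool
	GetFast() (string, error)
}

// fromPublic wraps any Public into a Private whose fast path reports
// itself as unsupported, so callers can drop per-call type assertions.
func fromPublic(p Public) Private {
	if priv, ok := p.(Private); ok {
		return priv // already implements the full interface
	}
	return wrapper{p}
}

type wrapper struct{ Public }

func (wrapper) SupportsFast() bool       { return false }
func (wrapper) GetFast() (string, error) { return "", fmt.Errorf("unsupported") }

type plain struct{}

func (plain) Get() string { return "slow path" }

func main() {
	p := fromPublic(plain{})
	if !p.SupportsFast() {
		fmt.Println(p.Get()) // caller logic stays linear: no type switch
	}
}
```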
@@ -570,7 +573,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur

	// Sign the manifest list.
	if options.SignBy != "" {
		newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase)
		newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase, options.SignIdentity)
		if err != nil {
			return nil, err
		}
@@ -708,8 +711,6 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli

	// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
	ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
	// If encrypted and decryption keys provided, we should try to decrypt
	ic.diffIDsAreNeeded = ic.diffIDsAreNeeded || (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || ic.c.ociEncryptConfig != nil

	// If enabled, fetch and compare the destination's manifest. And as an optimization skip updating the destination iff equal
	if options.OptimizeDestinationImageAlreadyExists {
@@ -792,7 +793,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
	}

	if options.SignBy != "" {
		newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase)
		newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase, options.SignIdentity)
		if err != nil {
			return nil, "", "", err
		}
@@ -1065,85 +1066,6 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
	return man, manifestDigest, nil
}

// newProgressPool creates a *mpb.Progress.
// The caller must eventually call pool.Wait() after the pool will no longer be updated.
// NOTE: Every progress bar created within the progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done
// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
func (c *copier) newProgressPool() *mpb.Progress {
	return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
}

// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
func customPartialBlobDecorFunc(s decor.Statistics) string {
	if s.Total == 0 {
		pairFmt := "%.1f / %.1f (skipped: %.1f)"
		return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
	}
	pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
	percentage := 100.0 * float64(s.Refill) / float64(s.Total)
	return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
}

// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
// is ioutil.Discard, the progress bar's output will be discarded
// NOTE: Every progress bar created within a progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done
// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *mpb.Bar {
	// shortDigestLen is the length of the digest used for blobs.
	const shortDigestLen = 12

	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
	// Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
	maxPrefixLen := len("Copying blob ") + shortDigestLen
	if len(prefix) > maxPrefixLen {
		prefix = prefix[:maxPrefixLen]
	}

	// onComplete will replace prefix once the bar/spinner has completed
	onComplete = prefix + " " + onComplete

	// Use a normal progress bar when we know the size (i.e., size > 0).
	// Otherwise, use a spinner to indicate that something's happening.
	var bar *mpb.Bar
	if info.Size > 0 {
		if partial {
			bar = pool.AddBar(info.Size,
				mpb.BarFillerClearOnComplete(),
				mpb.PrependDecorators(
					decor.OnComplete(decor.Name(prefix), onComplete),
				),
				mpb.AppendDecorators(
					decor.Any(customPartialBlobDecorFunc),
				),
			)
		} else {
			bar = pool.AddBar(info.Size,
				mpb.BarFillerClearOnComplete(),
				mpb.PrependDecorators(
					decor.OnComplete(decor.Name(prefix), onComplete),
				),
				mpb.AppendDecorators(
					decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
				),
			)
		}
	} else {
		bar = pool.New(0,
			mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
			mpb.BarFillerClearOnComplete(),
			mpb.PrependDecorators(
				decor.OnComplete(decor.Name(prefix), onComplete),
			),
		)
	}
	if c.progressOutput == ioutil.Discard {
		c.Printf("Copying %s %s\n", kind, info.Digest)
	}
	return bar
}

// copyConfig copies config.json, if any, from src to dest.
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
	srcInfo := src.ConfigInfo()
@@ -1154,22 +1076,23 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
	}
	defer c.concurrentBlobCopiesSemaphore.Release(1)

	configBlob, err := src.ConfigBlob(ctx)
	if err != nil {
		return errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
	}

	destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
		progressPool := c.newProgressPool()
		defer progressPool.Wait()
		bar := c.createProgressBar(progressPool, false, srcInfo, "config", "done")
		defer bar.Abort(false)

		configBlob, err := src.ConfigBlob(ctx)
		if err != nil {
			return types.BlobInfo{}, errors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
		}

		destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, false, bar, -1, false)
		if err != nil {
			return types.BlobInfo{}, err
		}
		bar.SetTotal(int64(len(configBlob)), true)

		bar.mark100PercentComplete()
		return destInfo, nil
	}()
	if err != nil {
@@ -1214,48 +1137,37 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
	}

	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
	// Diffs are needed if we are encrypting an image or trying to decrypt an image
	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" || toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
	// When encrypting to decrypting, only use the simple code path. We might be able to optimize more
	// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
	// but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
	encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
	canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting

	// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
	if !diffIDIsNeeded {
	// Don’t read the layer from the source if we already have the blob, and optimizations are acceptable.
	if canAvoidProcessingCompleteLayer {
		// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
		// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
		// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
		// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
		// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
		// the ImageDestination interface lets us pass in.
		var (
			blobInfo types.BlobInfo
			reused   bool
			err      error
		)
		// Note: the storage destination optimizes the committing of
		// layers which requires passing the index of the layer.
		// Hence, we need to special case and cast.
		dest, ok := ic.c.dest.(internalTypes.ImageDestinationWithOptions)
		if ok {
			options := internalTypes.TryReusingBlobOptions{
				Cache:         ic.c.blobInfoCache,
				CanSubstitute: ic.canSubstituteBlobs,
				SrcRef:        srcRef,
				EmptyLayer:    emptyLayer,
			}
			options.LayerIndex = &layerIndex
			reused, blobInfo, err = dest.TryReusingBlobWithOptions(ctx, srcInfo, options)
		} else {
			reused, blobInfo, err = ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
		}

		reused, blobInfo, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
			Cache:         ic.c.blobInfoCache,
			CanSubstitute: ic.canSubstituteBlobs,
			EmptyLayer:    emptyLayer,
			LayerIndex:    &layerIndex,
			SrcRef:        srcRef,
		})
		if err != nil {
			return types.BlobInfo{}, "", errors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest)
		}
		if reused {
			logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
			func() { // A scope for defer
				bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "skipped: already exists")
				bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists")
				defer bar.Abort(false)
				bar.SetTotal(0, true)
				bar.mark100PercentComplete()
			}()

			// Throw an event that the layer has been skipped
@@ -1286,9 +1198,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
	// of the source file are not known yet and must be fetched.
	// Attempt a partial only when the source allows to retrieve a blob partially and
	// the destination has support for it.
	imgSource, okSource := ic.c.rawSource.(internalTypes.ImageSourceSeekable)
	imgDest, okDest := ic.c.dest.(internalTypes.ImageDestinationPartial)
	if okSource && okDest && !diffIDIsNeeded {
	if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
		if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
			bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
			hideProgressBar := true
@@ -1296,33 +1206,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
				bar.Abort(hideProgressBar)
			}()

			progress := make(chan int64)
			terminate := make(chan interface{})

			defer close(terminate)
			defer close(progress)

			proxy := imageSourceSeekableProxy{
				source:   imgSource,
				progress: progress,
			proxy := blobChunkAccessorProxy{
				wrapped: ic.c.rawSource,
				bar:     bar,
			}
			go func() {
				for {
					select {
					case written := <-progress:
						bar.IncrInt64(written)
					case <-terminate:
						return
					}
				}
			}()

			bar.SetTotal(srcInfo.Size, false)
			info, err := imgDest.PutBlobPartial(ctx, proxy, srcInfo, ic.c.blobInfoCache)
			info, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
			if err == nil {
				bar.SetRefill(srcInfo.Size - bar.Current())
				bar.SetCurrent(srcInfo.Size)
				bar.SetTotal(srcInfo.Size, true)
				if srcInfo.Size != -1 {
					bar.SetRefill(srcInfo.Size - bar.Current())
				}
				bar.mark100PercentComplete()
				hideProgressBar = false
				logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
				return true, info
@@ -1335,16 +1228,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
	}

	// Fallback: copy the layer, computing the diffID if we need to do so
	srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
	if err != nil {
		return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
	}
	defer srcStream.Close()

	return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
		bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
		defer bar.Abort(false)

		srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
		if err != nil {
			return types.BlobInfo{}, "", errors.Wrapf(err, "reading blob %s", srcInfo.Digest)
		}
		defer srcStream.Close()

		blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
		if err != nil {
			return types.BlobInfo{}, "", err
@@ -1360,14 +1253,22 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
				return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "computing layer DiffID")
			}
			logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
			// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
			// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
			ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
			// Don’t record any associations that involve encrypted data. This is a bit crude,
			// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
			// might be safe, but it’s not trivially obvious, so let’s be conservative for now.
			// This crude approach also means we don’t need to record whether a blob is encrypted
			// in the blob info cache (which would probably be necessary for any more complex logic),
			// and the simplicity is attractive.
			if !encryptingOrDecrypting {
				// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
				// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
				ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
			}
			diffID = diffIDResult.digest
		}
	}

		bar.SetTotal(srcInfo.Size, true)
		bar.mark100PercentComplete()
		return blobInfo, diffID, nil
	}()
}
@@ -1377,7 +1278,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
// perhaps (de/re/)compressing the stream,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
	diffIDIsNeeded bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
	diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
	var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
	var diffIDChan chan diffIDResult

@@ -1454,7 +1355,7 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
	getOriginalLayerCopyWriter func(decompressor compressiontypes.DecompressorFunc) io.Writer,
	canModifyBlob bool, isConfig bool, toEncrypt bool, bar *mpb.Bar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
	canModifyBlob bool, isConfig bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, error) {
	if isConfig { // This is guaranteed by the caller, but set it here to be explicit.
		canModifyBlob = false
	}
@@ -1474,6 +1375,9 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
	}
	var destStream io.Reader = digestingReader

	// === Update progress bars
	destStream = bar.ProxyReader(destStream)

	// === Decrypt the stream, if required.
	var decrypted bool
	if isOciEncrypted(srcInfo.MediaType) && c.ociDecryptConfig != nil {
@@ -1508,9 +1412,6 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
		logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedCompressionFormat.Name(), compressionFormat.Name())
	}

	// === Update progress bars
	destStream = bar.ProxyReader(destStream)

	// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
	var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
	if getOriginalLayerCopyWriter != nil {
@@ -1658,24 +1559,15 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
	}

	// === Finally, send the layer stream to dest.
	var uploadedInfo types.BlobInfo
	// Note: the storage destination optimizes the committing of layers
	// which requires passing the index of the layer. Hence, we need to
	// special case and cast.
	dest, ok := c.dest.(internalTypes.ImageDestinationWithOptions)
	if ok {
		options := internalTypes.PutBlobOptions{
			Cache:      c.blobInfoCache,
			IsConfig:   isConfig,
			EmptyLayer: emptyLayer,
		}
		if !isConfig {
			options.LayerIndex = &layerIndex
		}
		uploadedInfo, err = dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
	} else {
		uploadedInfo, err = c.dest.PutBlob(ctx, &errorAnnotationReader{destStream}, inputInfo, c.blobInfoCache, isConfig)
	options := private.PutBlobOptions{
		Cache:      c.blobInfoCache,
		IsConfig:   isConfig,
		EmptyLayer: emptyLayer,
	}
	if !isConfig {
		options.LayerIndex = &layerIndex
	}
	uploadedInfo, err := c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{destStream}, inputInfo, options)
	if err != nil {
		return types.BlobInfo{}, errors.Wrap(err, "writing blob")
	}
@@ -1707,7 +1599,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
	// sent there if we are not already at EOF.
	if getOriginalLayerCopyWriter != nil {
		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
		_, err := io.Copy(ioutil.Discard, originalLayerReader)
		_, err := io.Copy(io.Discard, originalLayerReader)
		if err != nil {
			return types.BlobInfo{}, errors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
		}
@@ -1720,19 +1612,27 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
	}
	if digestingReader.validationSucceeded {
		// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
		// srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
		// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
		// (because inputInfo.Digest == "", this must have been computed afresh).
		switch compressionOperation {
		case types.PreserveOriginal:
			break // Do nothing, we have only one digest and we might not have even verified it.
		case types.Compress:
			c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
		case types.Decompress:
			c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
		default:
			return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
		// Don’t record any associations that involve encrypted data. This is a bit crude,
		// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
		// might be safe, but it’s not trivially obvious, so let’s be conservative for now.
		// This crude approach also means we don’t need to record whether a blob is encrypted
		// in the blob info cache (which would probably be necessary for any more complex logic),
		// and the simplicity is attractive.
		if !encrypted && !decrypted {
			// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
			// srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
			// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
			// (because inputInfo.Digest == "", this must have been computed afresh).
			switch compressionOperation {
			case types.PreserveOriginal:
				break // Do nothing, we have only one digest and we might not have even verified it.
			case types.Compress:
				c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
			case types.Decompress:
				c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
			default:
				return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
			}
		}
		if uploadCompressorName != "" && uploadCompressorName != internalblobinfocache.UnknownCompression {
			c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, uploadCompressorName)

148 vendor/github.com/containers/image/v5/copy/progress_bars.go generated vendored Normal file
@@ -0,0 +1,148 @@
package copy

import (
	"context"
	"fmt"
	"io"

	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/types"
	"github.com/vbauerster/mpb/v7"
	"github.com/vbauerster/mpb/v7/decor"
)

// newProgressPool creates a *mpb.Progress.
// The caller must eventually call pool.Wait() after the pool will no longer be updated.
// NOTE: Every progress bar created within the progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done
// using "defer bar.Abort(false)", which must be called BEFORE pool.Wait() is called.
func (c *copier) newProgressPool() *mpb.Progress {
	return mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput))
}

// customPartialBlobDecorFunc implements mpb.DecorFunc for the partial blobs retrieval progress bar
func customPartialBlobDecorFunc(s decor.Statistics) string {
	if s.Total == 0 {
		pairFmt := "%.1f / %.1f (skipped: %.1f)"
		return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill))
	}
	pairFmt := "%.1f / %.1f (skipped: %.1f = %.2f%%)"
	percentage := 100.0 * float64(s.Refill) / float64(s.Total)
	return fmt.Sprintf(pairFmt, decor.SizeB1024(s.Current), decor.SizeB1024(s.Total), decor.SizeB1024(s.Refill), percentage)
}

// progressBar wraps a *mpb.Bar, allowing us to add extra state and methods.
type progressBar struct {
	*mpb.Bar
	originalSize int64 // or -1 if unknown
}

// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
// is io.Discard, the progress bar's output will be discarded
//
// NOTE: Every progress bar created within a progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done
// using "defer bar.Abort(false)", which must happen BEFORE pool.Wait() is called.
//
// As a convention, most users of progress bars should call mark100PercentComplete on full success;
// by convention, we don't leave progress bars in partial state when fully done
// (even if we copied much less data than anticipated).
func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
	// shortDigestLen is the length of the digest used for blobs.
	const shortDigestLen = 12

	prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
	// Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
	maxPrefixLen := len("Copying blob ") + shortDigestLen
	if len(prefix) > maxPrefixLen {
		prefix = prefix[:maxPrefixLen]
	}

	// onComplete will replace prefix once the bar/spinner has completed
	onComplete = prefix + " " + onComplete

	// Use a normal progress bar when we know the size (i.e., size > 0).
	// Otherwise, use a spinner to indicate that something's happening.
	var bar *mpb.Bar
	if info.Size > 0 {
		if partial {
			bar = pool.AddBar(info.Size,
				mpb.BarFillerClearOnComplete(),
				mpb.PrependDecorators(
					decor.OnComplete(decor.Name(prefix), onComplete),
				),
				mpb.AppendDecorators(
					decor.Any(customPartialBlobDecorFunc),
				),
			)
		} else {
			bar = pool.AddBar(info.Size,
				mpb.BarFillerClearOnComplete(),
				mpb.PrependDecorators(
					decor.OnComplete(decor.Name(prefix), onComplete),
				),
				mpb.AppendDecorators(
					decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""),
				),
			)
		}
	} else {
		bar = pool.New(0,
			mpb.SpinnerStyle(".", "..", "...", "....", "").PositionLeft(),
			mpb.BarFillerClearOnComplete(),
			mpb.PrependDecorators(
				decor.OnComplete(decor.Name(prefix), onComplete),
			),
		)
	}
	if c.progressOutput == io.Discard {
		c.Printf("Copying %s %s\n", kind, info.Digest)
	}
	return &progressBar{
		Bar:          bar,
		originalSize: info.Size,
	}
}

// mark100PercentComplete marks the progres bars as 100% complete;
// it may do so by possibly advancing the current state if it is below the known total.
func (bar *progressBar) mark100PercentComplete() {
	if bar.originalSize > 0 {
		// We can't call bar.SetTotal even if we wanted to; the total can not be changed
		// after a progress bar is created with a definite total.
		bar.SetCurrent(bar.originalSize) // This triggers the completion condition.
	} else {
		// -1 = unknown size
		// 0 is somewhat of a a special case: Unlike c/image, where 0 is a definite known
		// size (possible at least in theory), in mpb, zero-sized progress bars are treated
		// as unknown size, in particular they are not configured to be marked as
		// complete on bar.Current() reaching bar.total (because that would happen already
		// when creating the progress bar).
		// That means that we are both _allowed_ to call SetTotal, and we _have to_.
		bar.SetTotal(-1, true) // total < 0 = set it to bar.Current(), report it; and mark the bar as complete.
	}
}
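The split in mark100PercentComplete reflects how mpb treats totals: a bar created with a positive total completes on its own once current reaches it, while a zero-total bar is treated as unknown-size and must be finished explicitly with SetTotal. A small sketch of that behavior, assuming mpb/v7 (the values are illustrative):

```go
package main

import "github.com/vbauerster/mpb/v7"

func main() {
	pool := mpb.New(mpb.WithWidth(40))

	known := pool.AddBar(100) // definite total: completes at current == total
	unknown := pool.AddBar(0) // zero total: treated as unknown size

	known.SetCurrent(100) // triggers completion by itself
	unknown.IncrBy(42)
	unknown.SetTotal(-1, true) // adopt current as total and mark complete

	pool.Wait() // returns only because both bars are complete
}
```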

// blobChunkAccessorProxy wraps a BlobChunkAccessor and updates a *progressBar
// with the number of received bytes.
type blobChunkAccessorProxy struct {
	wrapped private.BlobChunkAccessor // The underlying BlobChunkAccessor
	bar     *progressBar              // A progress bar updated with the number of bytes read so far
}

// GetBlobAt returns a sequential channel of readers that contain data for the requested
// blob chunks, and a channel that might get a single error value.
// The specified chunks must be not overlapping and sorted by their offset.
// The readers must be fully consumed, in the order they are returned, before blocking
// to read the next chunk.
func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
	rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
	if err == nil {
		total := int64(0)
		for _, c := range chunks {
			total += int64(c.Length)
		}
		s.bar.IncrInt64(total)
	}
	return rc, errs, err
}
@@ -1,15 +1,13 @@
package copy

import (
	"context"
	"io"
	"time"

	internalTypes "github.com/containers/image/v5/internal/types"
	"github.com/containers/image/v5/types"
)

// progressReader is a reader that reports its progress on an interval.
// progressReader is a reader that reports its progress to a types.ProgressProperties channel on an interval.
type progressReader struct {
	source  io.Reader
	channel chan<- types.ProgressProperties
@@ -79,26 +77,3 @@ func (r *progressReader) Read(p []byte) (int, error) {
	}
	return n, err
}

// imageSourceSeekableProxy wraps ImageSourceSeekable and keeps track of how many bytes
// are received.
type imageSourceSeekableProxy struct {
	// source is the seekable input to read from.
	source internalTypes.ImageSourceSeekable
	// progress is the chan where the total number of bytes read so far are reported.
	progress chan int64
}

// GetBlobAt reads from the ImageSourceSeekable and report how many bytes were received
// to the progress chan.
func (s imageSourceSeekableProxy) GetBlobAt(ctx context.Context, bInfo types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
	rc, errs, err := s.source.GetBlobAt(ctx, bInfo, chunks)
	if err == nil {
		total := int64(0)
		for _, c := range chunks {
			total += int64(c.Length)
		}
		s.progress <- total
	}
	return rc, errs, err
}
17 vendor/github.com/containers/image/v5/copy/sign.go generated vendored
@@ -1,13 +1,14 @@
package copy

import (
	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports"
	"github.com/pkg/errors"
)

// createSignature creates a new signature of manifest using keyIdentity.
func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string) ([]byte, error) {
func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) ([]byte, error) {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		return nil, errors.Wrap(err, "initializing GPG")
@@ -17,13 +18,19 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase
		return nil, errors.Wrap(err, "Signing not supported")
	}

	dockerReference := c.dest.Reference().DockerReference()
	if dockerReference == nil {
		return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
	if identity != nil {
		if reference.IsNameOnly(identity) {
			return nil, errors.Errorf("Sign identity must be a fully specified reference %s", identity)
		}
	} else {
		identity = c.dest.Reference().DockerReference()
		if identity == nil {
			return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference()))
		}
	}

	c.Printf("Signing manifest\n")
	newSig, err := signature.SignDockerManifestWithOptions(manifest, dockerReference.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase})
	newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase})
	if err != nil {
		return nil, errors.Wrap(err, "creating signature")
	}

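reference.IsNameOnly is what enforces the "fully specified" requirement above: a bare repository name is rejected as a signing identity, while a tagged or digested reference passes. A quick illustration (the repository names are made up):

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	for _, s := range []string{"quay.io/app", "quay.io/app:v1"} {
		ref, err := reference.ParseNormalizedNamed(s)
		if err != nil {
			panic(err)
		}
		// Only "quay.io/app" is name-only; "quay.io/app:v1" would be
		// accepted by the identity check in createSignature.
		fmt.Printf("%-15s IsNameOnly=%v\n", s, reference.IsNameOnly(ref))
	}
}
```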
15 vendor/github.com/containers/image/v5/directory/directory_dest.go generated vendored
@@ -3,7 +3,6 @@ package directory
import (
	"context"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
@@ -62,7 +61,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag
		return nil, errors.Wrapf(err, "checking if path exists %q", d.ref.versionPath())
	}
	if versionExists {
		contents, err := ioutil.ReadFile(d.ref.versionPath())
		contents, err := os.ReadFile(d.ref.versionPath())
		if err != nil {
			return nil, err
		}
@@ -86,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (types.Imag
		}
	}
	// create version file
	err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644)
	err = os.WriteFile(d.ref.versionPath(), []byte(version), 0644)
	if err != nil {
		return nil, errors.Wrapf(err, "creating version file %q", d.ref.versionPath())
	}
@@ -149,7 +148,7 @@ func (d *dirImageDestination) HasThreadSafePutBlob() bool {
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
	blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
	blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
	if err != nil {
		return types.BlobInfo{}, err
	}
@@ -232,7 +231,7 @@ func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.Blo
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
	return ioutil.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
	return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
}

// PutSignatures writes a set of signatures to the destination.
@@ -240,7 +239,7 @@ func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte,
// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
func (d *dirImageDestination) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error {
	for i, sig := range signatures {
		if err := ioutil.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
		if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), sig, 0644); err != nil {
			return err
		}
	}
@@ -272,7 +271,7 @@ func pathExists(path string) (bool, error) {

// returns true if directory is empty
func isDirEmpty(path string) (bool, error) {
	files, err := ioutil.ReadDir(path)
	files, err := os.ReadDir(path)
	if err != nil {
		return false, err
	}
@@ -281,7 +280,7 @@ func isDirEmpty(path string) (bool, error) {

// deletes the contents of a directory
func removeDirContents(path string) error {
	files, err := ioutil.ReadDir(path)
	files, err := os.ReadDir(path)
	if err != nil {
		return err
	}

||||
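Most hunks above, and in the files that follow, are the mechanical cleanup of the `io/ioutil` package that Go 1.16 deprecated. As a reference, here is a minimal, self-contained sketch of the mapping; this is illustrative only and not part of the vendored code (file names and contents are invented):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
	"strings"
)

func main() {
	// ioutil.TempFile -> os.CreateTemp
	f, err := os.CreateTemp("", "dir-put-blob-*")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())

	// ioutil.WriteFile -> os.WriteFile
	if err := os.WriteFile(f.Name(), []byte("Directory Transport Version: 1.1\n"), 0644); err != nil {
		log.Fatal(err)
	}

	// ioutil.ReadFile -> os.ReadFile
	contents, err := os.ReadFile(f.Name())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", contents)

	// ioutil.ReadDir -> os.ReadDir; note the result is []os.DirEntry rather
	// than []os.FileInfo, which avoids a stat() per entry when only names matter.
	entries, err := os.ReadDir(os.TempDir())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("entries:", len(entries))

	// ioutil.NopCloser -> io.NopCloser, ioutil.Discard -> io.Discard
	rc := io.NopCloser(strings.NewReader("payload"))
	defer rc.Close()
	n, _ := io.Copy(io.Discard, rc)
	fmt.Println("discarded bytes:", n)
}
```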
5  vendor/github.com/containers/image/v5/directory/directory_src.go  generated vendored
@@ -3,7 +3,6 @@ package directory
 import (
 	"context"
 	"io"
-	"io/ioutil"
 	"os"

 	"github.com/containers/image/v5/manifest"
@@ -37,7 +36,7 @@ func (s *dirImageSource) Close() error {
 // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
 // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
 func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
-	m, err := ioutil.ReadFile(s.ref.manifestPath(instanceDigest))
+	m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
 	if err != nil {
 		return nil, "", err
 	}
@@ -71,7 +70,7 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache
 func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
 	signatures := [][]byte{}
 	for i := 0; ; i++ {
-		signature, err := ioutil.ReadFile(s.ref.signaturePath(i, instanceDigest))
+		signature, err := os.ReadFile(s.ref.signaturePath(i, instanceDigest))
 		if err != nil {
 			if os.IsNotExist(err) {
 				break
44  vendor/github.com/containers/image/v5/docker/docker_client.go  generated vendored
@@ -1,13 +1,11 @@
 package docker

 import (
-	"bytes"
 	"context"
 	"crypto/tls"
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -463,7 +461,11 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea
 		return nil, err
 	}

-	url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
+	urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
+	url, err := url.Parse(urlString)
+	if err != nil {
+		return nil, err
+	}
 	return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
 }

@@ -500,7 +502,7 @@ func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Durat
 // makeRequest should generally be preferred.
 // In case of an HTTP 429 status code in the response, it may automatically retry a few times.
 // TODO(runcom): too many arguments here, use a struct
-func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
 	delay := backoffInitialDelay
 	attempts := 0
 	for {
@@ -518,7 +520,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
 		if delay > backoffMaxDelay {
 			delay = backoffMaxDelay
 		}
-		logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url, delay.Seconds())
+		logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", url.Redacted(), delay.Seconds())
 		select {
 		case <-ctx.Done():
 			return nil, ctx.Err()
@@ -533,12 +535,12 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
 // streamLen, if not -1, specifies the length of the data expected on stream.
 // makeRequest should generally be preferred.
 // Note that no exponential back off is performed when receiving an http 429 status code.
-func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
-	req, err := http.NewRequestWithContext(ctx, method, url, stream)
+func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, url *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
+	req, err := http.NewRequestWithContext(ctx, method, url.String(), stream)
 	if err != nil {
 		return nil, err
 	}
-	if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
+	if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it.
 		req.ContentLength = streamLen
 	}
 	req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
@@ -553,7 +555,7 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method,
 			return nil, err
 		}
 	}
-	logrus.Debugf("%s %s", method, url)
+	logrus.Debugf("%s %s", method, url.Redacted())
 	res, err := c.client.Do(req)
 	if err != nil {
 		return nil, err
@@ -650,10 +652,10 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
 	params.Add("refresh_token", c.auth.IdentityToken)
 	params.Add("client_id", "containers/image")

-	authReq.Body = ioutil.NopCloser(bytes.NewBufferString(params.Encode()))
+	authReq.Body = io.NopCloser(strings.NewReader(params.Encode()))
 	authReq.Header.Add("User-Agent", c.userAgent)
 	authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded")
-	logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
+	logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
 	res, err := c.client.Do(authReq)
 	if err != nil {
 		return nil, err
@@ -705,7 +707,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
 	}
 	authReq.Header.Add("User-Agent", c.userAgent)

-	logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
+	logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted())
 	res, err := c.client.Do(authReq)
 	if err != nil {
 		return nil, err
@@ -735,14 +737,17 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
 	c.client = &http.Client{Transport: tr}

 	ping := func(scheme string) error {
-		url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
+		url, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry))
+		if err != nil {
+			return err
+		}
 		resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
 		if err != nil {
-			logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
+			logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err)
 			return err
 		}
 		defer resp.Body.Close()
-		logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+		logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode)
 		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
 			return httpResponseToError(resp, "")
 		}
@@ -762,14 +767,17 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
 	}
 	// best effort to understand if we're talking to a V1 registry
 	pingV1 := func(scheme string) bool {
-		url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
+		url, err := url.Parse(fmt.Sprintf(resolvedPingV1URL, scheme, c.registry))
+		if err != nil {
+			return false
+		}
 		resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
 		if err != nil {
-			logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
+			logrus.Debugf("Ping %s err %s (%#v)", url.Redacted(), err.Error(), err)
 			return false
 		}
 		defer resp.Body.Close()
-		logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+		logrus.Debugf("Ping %s status %d", url.Redacted(), resp.StatusCode)
 		if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
 			return false
 		}
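The recurring pattern in docker_client.go is worth isolating: the registry URL is now parsed once at the boundary, a *url.URL is threaded through the request helpers, and every debug log goes through url.Redacted(). A small freestanding sketch of the same idea, with invented names; url.URL.Redacted() is the standard-library call (Go 1.15+) that replaces any userinfo password with "xxxxx":

```go
package main

import (
	"fmt"
	"log"
	"net/url"
)

// ping mirrors the shape of the vendored helpers: it takes an already-parsed
// *url.URL, so parse errors surface once, at the caller, not on every use.
func ping(u *url.URL) {
	// Redacted() keeps embedded credentials out of debug output.
	fmt.Printf("Ping %s\n", u.Redacted())
}

func main() {
	u, err := url.Parse("https://user:secret@registry.example.com/v2/")
	if err != nil {
		log.Fatal(err)
	}
	ping(u) // prints: Ping https://user:xxxxx@registry.example.com/v2/
}
```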
24  vendor/github.com/containers/image/v5/docker/docker_image_dest.go  generated vendored
@@ -7,7 +7,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 	"net/url"
 	"os"
@@ -182,7 +181,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	// This error text should never be user-visible, we terminate only after makeRequestToResolvedURL
 	// returns, so there isn’t a way for the error text to be provided to any of our callers.
 	defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload"))
-	res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
+	res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil)
 	if err != nil {
 		logrus.Debugf("Error uploading layer chunked %v", err)
 		return nil, err
@@ -207,7 +206,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
 	locationQuery := uploadLocation.Query()
 	locationQuery.Set("digest", blobDigest.String())
 	uploadLocation.RawQuery = locationQuery.Encode()
-	res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
+	res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
 	if err != nil {
 		return types.BlobInfo{}, err
 	}
@@ -257,9 +256,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 			"from": {reference.Path(srcRepo)},
 		}.Encode(),
 	}
-	mountPath := u.String()
-	logrus.Debugf("Trying to mount %s", mountPath)
-	res, err := d.c.makeRequest(ctx, http.MethodPost, mountPath, nil, nil, v2Auth, extraScope)
+	logrus.Debugf("Trying to mount %s", u.Redacted())
+	res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope)
 	if err != nil {
 		return err
 	}
@@ -276,8 +274,8 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
 		if err != nil {
 			return errors.Wrap(err, "determining upload URL after a mount attempt")
 		}
-		logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
-		res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
+		logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted())
+		res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope)
 		if err != nil {
 			logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
 		} else {
@@ -593,16 +591,16 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
 		if err != nil {
 			return err
 		}
-		err = ioutil.WriteFile(url.Path, signature, 0644)
+		err = os.WriteFile(url.Path, signature, 0644)
 		if err != nil {
 			return err
 		}
 		return nil

 	case "http", "https":
-		return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+		return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.Redacted())
 	default:
-		return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
+		return errors.Errorf("Unsupported scheme when writing signature to %s", url.Redacted())
 	}
 }

@@ -620,9 +618,9 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
 		return false, err

 	case "http", "https":
-		return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+		return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.Redacted())
 	default:
-		return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
+		return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.Redacted())
 	}
 }
52  vendor/github.com/containers/image/v5/docker/docker_image_src.go  generated vendored
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"mime"
 	"mime/multipart"
 	"net/http"
@@ -16,7 +15,7 @@ import (

 	"github.com/containers/image/v5/docker/reference"
 	"github.com/containers/image/v5/internal/iolimits"
-	internalTypes "github.com/containers/image/v5/internal/types"
+	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/types"
@@ -147,6 +146,11 @@ func (s *dockerImageSource) Close() error {
 	return nil
 }

+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (s *dockerImageSource) SupportsGetBlobAt() bool {
+	return true
+}
+
 // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
 // blobsums that are listed in the image's manifest.  If values are returned, they should be used when using GetBlob()
 // to read the image's layers.
@@ -248,13 +252,14 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
 		return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
 	}
 	for _, u := range urls {
-		if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") {
+		url, err := url.Parse(u)
+		if err != nil || (url.Scheme != "http" && url.Scheme != "https") {
 			continue // unsupported url. skip this url.
 		}
 		// NOTE: we must not authenticate on additional URLs as those
 		// can be abused to leak credentials or tokens.  Please
 		// refer to CVE-2020-15157 for more information.
-		resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, u, nil, nil, -1, noAuth, nil)
+		resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
 		if err == nil {
 			if resp.StatusCode != http.StatusOK {
 				err = errors.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
@@ -288,7 +293,7 @@ func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
 }

 // splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks
-func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk) {
+func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) {
 	defer close(streams)
 	defer close(errs)
 	currentOffset := uint64(0)
@@ -302,7 +307,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 				break
 			}
 			toSkip := c.Offset - currentOffset
-			if _, err := io.Copy(ioutil.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
+			if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil {
 				errs <- err
 				break
 			}
@@ -310,7 +315,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 		}
 		s := signalCloseReader{
 			closed:        make(chan interface{}),
-			stream:        ioutil.NopCloser(io.LimitReader(body, int64(c.Length))),
+			stream:        io.NopCloser(io.LimitReader(body, int64(c.Length))),
 			consumeStream: true,
 		}
 		streams <- s
@@ -322,7 +327,7 @@ func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error,
 }

 // handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan.
-func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []internalTypes.ImageSourceChunk, mediaType string, params map[string]string) {
+func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) {
 	defer close(streams)
 	defer close(errs)
 	if !strings.HasPrefix(mediaType, "multipart/") {
@@ -357,9 +362,12 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
 	}
 }

-// GetBlobAt returns a stream for the specified blob.
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
 // The specified chunks must be not overlapping and sorted by their offset.
-func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []internalTypes.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
 	headers := make(map[string][]string)

 	var rangeVals []string
@@ -401,7 +409,7 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
 		return streams, errs, nil
 	case http.StatusBadRequest:
 		res.Body.Close()
-		return nil, nil, internalTypes.BadPartialRequestError{Status: res.Status}
+		return nil, nil, private.BadPartialRequestError{Status: res.Status}
 	default:
 		err := httpResponseToError(res, "Error fetching partial blob")
 		if err == nil {
@@ -506,7 +514,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
 	switch url.Scheme {
 	case "file":
 		logrus.Debugf("Reading %s", url.Path)
-		sig, err := ioutil.ReadFile(url.Path)
+		sig, err := os.ReadFile(url.Path)
 		if err != nil {
 			if os.IsNotExist(err) {
 				return nil, true, nil
@@ -516,7 +524,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
 		return sig, false, nil

 	case "http", "https":
-		logrus.Debugf("GET %s", url)
+		logrus.Debugf("GET %s", url.Redacted())
 		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil)
 		if err != nil {
 			return nil, false, err
@@ -529,7 +537,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
 		if res.StatusCode == http.StatusNotFound {
 			return nil, true, nil
 		} else if res.StatusCode != http.StatusOK {
-			return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode))
+			return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.Redacted(), res.StatusCode, http.StatusText(res.StatusCode))
 		}
 		sig, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize)
 		if err != nil {
@@ -538,7 +546,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
 		return sig, false, nil

 	default:
-		return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String())
+		return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.Redacted())
 	}
 }

@@ -602,8 +610,11 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
 		return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
 	}

-	digest := get.Header.Get("Docker-Content-Digest")
-	deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
+	manifestDigest, err := manifest.Digest(manifestBody)
+	if err != nil {
+		return fmt.Errorf("computing manifest digest: %w", err)
+	}
+	deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest)

 	// When retrieving the digest from a registry >= 2.3 use the following header:
 	//   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
@@ -621,11 +632,6 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
 		return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
 	}

-	manifestDigest, err := manifest.Digest(manifestBody)
-	if err != nil {
-		return err
-	}
-
 	for i := 0; ; i++ {
 		url := signatureStorageURL(c.signatureBase, manifestDigest, i)
 		missing, err := c.deleteOneSignature(url)
@@ -756,7 +762,7 @@ func (s signalCloseReader) Read(p []byte) (int, error) {
 func (s signalCloseReader) Close() error {
 	defer close(s.closed)
 	if s.consumeStream {
-		if _, err := io.Copy(ioutil.Discard, s.stream); err != nil {
+		if _, err := io.Copy(io.Discard, s.stream); err != nil {
 			s.stream.Close()
 			return err
 		}
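The deleteImage hunks above do more than reshuffle code: the delete path is now built from a digest computed locally over the fetched manifest body, instead of the registry-supplied Docker-Content-Digest header. A hedged sketch of the underlying primitive, using github.com/opencontainers/go-digest (which the vendored manifest.Digest helper builds on; the manifest bytes here are invented):

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Stand-in for a manifest body fetched from a registry (hypothetical content).
	manifestBody := []byte(`{"schemaVersion": 2}`)

	// Computing the digest client-side means a misbehaving or malicious
	// registry header cannot steer the client toward a different manifest.
	d := digest.Canonical.FromBytes(manifestBody)
	fmt.Println(d) // sha256:<hex of the bytes above>
}
```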
3  vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go  generated vendored
@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"

@@ -53,7 +52,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
 // The caller should call .Close() on the returned archive when done.
 func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) {
 	// Save inputStream to a temporary file
-	tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
+	tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
 	if err != nil {
 		return nil, errors.Wrap(err, "creating temporary file")
 	}
5  vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go  generated vendored
@@ -6,7 +6,6 @@ import (
 	"context"
 	"encoding/json"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 	"sync"
@@ -170,7 +169,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif

 		uncompressedSize := h.Size
 		if isCompressed {
-			uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
+			uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
 			if err != nil {
 				return nil, errors.Wrapf(err, "reading %s to find its size", layerPath)
 			}
@@ -263,7 +262,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B
 	}

 	if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
-		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+		return io.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
 	}

 	if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
5  vendor/github.com/containers/image/v5/docker/lookaside.go  generated vendored
@@ -2,7 +2,6 @@ package docker

 import (
 	"fmt"
-	"io/ioutil"
 	"net/url"
 	"os"
 	"path"
@@ -82,7 +81,7 @@ func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference,
 	} else {
 		// returns default directory if no sigstore specified in configuration file
 		url = builtinDefaultSignatureStorageDir(rootless.GetRootlessEUID())
-		logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.String())
+		logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), url.Redacted())
 	}
 	// NOTE: Keep this in sync with docs/signature-protocols.md!
 	// FIXME? Restrict to explicitly supported schemes?
@@ -146,7 +145,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 			continue
 		}
 		configPath := filepath.Join(dirPath, configName)
-		configBytes, err := ioutil.ReadFile(configPath)
+		configBytes, err := os.ReadFile(configPath)
 		if err != nil {
 			return nil, err
 		}
6  vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go  generated vendored  Normal file
@@ -0,0 +1,6 @@
+package reference
+
+// Return true if the specified string fully matches `IdentifierRegexp`.
+func IsFullIdentifier(s string) bool {
+	return anchoredIdentifierRegexp.MatchString(s)
+}
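The new helper relies on an "anchored" variant of IdentifierRegexp, i.e. the same expression wrapped in ^...$ so that MatchString only accepts a full match, never a substring. A freestanding illustration with a simplified stand-in pattern (the real IdentifierRegexp lives elsewhere in this package):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Simplified stand-in for IdentifierRegexp: a 64-character hex identifier.
var identifierRegexp = regexp.MustCompile(`[a-f0-9]{64}`)

// The anchored variant must match the entire input.
var anchoredIdentifierRegexp = regexp.MustCompile(`^` + identifierRegexp.String() + `$`)

func main() {
	full := strings.Repeat("a", 64)
	fmt.Println(anchoredIdentifierRegexp.MatchString(full))          // true
	fmt.Println(anchoredIdentifierRegexp.MatchString(full + "junk")) // false
	fmt.Println(identifierRegexp.MatchString(full + "junk"))         // true: unanchored matches a substring
}
```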
69  vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go  generated vendored  Normal file
@@ -0,0 +1,69 @@
+package imagedestination
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/types"
+)
+
+// FromPublic(dest) returns an object that provides the private.ImageDestination API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageDestination, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageDestination.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(dest types.ImageDestination) private.ImageDestination {
+	if dest2, ok := dest.(private.ImageDestination); ok {
+		return dest2
+	}
+	return &wrapped{ImageDestination: dest}
+}
+
+// wrapped provides the private.ImageDestination operations
+// for a destination that only implements types.ImageDestination
+type wrapped struct {
+	types.ImageDestination
+}
+
+// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+func (w *wrapped) SupportsPutBlobPartial() bool {
+	return false
+}
+
+// PutBlobWithOptions writes contents of stream and returns data representing the result.
+// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+// inputInfo.Size is the expected length of stream, if known.
+// inputInfo.MediaType describes the blob format, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
+	return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
+}
+
+// PutBlobPartial attempts to create a blob using the data that is already present
+// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+// It is available only if SupportsPutBlobPartial().
+// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+// should fall back to PutBlobWithOptions.
+func (w *wrapped) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
+	return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", w.Reference().Transport().Name())
+}
+
+// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+// info.Digest must not be empty.
+// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+// reflected in the manifest that will be written.
+// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+	return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
+}
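FromPublic is an instance of Go's optional-interface upgrade pattern: type-assert to the richer interface, and if the value doesn't implement it, wrap it in a struct that embeds the narrow interface and supplies conservative defaults for the extensions. A freestanding sketch of the pattern with invented names:

```go
package main

import "fmt"

// Basic is the public, frozen interface.
type Basic interface {
	Name() string
}

// Extended is the internal superset; external types may or may not implement it.
type Extended interface {
	Basic
	SupportsFastPath() bool
}

// wrapped embeds a Basic and supplies a conservative default for the extension.
type wrapped struct {
	Basic
}

func (w *wrapped) SupportsFastPath() bool { return false }

// FromPublic upgrades a Basic to an Extended, wrapping only when necessary.
func FromPublic(b Basic) Extended {
	if e, ok := b.(Extended); ok {
		return e // already implements the extensions natively
	}
	return &wrapped{Basic: b}
}

type plain struct{}

func (plain) Name() string { return "plain" }

func main() {
	e := FromPublic(plain{})
	fmt.Println(e.Name(), e.SupportsFastPath()) // plain false
}
```

Because the extended API stays a non-public struct or private interface, new methods can be added later without breaking external implementors, which is exactly the constraint the NOTE above spells out.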
47  vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go  generated vendored  Normal file
@@ -0,0 +1,47 @@
+package imagesource
+
+import (
+	"context"
+	"fmt"
+	"io"
+
+	"github.com/containers/image/v5/internal/private"
+	"github.com/containers/image/v5/types"
+)
+
+// FromPublic(src) returns an object that provides the private.ImageSource API
+//
+// Eventually, we might want to expose this function, and methods of the returned object,
+// as a public API (or rather, a variant that does not include the already-superseded
+// methods of types.ImageSource, and has added more future-proofing), and more strongly
+// deprecate direct use of types.ImageSource.
+//
+// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
+// with public methods, or perhaps a private interface), so that we can add methods
+// without breaking any external implementors of a public interface.
+func FromPublic(src types.ImageSource) private.ImageSource {
+	if src2, ok := src.(private.ImageSource); ok {
+		return src2
+	}
+	return &wrapped{ImageSource: src}
+}
+
+// wrapped provides the private.ImageSource operations
+// for a source that only implements types.ImageSource
+type wrapped struct {
+	types.ImageSource
+}
+
+// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+func (w *wrapped) SupportsGetBlobAt() bool {
+	return false
+}
+
+// GetBlobAt returns a sequential channel of readers that contain data for the requested
+// blob chunks, and a channel that might get a single error value.
+// The specified chunks must be not overlapping and sorted by their offset.
+// The readers must be fully consumed, in the order they are returned, before blocking
+// to read the next chunk.
+func (w *wrapped) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", w.Reference().Transport().Name())
+}
3  vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go  generated vendored
@@ -2,7 +2,6 @@ package iolimits

 import (
 	"io"
-	"io/ioutil"

 	"github.com/pkg/errors"
 )
@@ -47,7 +46,7 @@ const (
 func ReadAtMost(reader io.Reader, limit int) ([]byte, error) {
 	limitedReader := io.LimitReader(reader, int64(limit+1))

-	res, err := ioutil.ReadAll(limitedReader)
+	res, err := io.ReadAll(limitedReader)
 	if err != nil {
 		return nil, err
 	}
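The unchanged logic around this hunk is the interesting part: ReadAtMost reads through io.LimitReader with a budget of limit+1 bytes, so "the input exceeded the limit" is detectable (the result is longer than limit) without ever buffering an unbounded stream. A sketch of the same trick in isolation, with invented names:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

// readAtMost mirrors the shape of the vendored helper: it reads at most
// limit+1 bytes, and fails if the reader yielded more than limit.
func readAtMost(reader io.Reader, limit int) ([]byte, error) {
	res, err := io.ReadAll(io.LimitReader(reader, int64(limit+1)))
	if err != nil {
		return nil, err
	}
	if len(res) > limit {
		return nil, errors.New("input exceeds limit")
	}
	return res, nil
}

func main() {
	if _, err := readAtMost(strings.NewReader("0123456789"), 4); err != nil {
		fmt.Println("rejected:", err) // rejected: input exceeds limit
	}
	b, _ := readAtMost(strings.NewReader("ok"), 4)
	fmt.Printf("accepted: %q\n", b) // accepted: "ok"
}
```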
74  vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go  generated vendored
@@ -1,74 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
-	"golang.org/x/sys/unix"
-)
-
-// Key represents a single key linked to one or more kernel keyrings.
-type Key struct {
-	Name string
-
-	id, ring keyID
-	size     int
-}
-
-// ID returns the 32-bit kernel identifier for a specific key
-func (k *Key) ID() int32 {
-	return int32(k.id)
-}
-
-// Get the key's value as a byte slice
-func (k *Key) Get() ([]byte, error) {
-	var (
-		b        []byte
-		err      error
-		sizeRead int
-	)
-
-	if k.size == 0 {
-		k.size = 512
-	}
-
-	size := k.size
-
-	b = make([]byte, int(size))
-	sizeRead = size + 1
-	for sizeRead > size {
-		r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size)
-		if err != nil {
-			return nil, err
-		}
-
-		if sizeRead = int(r1); sizeRead > size {
-			b = make([]byte, sizeRead)
-			size = sizeRead
-			sizeRead = size + 1
-		} else {
-			k.size = sizeRead
-		}
-	}
-	return b[:k.size], err
-}
-
-// Unlink a key from the keyring it was loaded from (or added to). If the key
-// is not linked to any other keyrings, it is destroyed.
-func (k *Key) Unlink() error {
-	_, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0)
-	return err
-}
-
-// Describe returns a string describing the attributes of a specified key
-func (k *Key) Describe() (string, error) {
-	keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id))
-	if err != nil {
-		return "", err
-	}
-	return keyAttr, nil
-}
118  vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go  generated vendored
@@ -1,118 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface)
-package keyctl
-
-import (
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// Keyring is the basic interface to a linux keyctl keyring.
-type Keyring interface {
-	ID
-	Add(string, []byte) (*Key, error)
-	Search(string) (*Key, error)
-}
-
-type keyring struct {
-	id keyID
-}
-
-// ID is unique 32-bit serial number identifiers for all Keys and Keyrings have.
-type ID interface {
-	ID() int32
-}
-
-// Add a new key to a keyring. The key can be searched for later by name.
-func (kr *keyring) Add(name string, key []byte) (*Key, error) {
-	r, err := unix.AddKey("user", name, key, int(kr.id))
-	if err == nil {
-		key := &Key{Name: name, id: keyID(r), ring: kr.id}
-		return key, nil
-	}
-	return nil, err
-}
-
-// Search for a key by name, this also searches child keyrings linked to this
-// one. The key, if found, is linked to the top keyring that Search() was called
-// from.
-func (kr *keyring) Search(name string) (*Key, error) {
-	id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0)
-	if err == nil {
-		return &Key{Name: name, id: keyID(id), ring: kr.id}, nil
-	}
-	return nil, err
-}
-
-// ID returns the 32-bit kernel identifier of a keyring
-func (kr *keyring) ID() int32 {
-	return int32(kr.id)
-}
-
-// SessionKeyring returns the current login session keyring
-func SessionKeyring() (Keyring, error) {
-	return newKeyring(unix.KEY_SPEC_SESSION_KEYRING)
-}
-
-// UserKeyring returns the keyring specific to the current user.
-func UserKeyring() (Keyring, error) {
-	return newKeyring(unix.KEY_SPEC_USER_KEYRING)
-}
-
-// Unlink an object from a keyring
-func Unlink(parent Keyring, child ID) error {
-	_, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0)
-	return err
-}
-
-// Link a key into a keyring
-func Link(parent Keyring, child ID) error {
-	_, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0)
-	return err
-}
-
-// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it
-func ReadUserKeyring() ([]*Key, error) {
-	var (
-		b        []byte
-		err      error
-		sizeRead int
-	)
-	krSize := 4
-	size := krSize
-	b = make([]byte, size)
-	sizeRead = size + 1
-	for sizeRead > size {
-		r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size)
-		if err != nil {
-			return nil, err
-		}
-
-		if sizeRead = int(r1); sizeRead > size {
-			b = make([]byte, sizeRead)
-			size = sizeRead
-			sizeRead = size + 1
-		} else {
-			krSize = sizeRead
-		}
-	}
-	keyIDs := getKeyIDsFromByte(b[:krSize])
-	return keyIDs, err
-}
-
-func getKeyIDsFromByte(byteKeyIDs []byte) []*Key {
-	idSize := 4
-	var keys []*Key
-	for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize {
-		tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx]))
-		keys = append(keys, &Key{id: keyID(tempID)})
-	}
-	return keys
-}
34  vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go  generated vendored
@@ -1,34 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
-	"golang.org/x/sys/unix"
-)
-
-// KeyPerm represents in-kernel access control permission to keys and keyrings
-// as a 32-bit integer broken up into four permission sets, one per byte.
-// In MSB order, the perms are: Processor, User, Group, Other.
-type KeyPerm uint32
-
-const (
-	// PermOtherAll sets all permission for Other
-	PermOtherAll KeyPerm = 0x3f << (8 * iota)
-	// PermGroupAll sets all permission for Group
-	PermGroupAll
-	// PermUserAll sets all permission for User
-	PermUserAll
-	// PermProcessAll sets all permission for Processor
-	PermProcessAll
-)
-
-// SetPerm sets the permissions on a key or keyring.
-func SetPerm(k ID, p KeyPerm) error {
-	err := unix.KeyctlSetperm(int(k.ID()), uint32(p))
-	return err
-}
26  vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go  generated vendored
@@ -1,26 +0,0 @@
-// Copyright 2015 Jesse Sipprell. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build linux
-// +build linux
-
-package keyctl
-
-import (
-	"golang.org/x/sys/unix"
-)
-
-type keyID int32
-
-func newKeyring(id keyID) (*keyring, error) {
-	r1, err := unix.KeyctlGetKeyringID(int(id), true)
-	if err != nil {
-		return nil, err
-	}
-
-	if id < 0 {
-		r1 = int(id)
-	}
-	return &keyring{id: keyID(r1)}, nil
-}
106  vendor/github.com/containers/image/v5/internal/private/private.go  generated vendored  Normal file
@@ -0,0 +1,106 @@
+package private
+
+import (
+	"context"
+	"io"
+
+	"github.com/containers/image/v5/docker/reference"
+	"github.com/containers/image/v5/types"
+)
+
+// ImageSource is an internal extension to the types.ImageSource interface.
+type ImageSource interface {
+	types.ImageSource
+
+	// SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported.
+	SupportsGetBlobAt() bool
+	// BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt().
+	BlobChunkAccessor
+}
+
+// ImageDestination is an internal extension to the types.ImageDestination
+// interface.
+type ImageDestination interface {
+	types.ImageDestination
+
+	// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
+	SupportsPutBlobPartial() bool
+
+	// PutBlobWithOptions writes contents of stream and returns data representing the result.
+	// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
+	// inputInfo.Size is the expected length of stream, if known.
+	// inputInfo.MediaType describes the blob format, if known.
+	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+	// to any other readers for download using the supplied digest.
+	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+	PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error)
+
+	// PutBlobPartial attempts to create a blob using the data that is already present
+	// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
+	// It is available only if SupportsPutBlobPartial().
+	// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
+	// should fall back to PutBlobWithOptions.
+	PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error)
+
+	// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+	// info.Digest must not be empty.
+	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+	// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+	// reflected in the manifest that will be written.
+	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+	TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
+}
+
+// PutBlobOptions are used in PutBlobWithOptions.
+type PutBlobOptions struct {
+	Cache    types.BlobInfoCache // Cache to optionally update with the uploaded blob / look up blob infos.
+	IsConfig bool                // True if the blob is a config
+
+	// The following fields are new to internal/private. Users of internal/private MUST fill them in,
+	// but they also must expect that they will be ignored by types.ImageDestination transports.
+
+	EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+	LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+}
+
+// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
+type TryReusingBlobOptions struct {
+	Cache types.BlobInfoCache // Cache to use and/or update.
+	// If true, it is allowed to use an equivalent of the desired blob;
+	// in that case the returned info may not match the input.
+	CanSubstitute bool
+
+	// The following fields are new to internal/private. Users of internal/private MUST fill them in,
+	// but they also must expect that they will be ignored by types.ImageDestination transports.
+
+	EmptyLayer bool            // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+	LayerIndex *int            // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+	SrcRef     reference.Named // A reference to the source image that contains the input blob.
+}
+
+// ImageSourceChunk is a portion of a blob.
+// This API is experimental and can be changed without bumping the major version number.
+type ImageSourceChunk struct {
+	Offset uint64
+	Length uint64
+}
+
+// BlobChunkAccessor allows fetching discontiguous chunks of a blob.
+type BlobChunkAccessor interface {
+	// GetBlobAt returns a sequential channel of readers that contain data for the requested
+	// blob chunks, and a channel that might get a single error value.
+	// The specified chunks must be not overlapping and sorted by their offset.
+	// The readers must be fully consumed, in the order they are returned, before blocking
+	// to read the next chunk.
+	GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
+}
+
+// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request.
+type BadPartialRequestError struct {
+	Status string
+}
+
+func (e BadPartialRequestError) Error() string {
+	return e.Status
+}
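The BlobChunkAccessor contract above (readers must be consumed fully and in order; the error channel carries at most one value) implies a specific consumption loop on the caller's side. A hedged sketch of that loop, with invented names and in-memory stand-ins for the chunk readers:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// consumeChunks drains a (streams, errs) pair the way the contract requires:
// each reader fully, in the order delivered, then check the error channel.
func consumeChunks(streams chan io.ReadCloser, errs chan error) error {
	for stream := range streams {
		data, err := io.ReadAll(stream)
		stream.Close()
		if err != nil {
			return err
		}
		fmt.Printf("chunk: %q\n", data)
	}
	// At most one error is ever sent; receiving from a closed, empty
	// channel yields the zero value, i.e. nil.
	return <-errs
}

func main() {
	streams := make(chan io.ReadCloser, 2)
	errs := make(chan error, 1)
	streams <- io.NopCloser(strings.NewReader("part-1"))
	streams <- io.NopCloser(strings.NewReader("part-2"))
	close(streams)
	close(errs)
	if err := consumeChunks(streams, errs); err != nil {
		fmt.Println("error:", err)
	}
}
```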
3  vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go  generated vendored
@@ -3,7 +3,6 @@ package streamdigest
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"

 	"github.com/containers/image/v5/internal/putblobdigest"
@@ -16,7 +15,7 @@ import (
 // It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
 // If an error occurs, inputInfo is not modified.
 func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
-	diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
+	diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
 	if err != nil {
 		return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
 	}
91  vendor/github.com/containers/image/v5/internal/types/types.go  generated vendored
@@ -1,91 +0,0 @@
-package types
-
-import (
-	"context"
-	"io"
-
-	"github.com/containers/image/v5/docker/reference"
-	publicTypes "github.com/containers/image/v5/types"
-)
-
-// ImageDestinationWithOptions is an internal extension to the ImageDestination
-// interface.
-type ImageDestinationWithOptions interface {
-	publicTypes.ImageDestination
-
-	// PutBlobWithOptions is a wrapper around PutBlob.  If
-	// options.LayerIndex is set, the blob will be committed directly.
-	// Either by the calling goroutine or by another goroutine already
-	// committing layers.
-	//
-	// Please note that TryReusingBlobWithOptions and PutBlobWithOptions
-	// *must* be used the together.  Mixing the two with non "WithOptions"
-	// functions is not supported.
-	PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo publicTypes.BlobInfo, options PutBlobOptions) (publicTypes.BlobInfo, error)
-
-	// TryReusingBlobWithOptions is a wrapper around TryReusingBlob.  If
-	// options.LayerIndex is set, the reused blob will be recoreded as
-	// already pulled.
-	//
-	// Please note that TryReusingBlobWithOptions and PutBlobWithOptions
-	// *must* be used the together.  Mixing the two with non "WithOptions"
-	// functions is not supported.
-	TryReusingBlobWithOptions(ctx context.Context, blobinfo publicTypes.BlobInfo, options TryReusingBlobOptions) (bool, publicTypes.BlobInfo, error)
-}
-
-// PutBlobOptions are used in PutBlobWithOptions.
-type PutBlobOptions struct {
-	// Cache to look up blob infos.
-	Cache publicTypes.BlobInfoCache
-	// Denotes whether the blob is a config or not.
-	IsConfig bool
-	// Indicates an empty layer.
-	EmptyLayer bool
-	// The corresponding index in the layer slice.
-	LayerIndex *int
-}
-
-// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
-type TryReusingBlobOptions struct {
-	// Cache to look up blob infos.
-	Cache publicTypes.BlobInfoCache
-	// Use an equivalent of the desired blob.
-	CanSubstitute bool
-	// Indicates an empty layer.
-	EmptyLayer bool
-	// The corresponding index in the layer slice.
-	LayerIndex *int
-	// The reference of the image that contains the target blob.
-	SrcRef reference.Named
-}
-
-// ImageSourceChunk is a portion of a blob.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageSourceChunk struct {
-	Offset uint64
-	Length uint64
-}
-
-// ImageSourceSeekable is an image source that permits to fetch chunks of the entire blob.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageSourceSeekable interface {
-	// GetBlobAt returns a stream for the specified blob.
-	// The specified chunks must be not overlapping and sorted by their offset.
-	GetBlobAt(context.Context, publicTypes.BlobInfo, []ImageSourceChunk) (chan io.ReadCloser, chan error, error)
-}
-
-// ImageDestinationPartial is a service to store a blob by requesting the missing chunks to a ImageSourceSeekable.
-// This API is experimental and can be changed without bumping the major version number.
-type ImageDestinationPartial interface {
-	// PutBlobPartial writes contents of stream and returns data representing the result.
-	PutBlobPartial(ctx context.Context, stream ImageSourceSeekable, srcInfo publicTypes.BlobInfo, cache publicTypes.BlobInfoCache) (publicTypes.BlobInfo, error)
-}
-
-// BadPartialRequestError is returned by ImageSourceSeekable.GetBlobAt on an invalid request.
-type BadPartialRequestError struct {
-	Status string
-}
-
-func (e BadPartialRequestError) Error() string {
-	return e.Status
-}
3  vendor/github.com/containers/image/v5/oci/archive/oci_transport.go  generated vendored
@@ -3,7 +3,6 @@ package archive
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"strings"

@@ -161,7 +160,7 @@ func (t *tempDirOCIRef) deleteTempDir() error {
 // createOCIRef creates the oci reference of the image
 // If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files
 func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) {
-	dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
+	dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci")
 	if err != nil {
 		return tempDirOCIRef{}, errors.Wrapf(err, "creating temp directory")
 	}
11  vendor/github.com/containers/image/v5/oci/layout/oci_dest.go  generated vendored
@@ -4,7 +4,6 @@ import (
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"

@@ -112,7 +111,7 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *ociImageDestination) HasThreadSafePutBlob() bool {
	return false
	return true
}

// PutBlob writes contents of stream and returns data representing the result.

@@ -124,7 +123,7 @@ func (d *ociImageDestination) HasThreadSafePutBlob() bool {
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
	blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
	if err != nil {
		return types.BlobInfo{}, err
	}

@@ -238,7 +237,7 @@ func (d *ociImageDestination) PutManifest(ctx context.Context, m []byte, instanc
	if err := ensureParentDirectoryExists(blobPath); err != nil {
		return err
	}
	if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
	if err := os.WriteFile(blobPath, m, 0644); err != nil {
		return err
	}

@@ -309,14 +308,14 @@ func (d *ociImageDestination) PutSignatures(ctx context.Context, signatures [][]
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
	if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
	if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
		return err
	}
	indexJSON, err := json.Marshal(d.index)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644)
	return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
}

func ensureDirectoryExists(path string) error {
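The PutBlob contract quoted above (fail and delete any partial data if the stream errors) pairs naturally with os.CreateTemp, and flipping HasThreadSafePutBlob to true lets callers invoke PutBlob concurrently, e.g. uploading several layers in parallel. A hedged sketch of the cleanup pattern, assuming a hypothetical putBlob helper rather than the vendored method:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// putBlob is a hypothetical stand-in for the contract above: stream the
// input to a temp file, and remove the file if anything fails.
func putBlob(dir string, stream io.Reader) (path string, retErr error) {
	blobFile, err := os.CreateTemp(dir, "oci-put-blob") // replaces ioutil.TempFile
	if err != nil {
		return "", err
	}
	defer func() {
		blobFile.Close()
		if retErr != nil {
			os.Remove(blobFile.Name()) // MUST delete any data stored so far
		}
	}()
	if _, err := io.Copy(blobFile, stream); err != nil {
		return "", err
	}
	return blobFile.Name(), nil
}

func main() {
	path, err := putBlob(os.TempDir(), strings.NewReader("layer bytes"))
	fmt.Println(path, err)
}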
3  vendor/github.com/containers/image/v5/oci/layout/oci_src.go  generated  vendored
@@ -3,7 +3,6 @@ package layout
import (
	"context"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"

@@ -93,7 +92,7 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
		return nil, "", err
	}

	m, err := ioutil.ReadFile(manifestPath)
	m, err := os.ReadFile(manifestPath)
	if err != nil {
		return nil, "", err
	}
5  vendor/github.com/containers/image/v5/openshift/openshift-copies.go  generated  vendored
@@ -5,7 +5,6 @@ import (
	"crypto/x509"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"

@@ -625,7 +624,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
// LoadFromFile takes a filename and deserializes the contents into a Config object
func loadFromFile(filename string) (*clientcmdConfig, error) {
	kubeconfigBytes, err := ioutil.ReadFile(filename)
	kubeconfigBytes, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}

@@ -1013,7 +1012,7 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
		return data, nil
	}
	if len(file) > 0 {
		fileData, err := ioutil.ReadFile(file)
		fileData, err := os.ReadFile(file)
		if err != nil {
			return []byte{}, err
		}
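The dataFromSliceOrFile hunk above is a small but complete pattern: prefer bytes already in memory, otherwise fall back to reading a named file. A self-contained sketch under that reading; the function body mirrors the diff, while main and the file path are purely illustrative:

package main

import (
	"fmt"
	"os"
)

// dataFromSliceOrFile returns the in-memory data if present, otherwise
// the contents of file, otherwise nil.
func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
	if len(data) > 0 {
		return data, nil
	}
	if len(file) > 0 {
		fileData, err := os.ReadFile(file) // replaces ioutil.ReadFile
		if err != nil {
			return []byte{}, err
		}
		return fileData, nil
	}
	return nil, nil
}

func main() {
	// Inline data wins even when a file name is supplied.
	b, err := dataFromSliceOrFile([]byte("inline"), "/nonexistent")
	fmt.Println(string(b), err)
}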
Some files were not shown because too many files have changed in this diff.