Mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 22:08:44 +00:00)
Compare commits
41 Commits
8a88191c84
69728fdf93
904c745bb0
47066f2d77
fab344c335
adfa1d4e49
002978258c
05a2ed4921
e9535f868b
fa86297c36
2bb6f27d13
f90725d80c
644074cbb4
83416068d3
a3adf36db6
6510f1011b
e7b7be5734
1e01e38459
942cd6ec58
a902709e14
41de7f2f66
c264cec359
2b357d8276
4acc9f0d2c
c2732cb15d
49f709576a
7885162a35
01e58f8e25
36d860ebce
c8777f3bf7
8f64c0412f
985d4c09ae
8182255d22
11b5989872
2144a37c21
9c9a9f3a1f
60c98cacde
116e75fbfd
89ecd5a4c0
fc81803bfa
119eeb83a7
@@ -23,12 +23,12 @@ env:
 ####
 #### Cache-image names to test with (double-quotes around names are critical)
 ####
-FEDORA_NAME: "fedora-34"
-PRIOR_FEDORA_NAME: "fedora-33"
-UBUNTU_NAME: "ubuntu-2104"
+FEDORA_NAME: "fedora-35"
+PRIOR_FEDORA_NAME: "fedora-34"
+UBUNTU_NAME: "ubuntu-2110"

 # Google-cloud VM Images
-IMAGE_SUFFIX: "c6431352024203264"
+IMAGE_SUFFIX: "c6226133906620416"
 FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
 PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
 UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
@@ -7,6 +7,7 @@ import (
     "io/ioutil"
     "strings"

+    commonFlag "github.com/containers/common/pkg/flag"
     "github.com/containers/common/pkg/retry"
     "github.com/containers/image/v5/copy"
     "github.com/containers/image/v5/docker/reference"
@@ -24,16 +25,16 @@ type copyOptions struct {
     srcImage  *imageOptions
     destImage *imageDestOptions
     retryOpts *retry.RetryOptions
-    additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
-    removeSignatures bool // Do not copy signatures from the source image
-    signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
-    digestFile string // Write digest to this file
-    format optionalString // Force conversion of the image to a specified format
-    quiet bool // Suppress output information when copying images
-    all bool // Copy all of the images if the source is a list
-    encryptLayer []int // The list of layers to encrypt
-    encryptionKeys []string // Keys needed to encrypt the image
-    decryptionKeys []string // Keys needed to decrypt the image
+    additionalTags []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
+    removeSignatures bool // Do not copy signatures from the source image
+    signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
+    digestFile string // Write digest to this file
+    format commonFlag.OptionalString // Force conversion of the image to a specified format
+    quiet bool // Suppress output information when copying images
+    all bool // Copy all of the images if the source is a list
+    encryptLayer []int // The list of layers to encrypt
+    encryptionKeys []string // Keys needed to encrypt the image
+    decryptionKeys []string // Keys needed to decrypt the image
 }

 func copyCmd(global *globalOptions) *cobra.Command {
@@ -74,7 +75,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
     flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE-IMAGE")
     flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
     flags.StringVar(&opts.digestFile, "digestfile", "", "Write the digest of the pushed image to the specified file")
-    flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)`)
+    flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use in the destination (default is manifest type of source, with fallbacks)`)
     flags.StringSliceVar(&opts.encryptionKeys, "encryption-key", []string{}, "*Experimental* key with the encryption protocol to use needed to encrypt the image (e.g. jwe:/path/to/key.pem)")
     flags.IntSliceVar(&opts.encryptLayer, "encrypt-layer", []int{}, "*Experimental* the 0-indexed layer indices, with support for negative indexing (e.g. 0 is the first layer, -1 is the last layer)")
     flags.StringSliceVar(&opts.decryptionKeys, "decryption-key", []string{}, "*Experimental* key needed to decrypt the image")
@@ -117,8 +118,8 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
     }

     var manifestType string
-    if opts.format.present {
-        manifestType, err = parseManifestFormat(opts.format.value)
+    if opts.format.Present() {
+        manifestType, err = parseManifestFormat(opts.format.Value())
         if err != nil {
             return err
         }
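The hunks above swap skopeo's local optionalString type for commonFlag.OptionalString from github.com/containers/common/pkg/flag; NewOptionalStringValue registers the flag and Present()/Value() distinguish "flag not given" from "flag given, possibly empty". A minimal sketch of that pattern outside skopeo (the demo command and flag wiring here are illustrative, not part of this change):

```go
package main

import (
	"fmt"

	commonFlag "github.com/containers/common/pkg/flag"
	"github.com/spf13/cobra"
)

func main() {
	// format is tri-state: unset, set to "", or set to a value.
	var format commonFlag.OptionalString

	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			if format.Present() {
				fmt.Println("format explicitly set to:", format.Value())
			} else {
				fmt.Println("format not set; fall back to the source manifest type")
			}
			return nil
		},
	}
	// Register the optional flag, as the copy.go diff does for --format.
	cmd.Flags().Var(commonFlag.NewOptionalStringValue(&format), "format", "manifest type to use")
	_ = cmd.Execute()
}
```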
@@ -1,222 +0,0 @@
package main

import (
    "testing"

    "github.com/spf13/cobra"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestOptionalBoolSet(t *testing.T) {
    for _, c := range []struct {
        input    string
        accepted bool
        value    bool
    }{
        // Valid inputs documented for strconv.ParseBool == flag.BoolVar
        {"1", true, true},
        {"t", true, true},
        {"T", true, true},
        {"TRUE", true, true},
        {"true", true, true},
        {"True", true, true},
        {"0", true, false},
        {"f", true, false},
        {"F", true, false},
        {"FALSE", true, false},
        {"false", true, false},
        {"False", true, false},
        // A few invalid inputs
        {"", false, false},
        {"yes", false, false},
        {"no", false, false},
        {"2", false, false},
    } {
        var ob optionalBool
        v := internalNewOptionalBoolValue(&ob)
        require.False(t, ob.present)
        err := v.Set(c.input)
        if c.accepted {
            assert.NoError(t, err, c.input)
            assert.Equal(t, c.value, ob.value)
        } else {
            assert.Error(t, err, c.input)
            assert.False(t, ob.present) // Just to be extra paranoid.
        }
    }

    // Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
    // so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
    // is not called in any possible situation).
    var globalOB, commandOB optionalBool
    actionRun := false
    app := &cobra.Command{
        Use: "app",
    }
    optionalBoolFlag(app.PersistentFlags(), &globalOB, "global-OB", "")
    cmd := &cobra.Command{
        Use: "cmd",
        RunE: func(cmd *cobra.Command, args []string) error {
            assert.False(t, globalOB.present)
            assert.False(t, commandOB.present)
            actionRun = true
            return nil
        },
    }
    optionalBoolFlag(cmd.Flags(), &commandOB, "command-OB", "")
    app.AddCommand(cmd)
    app.SetArgs([]string{"cmd"})
    err := app.Execute()
    require.NoError(t, err)
    assert.True(t, actionRun)
}

func TestOptionalBoolString(t *testing.T) {
    for _, c := range []struct {
        input    optionalBool
        expected string
    }{
        {optionalBool{present: true, value: true}, "true"},
        {optionalBool{present: true, value: false}, "false"},
        {optionalBool{present: false, value: true}, ""},
        {optionalBool{present: false, value: false}, ""},
    } {
        var ob optionalBool
        v := internalNewOptionalBoolValue(&ob)
        ob = c.input
        res := v.String()
        assert.Equal(t, c.expected, res)
    }
}

func TestOptionalBoolIsBoolFlag(t *testing.T) {
    // IsBoolFlag means that the argument value must either be part of the same argument, with =;
    // if there is no =, the value is set to true.
    // This differs form other flags, where the argument is required and may be either separated with = or supplied in the next argument.
    for _, c := range []struct {
        input        []string
        expectedOB   optionalBool
        expectedArgs []string
    }{
        {[]string{"1", "2"}, optionalBool{present: false}, []string{"1", "2"}}, // Flag not present
        {[]string{"--OB=true", "1", "2"}, optionalBool{present: true, value: true}, []string{"1", "2"}}, // --OB=true
        {[]string{"--OB=false", "1", "2"}, optionalBool{present: true, value: false}, []string{"1", "2"}}, // --OB=false
        {[]string{"--OB", "true", "1", "2"}, optionalBool{present: true, value: true}, []string{"true", "1", "2"}}, // --OB true
        {[]string{"--OB", "false", "1", "2"}, optionalBool{present: true, value: true}, []string{"false", "1", "2"}}, // --OB false
    } {
        var ob optionalBool
        actionRun := false
        app := &cobra.Command{Use: "app"}
        cmd := &cobra.Command{
            Use: "cmd",
            RunE: func(cmd *cobra.Command, args []string) error {
                assert.Equal(t, c.expectedOB, ob)
                assert.Equal(t, c.expectedArgs, args)
                actionRun = true
                return nil
            },
        }
        optionalBoolFlag(cmd.Flags(), &ob, "OB", "")
        app.AddCommand(cmd)

        app.SetArgs(append([]string{"cmd"}, c.input...))
        err := app.Execute()
        require.NoError(t, err)
        assert.True(t, actionRun)
    }
}

func TestOptionalStringSet(t *testing.T) {
    // Really just a smoke test, but differentiating between not present and empty.
    for _, c := range []string{"", "hello"} {
        var os optionalString
        v := newOptionalStringValue(&os)
        require.False(t, os.present)
        err := v.Set(c)
        assert.NoError(t, err, c)
        assert.Equal(t, c, os.value)
    }

    // Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
    // so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
    // is not called in any possible situation).
    var globalOS, commandOS optionalString
    actionRun := false
    app := &cobra.Command{
        Use: "app",
    }
    app.PersistentFlags().Var(newOptionalStringValue(&globalOS), "global-OS", "")
    cmd := &cobra.Command{
        Use: "cmd",
        RunE: func(cmd *cobra.Command, args []string) error {
            assert.False(t, globalOS.present)
            assert.False(t, commandOS.present)
            actionRun = true
            return nil
        },
    }
    cmd.Flags().Var(newOptionalStringValue(&commandOS), "command-OS", "")
    app.AddCommand(cmd)
    app.SetArgs([]string{"cmd"})
    err := app.Execute()
    require.NoError(t, err)
    assert.True(t, actionRun)
}

func TestOptionalStringString(t *testing.T) {
    for _, c := range []struct {
        input    optionalString
        expected string
    }{
        {optionalString{present: true, value: "hello"}, "hello"},
        {optionalString{present: true, value: ""}, ""},
        {optionalString{present: false, value: "hello"}, ""},
        {optionalString{present: false, value: ""}, ""},
    } {
        var os optionalString
        v := newOptionalStringValue(&os)
        os = c.input
        res := v.String()
        assert.Equal(t, c.expected, res)
    }
}

func TestOptionalStringIsBoolFlag(t *testing.T) {
    // NOTE: optionalStringValue does not implement IsBoolFlag!
    // IsBoolFlag means that the argument value must either be part of the same argument, with =;
    // if there is no =, the value is set to true.
    // This differs form other flags, where the argument is required and may be either separated with = or supplied in the next argument.
    for _, c := range []struct {
        input        []string
        expectedOS   optionalString
        expectedArgs []string
    }{
        {[]string{"1", "2"}, optionalString{present: false}, []string{"1", "2"}}, // Flag not present
        {[]string{"--OS=hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}}, // --OS=true
        {[]string{"--OS=", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}}, // --OS=false
        {[]string{"--OS", "hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}}, // --OS true
        {[]string{"--OS", "", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}}, // --OS false
    } {
        var os optionalString
        actionRun := false
        app := &cobra.Command{
            Use: "app",
        }
        cmd := &cobra.Command{
            Use: "cmd",
            RunE: func(cmd *cobra.Command, args []string) error {
                assert.Equal(t, c.expectedOS, os)
                assert.Equal(t, c.expectedArgs, args)
                actionRun = true
                return nil
            },
        }
        cmd.Flags().Var(newOptionalStringValue(&os), "OS", "")
        app.AddCommand(cmd)
        app.SetArgs(append([]string{"cmd"}, c.input...))
        err := app.Execute()
        require.NoError(t, err)
        assert.True(t, actionRun)
    }
}
@@ -24,12 +24,13 @@ import (
 )

 type inspectOptions struct {
-    global *globalOptions
-    image *imageOptions
-    retryOpts *retry.RetryOptions
-    format string
-    raw bool // Output the raw manifest instead of parsing information about the image
-    config bool // Output the raw config blob instead of parsing information about the image
+    global *globalOptions
+    image *imageOptions
+    retryOpts *retry.RetryOptions
+    format string
+    raw bool // Output the raw manifest instead of parsing information about the image
+    config bool // Output the raw config blob instead of parsing information about the image
+    doNotListTags bool // Do not list all tags available in the same repository
 }

 func inspectCmd(global *globalOptions) *cobra.Command {
@@ -60,6 +61,7 @@ See skopeo(1) section "IMAGE NAMES" for the expected format
     flags.BoolVar(&opts.raw, "raw", false, "output raw manifest or configuration")
     flags.BoolVar(&opts.config, "config", false, "output configuration")
     flags.StringVarP(&opts.format, "format", "f", "", "Format the output to a Go template")
+    flags.BoolVarP(&opts.doNotListTags, "no-tags", "n", false, "Do not list the available tags from the repository in the output")
     flags.AddFlagSet(&sharedFlags)
     flags.AddFlagSet(&imageFlags)
     flags.AddFlagSet(&retryFlags)
@@ -192,7 +194,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
     if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
         outputData.Name = dockerRef.Name()
     }
-    if img.Reference().Transport() == docker.Transport {
+    if !opts.doNotListTags && img.Reference().Transport() == docker.Transport {
         sys, err := opts.image.newSystemContext()
         if err != nil {
             return err
@@ -5,6 +5,7 @@ import (
     "os"

     "github.com/containers/common/pkg/auth"
+    commonFlag "github.com/containers/common/pkg/flag"
     "github.com/containers/image/v5/types"
     "github.com/spf13/cobra"
 )
@@ -12,7 +13,7 @@ import (
 type loginOptions struct {
     global *globalOptions
     loginOpts auth.LoginOptions
-    tlsVerify optionalBool
+    tlsVerify commonFlag.OptionalBool
 }

 func loginCmd(global *globalOptions) *cobra.Command {
@@ -28,7 +29,7 @@ func loginCmd(global *globalOptions) *cobra.Command {
     }
     adjustUsage(cmd)
     flags := cmd.Flags()
-    optionalBoolFlag(flags, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the registry")
+    commonFlag.OptionalBoolFlag(flags, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the registry")
     flags.AddFlagSet(auth.GetLoginFlags(&opts.loginOpts))
     return cmd
 }
@@ -40,8 +41,8 @@ func (opts *loginOptions) run(args []string, stdout io.Writer) error {
     opts.loginOpts.Stdin = os.Stdin
     opts.loginOpts.AcceptRepositories = true
     sys := opts.global.newSystemContext()
-    if opts.tlsVerify.present {
-        sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
+    if opts.tlsVerify.Present() {
+        sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.Value())
     }
     return auth.Login(ctx, sys, &opts.loginOpts, args)
 }
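login (and logout, below) read the tri-state --tls-verify through commonFlag.OptionalBool: the SystemContext field is only overridden when the flag was actually passed, so an unset flag keeps the containers/image default and per-registry configuration. A small sketch of just that mapping, with an illustrative helper name that is not part of the diff:

```go
package main

import (
	commonFlag "github.com/containers/common/pkg/flag"
	"github.com/containers/image/v5/types"
)

// applyTLSVerify shows the mapping used by loginCmd/logoutCmd above.
func applyTLSVerify(sys *types.SystemContext, tlsVerify commonFlag.OptionalBool) {
	if tlsVerify.Present() {
		// --tls-verify=false means "skip verification", hence the negation.
		sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!tlsVerify.Value())
	}
	// If the flag was never given, the field stays types.OptionalBoolUndefined,
	// so library defaults and registries.conf settings still apply.
}
```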
@@ -4,6 +4,7 @@ import (
     "io"

     "github.com/containers/common/pkg/auth"
+    commonFlag "github.com/containers/common/pkg/flag"
     "github.com/containers/image/v5/types"
     "github.com/spf13/cobra"
 )
@@ -11,7 +12,7 @@ import (
 type logoutOptions struct {
     global *globalOptions
     logoutOpts auth.LogoutOptions
-    tlsVerify optionalBool
+    tlsVerify commonFlag.OptionalBool
 }

 func logoutCmd(global *globalOptions) *cobra.Command {
@@ -27,7 +28,7 @@ func logoutCmd(global *globalOptions) *cobra.Command {
     }
     adjustUsage(cmd)
     flags := cmd.Flags()
-    optionalBoolFlag(flags, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the registry")
+    commonFlag.OptionalBoolFlag(flags, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the registry")
     flags.AddFlagSet(auth.GetLogoutFlags(&opts.logoutOpts))
     return cmd
 }
@@ -36,8 +37,8 @@ func (opts *logoutOptions) run(args []string, stdout io.Writer) error {
     opts.logoutOpts.Stdout = stdout
     opts.logoutOpts.AcceptRepositories = true
     sys := opts.global.newSystemContext()
-    if opts.tlsVerify.present {
-        sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
+    if opts.tlsVerify.Present() {
+        sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.Value())
     }
     return auth.Logout(sys, &opts.logoutOpts, args)
 }
@@ -3,8 +3,10 @@ package main
 import (
     "context"
     "fmt"
+    "strings"
     "time"

+    commonFlag "github.com/containers/common/pkg/flag"
     "github.com/containers/image/v5/signature"
     "github.com/containers/image/v5/types"
     "github.com/containers/skopeo/version"
@@ -20,17 +22,32 @@ var gitCommit = ""
 var defaultUserAgent = "skopeo/" + version.Version

 type globalOptions struct {
-    debug bool // Enable debug output
-    tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
-    policyPath string // Path to a signature verification policy file
-    insecurePolicy bool // Use an "allow everything" signature verification policy
-    registriesDirPath string // Path to a "registries.d" registry configuration directory
-    overrideArch string // Architecture to use for choosing images, instead of the runtime one
-    overrideOS string // OS to use for choosing images, instead of the runtime one
-    overrideVariant string // Architecture variant to use for choosing images, instead of the runtime one
-    commandTimeout time.Duration // Timeout for the command execution
-    registriesConfPath string // Path to the "registries.conf" file
-    tmpDir string // Path to use for big temporary files
+    debug bool // Enable debug output
+    tlsVerify commonFlag.OptionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
+    policyPath string // Path to a signature verification policy file
+    insecurePolicy bool // Use an "allow everything" signature verification policy
+    registriesDirPath string // Path to a "registries.d" registry configuration directory
+    overrideArch string // Architecture to use for choosing images, instead of the runtime one
+    overrideOS string // OS to use for choosing images, instead of the runtime one
+    overrideVariant string // Architecture variant to use for choosing images, instead of the runtime one
+    commandTimeout time.Duration // Timeout for the command execution
+    registriesConfPath string // Path to the "registries.conf" file
+    tmpDir string // Path to use for big temporary files
 }

+// requireSubcommand returns an error if no sub command is provided
+// This was copied from podman: `github.com/containers/podman/cmd/podman/validate/args.go
+// Some small style changes to match skopeo were applied, but try to apply any
+// bugfixes there first.
+func requireSubcommand(cmd *cobra.Command, args []string) error {
+    if len(args) > 0 {
+        suggestions := cmd.SuggestionsFor(args[0])
+        if len(suggestions) == 0 {
+            return fmt.Errorf("Unrecognized command `%[1]s %[2]s`\nTry '%[1]s --help' for more information", cmd.CommandPath(), args[0])
+        }
+        return fmt.Errorf("Unrecognized command `%[1]s %[2]s`\n\nDid you mean this?\n\t%[3]s\n\nTry '%[1]s --help' for more information", cmd.CommandPath(), args[0], strings.Join(suggestions, "\n\t"))
+    }
+    return fmt.Errorf("Missing command '%[1]s COMMAND'\nTry '%[1]s --help' for more information", cmd.CommandPath())
+}
+
 // createApp returns a cobra.Command, and the underlying globalOptions object, to be run or tested.
@@ -40,6 +57,7 @@ func createApp() (*cobra.Command, *globalOptions) {
     rootCommand := &cobra.Command{
         Use: "skopeo",
         Long: "Various operations with container images and container image registries",
+        RunE: requireSubcommand,
         PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
             return opts.before(cmd)
         },
@@ -78,7 +96,7 @@ func createApp() (*cobra.Command, *globalOptions) {
         logrus.Fatal("unable to mark registries-conf flag as hidden")
     }
     rootCommand.PersistentFlags().StringVar(&opts.tmpDir, "tmpdir", "", "directory used to store temporary files")
-    flag := optionalBoolFlag(rootCommand.Flags(), &opts.tlsVerify, "tls-verify", "Require HTTPS and verify certificates when accessing the registry")
+    flag := commonFlag.OptionalBoolFlag(rootCommand.Flags(), &opts.tlsVerify, "tls-verify", "Require HTTPS and verify certificates when accessing the registry")
     flag.Hidden = true
     rootCommand.AddCommand(
         copyCmd(&opts),
@@ -88,6 +106,7 @@ func createApp() (*cobra.Command, *globalOptions) {
         loginCmd(&opts),
         logoutCmd(&opts),
         manifestDigestCmd(),
+        proxyCmd(&opts),
         syncCmd(&opts),
         standaloneSignCmd(),
         standaloneVerifyCmd(),
@@ -102,7 +121,7 @@ func (opts *globalOptions) before(cmd *cobra.Command) error {
     if opts.debug {
         logrus.SetLevel(logrus.DebugLevel)
     }
-    if opts.tlsVerify.present {
+    if opts.tlsVerify.Present() {
         logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
     }
     return nil
@@ -159,8 +178,8 @@ func (opts *globalOptions) newSystemContext() *types.SystemContext {
         DockerRegistryUserAgent: defaultUserAgent,
     }
     // DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
-    if opts.tlsVerify.present {
-        ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
+    if opts.tlsVerify.Present() {
+        ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.Value())
     }
     return ctx
 }
cmd/skopeo/proxy.go (new file, 694 lines)
@@ -0,0 +1,694 @@
//go:build !windows
// +build !windows

package main

/*
This code is currently only intended to be used by ostree
to fetch content via containers. The API is subject
to change. A goal however is to stabilize the API
eventually as a full out-of-process interface to the
core containers/image library functionality.

To use this command, in a parent process create a
`socketpair()` of type `SOCK_SEQPACKET`. Fork
off this command, and pass one half of the socket
pair to the child. Providing it on stdin (fd 0)
is the expected default.

The protocol is JSON for the control layer,
and a read side of a `pipe()` passed for large data.

Base JSON protocol:

request: { method: "MethodName": args: [arguments] }
reply: { success: bool, value: JSVAL, pipeid: number, error: string }

For any non-metadata i.e. payload data from `GetManifest`
and `GetBlob` the server will pass back the read half of a `pipe(2)` via FD passing,
along with a `pipeid` integer.

The expected flow looks like this:

- Initialize
  And validate the returned protocol version versus
  what your client supports.
- OpenImage docker://quay.io/someorg/example:latest
  (returns an imageid)
- GetManifest imageid (and associated <pipeid>)
  (Streaming read data from pipe)
- FinishPipe <pipeid>
- GetBlob imageid sha256:...
  (Streaming read data from pipe)
- FinishPipe <pipeid>
- GetBlob imageid sha256:...
  (Streaming read data from pipe)
- FinishPipe <pipeid>
- CloseImage imageid

You may interleave invocations of these methods, e.g. one
can also invoke `OpenImage` multiple times, as well as
starting multiple GetBlob requests before calling `FinishPipe`
on them. The server will stream data into the pipefd
until `FinishPipe` is invoked.

Note that the pipe will not be closed by the server until
the client has invoked `FinishPipe`. This is to ensure
that the client checks for errors. For example, `GetBlob`
performs digest (e.g. sha256) verification and this must
be checked after all data has been written.
*/

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "io"
    "net"
    "os"
    "sync"
    "syscall"

    "github.com/containers/image/v5/image"
    "github.com/containers/image/v5/manifest"
    "github.com/containers/image/v5/pkg/blobinfocache"
    "github.com/containers/image/v5/transports/alltransports"
    "github.com/containers/image/v5/types"
    "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/spf13/cobra"
)

// protocolVersion is semantic version of the protocol used by this proxy.
// The first version of the protocol has major version 0.2 to signify a
// departure from the original code which used HTTP.
//
// 0.2.1: Initial version
// 0.2.2: Added support for fetching image configuration as OCI
const protocolVersion = "0.2.2"

// maxMsgSize is the current limit on a packet size.
// Note that all non-metadata (i.e. payload data) is sent over a pipe.
const maxMsgSize = 32 * 1024

// maxJSONFloat is ECMA Number.MAX_SAFE_INTEGER
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number/MAX_SAFE_INTEGER
// We hard error if the input JSON numbers we expect to be
// integers are above this.
const maxJSONFloat = float64(1<<53 - 1)

// request is the JSON serialization of a function call
type request struct {
    // Method is the name of the function
    Method string `json:"method"`
    // Args is the arguments (parsed inside the fuction)
    Args []interface{} `json:"args"`
}

// reply is serialized to JSON as the return value from a function call.
type reply struct {
    // Success is true if and only if the call succeeded.
    Success bool `json:"success"`
    // Value is an arbitrary value (or values, as array/map) returned from the call.
    Value interface{} `json:"value"`
    // PipeID is an index into open pipes, and should be passed to FinishPipe
    PipeID uint32 `json:"pipeid"`
    // Error should be non-empty if Success == false
    Error string `json:"error"`
}

// replyBuf is our internal deserialization of reply plus optional fd
type replyBuf struct {
    // value will be converted to a reply Value
    value interface{}
    // fd is the read half of a pipe, passed back to the client
    fd *os.File
    // pipeid will be provided to the client as PipeID, an index into our open pipes
    pipeid uint32
}

// activePipe is an open pipe to the client.
// It contains an error value
type activePipe struct {
    // w is the write half of the pipe
    w *os.File
    // wg is completed when our worker goroutine is done
    wg sync.WaitGroup
    // err may be set in our worker goroutine
    err error
}

// openImage is an opened image reference
type openImage struct {
    // id is an opaque integer handle
    id uint32
    src types.ImageSource
    cachedimg types.Image
}

// proxyHandler is the state associated with our socket.
type proxyHandler struct {
    // lock protects everything else in this structure.
    lock sync.Mutex
    // opts is CLI options
    opts *proxyOptions
    sysctx *types.SystemContext
    cache types.BlobInfoCache

    // imageSerial is a counter for open images
    imageSerial uint32
    // images holds our opened images
    images map[uint32]*openImage
    // activePipes maps from "pipeid" to a pipe + goroutine pair
    activePipes map[uint32]*activePipe
}

// Initialize performs one-time initialization, and returns the protocol version
func (h *proxyHandler) Initialize(args []interface{}) (replyBuf, error) {
    h.lock.Lock()
    defer h.lock.Unlock()

    var ret replyBuf

    if len(args) != 0 {
        return ret, fmt.Errorf("invalid request, expecting zero arguments")
    }

    if h.sysctx != nil {
        return ret, fmt.Errorf("already initialized")
    }

    sysctx, err := h.opts.imageOpts.newSystemContext()
    if err != nil {
        return ret, err
    }
    h.sysctx = sysctx
    h.cache = blobinfocache.DefaultCache(sysctx)

    r := replyBuf{
        value: protocolVersion,
    }
    return r, nil
}

// OpenImage accepts a string image reference i.e. TRANSPORT:REF - like `skopeo copy`.
// The return value is an opaque integer handle.
func (h *proxyHandler) OpenImage(args []interface{}) (replyBuf, error) {
    h.lock.Lock()
    defer h.lock.Unlock()
    var ret replyBuf

    if h.sysctx == nil {
        return ret, fmt.Errorf("client error: must invoke Initialize")
    }
    if len(args) != 1 {
        return ret, fmt.Errorf("invalid request, expecting one argument")
    }
    imageref, ok := args[0].(string)
    if !ok {
        return ret, fmt.Errorf("expecting string imageref, not %T", args[0])
    }

    imgRef, err := alltransports.ParseImageName(imageref)
    if err != nil {
        return ret, err
    }
    imgsrc, err := imgRef.NewImageSource(context.Background(), h.sysctx)
    if err != nil {
        return ret, err
    }

    h.imageSerial++
    openimg := &openImage{
        id: h.imageSerial,
        src: imgsrc,
    }
    h.images[openimg.id] = openimg
    ret.value = openimg.id

    return ret, nil
}

func (h *proxyHandler) CloseImage(args []interface{}) (replyBuf, error) {
    h.lock.Lock()
    defer h.lock.Unlock()
    var ret replyBuf

    if h.sysctx == nil {
        return ret, fmt.Errorf("client error: must invoke Initialize")
    }
    if len(args) != 1 {
        return ret, fmt.Errorf("invalid request, expecting one argument")
    }
    imgref, err := h.parseImageFromID(args[0])
    if err != nil {
        return ret, err
    }
    imgref.src.Close()
    delete(h.images, imgref.id)

    return ret, nil
}

func parseImageID(v interface{}) (uint32, error) {
    imgidf, ok := v.(float64)
    if !ok {
        return 0, fmt.Errorf("expecting integer imageid, not %T", v)
    }
    return uint32(imgidf), nil
}

// parseUint64 validates that a number fits inside a JavaScript safe integer
func parseUint64(v interface{}) (uint64, error) {
    f, ok := v.(float64)
    if !ok {
        return 0, fmt.Errorf("expecting numeric, not %T", v)
    }
    if f > maxJSONFloat {
        return 0, fmt.Errorf("out of range integer for numeric %f", f)
    }
    return uint64(f), nil
}

func (h *proxyHandler) parseImageFromID(v interface{}) (*openImage, error) {
    imgid, err := parseImageID(v)
    if err != nil {
        return nil, err
    }
    imgref, ok := h.images[imgid]
    if !ok {
        return nil, fmt.Errorf("no image %v", imgid)
    }
    return imgref, nil
}

func (h *proxyHandler) allocPipe() (*os.File, *activePipe, error) {
    piper, pipew, err := os.Pipe()
    if err != nil {
        return nil, nil, err
    }
    f := activePipe{
        w: pipew,
    }
    h.activePipes[uint32(pipew.Fd())] = &f
    f.wg.Add(1)
    return piper, &f, nil
}

// returnBytes generates a return pipe() from a byte array
// In the future it might be nicer to return this via memfd_create()
func (h *proxyHandler) returnBytes(retval interface{}, buf []byte) (replyBuf, error) {
    var ret replyBuf
    piper, f, err := h.allocPipe()
    if err != nil {
        return ret, err
    }

    go func() {
        // Signal completion when we return
        defer f.wg.Done()
        _, err = io.Copy(f.w, bytes.NewReader(buf))
        if err != nil {
            f.err = err
        }
    }()

    ret.value = retval
    ret.fd = piper
    ret.pipeid = uint32(f.w.Fd())
    return ret, nil
}

// cacheTargetManifest is invoked when GetManifest or GetConfig is invoked
// the first time for a given image. If the requested image is a manifest
// list, this function resolves it to the image matching the calling process'
// operating system and architecture.
//
// TODO: Add GetRawManifest or so that exposes manifest lists
func (h *proxyHandler) cacheTargetManifest(img *openImage) error {
    ctx := context.Background()
    if img.cachedimg != nil {
        return nil
    }
    unparsedToplevel := image.UnparsedInstance(img.src, nil)
    mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
    if err != nil {
        return err
    }
    var target *image.UnparsedImage
    if manifest.MIMETypeIsMultiImage(manifestType) {
        manifestList, err := manifest.ListFromBlob(mfest, manifestType)
        if err != nil {
            return err
        }
        instanceDigest, err := manifestList.ChooseInstance(h.sysctx)
        if err != nil {
            return err
        }
        target = image.UnparsedInstance(img.src, &instanceDigest)
    } else {
        target = unparsedToplevel
    }
    cachedimg, err := image.FromUnparsedImage(ctx, h.sysctx, target)
    if err != nil {
        return err
    }
    img.cachedimg = cachedimg
    return nil
}

// GetManifest returns a copy of the manifest, converted to OCI format, along with the original digest.
// Manifest lists are resolved to the current operating system and architecture.
func (h *proxyHandler) GetManifest(args []interface{}) (replyBuf, error) {
    h.lock.Lock()
    defer h.lock.Unlock()

    var ret replyBuf

    if h.sysctx == nil {
        return ret, fmt.Errorf("client error: must invoke Initialize")
    }
    if len(args) != 1 {
        return ret, fmt.Errorf("invalid request, expecting one argument")
    }
    imgref, err := h.parseImageFromID(args[0])
    if err != nil {
        return ret, err
    }

    err = h.cacheTargetManifest(imgref)
    if err != nil {
        return ret, err
    }
    img := imgref.cachedimg

    ctx := context.Background()
    rawManifest, manifestType, err := img.Manifest(ctx)
    if err != nil {
        return ret, err
    }

    // We only support OCI and docker2schema2. We know docker2schema2 can be easily+cheaply
    // converted into OCI, so consumers only need to see OCI.
    switch manifestType {
    case imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType:
        break
    // Explicitly reject e.g. docker schema 1 type with a "legacy" note
    case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
        return ret, fmt.Errorf("unsupported legacy manifest MIME type: %s", manifestType)
    default:
        return ret, fmt.Errorf("unsupported manifest MIME type: %s", manifestType)
    }

    // We always return the original digest, as that's what clients need to do pull-by-digest
    // and in general identify the image.
    digest, err := manifest.Digest(rawManifest)
    if err != nil {
        return ret, err
    }
    var serialized []byte
    // But, we convert to OCI format on the wire if it's not already. The idea here is that by reusing the containers/image
    // stack, clients to this proxy can pretend the world is OCI only, and not need to care about e.g.
    // docker schema and MIME types.
    if manifestType != imgspecv1.MediaTypeImageManifest {
        manifestUpdates := types.ManifestUpdateOptions{ManifestMIMEType: imgspecv1.MediaTypeImageManifest}
        ociImage, err := img.UpdatedImage(ctx, manifestUpdates)
        if err != nil {
            return ret, err
        }

        ociSerialized, _, err := ociImage.Manifest(ctx)
        if err != nil {
            return ret, err
        }
        serialized = ociSerialized
    } else {
        serialized = rawManifest
    }
    return h.returnBytes(digest, serialized)
}

// GetConfig returns a copy of the image configuration, converted to OCI format.
func (h *proxyHandler) GetConfig(args []interface{}) (replyBuf, error) {
    h.lock.Lock()
    defer h.lock.Unlock()

    var ret replyBuf

    if h.sysctx == nil {
        return ret, fmt.Errorf("client error: must invoke Initialize")
    }
    if len(args) != 1 {
        return ret, fmt.Errorf("invalid request, expecting: [imgid]")
    }
    imgref, err := h.parseImageFromID(args[0])
    if err != nil {
        return ret, err
    }
    err = h.cacheTargetManifest(imgref)
    if err != nil {
        return ret, err
    }
    img := imgref.cachedimg

    ctx := context.TODO()
    config, err := img.OCIConfig(ctx)
    if err != nil {
        return ret, err
    }
    serialized, err := json.Marshal(&config.Config)
    if err != nil {
        return ret, err
    }
    return h.returnBytes(nil, serialized)

}

// GetBlob fetches a blob, performing digest verification.
func (h *proxyHandler) GetBlob(args []interface{}) (replyBuf, error) {
    h.lock.Lock()
    defer h.lock.Unlock()

    var ret replyBuf

    if h.sysctx == nil {
        return ret, fmt.Errorf("client error: must invoke Initialize")
    }
    if len(args) != 3 {
        return ret, fmt.Errorf("found %d args, expecting (imgid, digest, size)", len(args))
    }
    imgref, err := h.parseImageFromID(args[0])
    if err != nil {
        return ret, err
    }
    digestStr, ok := args[1].(string)
    if !ok {
        return ret, fmt.Errorf("expecting string blobid")
    }
    size, err := parseUint64(args[2])
    if err != nil {
        return ret, err
    }

    ctx := context.TODO()
    d, err := digest.Parse(digestStr)
    if err != nil {
        return ret, err
    }
    blobr, blobSize, err := imgref.src.GetBlob(ctx, types.BlobInfo{Digest: d, Size: int64(size)}, h.cache)
    if err != nil {
        return ret, err
    }

    piper, f, err := h.allocPipe()
    if err != nil {
        return ret, err
    }
    go func() {
        // Signal completion when we return
        defer f.wg.Done()
        verifier := d.Verifier()
        tr := io.TeeReader(blobr, verifier)
        n, err := io.Copy(f.w, tr)
        if err != nil {
            f.err = err
            return
        }
        if n != int64(size) {
            f.err = fmt.Errorf("expected %d bytes in blob, got %d", size, n)
        }
        if !verifier.Verified() {
            f.err = fmt.Errorf("corrupted blob, expecting %s", d.String())
        }
    }()

    ret.value = blobSize
    ret.fd = piper
    ret.pipeid = uint32(f.w.Fd())
    return ret, nil
}

// FinishPipe waits for the worker goroutine to finish, and closes the write side of the pipe.
func (h *proxyHandler) FinishPipe(args []interface{}) (replyBuf, error) {
    h.lock.Lock()
    defer h.lock.Unlock()

    var ret replyBuf

    pipeidv, err := parseUint64(args[0])
    if err != nil {
        return ret, err
    }
    pipeid := uint32(pipeidv)

    f, ok := h.activePipes[pipeid]
    if !ok {
        return ret, fmt.Errorf("finishpipe: no active pipe %d", pipeid)
    }

    // Wait for the goroutine to complete
    f.wg.Wait()
    // And only now do we close the write half; this forces the client to call this API
    f.w.Close()
    // Propagate any errors from the goroutine worker
    err = f.err
    delete(h.activePipes, pipeid)
    return ret, err
}

// send writes a reply buffer to the socket
func (buf replyBuf) send(conn *net.UnixConn, err error) error {
    replyToSerialize := reply{
        Success: err == nil,
        Value: buf.value,
        PipeID: buf.pipeid,
    }
    if err != nil {
        replyToSerialize.Error = err.Error()
    }
    serializedReply, err := json.Marshal(&replyToSerialize)
    if err != nil {
        return err
    }
    // We took ownership of the FD - close it when we're done.
    defer func() {
        if buf.fd != nil {
            buf.fd.Close()
        }
    }()
    // Copy the FD number to the socket ancillary buffer
    fds := make([]int, 0)
    if buf.fd != nil {
        fds = append(fds, int(buf.fd.Fd()))
    }
    oob := syscall.UnixRights(fds...)
    n, oobn, err := conn.WriteMsgUnix(serializedReply, oob, nil)
    if err != nil {
        return err
    }
    // Validate that we sent the full packet
    if n != len(serializedReply) || oobn != len(oob) {
        return io.ErrShortWrite
    }
    return nil
}

type proxyOptions struct {
    global *globalOptions
    imageOpts *imageOptions
    sockFd int
}

func proxyCmd(global *globalOptions) *cobra.Command {
    sharedFlags, sharedOpts := sharedImageFlags()
    imageFlags, imageOpts := imageFlags(global, sharedOpts, nil, "", "")
    opts := proxyOptions{global: global, imageOpts: imageOpts}
    cmd := &cobra.Command{
        Use: "experimental-image-proxy [command options] IMAGE",
        Short: "Interactive proxy for fetching container images (EXPERIMENTAL)",
        Long: `Run skopeo as a proxy, supporting HTTP requests to fetch manifests and blobs.`,
        RunE: commandAction(opts.run),
        Args: cobra.ExactArgs(0),
        // Not stabilized yet
        Hidden: true,
        Example: `skopeo experimental-image-proxy --sockfd 3`,
    }
    adjustUsage(cmd)
    flags := cmd.Flags()
    flags.AddFlagSet(&sharedFlags)
    flags.AddFlagSet(&imageFlags)
    flags.IntVar(&opts.sockFd, "sockfd", 0, "Serve on opened socket pair (default 0/stdin)")
    return cmd
}

// processRequest dispatches a remote request.
// replyBuf is the result of the invocation.
// terminate should be true if processing of requests should halt.
func (h *proxyHandler) processRequest(req request) (rb replyBuf, terminate bool, err error) {
    // Dispatch on the method
    switch req.Method {
    case "Initialize":
        rb, err = h.Initialize(req.Args)
    case "OpenImage":
        rb, err = h.OpenImage(req.Args)
    case "CloseImage":
        rb, err = h.CloseImage(req.Args)
    case "GetManifest":
        rb, err = h.GetManifest(req.Args)
    case "GetConfig":
        rb, err = h.GetConfig(req.Args)
    case "GetBlob":
        rb, err = h.GetBlob(req.Args)
    case "FinishPipe":
        rb, err = h.FinishPipe(req.Args)
    case "Shutdown":
        terminate = true
    default:
        err = fmt.Errorf("unknown method: %s", req.Method)
    }
    return
}

// Implementation of podman experimental-image-proxy
func (opts *proxyOptions) run(args []string, stdout io.Writer) error {
    handler := &proxyHandler{
        opts: opts,
        images: make(map[uint32]*openImage),
        activePipes: make(map[uint32]*activePipe),
    }

    // Convert the socket FD passed by client into a net.FileConn
    fd := os.NewFile(uintptr(opts.sockFd), "sock")
    fconn, err := net.FileConn(fd)
    if err != nil {
        return err
    }
    conn := fconn.(*net.UnixConn)

    // Allocate a buffer to copy the packet into
    buf := make([]byte, maxMsgSize)
    for {
        n, _, err := conn.ReadFrom(buf)
        if err != nil {
            if errors.Is(err, io.EOF) {
                return nil
            }
            return fmt.Errorf("reading socket: %v", err)
        }
        // Parse the request JSON
        readbuf := buf[0:n]
        var req request
        if err := json.Unmarshal(readbuf, &req); err != nil {
            rb := replyBuf{}
            rb.send(conn, fmt.Errorf("invalid request: %v", err))
        }

        rb, terminate, err := handler.processRequest(req)
        if terminate {
            return nil
        }
        rb.send(conn, err)
    }
}
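The doc comment at the top of proxy.go describes the client side of this protocol: create a SOCK_SEQPACKET socketpair, hand one end to `skopeo experimental-image-proxy --sockfd N`, then exchange JSON requests; bulk data arrives on pipe fds passed alongside replies, and each pipe must be closed out with FinishPipe so streaming errors (including GetBlob digest failures) are observed. A rough Go client sketch of just the Initialize handshake under those assumptions (Linux-only, error handling abbreviated, not part of the skopeo tree):

```go
//go:build linux

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"syscall"
)

// request/reply mirror the JSON structures defined in proxy.go.
type request struct {
	Method string        `json:"method"`
	Args   []interface{} `json:"args"`
}

type reply struct {
	Success bool        `json:"success"`
	Value   interface{} `json:"value"`
	PipeID  uint32      `json:"pipeid"`
	Error   string      `json:"error"`
}

func main() {
	// Create the SOCK_SEQPACKET pair the proxy expects; one end stays here,
	// the other is handed to the child as fd 3 (hence --sockfd 3).
	fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_SEQPACKET, 0)
	if err != nil {
		panic(err)
	}
	mine := os.NewFile(uintptr(fds[0]), "proxy-client")
	theirs := os.NewFile(uintptr(fds[1]), "proxy-server")

	cmd := exec.Command("skopeo", "experimental-image-proxy", "--sockfd", "3")
	cmd.Stderr = os.Stderr
	cmd.ExtraFiles = []*os.File{theirs} // becomes fd 3 in the child
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	theirs.Close() // the child now owns its copy

	// Initialize takes no arguments and returns the protocol version.
	req, _ := json.Marshal(request{Method: "Initialize", Args: []interface{}{}})
	if _, err := mine.Write(req); err != nil {
		panic(err)
	}

	buf := make([]byte, 32*1024) // matches maxMsgSize in proxy.go
	n, err := mine.Read(buf)
	if err != nil {
		panic(err)
	}
	var rep reply
	if err := json.Unmarshal(buf[:n], &rep); err != nil {
		panic(err)
	}
	fmt.Println("proxy protocol version:", rep.Value, "success:", rep.Success)

	// Later calls (OpenImage, GetManifest, GetBlob) additionally return a pipe fd
	// via SCM_RIGHTS; reading that requires ReadMsgUnix plus
	// syscall.ParseSocketControlMessage, followed by FinishPipe on the pipeid.
	mine.Close()
	cmd.Wait()
}
```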
cmd/skopeo/proxy_windows.go (new file, 30 lines)
@@ -0,0 +1,30 @@
//go:build windows
// +build windows

package main

import (
    "fmt"
    "io"

    "github.com/spf13/cobra"
)

type proxyOptions struct {
    global *globalOptions
}

func proxyCmd(global *globalOptions) *cobra.Command {
    opts := proxyOptions{global: global}
    cmd := &cobra.Command{
        RunE: commandAction(opts.run),
        Args: cobra.ExactArgs(0),
        // Not stabilized yet
        Hidden: true,
    }
    return cmd
}

func (opts *proxyOptions) run(args []string, stdout io.Writer) error {
    return fmt.Errorf("This command is not supported on Windows")
}
@@ -11,6 +11,7 @@ import (
     "regexp"
     "strings"

+    commonFlag "github.com/containers/common/pkg/flag"
     "github.com/containers/common/pkg/retry"
     "github.com/containers/image/v5/copy"
     "github.com/containers/image/v5/directory"
@@ -32,14 +33,14 @@ type syncOptions struct {
     srcImage *imageOptions // Source image options
     destImage *imageDestOptions // Destination image options
     retryOpts *retry.RetryOptions
-    removeSignatures bool // Do not copy signatures from the source image
-    signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
-    format optionalString // Force conversion of the image to a specified format
-    source string // Source repository name
-    destination string // Destination registry name
-    scoped bool // When true, namespace copied images at destination using the source repository name
-    all bool // Copy all of the images if an image in the source is a list
-    keepGoing bool // Whether or not to abort the sync if there are any errors during syncing the images
+    removeSignatures bool // Do not copy signatures from the source image
+    signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
+    format commonFlag.OptionalString // Force conversion of the image to a specified format
+    source string // Source repository name
+    destination string // Destination registry name
+    scoped bool // When true, namespace copied images at destination using the source repository name
+    all bool // Copy all of the images if an image in the source is a list
+    keepGoing bool // Whether or not to abort the sync if there are any errors during syncing the images
 }

 // repoDescriptor contains information of a single repository used as a sync source.
@@ -100,7 +101,7 @@ See skopeo-sync(1) for details.
     flags := cmd.Flags()
     flags.BoolVar(&opts.removeSignatures, "remove-signatures", false, "Do not copy signatures from SOURCE images")
     flags.StringVar(&opts.signByFingerprint, "sign-by", "", "Sign the image using a GPG key with the specified `FINGERPRINT`")
-    flags.VarP(newOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks)`)
+    flags.VarP(commonFlag.NewOptionalStringValue(&opts.format), "format", "f", `MANIFEST TYPE (oci, v2s1, or v2s2) to use when syncing image(s) to a destination (default is manifest type of source, with fallbacks)`)
     flags.StringVarP(&opts.source, "src", "s", "", "SOURCE transport type")
     flags.StringVarP(&opts.destination, "dest", "d", "", "DESTINATION transport type")
     flags.BoolVar(&opts.scoped, "scoped", false, "Images at DESTINATION are prefix using the full source image path as scope")
@@ -545,8 +546,8 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) error {
     }

     var manifestType string
-    if opts.format.present {
-        manifestType, err = parseManifestFormat(opts.format.value)
+    if opts.format.Present() {
+        manifestType, err = parseManifestFormat(opts.format.Value())
         if err != nil {
             return err
         }
@@ -7,6 +7,7 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
commonFlag "github.com/containers/common/pkg/flag"
|
||||
"github.com/containers/common/pkg/retry"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/compression"
|
||||
@@ -45,7 +46,7 @@ func commandAction(handler func(args []string, stdout io.Writer) error) func(cmd
|
||||
// whether or not the value actually ends up being used.
|
||||
// DO NOT ADD ANY NEW USES OF THIS; just call dockerImageFlags with an appropriate, possibly empty, flagPrefix.
|
||||
type deprecatedTLSVerifyOption struct {
|
||||
tlsVerify optionalBool // FIXME FIXME: Warn if this is used, or even if it is ignored.
|
||||
tlsVerify commonFlag.OptionalBool // FIXME FIXME: Warn if this is used, or even if it is ignored.
|
||||
}
|
||||
|
||||
// warnIfUsed warns if tlsVerify was set by the user, and suggests alternatives (which should
|
||||
@@ -53,7 +54,7 @@ type deprecatedTLSVerifyOption struct {
|
||||
// Every user should call this as part of handling the CLI, whether or not the value actually
|
||||
// ends up being used.
|
||||
func (opts *deprecatedTLSVerifyOption) warnIfUsed(alternatives []string) {
|
||||
if opts.tlsVerify.present {
|
||||
if opts.tlsVerify.Present() {
|
||||
logrus.Warnf("'--tls-verify' is deprecated, instead use: %s", strings.Join(alternatives, ", "))
|
||||
}
|
||||
}
|
||||
@@ -63,7 +64,7 @@ func (opts *deprecatedTLSVerifyOption) warnIfUsed(alternatives []string) {
|
||||
func deprecatedTLSVerifyFlags() (pflag.FlagSet, *deprecatedTLSVerifyOption) {
|
||||
opts := deprecatedTLSVerifyOption{}
|
||||
fs := pflag.FlagSet{}
|
||||
flag := optionalBoolFlag(&fs, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the container registry")
|
||||
flag := commonFlag.OptionalBoolFlag(&fs, &opts.tlsVerify, "tls-verify", "require HTTPS and verify certificates when accessing the container registry")
|
||||
flag.Hidden = true
|
||||
return fs, &opts
|
||||
}
|
||||
@@ -89,11 +90,13 @@ type dockerImageOptions struct {
|
||||
global *globalOptions // May be shared across several imageOptions instances.
|
||||
shared *sharedImageOptions // May be shared across several imageOptions instances.
|
||||
deprecatedTLSVerify *deprecatedTLSVerifyOption // May be shared across several imageOptions instances, or nil.
|
||||
authFilePath optionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
|
||||
credsOption optionalString // username[:password] for accessing a registry
|
||||
registryToken optionalString // token to be used directly as a Bearer token when accessing the registry
|
||||
authFilePath commonFlag.OptionalString // Path to a */containers/auth.json (prefixed version to override shared image option).
|
||||
credsOption commonFlag.OptionalString // username[:password] for accessing a registry
|
||||
userName commonFlag.OptionalString // username for accessing a registry
|
||||
password commonFlag.OptionalString // password for accessing a registry
|
||||
registryToken commonFlag.OptionalString // token to be used directly as a Bearer token when accessing the registry
|
||||
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
|
||||
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
tlsVerify commonFlag.OptionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
noCreds bool // Access the registry anonymously
|
||||
}
|
||||
|
||||
@@ -119,18 +122,20 @@ func dockerImageFlags(global *globalOptions, shared *sharedImageOptions, depreca
|
||||
fs := pflag.FlagSet{}
|
||||
if flagPrefix != "" {
|
||||
// the non-prefixed flag is handled by a shared flag.
|
||||
fs.Var(newOptionalStringValue(&flags.authFilePath), flagPrefix+"authfile", "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json")
|
||||
fs.Var(commonFlag.NewOptionalStringValue(&flags.authFilePath), flagPrefix+"authfile", "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json")
|
||||
}
|
||||
fs.Var(newOptionalStringValue(&flags.credsOption), flagPrefix+"creds", "Use `USERNAME[:PASSWORD]` for accessing the registry")
|
||||
fs.Var(commonFlag.NewOptionalStringValue(&flags.credsOption), flagPrefix+"creds", "Use `USERNAME[:PASSWORD]` for accessing the registry")
|
||||
fs.Var(commonFlag.NewOptionalStringValue(&flags.userName), flagPrefix+"username", "Username for accessing the registry")
|
||||
fs.Var(commonFlag.NewOptionalStringValue(&flags.password), flagPrefix+"password", "Password for accessing the registry")
|
||||
if credsOptionAlias != "" {
|
||||
// This is horribly ugly, but we need to support the old option forms of (skopeo copy) for compatibility.
|
||||
// Don't add any more cases like this.
|
||||
f := fs.VarPF(newOptionalStringValue(&flags.credsOption), credsOptionAlias, "", "Use `USERNAME[:PASSWORD]` for accessing the registry")
|
||||
f := fs.VarPF(commonFlag.NewOptionalStringValue(&flags.credsOption), credsOptionAlias, "", "Use `USERNAME[:PASSWORD]` for accessing the registry")
|
||||
f.Hidden = true
|
||||
}
|
||||
fs.Var(newOptionalStringValue(&flags.registryToken), flagPrefix+"registry-token", "Provide a Bearer token for accessing the registry")
|
||||
fs.Var(commonFlag.NewOptionalStringValue(&flags.registryToken), flagPrefix+"registry-token", "Provide a Bearer token for accessing the registry")
|
||||
fs.StringVar(&flags.dockerCertPath, flagPrefix+"cert-dir", "", "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon")
|
||||
optionalBoolFlag(&fs, &flags.tlsVerify, flagPrefix+"tls-verify", "require HTTPS and verify certificates when talking to the container registry or daemon")
|
||||
commonFlag.OptionalBoolFlag(&fs, &flags.tlsVerify, flagPrefix+"tls-verify", "require HTTPS and verify certificates when talking to the container registry or daemon")
|
||||
fs.BoolVar(&flags.noCreds, flagPrefix+"no-creds", false, "Access the registry anonymously")
|
||||
return fs, &flags
|
||||
}
|
||||
@@ -164,31 +169,49 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
ctx.AuthFilePath = opts.shared.authFilePath
|
||||
ctx.DockerDaemonHost = opts.dockerDaemonHost
|
||||
ctx.DockerDaemonCertPath = opts.dockerCertPath
|
||||
if opts.dockerImageOptions.authFilePath.present {
|
||||
ctx.AuthFilePath = opts.dockerImageOptions.authFilePath.value
|
||||
if opts.dockerImageOptions.authFilePath.Present() {
|
||||
ctx.AuthFilePath = opts.dockerImageOptions.authFilePath.Value()
|
||||
}
|
||||
if opts.deprecatedTLSVerify != nil && opts.deprecatedTLSVerify.tlsVerify.present {
|
||||
if opts.deprecatedTLSVerify != nil && opts.deprecatedTLSVerify.tlsVerify.Present() {
|
||||
// If both this deprecated option and a non-deprecated option are present, we use the latter value.
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.deprecatedTLSVerify.tlsVerify.value)
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.deprecatedTLSVerify.tlsVerify.Value())
|
||||
}
|
||||
if opts.tlsVerify.present {
|
||||
ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
|
||||
if opts.tlsVerify.Present() {
|
||||
ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.Value()
|
||||
}
|
||||
if opts.tlsVerify.present {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
|
||||
if opts.tlsVerify.Present() {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.Value())
|
||||
}
|
||||
if opts.credsOption.present && opts.noCreds {
|
||||
if opts.credsOption.Present() && opts.noCreds {
|
||||
return nil, errors.New("creds and no-creds cannot be specified at the same time")
|
||||
}
|
||||
if opts.credsOption.present {
|
||||
if opts.userName.Present() && opts.noCreds {
|
||||
return nil, errors.New("username and no-creds cannot be specified at the same time")
|
||||
}
|
||||
if opts.credsOption.Present() && opts.userName.Present() {
|
||||
return nil, errors.New("creds and username cannot be specified at the same time")
|
||||
}
|
||||
// if any of username or password is present, then both are expected to be present
|
||||
if opts.userName.Present() != opts.password.Present() {
|
||||
if opts.userName.Present() {
|
||||
return nil, errors.New("password must be specified when username is specified")
|
||||
}
|
||||
return nil, errors.New("username must be specified when password is specified")
|
||||
}
|
||||
if opts.credsOption.Present() {
|
||||
var err error
|
||||
ctx.DockerAuthConfig, err = getDockerAuth(opts.credsOption.value)
|
||||
ctx.DockerAuthConfig, err = getDockerAuth(opts.credsOption.Value())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else if opts.userName.Present() {
|
||||
ctx.DockerAuthConfig = &types.DockerAuthConfig{
|
||||
Username: opts.userName.Value(),
|
||||
Password: opts.password.Value(),
|
||||
}
|
||||
}
|
||||
if opts.registryToken.present {
|
||||
ctx.DockerBearerRegistryToken = opts.registryToken.value
|
||||
if opts.registryToken.Present() {
|
||||
ctx.DockerBearerRegistryToken = opts.registryToken.Value()
|
||||
}
|
||||
if opts.noCreds {
|
||||
ctx.DockerAuthConfig = &types.DockerAuthConfig{}
|
||||
@@ -200,11 +223,12 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
// imageDestOptions is a superset of imageOptions specialized for image destinations.
|
||||
type imageDestOptions struct {
|
||||
*imageOptions
|
||||
dirForceCompression bool // Compress layers when saving to the dir: transport
|
||||
dirForceDecompression bool // Decompress layers when saving to the dir: transport
|
||||
ociAcceptUncompressedLayers bool // Whether to accept uncompressed layers in the oci: transport
|
||||
compressionFormat string // Format to use for the compression
|
||||
compressionLevel optionalInt // Level to use for the compression
|
||||
dirForceCompression bool // Compress layers when saving to the dir: transport
|
||||
dirForceDecompression bool // Decompress layers when saving to the dir: transport
|
||||
ociAcceptUncompressedLayers bool // Whether to accept uncompressed layers in the oci: transport
|
||||
compressionFormat string // Format to use for the compression
|
||||
compressionLevel commonFlag.OptionalInt // Level to use for the compression
|
||||
precomputeDigests bool // Precompute digests to dedup layers when saving to the docker: transport
|
||||
}
|
||||
|
||||
// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
|
||||
@@ -217,7 +241,8 @@ func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecate
|
||||
fs.BoolVar(&opts.dirForceDecompression, flagPrefix+"decompress", false, "Decompress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
|
||||
fs.BoolVar(&opts.ociAcceptUncompressedLayers, flagPrefix+"oci-accept-uncompressed-layers", false, "Allow uncompressed image layers when saving to an OCI image using the 'oci' transport. (default is to compress things that aren't compressed)")
|
||||
fs.StringVar(&opts.compressionFormat, flagPrefix+"compress-format", "", "`FORMAT` to use for the compression")
|
||||
fs.Var(newOptionalIntValue(&opts.compressionLevel), flagPrefix+"compress-level", "`LEVEL` to use for the compression")
|
||||
fs.Var(commonFlag.NewOptionalIntValue(&opts.compressionLevel), flagPrefix+"compress-level", "`LEVEL` to use for the compression")
|
||||
fs.BoolVar(&opts.precomputeDigests, flagPrefix+"precompute-digests", false, "Precompute digests to prevent uploading layers already on the registry using the 'docker' transport.")
|
||||
return fs, &opts
|
||||
}
|
||||
|
||||
@@ -239,9 +264,11 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
}
|
||||
ctx.CompressionFormat = &cf
|
||||
}
|
||||
if opts.compressionLevel.present {
|
||||
ctx.CompressionLevel = &opts.compressionLevel.value
|
||||
if opts.compressionLevel.Present() {
|
||||
value := opts.compressionLevel.Value()
|
||||
ctx.CompressionLevel = &value
|
||||
}
|
||||
ctx.DockerRegistryPushPrecomputeDigests = opts.precomputeDigests
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
|
||||
@@ -167,26 +167,28 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
"--dest-tls-verify=false",
|
||||
"--dest-creds", "creds-user:creds-password",
|
||||
"--dest-registry-token", "faketoken",
|
||||
"--dest-precompute-digests=true",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
VariantChoice: "overridden-variant",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
DockerBearerRegistryToken: "faketoken",
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
DirForceCompress: true,
|
||||
BigFilesTemporaryDir: "/srv",
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
VariantChoice: "overridden-variant",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
DockerBearerRegistryToken: "faketoken",
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
DirForceCompress: true,
|
||||
BigFilesTemporaryDir: "/srv",
|
||||
DockerRegistryPushPrecomputeDigests: true,
|
||||
}, res)
|
||||
|
||||
// Global/per-command tlsVerify behavior is tested in TestTLSVerifyFlags.
|
||||
@@ -197,6 +199,54 @@ func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// TestImageOptionsUsernamePassword verifies that using the username and password
|
||||
// options works as expected
|
||||
func TestImageOptionsUsernamePassword(t *testing.T) {
|
||||
for _, command := range []struct {
|
||||
commandArgs []string
|
||||
expectedAuthConfig *types.DockerAuthConfig // data to expect, or nil if an error is expected
|
||||
}{
|
||||
// Set only username/password (without --creds), expected to pass
|
||||
{
|
||||
commandArgs: []string{"--dest-username", "foo", "--dest-password", "bar"},
|
||||
expectedAuthConfig: &types.DockerAuthConfig{Username: "foo", Password: "bar"},
|
||||
},
|
||||
// no username but set password, expect error
|
||||
{
|
||||
commandArgs: []string{"--dest-password", "foo"},
|
||||
expectedAuthConfig: nil,
|
||||
},
|
||||
// set username but no password, expected to fail (we currently don't allow a username without a password)
|
||||
{
|
||||
commandArgs: []string{"--dest-username", "bar"},
|
||||
expectedAuthConfig: nil,
|
||||
},
|
||||
// set username with --creds, expected to fail
|
||||
{
|
||||
commandArgs: []string{"--dest-username", "bar", "--dest-creds", "hello:world", "--dest-password", "foo"},
|
||||
expectedAuthConfig: nil,
|
||||
},
|
||||
// set username with --no-creds, expected to fail
|
||||
{
|
||||
commandArgs: []string{"--dest-username", "bar", "--dest-no-creds", "--dest-password", "foo"},
|
||||
expectedAuthConfig: nil,
|
||||
},
|
||||
} {
|
||||
opts := fakeImageDestOptions(t, "dest-", true, []string{}, command.commandArgs)
|
||||
// parse the command options
|
||||
res, err := opts.newSystemContext()
|
||||
if command.expectedAuthConfig == nil {
|
||||
assert.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
DockerRegistryUserAgent: defaultUserAgent,
|
||||
DockerAuthConfig: command.expectedAuthConfig,
|
||||
}, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTLSVerifyFlags(t *testing.T) {
|
||||
type systemContextOpts interface { // Either *imageOptions or *imageDestOptions
|
||||
newSystemContext() (*types.SystemContext, error)
|
||||
|
||||
@@ -51,6 +51,10 @@ _skopeo_copy() {
|
||||
--dest-daemon-host
|
||||
--src-registry-token
|
||||
--dest-registry-token
|
||||
--src-username
|
||||
--src-password
|
||||
--dest-username
|
||||
--dest-password
|
||||
"
|
||||
|
||||
local boolean_options="
|
||||
@@ -61,6 +65,7 @@ _skopeo_copy() {
|
||||
--src-no-creds
|
||||
--dest-no-creds
|
||||
--dest-oci-accept-uncompressed-layers
|
||||
--dest-precompute-digests
|
||||
"
|
||||
|
||||
local transports
|
||||
@@ -87,6 +92,10 @@ _skopeo_sync() {
|
||||
--src-cert-dir
|
||||
--src-creds
|
||||
--src-registry-token
|
||||
--src-username
|
||||
--src-password
|
||||
--dest-username
|
||||
--dest-password
|
||||
"
|
||||
|
||||
local boolean_options="
|
||||
@@ -116,12 +125,15 @@ _skopeo_inspect() {
|
||||
--format
|
||||
--retry-times
|
||||
--registry-token
|
||||
--username
|
||||
--password
|
||||
"
|
||||
local boolean_options="
|
||||
--config
|
||||
--raw
|
||||
--tls-verify
|
||||
--no-creds
|
||||
--no-tags -n
|
||||
"
|
||||
|
||||
local transports
|
||||
@@ -163,6 +175,8 @@ _skopeo_delete() {
|
||||
--creds
|
||||
--cert-dir
|
||||
--registry-token
|
||||
--username
|
||||
--password
|
||||
"
|
||||
local boolean_options="
|
||||
--tls-verify
|
||||
@@ -183,6 +197,8 @@ _skopeo_layers() {
|
||||
--creds
|
||||
--cert-dir
|
||||
--registry-token
|
||||
--username
|
||||
--password
|
||||
"
|
||||
local boolean_options="
|
||||
--tls-verify
|
||||
@@ -197,6 +213,8 @@ _skopeo_list_repository_tags() {
|
||||
--creds
|
||||
--cert-dir
|
||||
--registry-token
|
||||
--username
|
||||
--password
|
||||
"
|
||||
|
||||
local boolean_options="
|
||||
|
||||
@@ -58,10 +58,6 @@ _run_setup() {
|
||||
return
|
||||
fi
|
||||
|
||||
# This is required as part of the standard Fedora GCE VM setup
|
||||
growpart /dev/sda 1
|
||||
resize2fs /dev/sda1
|
||||
|
||||
# VMs come with the distro skopeo package pre-installed
|
||||
dnf erase -y skopeo
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
# This image can be used to create a secured container
|
||||
# that runs safely with privileges within the container.
|
||||
#
|
||||
FROM registry.fedoraproject.org/fedora:33
|
||||
FROM registry.fedoraproject.org/fedora:latest
|
||||
|
||||
# Don't include container-selinux and remove
|
||||
# directories used by yum that are just taking
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
# This image can be used to create a secured container
|
||||
# that runs safely with privileges within the container.
|
||||
#
|
||||
FROM registry.fedoraproject.org/fedora:33
|
||||
FROM registry.fedoraproject.org/fedora:latest
|
||||
|
||||
# Don't include container-selinux and remove
|
||||
# directories used by yum that are just taking
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
# This image can be used to create a secured container
|
||||
# that runs safely with privileges within the container.
|
||||
#
|
||||
FROM registry.fedoraproject.org/fedora:33
|
||||
FROM registry.fedoraproject.org/fedora:latest
|
||||
|
||||
# Don't include container-selinux and remove
|
||||
# directories used by yum that are just taking
|
||||
|
||||
@@ -160,10 +160,30 @@ Bearer token for accessing the source registry.
|
||||
|
||||
Bearer token for accessing the destination registry.
|
||||
|
||||
**--dest-precompute-digests** _bool-value_
|
||||
|
||||
Precompute digests to ensure that layers which already exist on the destination registry are not uploaded. Layers whose digests are initially unknown (e.g. when compressing "on the fly") will be temporarily streamed to disk.
|
||||
|
||||
**--retry-times**
|
||||
|
||||
The number of times to retry. Retry wait time will be exponentially increased based on the number of failed attempts.
|
||||
|
||||
**--src-username**
|
||||
|
||||
The username to access the source registry.
|
||||
|
||||
**--src-password**
|
||||
|
||||
The password to access the source registry.
|
||||
|
||||
**--dest-username**
|
||||
|
||||
The username to access the destination registry.
|
||||
|
||||
**--dest-password**
|
||||
|
||||
The password to access the destination registry.
|
||||
|
||||
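For example, assuming a hypothetical source registry `source.example.com` and destination `dest.example.com` with placeholder credentials, the credential and digest options above might be combined like this:

```sh
# Placeholder registries and credentials; substitute your own.
$ skopeo copy \
    --src-username alice --src-password src-secret \
    --dest-username bob --dest-password dest-secret \
    --dest-precompute-digests \
    --retry-times 3 \
    docker://source.example.com/myapp:latest \
    docker://dest.example.com/myapp:latest
```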
## EXAMPLES
|
||||
|
||||
To just copy an image from one registry to another:
|
||||
|
||||
@@ -64,6 +64,14 @@ Directory to use to share blobs across OCI repositories.
|
||||
|
||||
Require HTTPS and verify certificates when talking to the container registry or daemon. Defaults to the registries.conf setting.
|
||||
|
||||
**--username**
|
||||
|
||||
The username to access the registry.
|
||||
|
||||
**--password**
|
||||
|
||||
The password to access the registry.
|
||||
|
||||
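For example, authenticating with an explicit username and password while marking an image for deletion might look like this (the credentials are placeholders):

```sh
$ skopeo delete --username alice --password secret \
    docker://registry.example.com/example/pause:latest
```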
## EXAMPLES
|
||||
|
||||
Mark image example/pause for deletion from the registry.example.com registry:
|
||||
|
||||
@@ -69,6 +69,18 @@ Directory to use to share blobs across OCI repositories.
|
||||
|
||||
Require HTTPS and verify certificates when talking to the container registry or daemon. Defaults to the registries.conf setting.
|
||||
|
||||
**--username**
|
||||
|
||||
The username to access the registry.
|
||||
|
||||
**--password**
|
||||
|
||||
The password to access the registry.
|
||||
|
||||
**--no-tags**, **-n**=_bool_
|
||||
|
||||
Do not list the available tags from the repository in the output. When `true`, the `RepoTags` array will be empty. Defaults to `false`, which includes all available tags.
|
||||
|
||||
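For example, inspecting a private image with explicit credentials while skipping the tag listing might look like this (the image name and credentials are placeholders):

```sh
$ skopeo inspect --no-tags --username alice --password secret \
    docker://registry.example.com/private/image:latest
```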
## EXAMPLES
|
||||
|
||||
To review information for the image fedora from the docker.io registry:
|
||||
@@ -98,6 +110,42 @@ $ skopeo inspect docker://docker.io/fedora
|
||||
}
|
||||
```
|
||||
|
||||
To inspect python from the docker.io registry and not show the available tags:
|
||||
```sh
|
||||
$ skopeo inspect --no-tags docker://docker.io/library/python
|
||||
{
|
||||
"Name": "docker.io/library/python",
|
||||
"Digest": "sha256:5ca194a80ddff913ea49c8154f38da66a41d2b73028c5cf7e46bc3c1d6fda572",
|
||||
"RepoTags": [],
|
||||
"Created": "2021-10-05T23:40:54.936108045Z",
|
||||
"DockerVersion": "20.10.7",
|
||||
"Labels": null,
|
||||
"Architecture": "amd64",
|
||||
"Os": "linux",
|
||||
"Layers": [
|
||||
"sha256:df5590a8898bedd76f02205dc8caa5cc9863267dbcd8aac038bcd212688c1cc7",
|
||||
"sha256:705bb4cb554eb7751fd21a994f6f32aee582fbe5ea43037db6c43d321763992b",
|
||||
"sha256:519df5fceacdeaadeec563397b1d9f4d7c29c9f6eff879739cab6f0c144f49e1",
|
||||
"sha256:ccc287cbeddc96a0772397ca00ec85482a7b7f9a9fac643bfddd87b932f743db",
|
||||
"sha256:e3f8e6af58ed3a502f0c3c15dce636d9d362a742eb5b67770d0cfcb72f3a9884",
|
||||
"sha256:aebed27b2d86a5a3a2cbe186247911047a7e432b9d17daad8f226597c0ea4276",
|
||||
"sha256:54c32182bdcc3041bf64077428467109a70115888d03f7757dcf614ff6d95ebe",
|
||||
"sha256:cc8b7caedab13af07adf4836e13af2d4e9e54d794129b0fd4c83ece6b1112e86",
|
||||
"sha256:462c3718af1d5cdc050cfba102d06c26f78fe3b738ce2ca2eb248034b1738945"
|
||||
],
|
||||
"Env": [
|
||||
"PATH=/usr/local/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
"LANG=C.UTF-8",
|
||||
"GPG_KEY=A035C8C19219BA821ECEA86B64E628F8D684696D",
|
||||
"PYTHON_VERSION=3.10.0",
|
||||
"PYTHON_PIP_VERSION=21.2.4",
|
||||
"PYTHON_SETUPTOOLS_VERSION=57.5.0",
|
||||
"PYTHON_GET_PIP_URL=https://github.com/pypa/get-pip/raw/d781367b97acf0ece7e9e304bf281e99b618bf10/public/get-pip.py",
|
||||
"PYTHON_GET_PIP_SHA256=01249aa3e58ffb3e1686b7141b4e9aac4d398ef4ac3012ed9dff8dd9f685ffe0"
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
```
|
||||
$ /bin/skopeo inspect --config docker://registry.fedoraproject.org/fedora --format "{{ .Architecture }}"
|
||||
amd64
|
||||
|
||||
@@ -43,6 +43,14 @@ The number of times to retry. Retry wait time will be exponentially increased ba
|
||||
|
||||
Require HTTPS and verify certificates when talking to the container registry or daemon. Defaults to the registries.conf setting.
|
||||
|
||||
**--username**
|
||||
|
||||
The username to access the registry.
|
||||
|
||||
**--password**
|
||||
|
||||
The password to access the registry.
|
||||
|
||||
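For example, listing the tags of a repository that requires authentication might look like this (the repository and credentials are placeholders):

```sh
$ skopeo list-tags --username alice --password secret \
    docker://registry.example.com/private/image
```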
## REPOSITORY NAMES
|
||||
|
||||
Repository names are transport-specific references as each transport may have its own concept of a "repository" and "tags". Currently, only the Docker transport is supported.
|
||||
|
||||
@@ -91,6 +91,22 @@ Print usage statement.
|
||||
**--keep-going**
|
||||
If any errors occur while copying images, those errors are logged and the process continues syncing the rest of the images, finally failing at the end.
|
||||
|
||||
**--src-username**
|
||||
|
||||
The username to access the source registry.
|
||||
|
||||
**--src-password**
|
||||
|
||||
The password to access the source registry.
|
||||
|
||||
**--dest-username**
|
||||
|
||||
The username to access the destination registry.
|
||||
|
||||
**--dest-password**
|
||||
|
||||
The password to access the destination registry.
|
||||
|
||||
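For example, a registry-to-registry sync that authenticates to both sides might look like this (the registry names and credentials are placeholders):

```sh
$ skopeo sync --src docker --dest docker \
    --src-username alice --src-password src-secret \
    --dest-username bob --dest-password dest-secret \
    registry.example.com/images/busybox registry.local.example.com/images
```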
## EXAMPLES
|
||||
|
||||
### Synchronizing to a local directory
|
||||
|
||||
go.mod
@@ -3,16 +3,12 @@ module github.com/containers/skopeo
|
||||
go 1.12
|
||||
|
||||
require (
|
||||
github.com/bits-and-blooms/bitset v1.2.1 // indirect
|
||||
github.com/containerd/containerd v1.5.7 // indirect
|
||||
github.com/containers/common v0.46.0
|
||||
github.com/containers/image/v5 v5.16.1
|
||||
github.com/containers/common v0.46.1-0.20211026130826-7abfd453c86f
|
||||
github.com/containers/image/v5 v5.17.0
|
||||
github.com/containers/ocicrypt v1.1.2
|
||||
github.com/containers/storage v1.37.0
|
||||
github.com/docker/docker v20.10.9+incompatible
|
||||
github.com/docker/docker v20.10.11+incompatible
|
||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
|
||||
github.com/opencontainers/image-tools v1.0.0-rc3
|
||||
@@ -23,11 +19,6 @@ require (
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
|
||||
golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b // indirect
|
||||
golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20211005153810-c76a74d43a8e // indirect
|
||||
google.golang.org/grpc v1.41.0 // indirect
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
go.sum
@@ -75,7 +75,6 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2
|
||||
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
|
||||
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
|
||||
github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||
github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||
github.com/Microsoft/hcsshim v0.8.22 h1:CulZ3GW8sNJExknToo+RWD+U+6ZM5kkNfuxywSDPd08=
|
||||
github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0=
|
||||
@@ -110,8 +109,6 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bits-and-blooms/bitset v1.2.1 h1:M+/hrU9xlMp7t4TyTDQW97d3tRPVuKFC6zBEK16QnXY=
|
||||
github.com/bits-and-blooms/bitset v1.2.1/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
|
||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
@@ -207,7 +204,6 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ
|
||||
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
|
||||
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
|
||||
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.9.0 h1:PkB6BSTfOKX23erT2GkoUKkJEcXfNcyKskIViK770v8=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
@@ -229,11 +225,11 @@ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
|
||||
github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
|
||||
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
|
||||
github.com/containers/common v0.46.0 h1:95zB7kYBQJW+aK5xxZnaobCwoPyYOf85Y0yUx0E5aRg=
|
||||
github.com/containers/common v0.46.0/go.mod h1:zxv7KjdYddSGoWuLUVp6eSb++Ow1zmSMB2jwxuNB4cU=
|
||||
github.com/containers/image/v5 v5.16.0/go.mod h1:XgTpfAPLRGOd1XYyCU5cISFr777bLmOerCSpt/v7+Q4=
|
||||
github.com/containers/image/v5 v5.16.1 h1:4786k48/af3dOkVf9EM+xB880ArkXalICsGC4AXC6to=
|
||||
github.com/containers/common v0.46.1-0.20211026130826-7abfd453c86f h1:jFFIV8QvsPgwkJHh3tjfREFRwSeMq5M8lt8vklkZaOk=
|
||||
github.com/containers/common v0.46.1-0.20211026130826-7abfd453c86f/go.mod h1:pVvmLTLCOZE300e4rex/QDmpnRmEM/5aZ/YfCkkjgZo=
|
||||
github.com/containers/image/v5 v5.16.1/go.mod h1:mCvIFdzyyP1B0NBcZ80OIuaYqFn/OpFpaOMOMn1kU2M=
|
||||
github.com/containers/image/v5 v5.17.0 h1:KS5pro80CCsSp5qDBTMmSAWQo+xcBX19zUPExmYX2OQ=
|
||||
github.com/containers/image/v5 v5.17.0/go.mod h1:GnYVusVRFPMMTAAUkrcS8NNSpBp8oyrjOUe04AAmRr4=
|
||||
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b h1:Q8ePgVfHDplZ7U33NwHZkrVELsZP5fYj9pM5WBZB2GE=
|
||||
github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
|
||||
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
|
||||
@@ -241,8 +237,6 @@ github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgU
|
||||
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
|
||||
github.com/containers/ocicrypt v1.1.2 h1:Ez+GAMP/4GLix5Ywo/fL7O0nY771gsBIigiqUm1aXz0=
|
||||
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
|
||||
github.com/containers/storage v1.35.0/go.mod h1:qzYhasQP2/V9D9XdO+vRwkHBhsBO0oznMLzzRDQ8s20=
|
||||
github.com/containers/storage v1.36.0/go.mod h1:vbd3SKVQNHdmU5qQI6hTEcKPxnZkGqydG4f6uwrI5a8=
|
||||
github.com/containers/storage v1.37.0 h1:HVhDsur6sx889ZIZ1d1kEiOzv3gsr5q0diX2VZmOdSg=
|
||||
github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c+Q/45RlH6r4=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
@@ -285,8 +279,9 @@ github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v20.10.9+incompatible h1:JlsVnETOjM2RLQa0Cc1XCIspUdXW3Zenq9P54uXBm6k=
|
||||
github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v20.10.11+incompatible h1:OqzI/g/W54LczvhnccGqniFoQghHx3pklbLuhfXpqGo=
|
||||
github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
|
||||
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
@@ -399,7 +394,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
|
||||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@@ -517,8 +511,6 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
||||
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
@@ -544,6 +536,7 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
github.com/manifoldco/promptui v0.8.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ=
|
||||
github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg=
|
||||
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
@@ -610,8 +603,9 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
@@ -651,9 +645,9 @@ github.com/opencontainers/runtime-tools v0.9.0/go.mod h1:r3f7wjNzSs2extwzU3Y+6pK
|
||||
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
|
||||
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
|
||||
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
|
||||
github.com/opencontainers/selinux v1.8.4/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
|
||||
github.com/opencontainers/selinux v1.8.5 h1:OkT6bMHOQ1JQQO4ihjQ49sj0+wciDcjziSVTRn8VeTA=
|
||||
github.com/opencontainers/selinux v1.8.5/go.mod h1:HTvjPFoGMbpQsG886e3lQwnsRWtE4TC1OF3OUvG9FAo=
|
||||
github.com/opencontainers/selinux v1.9.1 h1:b4VPEF3O5JLZgdTDBmGepaaIbAo0GqoF6EBRq5f/g3Y=
|
||||
github.com/opencontainers/selinux v1.9.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913 h1:TnbXhKzrTOyuvWrjI8W6pcoI9XPbLHFXCdN2dtUw7Rw=
|
||||
github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
@@ -789,7 +783,6 @@ github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
|
||||
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
|
||||
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
|
||||
github.com/vbauerster/mpb/v7 v7.1.3/go.mod h1:X5GlohZw2fIpypMXWaKart+HGSAjpz49skxkDk+ZL7c=
|
||||
github.com/vbauerster/mpb/v7 v7.1.5 h1:vtUEUfQHmNeJETyF4AcRCOV6RC4wqFwNORy52UMXPbQ=
|
||||
github.com/vbauerster/mpb/v7 v7.1.5/go.mod h1:4M8+qAoQqV60WDNktBM5k05i1iTrXE7rjKOHEVkVlec=
|
||||
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
|
||||
@@ -1055,7 +1048,6 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs=
|
||||
|
||||
install.md
@@ -1,4 +1,4 @@
|
||||
# Installing from packages
|
||||
# Installing Skopeo
|
||||
|
||||
## Distribution Packages
|
||||
`skopeo` may already be packaged in your distribution.
|
||||
@@ -91,6 +91,20 @@ request](https://github.com/containers/skopeo/issues/715) and contributions are
|
||||
always welcome.
|
||||
|
||||
|
||||
## Container Images
|
||||
|
||||
Skopeo container images are available at `quay.io/skopeo/stable:latest`.
|
||||
For example,
|
||||
|
||||
```bash
|
||||
podman run docker://quay.io/skopeo/stable:latest copy --help
|
||||
```
|
||||
|
||||
[Read more](./contrib/skopeoimage/README.md).
|
||||
|
||||
|
||||
## Building from Source
|
||||
|
||||
Otherwise, read on for building and installing it from source:
|
||||
|
||||
To build the `skopeo` binary you need at least Go 1.12.
|
||||
@@ -98,8 +112,6 @@ To build the `skopeo` binary you need at least Go 1.12.
|
||||
There are two ways to build skopeo: in a container, or locally without a
|
||||
container. Choose the one which better matches your needs and environment.
|
||||
|
||||
## Building from Source
|
||||
|
||||
### Building without a container
|
||||
|
||||
Building without a container requires a bit more manual work and setup in your
|
||||
@@ -191,3 +203,41 @@ Finally, after the binary and documentation is built:
|
||||
```bash
|
||||
sudo make install
|
||||
```
|
||||
|
||||
### Building a static binary
|
||||
|
||||
There have been efforts in the past to produce and maintain static builds, but the maintainers prefer to run Skopeo using distro packages or within containers. This is because static builds of Skopeo tend to be unreliable and functionally restricted. Specifically:
|
||||
- Some features of Skopeo depend on non-Go libraries like `libgpgme` and `libdevmapper`.
|
||||
- Generating static Go binaries uses native Go libraries, which don't support e.g. `.local` or LDAP-based name resolution.
|
||||
|
||||
That being said, if you would like to build Skopeo statically, you might be able to do it by combining all the following steps.
|
||||
- Export environment variable `CGO_ENABLED=0` (disabling CGO causes Go to prefer native libraries when possible, instead of dynamically linking against system libraries).
|
||||
- Set the `BUILDTAGS=containers_image_openpgp` Make variable (this removes the dependency on `libgpgme` and its companion libraries).
|
||||
- Clear the `GO_DYN_FLAGS` Make variable (which otherwise seems to force the creation of a dynamic executable).
|
||||
|
||||
The following command implements these steps to produce a static binary in the `bin` subdirectory of the repository:
|
||||
|
||||
```bash
|
||||
docker run -v $PWD:/src -w /src -e CGO_ENABLED=0 golang \
|
||||
make BUILDTAGS=containers_image_openpgp GO_DYN_FLAGS=
|
||||
```
|
||||
|
||||
Keep in mind that the resulting binary is unsupported and might crash randomly. Only use if you know what you're doing!
|
||||
|
||||
For more information, history, and context about static builds, check the following issues:
|
||||
|
||||
- [#391] - Consider distributing statically built binaries as part of release
|
||||
- [#669] - Static build fails with segmentation violation
|
||||
- [#670] - Fixing static binary build using container
|
||||
- [#755] - Remove static and in-container targets from Makefile
|
||||
- [#932] - Add nix derivation for static builds
|
||||
- [#1336] - Unable to run skopeo on Fedora 30 (due to dyn lib dependency)
|
||||
- [#1478] - Publish binary releases to GitHub (request+discussion)
|
||||
|
||||
[#391]: https://github.com/containers/skopeo/issues/391
|
||||
[#669]: https://github.com/containers/skopeo/issues/669
|
||||
[#670]: https://github.com/containers/skopeo/issues/670
|
||||
[#755]: https://github.com/containers/skopeo/issues/755
|
||||
[#932]: https://github.com/containers/skopeo/issues/932
|
||||
[#1336]: https://github.com/containers/skopeo/issues/1336
|
||||
[#1478]: https://github.com/containers/skopeo/issues/1478
|
||||
|
||||
integration/procutils.go (new file)
@@ -0,0 +1,12 @@
|
||||
//go:build !linux
|
||||
// +build !linux
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// cmdLifecycleToParentIfPossible tries to exit if the parent process exits (only works on Linux)
|
||||
func cmdLifecycleToParentIfPossible(c *exec.Cmd) {
|
||||
}
|
||||
integration/procutils_linux.go (new file)
@@ -0,0 +1,14 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// cmdLifecycleToParentIfPossible is a thin wrapper around prctl(PR_SET_PDEATHSIG)
|
||||
// on Linux.
|
||||
func cmdLifecycleToParentIfPossible(c *exec.Cmd) {
|
||||
c.SysProcAttr = &syscall.SysProcAttr{
|
||||
Pdeathsig: syscall.SIGTERM,
|
||||
}
|
||||
}
|
||||
integration/proxy_test.go (new file)
@@ -0,0 +1,288 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"gopkg.in/check.v1"
|
||||
|
||||
"github.com/containers/image/v5/manifest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// This image is known to be x86_64 only right now
|
||||
const knownNotManifestListedImage_x8664 = "docker://quay.io/coreos/11bot"
|
||||
|
||||
const expectedProxySemverMajor = "0.2"
|
||||
|
||||
// request is copied from proxy.go
|
||||
// We intentionally copy to ensure that we catch any unexpected "API" changes
|
||||
// in the JSON.
|
||||
type request struct {
|
||||
// Method is the name of the function
|
||||
Method string `json:"method"`
|
||||
// Args is the arguments (parsed inside the function)
|
||||
Args []interface{} `json:"args"`
|
||||
}
|
||||
|
||||
// reply is copied from proxy.go
|
||||
type reply struct {
|
||||
// Success is true if and only if the call succeeded.
|
||||
Success bool `json:"success"`
|
||||
// Value is an arbitrary value (or values, as array/map) returned from the call.
|
||||
Value interface{} `json:"value"`
|
||||
// PipeID is an index into open pipes, and should be passed to FinishPipe
|
||||
PipeID uint32 `json:"pipeid"`
|
||||
// Error should be non-empty if Success == false
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
// maxMsgSize is also copied from proxy.go
|
||||
const maxMsgSize = 32 * 1024
|
||||
|
||||
type proxy struct {
|
||||
c *net.UnixConn
|
||||
}
|
||||
|
||||
type pipefd struct {
|
||||
// id is the remote identifier "pipeid"
|
||||
id uint
|
||||
fd *os.File
|
||||
}
|
||||
|
||||
func (self *proxy) call(method string, args []interface{}) (rval interface{}, fd *pipefd, err error) {
|
||||
req := request{
|
||||
Method: method,
|
||||
Args: args,
|
||||
}
|
||||
reqbuf, err := json.Marshal(&req)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
n, err := self.c.Write(reqbuf)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if n != len(reqbuf) {
|
||||
err = fmt.Errorf("short write during call of %d bytes", n)
|
||||
return
|
||||
}
|
||||
oob := make([]byte, syscall.CmsgSpace(1))
|
||||
replybuf := make([]byte, maxMsgSize)
|
||||
n, oobn, _, _, err := self.c.ReadMsgUnix(replybuf, oob)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("reading reply: %v", err)
|
||||
return
|
||||
}
|
||||
var reply reply
|
||||
err = json.Unmarshal(replybuf[0:n], &reply)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Failed to parse reply: %w", err)
|
||||
return
|
||||
}
|
||||
if !reply.Success {
|
||||
err = fmt.Errorf("remote error: %s", reply.Error)
|
||||
return
|
||||
}
|
||||
|
||||
if reply.PipeID > 0 {
|
||||
var scms []syscall.SocketControlMessage
|
||||
scms, err = syscall.ParseSocketControlMessage(oob[:oobn])
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to parse control message: %v", err)
|
||||
return
|
||||
}
|
||||
if len(scms) != 1 {
|
||||
err = fmt.Errorf("Expected 1 received fd, found %d", len(scms))
|
||||
return
|
||||
}
|
||||
var fds []int
|
||||
fds, err = syscall.ParseUnixRights(&scms[0])
|
||||
if err != nil {
|
||||
err = fmt.Errorf("failed to parse unix rights: %v", err)
|
||||
return
|
||||
}
|
||||
fd = &pipefd{
|
||||
fd: os.NewFile(uintptr(fds[0]), "replyfd"),
|
||||
id: uint(reply.PipeID),
|
||||
}
|
||||
}
|
||||
|
||||
rval = reply.Value
|
||||
return
|
||||
}
|
||||
|
||||
func (self *proxy) callNoFd(method string, args []interface{}) (rval interface{}, err error) {
|
||||
var fd *pipefd
|
||||
rval, fd, err = self.call(method, args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if fd != nil {
|
||||
err = fmt.Errorf("Unexpected fd from method %s", method)
|
||||
return
|
||||
}
|
||||
return rval, nil
|
||||
}
|
||||
|
||||
func (self *proxy) callReadAllBytes(method string, args []interface{}) (rval interface{}, buf []byte, err error) {
|
||||
var fd *pipefd
|
||||
rval, fd, err = self.call(method, args)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if fd == nil {
|
||||
err = fmt.Errorf("Expected fd from method %s", method)
|
||||
return
|
||||
}
|
||||
fetchchan := make(chan byteFetch)
|
||||
go func() {
|
||||
manifestBytes, err := ioutil.ReadAll(fd.fd)
|
||||
fetchchan <- byteFetch{
|
||||
content: manifestBytes,
|
||||
err: err,
|
||||
}
|
||||
}()
|
||||
_, err = self.callNoFd("FinishPipe", []interface{}{fd.id})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
select {
|
||||
case fetchRes := <-fetchchan:
|
||||
err = fetchRes.err
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
buf = fetchRes.content
|
||||
case <-time.After(5 * time.Minute):
|
||||
err = fmt.Errorf("timed out during proxy fetch")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func newProxy() (*proxy, error) {
|
||||
fds, err := syscall.Socketpair(syscall.AF_LOCAL, syscall.SOCK_SEQPACKET, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
myfd := os.NewFile(uintptr(fds[0]), "myfd")
|
||||
defer myfd.Close()
|
||||
theirfd := os.NewFile(uintptr(fds[1]), "theirfd")
|
||||
defer theirfd.Close()
|
||||
|
||||
mysock, err := net.FileConn(myfd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Note ExtraFiles starts at 3
|
||||
proc := exec.Command("skopeo", "experimental-image-proxy", "--sockfd", "3")
|
||||
proc.Stderr = os.Stderr
|
||||
cmdLifecycleToParentIfPossible(proc)
|
||||
proc.ExtraFiles = append(proc.ExtraFiles, theirfd)
|
||||
|
||||
if err = proc.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
p := &proxy{
|
||||
c: mysock.(*net.UnixConn),
|
||||
}
|
||||
|
||||
v, err := p.callNoFd("Initialize", nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
semver, ok := v.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("proxy Initialize: Unexpected value %T", v)
|
||||
}
|
||||
if !strings.HasPrefix(semver, expectedProxySemverMajor) {
|
||||
return nil, fmt.Errorf("Unexpected semver %s", semver)
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
check.Suite(&ProxySuite{})
|
||||
}
|
||||
|
||||
type ProxySuite struct {
|
||||
}
|
||||
|
||||
func (s *ProxySuite) SetUpSuite(c *check.C) {
|
||||
}
|
||||
|
||||
func (s *ProxySuite) TearDownSuite(c *check.C) {
|
||||
}
|
||||
|
||||
type byteFetch struct {
|
||||
content []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func runTestGetManifestAndConfig(p *proxy, img string) error {
|
||||
v, err := p.callNoFd("OpenImage", []interface{}{knownNotManifestListedImage_x8664})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
imgidv, ok := v.(float64)
|
||||
if !ok {
|
||||
return fmt.Errorf("OpenImage return value is %T", v)
|
||||
}
|
||||
imgid := uint32(imgidv)
|
||||
|
||||
v, manifestBytes, err := p.callReadAllBytes("GetManifest", []interface{}{imgid})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = manifest.OCI1FromManifest(manifestBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
v, configBytes, err := p.callReadAllBytes("GetConfig", []interface{}{imgid})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var config imgspecv1.ImageConfig
|
||||
err = json.Unmarshal(configBytes, &config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate that the config seems sane
|
||||
if len(config.Cmd) == 0 && len(config.Entrypoint) == 0 {
|
||||
return fmt.Errorf("No CMD or ENTRYPOINT set")
|
||||
}
|
||||
|
||||
_, err = p.callNoFd("CloseImage", []interface{}{imgid})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *ProxySuite) TestProxy(c *check.C) {
|
||||
p, err := newProxy()
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = runTestGetManifestAndConfig(p, knownNotManifestListedImage_x8664)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Testing image %s: %v", knownNotManifestListedImage_x8664, err)
|
||||
}
|
||||
c.Assert(err, check.IsNil)
|
||||
|
||||
err = runTestGetManifestAndConfig(p, knownListImage)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Testing image %s: %v", knownListImage, err)
|
||||
}
|
||||
c.Assert(err, check.IsNil)
|
||||
}
|
||||
@@ -108,4 +108,15 @@ END_EXPECT
|
||||
"os - variant - architecture of $img"
|
||||
}
|
||||
|
||||
@test "inspect: don't list tags" {
|
||||
remote_image=docker://quay.io/fedora/fedora
|
||||
# use --no-tags to not list any tags
|
||||
run_skopeo inspect --no-tags $remote_image
|
||||
inspect_output=$output
|
||||
# extract the content of "RepoTags" property from the JSON output
|
||||
repo_tags=$(jq '.RepoTags[]' <<<"$inspect_output")
|
||||
# verify that the RepoTags was empty
|
||||
expect_output --from="$repo_tags" "" "inspect --no-tags was expected to return empty RepoTags[]"
|
||||
}
|
||||
|
||||
# vim: filetype=sh
|
||||
|
||||
@@ -356,7 +356,7 @@ start_registry() {
|
||||
return
|
||||
fi
|
||||
|
||||
timeout=$(expr $timeout - 1)
|
||||
timeout=$(( timeout - 1 ))
|
||||
sleep 1
|
||||
done
|
||||
log_and_run $PODMAN logs $name
|
||||
|
||||
vendor/github.com/bits-and-blooms/bitset/.gitignore (generated, vendored)
@@ -1,26 +0,0 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
target
|
||||
vendor/github.com/bits-and-blooms/bitset/.travis.yml (generated, vendored)
@@ -1,37 +0,0 @@
|
||||
language: go
|
||||
|
||||
sudo: false
|
||||
|
||||
branches:
|
||||
except:
|
||||
- release
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- travis
|
||||
|
||||
go:
|
||||
- "1.11.x"
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
before_install:
|
||||
- if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi;
|
||||
- if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi;
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
before_script:
|
||||
- make deps
|
||||
|
||||
script:
|
||||
- make qa
|
||||
|
||||
after_failure:
|
||||
- cat ./target/test/report.xml
|
||||
|
||||
after_success:
|
||||
- if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi;
|
||||
vendor/github.com/bits-and-blooms/bitset/LICENSE (generated, vendored)
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2014 Will Fitzgerald. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
vendor/github.com/bits-and-blooms/bitset/README.md (generated, vendored)
@@ -1,93 +0,0 @@
|
||||
# bitset
|
||||
|
||||
*Go language library to map between non-negative integers and boolean values*
|
||||
|
||||
[](https://github.com/willf/bitset/actions?query=workflow%3ATest)
|
||||
[](https://goreportcard.com/report/github.com/willf/bitset)
|
||||
[](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc)
|
||||
|
||||
|
||||
## Description
|
||||
|
||||
Package bitset implements bitsets, a mapping between non-negative integers and boolean values.
|
||||
It should be more efficient than map[uint] bool.
|
||||
|
||||
It provides methods for setting, clearing, flipping, and testing individual integers.
|
||||
|
||||
But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits.
|
||||
|
||||
BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used.
|
||||
|
||||
Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining.
|
||||
|
||||
### Example use:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
|
||||
"github.com/bits-and-blooms/bitset"
|
||||
)
|
||||
|
||||
func main() {
|
||||
fmt.Printf("Hello from BitSet!\n")
|
||||
var b bitset.BitSet
|
||||
// play some Go Fish
|
||||
for i := 0; i < 100; i++ {
|
||||
card1 := uint(rand.Intn(52))
|
||||
card2 := uint(rand.Intn(52))
|
||||
b.Set(card1)
|
||||
if b.Test(card2) {
|
||||
fmt.Println("Go Fish!")
|
||||
}
|
||||
b.Clear(card1)
|
||||
}
|
||||
|
||||
// Chaining
|
||||
b.Set(10).Set(11)
|
||||
|
||||
for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) {
|
||||
fmt.Println("The following bit is set:", i)
|
||||
}
|
||||
if b.Intersection(bitset.New(100).Set(10)).Count() == 1 {
|
||||
fmt.Println("Intersection works.")
|
||||
} else {
|
||||
fmt.Println("Intersection doesn't work???")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets.
|
||||
|
||||
Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc
|
||||
|
||||
## Memory Usage
|
||||
|
||||
The memory usage of a bitset using N bits is at least N/8 bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring).
|
||||
|
||||
## Implementation Note
|
||||
|
||||
Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed.
|
||||
|
||||
It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
go get github.com/bits-and-blooms/bitset
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)")
|
||||
|
||||
## Running all tests
|
||||
|
||||
Before committing the code, please check if it passes tests, has adequate coverage, etc.
|
||||
```bash
|
||||
go test
|
||||
go test -cover
|
||||
```
|
||||
vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml (generated, vendored)
@@ -1,39 +0,0 @@
|
||||
# Go
# Build your Go project.
# Add steps that test, save build artifacts, deploy, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/go

trigger:
- master

pool:
  vmImage: 'Ubuntu-16.04'

variables:
  GOBIN: '$(GOPATH)/bin' # Go binaries path
  GOROOT: '/usr/local/go1.11' # Go installation path
  GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path
  modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code

steps:
- script: |
    mkdir -p '$(GOBIN)'
    mkdir -p '$(GOPATH)/pkg'
    mkdir -p '$(modulePath)'
    shopt -s extglob
    shopt -s dotglob
    mv !(gopath) '$(modulePath)'
    echo '##vso[task.prependpath]$(GOBIN)'
    echo '##vso[task.prependpath]$(GOROOT)/bin'
  displayName: 'Set up the Go workspace'

- script: |
    go version
    go get -v -t -d ./...
    if [ -f Gopkg.toml ]; then
        curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
        dep ensure
    fi
    go build -v .
  workingDirectory: '$(modulePath)'
  displayName: 'Get dependencies, then build'
954 vendor/github.com/bits-and-blooms/bitset/bitset.go generated vendored
@@ -1,954 +0,0 @@
/*
|
||||
Package bitset implements bitsets, a mapping
|
||||
between non-negative integers and boolean values. It should be more
|
||||
efficient than map[uint] bool.
|
||||
|
||||
It provides methods for setting, clearing, flipping, and testing
|
||||
individual integers.
|
||||
|
||||
But it also provides set intersection, union, difference,
|
||||
complement, and symmetric operations, as well as tests to
|
||||
check whether any, all, or no bits are set, and querying a
|
||||
bitset's current length and number of positive bits.
|
||||
|
||||
BitSets are expanded to the size of the largest set bit; the
|
||||
memory allocation is approximately Max bits, where Max is
|
||||
the largest set bit. BitSets are never shrunk. On creation,
|
||||
a hint can be given for the number of bits that will be used.
|
||||
|
||||
Many of the methods, including Set,Clear, and Flip, return
|
||||
a BitSet pointer, which allows for chaining.
|
||||
|
||||
Example use:
|
||||
|
||||
import "bitset"
|
||||
var b BitSet
|
||||
b.Set(10).Set(11)
|
||||
if b.Test(1000) {
|
||||
b.Clear(1000)
|
||||
}
|
||||
if B.Intersection(bitset.New(100).Set(10)).Count() > 1 {
|
||||
fmt.Println("Intersection works.")
|
||||
}
|
||||
|
||||
As an alternative to BitSets, one should check out the 'big' package,
|
||||
which provides a (less set-theoretical) view of bitsets.
|
||||
|
||||
*/
|
||||
package bitset
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// the wordSize of a bit set
|
||||
const wordSize = uint(64)
|
||||
|
||||
// log2WordSize is lg(wordSize)
|
||||
const log2WordSize = uint(6)
|
||||
|
||||
// allBits has every bit set
|
||||
const allBits uint64 = 0xffffffffffffffff
|
||||
|
||||
// default binary BigEndian
|
||||
var binaryOrder binary.ByteOrder = binary.BigEndian
|
||||
|
||||
// default json encoding base64.URLEncoding
|
||||
var base64Encoding = base64.URLEncoding
|
||||
|
||||
// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding)
|
||||
func Base64StdEncoding() { base64Encoding = base64.StdEncoding }
|
||||
|
||||
// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian)
|
||||
func LittleEndian() { binaryOrder = binary.LittleEndian }
|
||||
|
||||
// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0.
|
||||
type BitSet struct {
|
||||
length uint
|
||||
set []uint64
|
||||
}
|
||||
|
||||
// Error is used to distinguish errors (panics) generated in this package.
|
||||
type Error string
|
||||
|
||||
// safeSet will fixup b.set to be non-nil and return the field value
|
||||
func (b *BitSet) safeSet() []uint64 {
|
||||
if b.set == nil {
|
||||
b.set = make([]uint64, wordsNeeded(0))
|
||||
}
|
||||
return b.set
|
||||
}
|
||||
|
||||
// From is a constructor used to create a BitSet from an array of integers
|
||||
func From(buf []uint64) *BitSet {
|
||||
return &BitSet{uint(len(buf)) * 64, buf}
|
||||
}
|
||||
|
||||
// Bytes returns the bitset as array of integers
|
||||
func (b *BitSet) Bytes() []uint64 {
|
||||
return b.set
|
||||
}
|
||||
|
||||
// wordsNeeded calculates the number of words needed for i bits
|
||||
func wordsNeeded(i uint) int {
|
||||
if i > (Cap() - wordSize + 1) {
|
||||
return int(Cap() >> log2WordSize)
|
||||
}
|
||||
return int((i + (wordSize - 1)) >> log2WordSize)
|
||||
}
|
||||
|
||||
// New creates a new BitSet with a hint that length bits will be required
|
||||
func New(length uint) (bset *BitSet) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
bset = &BitSet{
|
||||
0,
|
||||
make([]uint64, 0),
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
bset = &BitSet{
|
||||
length,
|
||||
make([]uint64, wordsNeeded(length)),
|
||||
}
|
||||
|
||||
return bset
|
||||
}
|
||||
|
||||
// Cap returns the total possible capacity, or number of bits
|
||||
func Cap() uint {
|
||||
return ^uint(0)
|
||||
}
|
||||
|
||||
// Len returns the number of bits in the BitSet.
|
||||
// Note the difference to method Count, see example.
|
||||
func (b *BitSet) Len() uint {
|
||||
return b.length
|
||||
}
|
||||
|
||||
// extendSetMaybe adds additional words to incorporate new bits if needed
|
||||
func (b *BitSet) extendSetMaybe(i uint) {
|
||||
if i >= b.length { // if we need more bits, make 'em
|
||||
if i >= Cap() {
|
||||
panic("You are exceeding the capacity")
|
||||
}
|
||||
nsize := wordsNeeded(i + 1)
|
||||
if b.set == nil {
|
||||
b.set = make([]uint64, nsize)
|
||||
} else if cap(b.set) >= nsize {
|
||||
b.set = b.set[:nsize] // fast resize
|
||||
} else if len(b.set) < nsize {
|
||||
newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x
|
||||
copy(newset, b.set)
|
||||
b.set = newset
|
||||
}
|
||||
b.length = i + 1
|
||||
}
|
||||
}
|
||||
|
||||
// Test whether bit i is set.
|
||||
func (b *BitSet) Test(i uint) bool {
|
||||
if i >= b.length {
|
||||
return false
|
||||
}
|
||||
return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0
|
||||
}
|
||||
|
||||
// Set bit i to 1, the capacity of the bitset is automatically
|
||||
// increased accordingly.
|
||||
// If i>= Cap(), this function will panic.
|
||||
// Warning: using a very large value for 'i'
|
||||
// may lead to a memory shortage and a panic: the caller is responsible
|
||||
// for providing sensible parameters in line with their memory capacity.
|
||||
func (b *BitSet) Set(i uint) *BitSet {
|
||||
b.extendSetMaybe(i)
|
||||
b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1))
|
||||
return b
|
||||
}
|
||||
|
||||
// Clear bit i to 0
|
||||
func (b *BitSet) Clear(i uint) *BitSet {
|
||||
if i >= b.length {
|
||||
return b
|
||||
}
|
||||
b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1))
|
||||
return b
|
||||
}
|
||||
|
||||
// SetTo sets bit i to value.
|
||||
// If i>= Cap(), this function will panic.
|
||||
// Warning: using a very large value for 'i'
|
||||
// may lead to a memory shortage and a panic: the caller is responsible
|
||||
// for providing sensible parameters in line with their memory capacity.
|
||||
func (b *BitSet) SetTo(i uint, value bool) *BitSet {
|
||||
if value {
|
||||
return b.Set(i)
|
||||
}
|
||||
return b.Clear(i)
|
||||
}
|
||||
|
||||
// Flip bit at i.
|
||||
// If i>= Cap(), this function will panic.
|
||||
// Warning: using a very large value for 'i'
|
||||
// may lead to a memory shortage and a panic: the caller is responsible
|
||||
// for providing sensible parameters in line with their memory capacity.
|
||||
func (b *BitSet) Flip(i uint) *BitSet {
|
||||
if i >= b.length {
|
||||
return b.Set(i)
|
||||
}
|
||||
b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1))
|
||||
return b
|
||||
}
|
||||
|
||||
// FlipRange bit in [start, end).
|
||||
// If end>= Cap(), this function will panic.
|
||||
// Warning: using a very large value for 'end'
|
||||
// may lead to a memory shortage and a panic: the caller is responsible
|
||||
// for providing sensible parameters in line with their memory capacity.
|
||||
func (b *BitSet) FlipRange(start, end uint) *BitSet {
|
||||
if start >= end {
|
||||
return b
|
||||
}
|
||||
|
||||
b.extendSetMaybe(end - 1)
|
||||
var startWord uint = start >> log2WordSize
|
||||
var endWord uint = end >> log2WordSize
|
||||
b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1)))
|
||||
for i := startWord; i < endWord; i++ {
|
||||
b.set[i] = ^b.set[i]
|
||||
}
|
||||
b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1))
|
||||
return b
|
||||
}
|
||||
|
||||
// Shrink shrinks BitSet so that the provided value is the last possible
|
||||
// set value. It clears all bits > the provided index and reduces the size
|
||||
// and length of the set.
|
||||
//
|
||||
// Note that the parameter value is not the new length in bits: it is the
|
||||
// maximal value that can be stored in the bitset after the function call.
|
||||
// The new length in bits is the parameter value + 1. Thus it is not possible
|
||||
// to use this function to set the length to 0, the minimal value of the length
|
||||
// after this function call is 1.
|
||||
//
|
||||
// A new slice is allocated to store the new bits, so you may see an increase in
|
||||
// memory usage until the GC runs. Normally this should not be a problem, but if you
|
||||
// have an extremely large BitSet its important to understand that the old BitSet will
|
||||
// remain in memory until the GC frees it.
|
||||
func (b *BitSet) Shrink(lastbitindex uint) *BitSet {
|
||||
length := lastbitindex + 1
|
||||
idx := wordsNeeded(length)
|
||||
if idx > len(b.set) {
|
||||
return b
|
||||
}
|
||||
shrunk := make([]uint64, idx)
|
||||
copy(shrunk, b.set[:idx])
|
||||
b.set = shrunk
|
||||
b.length = length
|
||||
if length < 64 {
|
||||
b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1))))
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Compact shrinks BitSet to so that we preserve all set bits, while minimizing
|
||||
// memory usage. Compact calls Shrink.
|
||||
func (b *BitSet) Compact() *BitSet {
|
||||
idx := len(b.set) - 1
|
||||
for ; idx >= 0 && b.set[idx] == 0; idx-- {
|
||||
}
|
||||
newlength := uint((idx + 1) << log2WordSize)
|
||||
if newlength >= b.length {
|
||||
return b // nothing to do
|
||||
}
|
||||
if newlength > 0 {
|
||||
return b.Shrink(newlength - 1)
|
||||
}
|
||||
// We preserve one word
|
||||
return b.Shrink(63)
|
||||
}
|
||||
|
||||
// InsertAt takes an index which indicates where a bit should be
|
||||
// inserted. Then it shifts all the bits in the set to the left by 1, starting
|
||||
// from the given index position, and sets the index position to 0.
|
||||
//
|
||||
// Depending on the size of your BitSet, and where you are inserting the new entry,
|
||||
// this method could be extremely slow and in some cases might cause the entire BitSet
|
||||
// to be recopied.
|
||||
func (b *BitSet) InsertAt(idx uint) *BitSet {
|
||||
insertAtElement := (idx >> log2WordSize)
|
||||
|
||||
// if length of set is a multiple of wordSize we need to allocate more space first
|
||||
if b.isLenExactMultiple() {
|
||||
b.set = append(b.set, uint64(0))
|
||||
}
|
||||
|
||||
var i uint
|
||||
for i = uint(len(b.set) - 1); i > insertAtElement; i-- {
|
||||
// all elements above the position where we want to insert can simply by shifted
|
||||
b.set[i] <<= 1
|
||||
|
||||
// we take the most significant bit of the previous element and set it as
|
||||
// the least significant bit of the current element
|
||||
b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63
|
||||
}
|
||||
|
||||
// generate a mask to extract the data that we need to shift left
|
||||
// within the element where we insert a bit
|
||||
dataMask := ^(uint64(1)<<uint64(idx&(wordSize-1)) - 1)
|
||||
|
||||
// extract that data that we'll shift
|
||||
data := b.set[i] & dataMask
|
||||
|
||||
// set the positions of the data mask to 0 in the element where we insert
|
||||
b.set[i] &= ^dataMask
|
||||
|
||||
// shift data mask to the left and insert its data to the slice element
|
||||
b.set[i] |= data << 1
|
||||
|
||||
// add 1 to length of BitSet
|
||||
b.length++
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// String creates a string representation of the Bitmap
|
||||
func (b *BitSet) String() string {
|
||||
// follows code from https://github.com/RoaringBitmap/roaring
|
||||
var buffer bytes.Buffer
|
||||
start := []byte("{")
|
||||
buffer.Write(start)
|
||||
counter := 0
|
||||
i, e := b.NextSet(0)
|
||||
for e {
|
||||
counter = counter + 1
|
||||
// to avoid exhausting the memory
|
||||
if counter > 0x40000 {
|
||||
buffer.WriteString("...")
|
||||
break
|
||||
}
|
||||
buffer.WriteString(strconv.FormatInt(int64(i), 10))
|
||||
i, e = b.NextSet(i + 1)
|
||||
if e {
|
||||
buffer.WriteString(",")
|
||||
}
|
||||
}
|
||||
buffer.WriteString("}")
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
// DeleteAt deletes the bit at the given index position from
|
||||
// within the bitset
|
||||
// All the bits residing on the left of the deleted bit get
|
||||
// shifted right by 1
|
||||
// The running time of this operation may potentially be
|
||||
// relatively slow, O(length)
|
||||
func (b *BitSet) DeleteAt(i uint) *BitSet {
|
||||
// the index of the slice element where we'll delete a bit
|
||||
deleteAtElement := i >> log2WordSize
|
||||
|
||||
// generate a mask for the data that needs to be shifted right
|
||||
// within that slice element that gets modified
|
||||
dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1)
|
||||
|
||||
// extract the data that we'll shift right from the slice element
|
||||
data := b.set[deleteAtElement] & dataMask
|
||||
|
||||
// set the masked area to 0 while leaving the rest as it is
|
||||
b.set[deleteAtElement] &= ^dataMask
|
||||
|
||||
// shift the previously extracted data to the right and then
|
||||
// set it in the previously masked area
|
||||
b.set[deleteAtElement] |= (data >> 1) & dataMask
|
||||
|
||||
// loop over all the consecutive slice elements to copy each
|
||||
// lowest bit into the highest position of the previous element,
|
||||
// then shift the entire content to the right by 1
|
||||
for i := int(deleteAtElement) + 1; i < len(b.set); i++ {
|
||||
b.set[i-1] |= (b.set[i] & 1) << 63
|
||||
b.set[i] >>= 1
|
||||
}
|
||||
|
||||
b.length = b.length - 1
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
// NextSet returns the next bit set from the specified index,
|
||||
// including possibly the current index
|
||||
// along with an error code (true = valid, false = no set bit found)
|
||||
// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...}
|
||||
//
|
||||
// Users concerned with performance may want to use NextSetMany to
|
||||
// retrieve several values at once.
|
||||
func (b *BitSet) NextSet(i uint) (uint, bool) {
|
||||
x := int(i >> log2WordSize)
|
||||
if x >= len(b.set) {
|
||||
return 0, false
|
||||
}
|
||||
w := b.set[x]
|
||||
w = w >> (i & (wordSize - 1))
|
||||
if w != 0 {
|
||||
return i + trailingZeroes64(w), true
|
||||
}
|
||||
x = x + 1
|
||||
for x < len(b.set) {
|
||||
if b.set[x] != 0 {
|
||||
return uint(x)*wordSize + trailingZeroes64(b.set[x]), true
|
||||
}
|
||||
x = x + 1
|
||||
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// NextSetMany returns many next bit sets from the specified index,
|
||||
// including possibly the current index and up to cap(buffer).
|
||||
// If the returned slice has len zero, then no more set bits were found
|
||||
//
|
||||
// buffer := make([]uint, 256) // this should be reused
|
||||
// j := uint(0)
|
||||
// j, buffer = bitmap.NextSetMany(j, buffer)
|
||||
// for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j,buffer) {
|
||||
// for k := range buffer {
|
||||
// do something with buffer[k]
|
||||
// }
|
||||
// j += 1
|
||||
// }
|
||||
//
|
||||
//
|
||||
// It is possible to retrieve all set bits as follow:
|
||||
//
|
||||
// indices := make([]uint, bitmap.Count())
|
||||
// bitmap.NextSetMany(0, indices)
|
||||
//
|
||||
// However if bitmap.Count() is large, it might be preferable to
|
||||
// use several calls to NextSetMany, for performance reasons.
|
||||
func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) {
|
||||
myanswer := buffer
|
||||
capacity := cap(buffer)
|
||||
x := int(i >> log2WordSize)
|
||||
if x >= len(b.set) || capacity == 0 {
|
||||
return 0, myanswer[:0]
|
||||
}
|
||||
skip := i & (wordSize - 1)
|
||||
word := b.set[x] >> skip
|
||||
myanswer = myanswer[:capacity]
|
||||
size := int(0)
|
||||
for word != 0 {
|
||||
r := trailingZeroes64(word)
|
||||
t := word & ((^word) + 1)
|
||||
myanswer[size] = r + i
|
||||
size++
|
||||
if size == capacity {
|
||||
goto End
|
||||
}
|
||||
word = word ^ t
|
||||
}
|
||||
x++
|
||||
for idx, word := range b.set[x:] {
|
||||
for word != 0 {
|
||||
r := trailingZeroes64(word)
|
||||
t := word & ((^word) + 1)
|
||||
myanswer[size] = r + (uint(x+idx) << 6)
|
||||
size++
|
||||
if size == capacity {
|
||||
goto End
|
||||
}
|
||||
word = word ^ t
|
||||
}
|
||||
}
|
||||
End:
|
||||
if size > 0 {
|
||||
return myanswer[size-1], myanswer[:size]
|
||||
}
|
||||
return 0, myanswer[:0]
|
||||
}
|
||||
|
||||
// NextClear returns the next clear bit from the specified index,
|
||||
// including possibly the current index
|
||||
// along with an error code (true = valid, false = no bit found i.e. all bits are set)
|
||||
func (b *BitSet) NextClear(i uint) (uint, bool) {
|
||||
x := int(i >> log2WordSize)
|
||||
if x >= len(b.set) {
|
||||
return 0, false
|
||||
}
|
||||
w := b.set[x]
|
||||
w = w >> (i & (wordSize - 1))
|
||||
wA := allBits >> (i & (wordSize - 1))
|
||||
index := i + trailingZeroes64(^w)
|
||||
if w != wA && index < b.length {
|
||||
return index, true
|
||||
}
|
||||
x++
|
||||
for x < len(b.set) {
|
||||
index = uint(x)*wordSize + trailingZeroes64(^b.set[x])
|
||||
if b.set[x] != allBits && index < b.length {
|
||||
return index, true
|
||||
}
|
||||
x++
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// ClearAll clears the entire BitSet
|
||||
func (b *BitSet) ClearAll() *BitSet {
|
||||
if b != nil && b.set != nil {
|
||||
for i := range b.set {
|
||||
b.set[i] = 0
|
||||
}
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// wordCount returns the number of words used in a bit set
|
||||
func (b *BitSet) wordCount() int {
|
||||
return len(b.set)
|
||||
}
|
||||
|
||||
// Clone this BitSet
|
||||
func (b *BitSet) Clone() *BitSet {
|
||||
c := New(b.length)
|
||||
if b.set != nil { // Clone should not modify current object
|
||||
copy(c.set, b.set)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// Copy into a destination BitSet
|
||||
// Returning the size of the destination BitSet
|
||||
// like array copy
|
||||
func (b *BitSet) Copy(c *BitSet) (count uint) {
|
||||
if c == nil {
|
||||
return
|
||||
}
|
||||
if b.set != nil { // Copy should not modify current object
|
||||
copy(c.set, b.set)
|
||||
}
|
||||
count = c.length
|
||||
if b.length < c.length {
|
||||
count = b.length
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Count (number of set bits).
|
||||
// Also known as "popcount" or "population count".
|
||||
func (b *BitSet) Count() uint {
|
||||
if b != nil && b.set != nil {
|
||||
return uint(popcntSlice(b.set))
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Equal tests the equivalence of two BitSets.
|
||||
// False if they are of different sizes, otherwise true
|
||||
// only if all the same bits are set
|
||||
func (b *BitSet) Equal(c *BitSet) bool {
|
||||
if c == nil || b == nil {
|
||||
return c == b
|
||||
}
|
||||
if b.length != c.length {
|
||||
return false
|
||||
}
|
||||
if b.length == 0 { // if they have both length == 0, then could have nil set
|
||||
return true
|
||||
}
|
||||
// testing for equality shoud not transform the bitset (no call to safeSet)
|
||||
|
||||
for p, v := range b.set {
|
||||
if c.set[p] != v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func panicIfNull(b *BitSet) {
|
||||
if b == nil {
|
||||
panic(Error("BitSet must not be null"))
|
||||
}
|
||||
}
|
||||
|
||||
// Difference of base set and other set
|
||||
// This is the BitSet equivalent of &^ (and not)
|
||||
func (b *BitSet) Difference(compare *BitSet) (result *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
result = b.Clone() // clone b (in case b is bigger than compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
result.set[i] = b.set[i] &^ compare.set[i]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DifferenceCardinality computes the cardinality of the differnce
|
||||
func (b *BitSet) DifferenceCardinality(compare *BitSet) uint {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
cnt := uint64(0)
|
||||
cnt += popcntMaskSlice(b.set[:l], compare.set[:l])
|
||||
cnt += popcntSlice(b.set[l:])
|
||||
return uint(cnt)
|
||||
}
|
||||
|
||||
// InPlaceDifference computes the difference of base set and other set
|
||||
// This is the BitSet equivalent of &^ (and not)
|
||||
func (b *BitSet) InPlaceDifference(compare *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
b.set[i] &^= compare.set[i]
|
||||
}
|
||||
}
|
||||
|
||||
// Convenience function: return two bitsets ordered by
|
||||
// increasing length. Note: neither can be nil
|
||||
func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) {
|
||||
if a.length <= b.length {
|
||||
ap, bp = a, b
|
||||
} else {
|
||||
ap, bp = b, a
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Intersection of base set and other set
|
||||
// This is the BitSet equivalent of & (and)
|
||||
func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
result = New(b.length)
|
||||
for i, word := range b.set {
|
||||
result.set[i] = word & compare.set[i]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// IntersectionCardinality computes the cardinality of the union
|
||||
func (b *BitSet) IntersectionCardinality(compare *BitSet) uint {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
cnt := popcntAndSlice(b.set, compare.set)
|
||||
return uint(cnt)
|
||||
}
|
||||
|
||||
// InPlaceIntersection destructively computes the intersection of
|
||||
// base set and the compare set.
|
||||
// This is the BitSet equivalent of & (and)
|
||||
func (b *BitSet) InPlaceIntersection(compare *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
b.set[i] &= compare.set[i]
|
||||
}
|
||||
for i := l; i < len(b.set); i++ {
|
||||
b.set[i] = 0
|
||||
}
|
||||
if compare.length > 0 {
|
||||
b.extendSetMaybe(compare.length - 1)
|
||||
}
|
||||
}
|
||||
|
||||
// Union of base set and other set
|
||||
// This is the BitSet equivalent of | (or)
|
||||
func (b *BitSet) Union(compare *BitSet) (result *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
result = compare.Clone()
|
||||
for i, word := range b.set {
|
||||
result.set[i] = word | compare.set[i]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// UnionCardinality computes the cardinality of the uniton of the base set
|
||||
// and the compare set.
|
||||
func (b *BitSet) UnionCardinality(compare *BitSet) uint {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
cnt := popcntOrSlice(b.set, compare.set)
|
||||
if len(compare.set) > len(b.set) {
|
||||
cnt += popcntSlice(compare.set[len(b.set):])
|
||||
}
|
||||
return uint(cnt)
|
||||
}
|
||||
|
||||
// InPlaceUnion creates the destructive union of base set and compare set.
|
||||
// This is the BitSet equivalent of | (or).
|
||||
func (b *BitSet) InPlaceUnion(compare *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
if compare.length > 0 {
|
||||
b.extendSetMaybe(compare.length - 1)
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
b.set[i] |= compare.set[i]
|
||||
}
|
||||
if len(compare.set) > l {
|
||||
for i := l; i < len(compare.set); i++ {
|
||||
b.set[i] = compare.set[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SymmetricDifference of base set and other set
|
||||
// This is the BitSet equivalent of ^ (xor)
|
||||
func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
// compare is bigger, so clone it
|
||||
result = compare.Clone()
|
||||
for i, word := range b.set {
|
||||
result.set[i] = word ^ compare.set[i]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference
|
||||
func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
b, compare = sortByLength(b, compare)
|
||||
cnt := popcntXorSlice(b.set, compare.set)
|
||||
if len(compare.set) > len(b.set) {
|
||||
cnt += popcntSlice(compare.set[len(b.set):])
|
||||
}
|
||||
return uint(cnt)
|
||||
}
|
||||
|
||||
// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set
|
||||
// This is the BitSet equivalent of ^ (xor)
|
||||
func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) {
|
||||
panicIfNull(b)
|
||||
panicIfNull(compare)
|
||||
l := int(compare.wordCount())
|
||||
if l > int(b.wordCount()) {
|
||||
l = int(b.wordCount())
|
||||
}
|
||||
if compare.length > 0 {
|
||||
b.extendSetMaybe(compare.length - 1)
|
||||
}
|
||||
for i := 0; i < l; i++ {
|
||||
b.set[i] ^= compare.set[i]
|
||||
}
|
||||
if len(compare.set) > l {
|
||||
for i := l; i < len(compare.set); i++ {
|
||||
b.set[i] = compare.set[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Is the length an exact multiple of word sizes?
|
||||
func (b *BitSet) isLenExactMultiple() bool {
|
||||
return b.length%wordSize == 0
|
||||
}
|
||||
|
||||
// Clean last word by setting unused bits to 0
|
||||
func (b *BitSet) cleanLastWord() {
|
||||
if !b.isLenExactMultiple() {
|
||||
b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize)
|
||||
}
|
||||
}
|
||||
|
||||
// Complement computes the (local) complement of a biset (up to length bits)
|
||||
func (b *BitSet) Complement() (result *BitSet) {
|
||||
panicIfNull(b)
|
||||
result = New(b.length)
|
||||
for i, word := range b.set {
|
||||
result.set[i] = ^word
|
||||
}
|
||||
result.cleanLastWord()
|
||||
return
|
||||
}
|
||||
|
||||
// All returns true if all bits are set, false otherwise. Returns true for
|
||||
// empty sets.
|
||||
func (b *BitSet) All() bool {
|
||||
panicIfNull(b)
|
||||
return b.Count() == b.length
|
||||
}
|
||||
|
||||
// None returns true if no bit is set, false otherwise. Returns true for
|
||||
// empty sets.
|
||||
func (b *BitSet) None() bool {
|
||||
panicIfNull(b)
|
||||
if b != nil && b.set != nil {
|
||||
for _, word := range b.set {
|
||||
if word > 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Any returns true if any bit is set, false otherwise
|
||||
func (b *BitSet) Any() bool {
|
||||
panicIfNull(b)
|
||||
return !b.None()
|
||||
}
|
||||
|
||||
// IsSuperSet returns true if this is a superset of the other set
|
||||
func (b *BitSet) IsSuperSet(other *BitSet) bool {
|
||||
for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) {
|
||||
if !b.Test(i) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// IsStrictSuperSet returns true if this is a strict superset of the other set
|
||||
func (b *BitSet) IsStrictSuperSet(other *BitSet) bool {
|
||||
return b.Count() > other.Count() && b.IsSuperSet(other)
|
||||
}
|
||||
|
||||
// DumpAsBits dumps a bit set as a string of bits
|
||||
func (b *BitSet) DumpAsBits() string {
|
||||
if b.set == nil {
|
||||
return "."
|
||||
}
|
||||
buffer := bytes.NewBufferString("")
|
||||
i := len(b.set) - 1
|
||||
for ; i >= 0; i-- {
|
||||
fmt.Fprintf(buffer, "%064b.", b.set[i])
|
||||
}
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
// BinaryStorageSize returns the binary storage requirements
|
||||
func (b *BitSet) BinaryStorageSize() int {
|
||||
return binary.Size(uint64(0)) + binary.Size(b.set)
|
||||
}
|
||||
|
||||
// WriteTo writes a BitSet to a stream
|
||||
func (b *BitSet) WriteTo(stream io.Writer) (int64, error) {
|
||||
length := uint64(b.length)
|
||||
|
||||
// Write length
|
||||
err := binary.Write(stream, binaryOrder, length)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Write set
|
||||
err = binary.Write(stream, binaryOrder, b.set)
|
||||
return int64(b.BinaryStorageSize()), err
|
||||
}
|
||||
|
||||
// ReadFrom reads a BitSet from a stream written using WriteTo
|
||||
func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) {
|
||||
var length uint64
|
||||
|
||||
// Read length first
|
||||
err := binary.Read(stream, binaryOrder, &length)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
newset := New(uint(length))
|
||||
|
||||
if uint64(newset.length) != length {
|
||||
return 0, errors.New("unmarshalling error: type mismatch")
|
||||
}
|
||||
|
||||
// Read remaining bytes as set
|
||||
err = binary.Read(stream, binaryOrder, newset.set)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
*b = *newset
|
||||
return int64(b.BinaryStorageSize()), nil
|
||||
}
|
||||
|
||||
// MarshalBinary encodes a BitSet into a binary form and returns the result.
|
||||
func (b *BitSet) MarshalBinary() ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
writer := bufio.NewWriter(&buf)
|
||||
|
||||
_, err := b.WriteTo(writer)
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
err = writer.Flush()
|
||||
|
||||
return buf.Bytes(), err
|
||||
}
|
||||
|
||||
// UnmarshalBinary decodes the binary form generated by MarshalBinary.
|
||||
func (b *BitSet) UnmarshalBinary(data []byte) error {
|
||||
buf := bytes.NewReader(data)
|
||||
reader := bufio.NewReader(buf)
|
||||
|
||||
_, err := b.ReadFrom(reader)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// MarshalJSON marshals a BitSet as a JSON structure
|
||||
func (b *BitSet) MarshalJSON() ([]byte, error) {
|
||||
buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize()))
|
||||
_, err := b.WriteTo(buffer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// URLEncode all bytes
|
||||
return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes()))
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON
|
||||
func (b *BitSet) UnmarshalJSON(data []byte) error {
|
||||
// Unmarshal as string
|
||||
var s string
|
||||
err := json.Unmarshal(data, &s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// URLDecode string
|
||||
buf, err := base64Encoding.DecodeString(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = b.ReadFrom(bytes.NewReader(buf))
|
||||
return err
|
||||
}
|
||||
3 vendor/github.com/bits-and-blooms/bitset/go.mod generated vendored
@@ -1,3 +0,0 @@
module github.com/bits-and-blooms/bitset

go 1.14
0 vendor/github.com/bits-and-blooms/bitset/go.sum generated vendored
53 vendor/github.com/bits-and-blooms/bitset/popcnt.go generated vendored
@@ -1,53 +0,0 @@
package bitset
|
||||
|
||||
// bit population count, take from
|
||||
// https://code.google.com/p/go/issues/detail?id=4988#c11
|
||||
// credit: https://code.google.com/u/arnehormann/
|
||||
func popcount(x uint64) (n uint64) {
|
||||
x -= (x >> 1) & 0x5555555555555555
|
||||
x = (x>>2)&0x3333333333333333 + x&0x3333333333333333
|
||||
x += x >> 4
|
||||
x &= 0x0f0f0f0f0f0f0f0f
|
||||
x *= 0x0101010101010101
|
||||
return x >> 56
|
||||
}
|
||||
|
||||
func popcntSliceGo(s []uint64) uint64 {
|
||||
cnt := uint64(0)
|
||||
for _, x := range s {
|
||||
cnt += popcount(x)
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
func popcntMaskSliceGo(s, m []uint64) uint64 {
|
||||
cnt := uint64(0)
|
||||
for i := range s {
|
||||
cnt += popcount(s[i] &^ m[i])
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
func popcntAndSliceGo(s, m []uint64) uint64 {
|
||||
cnt := uint64(0)
|
||||
for i := range s {
|
||||
cnt += popcount(s[i] & m[i])
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
func popcntOrSliceGo(s, m []uint64) uint64 {
|
||||
cnt := uint64(0)
|
||||
for i := range s {
|
||||
cnt += popcount(s[i] | m[i])
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
|
||||
func popcntXorSliceGo(s, m []uint64) uint64 {
|
||||
cnt := uint64(0)
|
||||
for i := range s {
|
||||
cnt += popcount(s[i] ^ m[i])
|
||||
}
|
||||
return cnt
|
||||
}
|
||||
45 vendor/github.com/bits-and-blooms/bitset/popcnt_19.go generated vendored
@@ -1,45 +0,0 @@
// +build go1.9
|
||||
|
||||
package bitset
|
||||
|
||||
import "math/bits"
|
||||
|
||||
func popcntSlice(s []uint64) uint64 {
|
||||
var cnt int
|
||||
for _, x := range s {
|
||||
cnt += bits.OnesCount64(x)
|
||||
}
|
||||
return uint64(cnt)
|
||||
}
|
||||
|
||||
func popcntMaskSlice(s, m []uint64) uint64 {
|
||||
var cnt int
|
||||
for i := range s {
|
||||
cnt += bits.OnesCount64(s[i] &^ m[i])
|
||||
}
|
||||
return uint64(cnt)
|
||||
}
|
||||
|
||||
func popcntAndSlice(s, m []uint64) uint64 {
|
||||
var cnt int
|
||||
for i := range s {
|
||||
cnt += bits.OnesCount64(s[i] & m[i])
|
||||
}
|
||||
return uint64(cnt)
|
||||
}
|
||||
|
||||
func popcntOrSlice(s, m []uint64) uint64 {
|
||||
var cnt int
|
||||
for i := range s {
|
||||
cnt += bits.OnesCount64(s[i] | m[i])
|
||||
}
|
||||
return uint64(cnt)
|
||||
}
|
||||
|
||||
func popcntXorSlice(s, m []uint64) uint64 {
|
||||
var cnt int
|
||||
for i := range s {
|
||||
cnt += bits.OnesCount64(s[i] ^ m[i])
|
||||
}
|
||||
return uint64(cnt)
|
||||
}
|
||||
68 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go generated vendored
@@ -1,68 +0,0 @@
// +build !go1.9
|
||||
// +build amd64,!appengine
|
||||
|
||||
package bitset
|
||||
|
||||
// *** the following functions are defined in popcnt_amd64.s
|
||||
|
||||
//go:noescape
|
||||
|
||||
func hasAsm() bool
|
||||
|
||||
// useAsm is a flag used to select the GO or ASM implementation of the popcnt function
|
||||
var useAsm = hasAsm()
|
||||
|
||||
//go:noescape
|
||||
|
||||
func popcntSliceAsm(s []uint64) uint64
|
||||
|
||||
//go:noescape
|
||||
|
||||
func popcntMaskSliceAsm(s, m []uint64) uint64
|
||||
|
||||
//go:noescape
|
||||
|
||||
func popcntAndSliceAsm(s, m []uint64) uint64
|
||||
|
||||
//go:noescape
|
||||
|
||||
func popcntOrSliceAsm(s, m []uint64) uint64
|
||||
|
||||
//go:noescape
|
||||
|
||||
func popcntXorSliceAsm(s, m []uint64) uint64
|
||||
|
||||
func popcntSlice(s []uint64) uint64 {
|
||||
if useAsm {
|
||||
return popcntSliceAsm(s)
|
||||
}
|
||||
return popcntSliceGo(s)
|
||||
}
|
||||
|
||||
func popcntMaskSlice(s, m []uint64) uint64 {
|
||||
if useAsm {
|
||||
return popcntMaskSliceAsm(s, m)
|
||||
}
|
||||
return popcntMaskSliceGo(s, m)
|
||||
}
|
||||
|
||||
func popcntAndSlice(s, m []uint64) uint64 {
|
||||
if useAsm {
|
||||
return popcntAndSliceAsm(s, m)
|
||||
}
|
||||
return popcntAndSliceGo(s, m)
|
||||
}
|
||||
|
||||
func popcntOrSlice(s, m []uint64) uint64 {
|
||||
if useAsm {
|
||||
return popcntOrSliceAsm(s, m)
|
||||
}
|
||||
return popcntOrSliceGo(s, m)
|
||||
}
|
||||
|
||||
func popcntXorSlice(s, m []uint64) uint64 {
|
||||
if useAsm {
|
||||
return popcntXorSliceAsm(s, m)
|
||||
}
|
||||
return popcntXorSliceGo(s, m)
|
||||
}
|
||||
104 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s generated vendored
@@ -1,104 +0,0 @@
// +build !go1.9
|
||||
// +build amd64,!appengine
|
||||
|
||||
TEXT ·hasAsm(SB),4,$0-1
|
||||
MOVQ $1, AX
|
||||
CPUID
|
||||
SHRQ $23, CX
|
||||
ANDQ $1, CX
|
||||
MOVB CX, ret+0(FP)
|
||||
RET
|
||||
|
||||
#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2
|
||||
|
||||
TEXT ·popcntSliceAsm(SB),4,$0-32
|
||||
XORQ AX, AX
|
||||
MOVQ s+0(FP), SI
|
||||
MOVQ s_len+8(FP), CX
|
||||
TESTQ CX, CX
|
||||
JZ popcntSliceEnd
|
||||
popcntSliceLoop:
|
||||
BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX
|
||||
ADDQ DX, AX
|
||||
ADDQ $8, SI
|
||||
LOOP popcntSliceLoop
|
||||
popcntSliceEnd:
|
||||
MOVQ AX, ret+24(FP)
|
||||
RET
|
||||
|
||||
TEXT ·popcntMaskSliceAsm(SB),4,$0-56
|
||||
XORQ AX, AX
|
||||
MOVQ s+0(FP), SI
|
||||
MOVQ s_len+8(FP), CX
|
||||
TESTQ CX, CX
|
||||
JZ popcntMaskSliceEnd
|
||||
MOVQ m+24(FP), DI
|
||||
popcntMaskSliceLoop:
|
||||
MOVQ (DI), DX
|
||||
NOTQ DX
|
||||
ANDQ (SI), DX
|
||||
POPCNTQ_DX_DX
|
||||
ADDQ DX, AX
|
||||
ADDQ $8, SI
|
||||
ADDQ $8, DI
|
||||
LOOP popcntMaskSliceLoop
|
||||
popcntMaskSliceEnd:
|
||||
MOVQ AX, ret+48(FP)
|
||||
RET
|
||||
|
||||
TEXT ·popcntAndSliceAsm(SB),4,$0-56
|
||||
XORQ AX, AX
|
||||
MOVQ s+0(FP), SI
|
||||
MOVQ s_len+8(FP), CX
|
||||
TESTQ CX, CX
|
||||
JZ popcntAndSliceEnd
|
||||
MOVQ m+24(FP), DI
|
||||
popcntAndSliceLoop:
|
||||
MOVQ (DI), DX
|
||||
ANDQ (SI), DX
|
||||
POPCNTQ_DX_DX
|
||||
ADDQ DX, AX
|
||||
ADDQ $8, SI
|
||||
ADDQ $8, DI
|
||||
LOOP popcntAndSliceLoop
|
||||
popcntAndSliceEnd:
|
||||
MOVQ AX, ret+48(FP)
|
||||
RET
|
||||
|
||||
TEXT ·popcntOrSliceAsm(SB),4,$0-56
|
||||
XORQ AX, AX
|
||||
MOVQ s+0(FP), SI
|
||||
MOVQ s_len+8(FP), CX
|
||||
TESTQ CX, CX
|
||||
JZ popcntOrSliceEnd
|
||||
MOVQ m+24(FP), DI
|
||||
popcntOrSliceLoop:
|
||||
MOVQ (DI), DX
|
||||
ORQ (SI), DX
|
||||
POPCNTQ_DX_DX
|
||||
ADDQ DX, AX
|
||||
ADDQ $8, SI
|
||||
ADDQ $8, DI
|
||||
LOOP popcntOrSliceLoop
|
||||
popcntOrSliceEnd:
|
||||
MOVQ AX, ret+48(FP)
|
||||
RET
|
||||
|
||||
TEXT ·popcntXorSliceAsm(SB),4,$0-56
|
||||
XORQ AX, AX
|
||||
MOVQ s+0(FP), SI
|
||||
MOVQ s_len+8(FP), CX
|
||||
TESTQ CX, CX
|
||||
JZ popcntXorSliceEnd
|
||||
MOVQ m+24(FP), DI
|
||||
popcntXorSliceLoop:
|
||||
MOVQ (DI), DX
|
||||
XORQ (SI), DX
|
||||
POPCNTQ_DX_DX
|
||||
ADDQ DX, AX
|
||||
ADDQ $8, SI
|
||||
ADDQ $8, DI
|
||||
LOOP popcntXorSliceLoop
|
||||
popcntXorSliceEnd:
|
||||
MOVQ AX, ret+48(FP)
|
||||
RET
|
||||
24 vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go generated vendored
@@ -1,24 +0,0 @@
// +build !go1.9
// +build !amd64 appengine

package bitset

func popcntSlice(s []uint64) uint64 {
	return popcntSliceGo(s)
}

func popcntMaskSlice(s, m []uint64) uint64 {
	return popcntMaskSliceGo(s, m)
}

func popcntAndSlice(s, m []uint64) uint64 {
	return popcntAndSliceGo(s, m)
}

func popcntOrSlice(s, m []uint64) uint64 {
	return popcntOrSliceGo(s, m)
}

func popcntXorSlice(s, m []uint64) uint64 {
	return popcntXorSliceGo(s, m)
}
14 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go generated vendored
@@ -1,14 +0,0 @@
// +build !go1.9

package bitset

var deBruijn = [...]byte{
	0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4,
	62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5,
	63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11,
	54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6,
}

func trailingZeroes64(v uint64) uint {
	return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58])
}
9 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go generated vendored
@@ -1,9 +0,0 @@
// +build go1.9

package bitset

import "math/bits"

func trailingZeroes64(v uint64) uint {
	return uint(bits.TrailingZeros64(v))
}
72 cmd/skopeo/flag.go → vendor/github.com/containers/common/pkg/flag/flag.go generated vendored
@@ -1,4 +1,4 @@
package main
|
||||
package flag
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
@@ -6,19 +6,31 @@ import (
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
|
||||
// optionalBool is a boolean with a separate presence flag.
|
||||
type optionalBool struct {
|
||||
// OptionalBool is a boolean with a separate presence flag and value.
|
||||
type OptionalBool struct {
|
||||
present bool
|
||||
value bool
|
||||
}
|
||||
|
||||
// Present returns the bool's presence flag.
|
||||
func (ob *OptionalBool) Present() bool {
|
||||
return ob.present
|
||||
}
|
||||
|
||||
// Present returns the bool's value. Should only be used if Present() is true.
|
||||
func (ob *OptionalBool) Value() bool {
|
||||
return ob.value
|
||||
}
|
||||
|
||||
// optionalBool is a cli.Generic == flag.Value implementation equivalent to
|
||||
// the one underlying flag.Bool, except that it records whether the flag has been set.
|
||||
// This is distinct from optionalBool to (pretend to) force callers to use
|
||||
// optionalBoolFlag
|
||||
type optionalBoolValue optionalBool
|
||||
type optionalBoolValue OptionalBool
|
||||
|
||||
func optionalBoolFlag(fs *pflag.FlagSet, p *optionalBool, name, usage string) *pflag.Flag {
|
||||
// OptionalBoolFlag creates new flag for an optional in the specified flag with
|
||||
// the specified name and usage.
|
||||
func OptionalBoolFlag(fs *pflag.FlagSet, p *OptionalBool, name, usage string) *pflag.Flag {
|
||||
flag := fs.VarPF(internalNewOptionalBoolValue(p), name, "", usage)
|
||||
flag.NoOptDefVal = "true"
|
||||
flag.DefValue = "false"
|
||||
@@ -27,11 +39,12 @@ func optionalBoolFlag(fs *pflag.FlagSet, p *optionalBool, name, usage string) *p
|
||||
|
||||
// WARNING: Do not directly use this method to define optionalBool flag.
|
||||
// Caller should use optionalBoolFlag
|
||||
func internalNewOptionalBoolValue(p *optionalBool) pflag.Value {
|
||||
func internalNewOptionalBoolValue(p *OptionalBool) pflag.Value {
|
||||
p.present = false
|
||||
return (*optionalBoolValue)(p)
|
||||
}
|
||||
|
||||
// Set parses the string to a bool and sets it.
|
||||
func (ob *optionalBoolValue) Set(s string) error {
|
||||
v, err := strconv.ParseBool(s)
|
||||
if err != nil {
|
||||
@@ -42,6 +55,7 @@ func (ob *optionalBoolValue) Set(s string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns the string representation of the string.
|
||||
func (ob *optionalBoolValue) String() string {
|
||||
if !ob.present {
|
||||
return "" // This is, sadly, not round-trip safe: --flag is interpreted as --flag=true
|
||||
@@ -49,37 +63,52 @@ func (ob *optionalBoolValue) String() string {
|
||||
return strconv.FormatBool(ob.value)
|
||||
}
|
||||
|
||||
// Type returns the type.
|
||||
func (ob *optionalBoolValue) Type() string {
|
||||
return "bool"
|
||||
}
|
||||
|
||||
// IsBoolFlag indicates that it's a bool flag.
|
||||
func (ob *optionalBoolValue) IsBoolFlag() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// optionalString is a string with a separate presence flag.
|
||||
type optionalString struct {
|
||||
// OptionalString is a string with a separate presence flag.
|
||||
type OptionalString struct {
|
||||
present bool
|
||||
value string
|
||||
}
|
||||
|
||||
// Present returns the strings's presence flag.
|
||||
func (os *OptionalString) Present() bool {
|
||||
return os.present
|
||||
}
|
||||
|
||||
// Present returns the string's value. Should only be used if Present() is true.
|
||||
func (os *OptionalString) Value() string {
|
||||
return os.value
|
||||
}
|
||||
|
||||
// optionalString is a cli.Generic == flag.Value implementation equivalent to
|
||||
// the one underlying flag.String, except that it records whether the flag has been set.
|
||||
// This is distinct from optionalString to (pretend to) force callers to use
|
||||
// newoptionalString
|
||||
type optionalStringValue optionalString
|
||||
type optionalStringValue OptionalString
|
||||
|
||||
func newOptionalStringValue(p *optionalString) pflag.Value {
|
||||
// NewOptionalStringValue returns a pflag.Value fo the string.
|
||||
func NewOptionalStringValue(p *OptionalString) pflag.Value {
|
||||
p.present = false
|
||||
return (*optionalStringValue)(p)
|
||||
}
|
||||
|
||||
// Set sets the string.
|
||||
func (ob *optionalStringValue) Set(s string) error {
|
||||
ob.value = s
|
||||
ob.present = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns the string if present.
|
||||
func (ob *optionalStringValue) String() string {
|
||||
if !ob.present {
|
||||
return "" // This is, sadly, not round-trip safe: --flag= is interpreted as {present:true, value:""}
|
||||
@@ -87,27 +116,40 @@ func (ob *optionalStringValue) String() string {
|
||||
return ob.value
|
||||
}
|
||||
|
||||
// Type returns the string type.
|
||||
func (ob *optionalStringValue) Type() string {
|
||||
return "string"
|
||||
}
|
||||
|
||||
// optionalInt is a int with a separate presence flag.
|
||||
type optionalInt struct {
|
||||
// OptionalInt is a int with a separate presence flag.
|
||||
type OptionalInt struct {
|
||||
present bool
|
||||
value int
|
||||
}
|
||||
|
||||
// Present returns the int's presence flag.
|
||||
func (oi *OptionalInt) Present() bool {
|
||||
return oi.present
|
||||
}
|
||||
|
||||
// Present returns the int's value. Should only be used if Present() is true.
|
||||
func (oi *OptionalInt) Value() int {
|
||||
return oi.value
|
||||
}
|
||||
|
||||
// optionalInt is a cli.Generic == flag.Value implementation equivalent to
|
||||
// the one underlying flag.Int, except that it records whether the flag has been set.
|
||||
// This is distinct from optionalInt to (pretend to) force callers to use
|
||||
// newoptionalIntValue
|
||||
type optionalIntValue optionalInt
|
||||
type optionalIntValue OptionalInt
|
||||
|
||||
func newOptionalIntValue(p *optionalInt) pflag.Value {
|
||||
// NewOptionalIntValue returns the pflag.Value of the int.
|
||||
func NewOptionalIntValue(p *OptionalInt) pflag.Value {
|
||||
p.present = false
|
||||
return (*optionalIntValue)(p)
|
||||
}
|
||||
|
||||
// Set parses the string to an int and sets it.
|
||||
func (ob *optionalIntValue) Set(s string) error {
|
||||
v, err := strconv.ParseInt(s, 0, strconv.IntSize)
|
||||
if err != nil {
|
||||
@@ -118,6 +160,7 @@ func (ob *optionalIntValue) Set(s string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns the string representation of the int.
|
||||
func (ob *optionalIntValue) String() string {
|
||||
if !ob.present {
|
||||
return "" // If the value is not present, just return an empty string, any other value wouldn't make sense.
|
||||
@@ -125,6 +168,7 @@ func (ob *optionalIntValue) String() string {
|
||||
return strconv.Itoa(int(ob.value))
|
||||
}
|
||||
|
||||
// Type returns the int's type.
|
||||
func (ob *optionalIntValue) Type() string {
|
||||
return "int"
|
||||
}
|
||||
2 vendor/github.com/containers/common/pkg/retry/retry.go generated vendored
@@ -30,7 +30,7 @@ func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions
		if retryOptions.Delay != 0 {
			delay = retryOptions.Delay
		}
		logrus.Warnf("failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, retryOptions.MaxRetry, err)
		logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, retryOptions.MaxRetry, err)
		select {
		case <-time.After(delay):
			break

40 vendor/github.com/containers/image/v5/docker/docker_image_dest.go generated vendored
@@ -17,9 +17,9 @@ import (
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/internal/blobinfocache"
|
||||
"github.com/containers/image/v5/internal/putblobdigest"
|
||||
"github.com/containers/image/v5/internal/streamdigest"
|
||||
"github.com/containers/image/v5/internal/uploadreader"
|
||||
"github.com/containers/image/v5/manifest"
|
||||
"github.com/containers/image/v5/pkg/blobinfocache/none"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
@@ -131,11 +131,23 @@ func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
|
||||
// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
|
||||
// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
|
||||
if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests {
|
||||
logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
|
||||
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
defer cleanup()
|
||||
stream = streamCopy
|
||||
}
|
||||
|
||||
if inputInfo.Digest != "" {
|
||||
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
|
||||
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
|
||||
// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
|
||||
haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, none.NoCache, false)
|
||||
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, cache)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
@@ -282,6 +294,21 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
|
||||
}
|
||||
}
|
||||
|
||||
// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
|
||||
// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
|
||||
// The caller must ensure info.Digest is set.
|
||||
func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (bool, types.BlobInfo, error) {
|
||||
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
|
||||
if err != nil {
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
if exists {
|
||||
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
|
||||
return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
|
||||
}
|
||||
return false, types.BlobInfo{}, nil
|
||||
}
|
||||
|
||||
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
@@ -297,13 +324,12 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
|
||||
}
|
||||
|
||||
// First, check whether the blob happens to already exist at the destination.
|
||||
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
|
||||
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, cache)
|
||||
if err != nil {
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
if exists {
|
||||
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
|
||||
return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
|
||||
if haveBlob {
|
||||
return true, reusedInfo, nil
|
||||
}
|
||||
|
||||
// Then try reusing blobs from other locations.
|
||||
|
||||
22 vendor/github.com/containers/image/v5/docker/docker_image_src.go generated vendored
@@ -236,6 +236,9 @@ func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
return nil
|
||||
}
|
||||
|
||||
// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
|
||||
// This function can return nil reader when no url is supported by this function. In this case, the caller
|
||||
// should fallback to fetch the non-external blob (i.e. pull from the registry).
|
||||
func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
|
||||
var (
|
||||
resp *http.Response
|
||||
@@ -244,14 +247,17 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
|
||||
if len(urls) == 0 {
|
||||
return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
|
||||
}
|
||||
for _, url := range urls {
|
||||
for _, u := range urls {
|
||||
if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") {
|
||||
continue // unsupported url. skip this url.
|
||||
}
|
||||
// NOTE: we must not authenticate on additional URLs as those
|
||||
// can be abused to leak credentials or tokens. Please
|
||||
// refer to CVE-2020-15157 for more information.
|
||||
resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, url, nil, nil, -1, noAuth, nil)
|
||||
resp, err = s.c.makeRequestToResolvedURL(ctx, http.MethodGet, u, nil, nil, -1, noAuth, nil)
|
||||
if err == nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
err = errors.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
logrus.Debug(err)
|
||||
resp.Body.Close()
|
||||
continue
|
||||
@@ -259,6 +265,9 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
|
||||
break
|
||||
}
|
||||
}
|
||||
if resp == nil && err == nil {
|
||||
return nil, 0, nil // fallback to non-external blob
|
||||
}
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
@@ -408,7 +417,12 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
if len(info.URLs) != 0 {
|
||||
return s.getExternalBlob(ctx, info.URLs)
|
||||
r, s, err := s.getExternalBlob(ctx, info.URLs)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
} else if r != nil {
|
||||
return r, s, nil
|
||||
}
|
||||
}
|
||||
|
||||
path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
|
||||
|
||||
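The loop above now follows only http/https entries from a manifest's urls list; anything else is skipped so the caller can fall back to pulling the blob from the registry. A minimal, stdlib-only sketch of that filtering step (the helper name and variables are illustrative, not the vendored code):

package main

import (
	"fmt"
	"net/url"
)

// firstSupportedURL returns the first entry whose scheme is http or https,
// mirroring the scheme check added to getExternalBlob; ok is false when no
// entry is usable and the caller should fall back to the registry.
func firstSupportedURL(urls []string) (string, bool) {
	for _, raw := range urls {
		u, err := url.Parse(raw)
		if err != nil || (u.Scheme != "http" && u.Scheme != "https") {
			continue // unsupported or unparsable URL, skip it
		}
		return raw, true
	}
	return "", false
}

func main() {
	fmt.Println(firstSupportedURL([]string{"ftp://example.com/blob", "https://example.com/blob"}))
}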
23 vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go generated vendored
@@ -5,13 +5,10 @@ import (
"context"
"encoding/json"
"io"
"io/ioutil"
"os"

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/iolimits"
"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/internal/streamdigest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
@@ -98,25 +95,11 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(d.sysCtx), "docker-tarfile-blob")
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
if err != nil {
return types.BlobInfo{}, err
}
defer os.Remove(streamCopy.Name())
defer streamCopy.Close()

digester, stream2 := putblobdigest.DigestIfUnknown(stream, inputInfo)
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(streamCopy, stream2)
if err != nil {
return types.BlobInfo{}, err
}
_, err = streamCopy.Seek(0, io.SeekStart)
if err != nil {
return types.BlobInfo{}, err
}
inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
inputInfo.Digest = digester.Digest()
defer cleanup()
stream = streamCopy
logrus.Debugf("... streaming done")
}
41 vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go generated vendored Normal file
@@ -0,0 +1,41 @@
package streamdigest

import (
"fmt"
"io"
"io/ioutil"
"os"

"github.com/containers/image/v5/internal/putblobdigest"
"github.com/containers/image/v5/internal/tmpdir"
"github.com/containers/image/v5/types"
)

// ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo.
// The temporary file is returned as an io.Reader along with a cleanup function.
// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file.
// If an error occurs, inputInfo is not modified.
func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) {
diskBlob, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob")
if err != nil {
return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err)
}
cleanup := func() {
diskBlob.Close()
os.Remove(diskBlob.Name())
}
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo)
written, err := io.Copy(diskBlob, stream)
if err != nil {
cleanup()
return nil, nil, fmt.Errorf("writing to temporary on-disk layer: %w", err)
}
_, err = diskBlob.Seek(0, io.SeekStart)
if err != nil {
cleanup()
return nil, nil, fmt.Errorf("rewinding temporary on-disk layer: %w", err)
}
inputInfo.Digest = digester.Digest()
inputInfo.Size = written
return diskBlob, cleanup, nil
}
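ComputeBlobInfo lives in an internal package, so it cannot be imported from outside containers/image. The sketch below reproduces the same spool-and-digest pattern with only the standard library (the helper name, temp-file prefix, and SHA-256 choice are illustrative): the stream is copied to a temporary file while a digest is computed, then the rewound file is handed back with its size, digest, and a cleanup function.

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"strings"
)

// spoolAndDigest writes stream to a temporary file while hashing it, then
// rewinds the file so the caller can re-read the data knowing its size and
// SHA-256 digest. The returned cleanup closes and removes the temp file.
func spoolAndDigest(stream io.Reader) (io.Reader, int64, string, func(), error) {
	tmp, err := os.CreateTemp("", "stream-blob")
	if err != nil {
		return nil, 0, "", nil, err
	}
	cleanup := func() {
		tmp.Close()
		os.Remove(tmp.Name())
	}
	h := sha256.New()
	size, err := io.Copy(io.MultiWriter(tmp, h), stream)
	if err != nil {
		cleanup()
		return nil, 0, "", nil, err
	}
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		cleanup()
		return nil, 0, "", nil, err
	}
	return tmp, size, fmt.Sprintf("sha256:%x", h.Sum(nil)), cleanup, nil
}

func main() {
	r, size, dgst, cleanup, err := spoolAndDigest(strings.NewReader("hello layer"))
	if err != nil {
		panic(err)
	}
	defer cleanup()
	data, _ := io.ReadAll(r)
	fmt.Println(size, dgst, len(data))
}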
67 vendor/github.com/containers/image/v5/manifest/common.go generated vendored
@@ -1,6 +1,7 @@
package manifest

import (
"encoding/json"
"fmt"

compressiontypes "github.com/containers/image/v5/pkg/compression/types"
@@ -32,6 +33,72 @@ func dupStringStringMap(m map[string]string) map[string]string {
return result
}

// allowedManifestFields is a bit mask of “essential” manifest fields that validateUnambiguousManifestFormat
// can expect to be present.
type allowedManifestFields int

const (
allowedFieldConfig allowedManifestFields = 1 << iota
allowedFieldFSLayers
allowedFieldHistory
allowedFieldLayers
allowedFieldManifests
allowedFieldFirstUnusedBit // Keep this at the end!
)

// validateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than
// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields
// other than the ones the caller specifically allows.
// expectedMIMEType is used only for diagnostics.
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambigous
// data that just isn’t what was expected, as opposed to actually ambiguous data.
func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
allowed allowedManifestFields) error {
if allowed >= allowedFieldFirstUnusedBit {
return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed)
}
// Use a private type to decode, not just a map[string]interface{}, because we want
// to also reject case-insensitive matches (which would be used by Go when really decoding
// the manifest).
// (It is expected that as manifest formats are added or extended over time, more fields will be added
// here.)
detectedFields := struct {
Config interface{} `json:"config"`
FSLayers interface{} `json:"fsLayers"`
History interface{} `json:"history"`
Layers interface{} `json:"layers"`
Manifests interface{} `json:"manifests"`
}{}
if err := json.Unmarshal(manifest, &detectedFields); err != nil {
// The caller was supposed to already validate version numbers, so this shold not happen;
// let’s not bother with making this error “nice”.
return err
}
unexpected := []string{}
// Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste.
if detectedFields.Config != nil && (allowed&allowedFieldConfig) == 0 {
unexpected = append(unexpected, "config")
}
if detectedFields.FSLayers != nil && (allowed&allowedFieldFSLayers) == 0 {
unexpected = append(unexpected, "fsLayers")
}
if detectedFields.History != nil && (allowed&allowedFieldHistory) == 0 {
unexpected = append(unexpected, "history")
}
if detectedFields.Layers != nil && (allowed&allowedFieldLayers) == 0 {
unexpected = append(unexpected, "layers")
}
if detectedFields.Manifests != nil && (allowed&allowedFieldManifests) == 0 {
unexpected = append(unexpected, "manifests")
}
if len(unexpected) != 0 {
return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`,
unexpected, expectedMIMEType)
}
return nil
}

// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
func layerInfosToStrings(infos []LayerInfo) []string {
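The new check is wired into the exported constructors in the files below (Schema1FromManifest, Schema2FromManifest, OCI1FromManifest, and the list/index variants), so a payload that mixes fields from different formats is rejected up front. A rough sketch of what that looks like from the caller's side, assuming this module is available; the JSON is a deliberately ambiguous, made-up example:

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
)

func main() {
	// A schema2-looking manifest that also carries a schema1 "fsLayers" field;
	// the new validation should reject it instead of silently ignoring the
	// field it does not know about.
	ambiguous := []byte(`{
		"schemaVersion": 2,
		"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
		"config": {},
		"layers": [],
		"fsLayers": []
	}`)
	if _, err := manifest.Schema2FromManifest(ambiguous); err != nil {
		fmt.Println("rejected:", err)
	}
}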
4 vendor/github.com/containers/image/v5/manifest/docker_schema1.go generated vendored
@@ -60,6 +60,10 @@ func Schema1FromManifest(manifest []byte) (*Schema1, error) {
if s1.SchemaVersion != 1 {
return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion)
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema1SignedMediaType,
allowedFieldFSLayers|allowedFieldHistory); err != nil {
return nil, err
}
if err := s1.initialize(); err != nil {
return nil, err
}
4 vendor/github.com/containers/image/v5/manifest/docker_schema2.go generated vendored
@@ -165,6 +165,10 @@ func Schema2FromManifest(manifest []byte) (*Schema2, error) {
if err := json.Unmarshal(manifest, &s2); err != nil {
return nil, err
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2Schema2MediaType,
allowedFieldConfig|allowedFieldLayers); err != nil {
return nil, err
}
// Check manifest's and layers' media types.
if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
return nil, err
4 vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go generated vendored
@@ -192,6 +192,10 @@ func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
if err := json.Unmarshal(manifest, &list); err != nil {
return nil, errors.Wrapf(err, "unmarshaling Schema2List %q", string(manifest))
}
if err := validateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
allowedFieldManifests); err != nil {
return nil, err
}
return &list, nil
}
4 vendor/github.com/containers/image/v5/manifest/oci.go generated vendored
@@ -54,6 +54,10 @@ func OCI1FromManifest(manifest []byte) (*OCI1, error) {
if err := json.Unmarshal(manifest, &oci1); err != nil {
return nil, err
}
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
allowedFieldConfig|allowedFieldLayers); err != nil {
return nil, err
}
return &oci1, nil
}
4 vendor/github.com/containers/image/v5/manifest/oci_index.go generated vendored
@@ -202,6 +202,10 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) {
if err := json.Unmarshal(manifest, &index); err != nil {
return nil, errors.Wrapf(err, "unmarshaling OCI1Index %q", string(manifest))
}
if err := validateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex,
allowedFieldManifests); err != nil {
return nil, err
}
return &index, nil
}
30 vendor/github.com/containers/image/v5/oci/layout/oci_src.go generated vendored
@@ -5,6 +5,7 @@ import (
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strconv"

@@ -113,7 +114,12 @@ func (s *ociImageSource) HasThreadSafeGetBlob() bool {
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
r, s, err := s.getExternalBlob(ctx, info.URLs)
if err != nil {
return nil, 0, err
} else if r != nil {
return r, s, nil
}
}

path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir)
@@ -140,34 +146,44 @@ func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *dige
return [][]byte{}, nil
}

// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty.
// This function can return nil reader when no url is supported by this function. In this case, the caller
// should fallback to fetch the non-external blob (i.e. pull from the registry).
func (s *ociImageSource) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
if len(urls) == 0 {
return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
}

errWrap := errors.New("failed fetching external blob from all urls")
for _, url := range urls {

req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
hasSupportedURL := false
for _, u := range urls {
if u, err := url.Parse(u); err != nil || (u.Scheme != "http" && u.Scheme != "https") {
continue // unsupported url. skip this url.
}
hasSupportedURL = true
req, err := http.NewRequestWithContext(ctx, http.MethodGet, u, nil)
if err != nil {
errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", u, err.Error())
continue
}

resp, err := s.client.Do(req)
if err != nil {
errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error())
errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", u, err.Error())
continue
}

if resp.StatusCode != http.StatusOK {
resp.Body.Close()
errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url)
errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", u)
continue
}

return resp.Body, getBlobSize(resp), nil
}
if !hasSupportedURL {
return nil, 0, nil // fallback to non-external blob
}

return nil, 0, errWrap
}
23 vendor/github.com/containers/image/v5/pkg/docker/config/config.go generated vendored
@@ -268,18 +268,18 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, re
}

// Anonymous function to query credentials from auth files.
getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, error) {
getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
for _, path := range getAuthFilePaths(sys, homeDir) {
authConfig, err := findAuthentication(ref, registry, path.path, path.legacyFormat)
if err != nil {
return types.DockerAuthConfig{}, err
return types.DockerAuthConfig{}, "", err
}

if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" {
return authConfig, nil
return authConfig, path.path, nil
}
}
return types.DockerAuthConfig{}, nil
return types.DockerAuthConfig{}, "", nil
}

helpers, err := sysregistriesv2.CredentialHelpers(sys)
@@ -289,12 +289,15 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, re

var multiErr error
for _, helper := range helpers {
var creds types.DockerAuthConfig
var err error
var (
creds types.DockerAuthConfig
credHelperPath string
err error
)
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
creds, err = getCredentialsFromAuthFiles()
creds, credHelperPath, err = getCredentialsFromAuthFiles()
// External helpers.
default:
creds, err = getAuthFromCredHelper(helper, registry)
@@ -307,7 +310,11 @@ func getCredentialsWithHomeDir(sys *types.SystemContext, ref reference.Named, re
if len(creds.Username)+len(creds.Password)+len(creds.IdentityToken) == 0 {
continue
}
logrus.Debugf("Found credentials for %s in credential helper %s", registry, helper)
msg := fmt.Sprintf("Found credentials for %s in credential helper %s", registry, helper)
if credHelperPath != "" {
msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
}
logrus.Debug(msg)
return creds, nil
}
if multiErr != nil {
8 vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go generated vendored
@@ -80,7 +80,7 @@ func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (referen
// be dropped.
// https://github.com/containers/image/pull/1191#discussion_r610621608
if e.Location == "" {
if prefix[:2] != "*." {
if !strings.HasPrefix(prefix, "*.") {
return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix)
}
return ref, nil
@@ -369,7 +369,7 @@ func (config *V2RegistriesConf) postProcessRegistries() error {
}
// FIXME: allow config authors to always use Prefix.
// https://github.com/containers/image/pull/1191#discussion_r610622495
if reg.Prefix[:2] != "*." && reg.Location == "" {
if !strings.HasPrefix(reg.Prefix, "*.") && reg.Location == "" {
return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"}
}
}
@@ -804,7 +804,7 @@ func refMatchingSubdomainPrefix(ref, prefix string) int {
// (This is split from the caller primarily to make testing easier.)
func refMatchingPrefix(ref, prefix string) int {
switch {
case prefix[0:2] == "*.":
case strings.HasPrefix(prefix, "*."):
return refMatchingSubdomainPrefix(ref, prefix)
case len(ref) < len(prefix):
return -1
@@ -924,7 +924,7 @@ func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) {
// https://github.com/containers/image/pull/1191#discussion_r610623829
for i := range res.partialV2.Registries {
prefix := res.partialV2.Registries[i].Prefix
if prefix[:2] == "*." && strings.ContainsAny(prefix, "/@:") {
if strings.HasPrefix(prefix, "*.") && strings.ContainsAny(prefix, "/@:") {
msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix)
return nil, &InvalidRegistries{s: msg}
}
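All four hunks above replace the slice comparison prefix[:2] == "*." with strings.HasPrefix(prefix, "*."); slicing panics when the string is shorter than two bytes, while HasPrefix simply returns false. A minimal stand-alone illustration (the sample prefixes are arbitrary):

package main

import (
	"fmt"
	"strings"
)

func main() {
	prefixes := []string{"", "*", "*.example.com", "example.com"}
	for _, p := range prefixes {
		// strings.HasPrefix is safe for any length; p[:2] would panic on "" and "*".
		fmt.Printf("%-16q wildcard=%v\n", p, strings.HasPrefix(p, "*."))
	}
}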
2 vendor/github.com/containers/image/v5/storage/storage_transport.go generated vendored
@@ -225,7 +225,7 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
// needs to match a store that was previously initialized using
// storage.GetStore(), or be enough to let the storage library fill out
// the rest using knowledge that it has from elsewhere.
if reference[0] == '[' {
if len(reference) > 0 && reference[0] == '[' {
closeIndex := strings.IndexRune(reference, ']')
if closeIndex < 1 {
return nil, ErrInvalidReference
4 vendor/github.com/containers/image/v5/types/types.go generated vendored
@@ -622,6 +622,10 @@ type SystemContext struct {
DockerLogMirrorChoice bool
// Directory to use for OSTree temporary files
OSTreeTmpDirPath string
// If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry.
// Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
// when the digest for a blob is unknown.
DockerRegistryPushPrecomputeDigests bool

// === docker/daemon.Transport overrides ===
// A directory containing a CA certificate (ending with ".crt"),
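A sketch of how a caller might opt into the new DockerRegistryPushPrecomputeDigests field when copying to a registry; the image references are placeholders, and the insecure-accept-anything policy is only there to keep the example self-contained.

package main

import (
	"context"

	"github.com/containers/image/v5/copy"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/transports/alltransports"
	"github.com/containers/image/v5/types"
)

func main() {
	srcRef, err := alltransports.ParseImageName("docker://quay.io/example/src:latest")
	if err != nil {
		panic(err)
	}
	destRef, err := alltransports.ParseImageName("docker://registry.example.com/example/dest:latest")
	if err != nil {
		panic(err)
	}
	policyCtx, err := signature.NewPolicyContext(&signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
	})
	if err != nil {
		panic(err)
	}
	defer policyCtx.Destroy()

	// Precompute layer digests before pushing so blobs that already exist on the
	// destination registry are not re-uploaded (at the cost of spooling to disk).
	destCtx := &types.SystemContext{DockerRegistryPushPrecomputeDigests: true}

	if _, err := copy.Image(context.Background(), policyCtx, destRef, srcRef, &copy.Options{
		DestinationCtx: destCtx,
	}); err != nil {
		panic(err)
	}
}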
4 vendor/github.com/containers/image/v5/version/version.go generated vendored
@@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 16
VersionMinor = 17
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1
VersionPatch = 0

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
4 vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go generated vendored
@@ -103,9 +103,11 @@ func SetFileCreateLabel(fileLabel string) error {
return selinux.SetFSCreateLabel(fileLabel)
}

// Relabel changes the label of path to the filelabel string.
// Relabel changes the label of path and all the entries beneath the path.
// It changes the MCS label to s0 if shared is true.
// This will allow all containers to share the content.
//
// The path itself is guaranteed to be relabeled last.
func Relabel(path string, fileLabel string, shared bool) error {
if !selinux.GetEnabled() || fileLabel == "" {
return nil
2 vendor/github.com/opencontainers/selinux/go-selinux/selinux.go generated vendored
@@ -255,6 +255,8 @@ func CopyLevel(src, dest string) (string, error) {
// Chcon changes the fpath file object to the SELinux label label.
// If fpath is a directory and recurse is true, then Chcon walks the
// directory tree setting the label.
//
// The fpath itself is guaranteed to be relabeled last.
func Chcon(fpath string, label string, recurse bool) error {
return chcon(fpath, label, recurse)
}
68 vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go generated vendored
@@ -9,6 +9,7 @@ import (
"fmt"
"io"
"io/ioutil"
"math/big"
"os"
"path"
"path/filepath"
@@ -16,7 +17,6 @@ import (
"strings"
"sync"

"github.com/bits-and-blooms/bitset"
"golang.org/x/sys/unix"
)

@@ -44,7 +44,7 @@ type selinuxState struct {

type level struct {
sens uint
cats *bitset.BitSet
cats *big.Int
}

type mlsRange struct {
@@ -455,8 +455,8 @@ func computeCreateContext(source string, target string, class string) (string, e
}

// catsToBitset stores categories in a bitset.
func catsToBitset(cats string) (*bitset.BitSet, error) {
bitset := &bitset.BitSet{}
func catsToBitset(cats string) (*big.Int, error) {
bitset := new(big.Int)

catlist := strings.Split(cats, ",")
for _, r := range catlist {
@@ -471,14 +471,14 @@ func catsToBitset(cats string) (*bitset.BitSet, error) {
return nil, err
}
for i := catstart; i <= catend; i++ {
bitset.Set(i)
bitset.SetBit(bitset, int(i), 1)
}
} else {
cat, err := parseLevelItem(ranges[0], category)
if err != nil {
return nil, err
}
bitset.Set(cat)
bitset.SetBit(bitset, int(cat), 1)
}
}

@@ -548,37 +548,30 @@ func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) {

// bitsetToStr takes a category bitset and returns it in the
// canonical selinux syntax
func bitsetToStr(c *bitset.BitSet) string {
func bitsetToStr(c *big.Int) string {
var str string
i, e := c.NextSet(0)
len := 0
for e {
if len == 0 {

length := 0
for i := int(c.TrailingZeroBits()); i < c.BitLen(); i++ {
if c.Bit(i) == 0 {
continue
}
if length == 0 {
if str != "" {
str += ","
}
str += "c" + strconv.Itoa(int(i))
str += "c" + strconv.Itoa(i)
}

next, e := c.NextSet(i + 1)
if e {
// consecutive cats
if next == i+1 {
len++
i = next
continue
}
if c.Bit(i+1) == 1 {
length++
continue
}
if len == 1 {
str += ",c" + strconv.Itoa(int(i))
} else if len > 1 {
str += ".c" + strconv.Itoa(int(i))
if length == 1 {
str += ",c" + strconv.Itoa(i)
} else if length > 1 {
str += ".c" + strconv.Itoa(i)
}
if !e {
break
}
len = 0
i = next
length = 0
}

return str
@@ -591,13 +584,16 @@ func (l1 *level) equal(l2 *level) bool {
if l1.sens != l2.sens {
return false
}
return l1.cats.Equal(l2.cats)
if l2.cats == nil || l1.cats == nil {
return l2.cats == l1.cats
}
return l1.cats.Cmp(l2.cats) == 0
}

// String returns an mlsRange as a string.
func (m mlsRange) String() string {
low := "s" + strconv.Itoa(int(m.low.sens))
if m.low.cats != nil && m.low.cats.Count() > 0 {
if m.low.cats != nil && m.low.cats.BitLen() > 0 {
low += ":" + bitsetToStr(m.low.cats)
}

@@ -606,7 +602,7 @@ func (m mlsRange) String() string {
}

high := "s" + strconv.Itoa(int(m.high.sens))
if m.high.cats != nil && m.high.cats.Count() > 0 {
if m.high.cats != nil && m.high.cats.BitLen() > 0 {
high += ":" + bitsetToStr(m.high.cats)
}

@@ -656,10 +652,12 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) {

/* find the intersecting categories */
if s.low.cats != nil && t.low.cats != nil {
outrange.low.cats = s.low.cats.Intersection(t.low.cats)
outrange.low.cats = new(big.Int)
outrange.low.cats.And(s.low.cats, t.low.cats)
}
if s.high.cats != nil && t.high.cats != nil {
outrange.high.cats = s.high.cats.Intersection(t.high.cats)
outrange.high.cats = new(big.Int)
outrange.high.cats.And(s.high.cats, t.high.cats)
}

return outrange.String(), nil
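The selinux 1.9.1 bump replaces the external bitset package with math/big: a big.Int serves as an arbitrarily large bit set via SetBit/Bit, intersections become And, and BitLen/TrailingZeroBits bound the iteration. A small stand-alone sketch of that usage (the category numbers are arbitrary):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Build two category sets, e.g. {0,1,2,5} and {1,2,7}.
	a, b := new(big.Int), new(big.Int)
	for _, c := range []int{0, 1, 2, 5} {
		a.SetBit(a, c, 1)
	}
	for _, c := range []int{1, 2, 7} {
		b.SetBit(b, c, 1)
	}

	// Intersection, as used when computing the common range of two MLS levels.
	both := new(big.Int).And(a, b)

	// Enumerate set bits the same way the reworked bitsetToStr does.
	for i := int(both.TrailingZeroBits()); i < both.BitLen(); i++ {
		if both.Bit(i) == 1 {
			fmt.Printf("c%d ", i)
		}
	}
	fmt.Println()
}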
12 vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go generated vendored
@@ -51,6 +51,9 @@ func WalkN(root string, walkFn WalkFunc, num int) error {
var (
err error
wg sync.WaitGroup

rootLen = len(root)
rootEntry *walkArgs
)
wg.Add(1)
go func() {
@@ -59,6 +62,11 @@ func WalkN(root string, walkFn WalkFunc, num int) error {
close(files)
return err
}
if len(p) == rootLen {
// Root entry is processed separately below.
rootEntry = &walkArgs{path: p, info: &info}
return nil
}
// add a file to the queue unless a callback sent an error
select {
case e := <-errCh:
@@ -92,6 +100,10 @@ func WalkN(root string, walkFn WalkFunc, num int) error {

wg.Wait()

if err == nil {
err = walkFn(rootEntry.path, *rootEntry.info, nil)
}

return err
}
13 vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go generated vendored
@@ -1,3 +1,4 @@
//go:build go1.16
// +build go1.16

package pwalkdir
@@ -51,6 +52,9 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
var (
err error
wg sync.WaitGroup

rootLen = len(root)
rootEntry *walkArgs
)
wg.Add(1)
go func() {
@@ -59,6 +63,11 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {
close(files)
return err
}
if len(p) == rootLen {
// Root entry is processed separately below.
rootEntry = &walkArgs{path: p, entry: entry}
return nil
}
// Add a file to the queue unless a callback sent an error.
select {
case e := <-errCh:
@@ -92,6 +101,10 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error {

wg.Wait()

if err == nil {
err = walkFn(rootEntry.path, rootEntry.entry, nil)
}

return err
}
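Both walkers now hold the root entry back and run walkFn on it only after every worker has finished, so a callback that relabels the root sees the rest of the tree already processed. A usage sketch, assuming the pwalkdir import path from this vendor tree; the directory and worker count are arbitrary:

package main

import (
	"fmt"
	"io/fs"

	"github.com/opencontainers/selinux/pkg/pwalkdir"
)

func main() {
	// Walk /tmp with 4 concurrent workers; the root entry is guaranteed to be
	// handed to the callback last.
	err := pwalkdir.WalkN("/tmp", func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path)
		return nil
	}, 4)
	if err != nil {
		panic(err)
	}
}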
12 vendor/modules.txt vendored
@@ -34,8 +34,6 @@ github.com/VividCortex/ewma
github.com/acarl005/stripansi
# github.com/beorn7/perks v1.0.1
github.com/beorn7/perks/quantile
# github.com/bits-and-blooms/bitset v1.2.1
github.com/bits-and-blooms/bitset
# github.com/cespare/xxhash/v2 v2.1.1
github.com/cespare/xxhash/v2
# github.com/containerd/cgroups v1.0.1
@@ -47,14 +45,15 @@ github.com/containerd/containerd/platforms
# github.com/containerd/stargz-snapshotter/estargz v0.9.0
github.com/containerd/stargz-snapshotter/estargz
github.com/containerd/stargz-snapshotter/estargz/errorutil
# github.com/containers/common v0.46.0
# github.com/containers/common v0.46.1-0.20211026130826-7abfd453c86f
github.com/containers/common/pkg/auth
github.com/containers/common/pkg/capabilities
github.com/containers/common/pkg/completion
github.com/containers/common/pkg/flag
github.com/containers/common/pkg/report
github.com/containers/common/pkg/report/camelcase
github.com/containers/common/pkg/retry
# github.com/containers/image/v5 v5.16.1
# github.com/containers/image/v5 v5.17.0
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
github.com/containers/image/v5/directory/explicitfilepath
@@ -71,6 +70,7 @@ github.com/containers/image/v5/internal/pkg/keyctl
github.com/containers/image/v5/internal/pkg/platform
github.com/containers/image/v5/internal/putblobdigest
github.com/containers/image/v5/internal/rootless
github.com/containers/image/v5/internal/streamdigest
github.com/containers/image/v5/internal/tmpdir
github.com/containers/image/v5/internal/types
github.com/containers/image/v5/internal/uploadreader
@@ -177,7 +177,7 @@ github.com/docker/distribution/registry/client/auth/challenge
github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
# github.com/docker/docker v20.10.9+incompatible
# github.com/docker/docker v20.10.11+incompatible
github.com/docker/docker/api
github.com/docker/docker/api/types
github.com/docker/docker/api/types/blkiodev
@@ -283,7 +283,7 @@ github.com/opencontainers/runc/libcontainer/user
github.com/opencontainers/runc/libcontainer/userns
# github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
github.com/opencontainers/runtime-spec/specs-go
# github.com/opencontainers/selinux v1.8.5
# github.com/opencontainers/selinux v1.9.1
github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalk
@@ -1,4 +1,4 @@
package version

// Version is the version of the build.
const Version = "1.5.0"
const Version = "1.5.2"