Mirror of https://github.com/containers/skopeo.git (synced 2025-09-22 10:27:08 +00:00)

prompt-less signing via passphrase file

To support signing images without prompting the user, add CLI flags for providing a passphrase file.

Signed-off-by: Valentin Rothberg <rothberg@redhat.com>
5  vendor/github.com/containers/image/v5/copy/copy.go (generated, vendored)

@@ -124,6 +124,7 @@ type ImageListSelection int
type Options struct {
    RemoveSignatures bool   // Remove any pre-existing signatures. SignBy will still add a new signature.
    SignBy           string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
    SignPassphrase   string // Passphrase to use when signing with the key ID from `SignBy`.
    ReportWriter     io.Writer
    SourceCtx        *types.SystemContext
    DestinationCtx   *types.SystemContext
@@ -569,7 +570,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur

    // Sign the manifest list.
    if options.SignBy != "" {
        newSig, err := c.createSignature(manifestList, options.SignBy)
        newSig, err := c.createSignature(manifestList, options.SignBy, options.SignPassphrase)
        if err != nil {
            return nil, err
        }
@@ -791,7 +792,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
    }

    if options.SignBy != "" {
        newSig, err := c.createSignature(manifestBytes, options.SignBy)
        newSig, err := c.createSignature(manifestBytes, options.SignBy, options.SignPassphrase)
        if err != nil {
            return nil, "", "", err
        }
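For orientation (not part of this commit), a minimal sketch of how a caller such as skopeo's CLI layer could wire the new SignPassphrase field together with the ReadPassphraseFile helper added below; copyAndSign, its package, and its parameters are hypothetical:

package example

import (
    "context"

    "github.com/containers/image/v5/copy"
    "github.com/containers/image/v5/pkg/cli"
    "github.com/containers/image/v5/signature"
    "github.com/containers/image/v5/types"
)

// copyAndSign copies srcRef to destRef and signs the result with keyID,
// taking the passphrase from passphraseFile instead of prompting.
func copyAndSign(ctx context.Context, policyContext *signature.PolicyContext,
    destRef, srcRef types.ImageReference, keyID, passphraseFile string) ([]byte, error) {
    // ReadPassphraseFile returns "" when passphraseFile is empty, so the
    // prompting behavior is unchanged unless a file is explicitly supplied.
    passphrase, err := cli.ReadPassphraseFile(passphraseFile)
    if err != nil {
        return nil, err
    }
    return copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
        SignBy:         keyID,
        SignPassphrase: passphrase,
    })
}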
4  vendor/github.com/containers/image/v5/copy/sign.go (generated, vendored)

@@ -7,7 +7,7 @@ import (
)

// createSignature creates a new signature of manifest using keyIdentity.
func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) {
func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string) ([]byte, error) {
    mech, err := signature.NewGPGSigningMechanism()
    if err != nil {
        return nil, errors.Wrap(err, "initializing GPG")
@@ -23,7 +23,7 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, e
    }

    c.Printf("Signing manifest\n")
    newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
    newSig, err := signature.SignDockerManifestWithOptions(manifest, dockerReference.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase})
    if err != nil {
        return nil, errors.Wrap(err, "creating signature")
    }
4  vendor/github.com/containers/image/v5/manifest/common.go (generated, vendored)

@@ -51,7 +51,7 @@ const (
// other than the ones the caller specifically allows.
// expectedMIMEType is used only for diagnostics.
// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambigous
// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous
// data that just isn’t what was expected, as opposed to actually ambiguous data.
func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
    allowed allowedManifestFields) error {
@@ -71,7 +71,7 @@ func validateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string,
        Manifests interface{} `json:"manifests"`
    }{}
    if err := json.Unmarshal(manifest, &detectedFields); err != nil {
        // The caller was supposed to already validate version numbers, so this shold not happen;
        // The caller was supposed to already validate version numbers, so this should not happen;
        // let’s not bother with making this error “nice”.
        return err
    }
36  vendor/github.com/containers/image/v5/pkg/cli/passphrase.go (new file, generated, vendored)

@@ -0,0 +1,36 @@
package cli

import (
    "bufio"
    "errors"
    "fmt"
    "io"
    "os"
    "strings"

    "github.com/sirupsen/logrus"
)

// ReadPassphraseFile returns the first line of the specified path.
// For convenience, an empty string is returned if the path is empty.
func ReadPassphraseFile(path string) (string, error) {
    if path == "" {
        return "", nil
    }

    logrus.Debugf("Reading user-specified passphrase for signing from %s", path)

    ppf, err := os.Open(path)
    if err != nil {
        return "", err
    }
    defer ppf.Close()

    // Read the *first* line in the passphrase file, just as gpg(1) does.
    buf, err := bufio.NewReader(ppf).ReadBytes('\n')
    if err != nil && !errors.Is(err, io.EOF) {
        return "", fmt.Errorf("reading passphrase file: %w", err)
    }

    return strings.TrimSuffix(string(buf), "\n"), nil
}
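A self-contained illustration (not from this commit) of the first-line semantics: like gpg(1) with --passphrase-file, everything after the first line of the file is ignored. The file content here is invented for the example:

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "os"

    "github.com/containers/image/v5/pkg/cli"
)

func main() {
    // Write a two-line passphrase file; only the first line counts.
    f, err := ioutil.TempFile("", "passphrase")
    if err != nil {
        log.Fatal(err)
    }
    defer os.Remove(f.Name())
    defer f.Close()
    if _, err := f.WriteString("s3cret\nignored second line\n"); err != nil {
        log.Fatal(err)
    }

    passphrase, err := cli.ReadPassphraseFile(f.Name())
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(passphrase) // prints "s3cret"
}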
4  vendor/github.com/containers/image/v5/pkg/docker/config/config.go (generated, vendored)

@@ -667,6 +667,7 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
    // This intentionally uses "registry", not "key"; we don't support namespaced
    // credentials in helpers.
    if ch, exists := auths.CredHelpers[registry]; exists {
        logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path)
        return getAuthFromCredHelper(ch, registry)
    }

@@ -703,6 +704,9 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
        }
    }

    // Only log this if we found nothing; getCredentialsWithHomeDir logs the
    // source of found data.
    logrus.Debugf("No credentials matching %s found in %s", key, path)
    return types.DockerAuthConfig{}, nil
}
211  vendor/github.com/containers/image/v5/sif/load.go (new file, generated, vendored)
@@ -0,0 +1,211 @@
|
||||
package sif
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sylabs/sif/v2/pkg/sif"
|
||||
)
|
||||
|
||||
// injectedScriptTargetPath is the path injectedScript should be written to in the created image.
|
||||
const injectedScriptTargetPath = "/podman/runscript"
|
||||
|
||||
// parseDefFile parses a SIF definition file from reader,
|
||||
// and returns non-trivial contents of the %environment and %runscript sections.
|
||||
func parseDefFile(reader io.Reader) ([]string, []string, error) {
|
||||
type parserState int
|
||||
const (
|
||||
parsingOther parserState = iota
|
||||
parsingEnvironment
|
||||
parsingRunscript
|
||||
)
|
||||
|
||||
environment := []string{}
|
||||
runscript := []string{}
|
||||
|
||||
state := parsingOther
|
||||
scanner := bufio.NewScanner(reader)
|
||||
for scanner.Scan() {
|
||||
s := strings.TrimSpace(scanner.Text())
|
||||
switch {
|
||||
case s == `%environment`:
|
||||
state = parsingEnvironment
|
||||
case s == `%runscript`:
|
||||
state = parsingRunscript
|
||||
case strings.HasPrefix(s, "%"):
|
||||
state = parsingOther
|
||||
case state == parsingEnvironment:
|
||||
if s != "" && !strings.HasPrefix(s, "#") {
|
||||
environment = append(environment, s)
|
||||
}
|
||||
case state == parsingRunscript:
|
||||
runscript = append(runscript, s)
|
||||
default: // parsingOther: ignore the line
|
||||
}
|
||||
}
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, nil, fmt.Errorf("reading lines from SIF definition file object: %w", err)
|
||||
}
|
||||
return environment, runscript, nil
|
||||
}
|
||||
|
||||
// generateInjectedScript generates a shell script based on
|
||||
// SIF definition file %environment and %runscript data, and returns it.
|
||||
func generateInjectedScript(environment []string, runscript []string) []byte {
|
||||
script := fmt.Sprintf("#!/bin/bash\n"+
|
||||
"%s\n"+
|
||||
"%s\n", strings.Join(environment, "\n"), strings.Join(runscript, "\n"))
|
||||
return []byte(script)
|
||||
}
|
||||
|
||||
// processDefFile finds sif.DataDeffile in sifImage, if any,
|
||||
// and returns:
|
||||
// - the command to run
|
||||
// - contents of a script to inject as injectedScriptTargetPath, or nil
|
||||
func processDefFile(sifImage *sif.FileImage) (string, []byte, error) {
|
||||
var environment, runscript []string
|
||||
|
||||
desc, err := sifImage.GetDescriptor(sif.WithDataType(sif.DataDeffile))
|
||||
if err == nil {
|
||||
environment, runscript, err = parseDefFile(desc.GetReader())
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var command string
|
||||
var injectedScript []byte
|
||||
if len(environment) == 0 && len(runscript) == 0 {
|
||||
command = "bash"
|
||||
injectedScript = nil
|
||||
} else {
|
||||
injectedScript = generateInjectedScript(environment, runscript)
|
||||
command = injectedScriptTargetPath
|
||||
}
|
||||
|
||||
return command, injectedScript, nil
|
||||
}
|
||||
|
||||
func writeInjectedScript(extractedRootPath string, injectedScript []byte) error {
|
||||
if injectedScript == nil {
|
||||
return nil
|
||||
}
|
||||
filePath := filepath.Join(extractedRootPath, injectedScriptTargetPath)
|
||||
parentDirPath := filepath.Dir(filePath)
|
||||
if err := os.MkdirAll(parentDirPath, 0755); err != nil {
|
||||
return fmt.Errorf("creating %s: %w", parentDirPath, err)
|
||||
}
|
||||
if err := ioutil.WriteFile(filePath, injectedScript, 0755); err != nil {
|
||||
return fmt.Errorf("writing %s to %s: %w", injectedScriptTargetPath, filePath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createTarFromSIFInputs creates a tar file at tarPath, using a squashfs image at squashFSPath.
|
||||
// It can also use extractedRootPath and scriptPath, which are allocated for its exclusive use,
|
||||
// if necessary.
|
||||
func createTarFromSIFInputs(ctx context.Context, tarPath, squashFSPath string, injectedScript []byte, extractedRootPath, scriptPath string) error {
|
||||
// It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
|
||||
// for our use.
|
||||
defer os.RemoveAll(extractedRootPath)
|
||||
|
||||
// Almost everything in extractedRootPath comes from squashFSPath.
|
||||
conversionCommand := fmt.Sprintf("unsquashfs -d %s -f %s && tar --acls --xattrs -C %s -cpf %s ./",
|
||||
extractedRootPath, squashFSPath, extractedRootPath, tarPath)
|
||||
script := "#!/bin/sh\n" + conversionCommand + "\n"
|
||||
if err := ioutil.WriteFile(scriptPath, []byte(script), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.Remove(scriptPath)
|
||||
|
||||
// On top of squashFSPath, we only add injectedScript, if necessary.
|
||||
if err := writeInjectedScript(extractedRootPath, injectedScript); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logrus.Debugf("Converting squashfs to tar, command: %s ...", conversionCommand)
|
||||
cmd := exec.CommandContext(ctx, "fakeroot", "--", scriptPath)
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("converting image: %w, output: %s", err, string(output))
|
||||
}
|
||||
logrus.Debugf("... finished converting squashfs to tar")
|
||||
return nil
|
||||
}
|
||||
|
||||
// convertSIFToElements processes sifImage and creates/returns
|
||||
// the relevant elements for constructing an OCI-like image:
|
||||
// - A path to a tar file containing a root filesystem,
|
||||
// - A command to run.
|
||||
// The returned tar file path is inside tempDir, which can be assumed to be empty
|
||||
// at start, and is exclusively used by the current process (i.e. it is safe
|
||||
// to use hard-coded relative paths within it).
|
||||
func convertSIFToElements(ctx context.Context, sifImage *sif.FileImage, tempDir string) (string, []string, error) {
|
||||
// We could allocate unique names for all of these using ioutil.Temp*, but tempDir is exclusive,
|
||||
// so we can just hard-code a set of unique values here.
|
||||
// We create and/or manage cleanup of these two paths.
|
||||
squashFSPath := filepath.Join(tempDir, "rootfs.squashfs")
|
||||
tarPath := filepath.Join(tempDir, "rootfs.tar")
|
||||
// We only allocate these paths, the user is responsible for cleaning them up.
|
||||
extractedRootPath := filepath.Join(tempDir, "rootfs")
|
||||
scriptPath := filepath.Join(tempDir, "script")
|
||||
|
||||
succeeded := false
|
||||
// It's safe for the Remove calls to happen even before we create the files, because tempDir is exclusive
|
||||
// for our use.
|
||||
// Ideally we would remove squashFSPath immediately after creating extractedRootPath, but we need
|
||||
// to run both creation and consumption of extractedRootPath in the same fakeroot context.
|
||||
// So, overall, this process requires at least 2 compressed copies (SIF and squashFSPath) and 2
|
||||
// uncompressed copies (extractedRootPath and tarPath) of the data, all using up space at the same time.
|
||||
// That's rather unsatisfactory, ideally we would be streaming the data directly from a squashfs parser
|
||||
// reading from the SIF file to a tarball, for 1 compressed and 1 uncompressed copy.
|
||||
defer os.Remove(squashFSPath)
|
||||
defer func() {
|
||||
if !succeeded {
|
||||
os.Remove(tarPath)
|
||||
}
|
||||
}()
|
||||
|
||||
command, injectedScript, err := processDefFile(sifImage)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
rootFS, err := sifImage.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
|
||||
if err != nil {
|
||||
return "", nil, fmt.Errorf("looking up rootfs from SIF file: %w", err)
|
||||
}
|
||||
// TODO: We'd prefer not to make a full copy of the file here; unsquashfs ≥ 4.4
|
||||
// has an -o option that allows extracting a squashfs from the SIF file directly,
|
||||
// but that version is not currently available in RHEL 8.
|
||||
logrus.Debugf("Creating a temporary squashfs image %s ...", squashFSPath)
|
||||
if err := func() error { // A scope for defer
|
||||
f, err := os.Create(squashFSPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
if _, err := io.CopyN(f, rootFS.GetReader(), rootFS.Size()); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
logrus.Debugf("... finished creating a temporary squashfs image")
|
||||
|
||||
if err := createTarFromSIFInputs(ctx, tarPath, squashFSPath, injectedScript, extractedRootPath, scriptPath); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
succeeded = true
|
||||
return tarPath, []string{command}, nil
|
||||
}
|
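To make the %environment/%runscript handling above concrete, a hypothetical in-package test sketch of the unexported parseDefFile; the definition-file content is invented for illustration. Comments and blank lines in %environment are dropped, %runscript lines are kept, and other sections are ignored:

package sif

import (
    "strings"
    "testing"
)

func TestParseDefFileSketch(t *testing.T) {
    def := `Bootstrap: library
From: alpine:3.15

%environment
    # a comment, ignored
    export PATH=/opt/bin:$PATH

%runscript
    exec echo "hello from SIF"

%labels
    Author someone
`
    env, run, err := parseDefFile(strings.NewReader(def))
    if err != nil {
        t.Fatal(err)
    }
    if len(env) != 1 || env[0] != `export PATH=/opt/bin:$PATH` {
        t.Fatalf("unexpected environment: %v", env)
    }
    // %runscript keeps every line of the section (trimmed), including blanks.
    if len(run) == 0 || run[0] != `exec echo "hello from SIF"` {
        t.Fatalf("unexpected runscript: %v", run)
    }
}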
217  vendor/github.com/containers/image/v5/sif/src.go (new file, generated, vendored)
@@ -0,0 +1,217 @@
|
||||
package sif
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containers/image/v5/internal/tmpdir"
|
||||
"github.com/containers/image/v5/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspecs "github.com/opencontainers/image-spec/specs-go"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/sylabs/sif/v2/pkg/sif"
|
||||
)
|
||||
|
||||
type sifImageSource struct {
|
||||
ref sifReference
|
||||
workDir string
|
||||
layerDigest digest.Digest
|
||||
layerSize int64
|
||||
layerFile string
|
||||
config []byte
|
||||
configDigest digest.Digest
|
||||
manifest []byte
|
||||
}
|
||||
|
||||
// getBlobInfo returns the digest, and size of the provided file.
|
||||
func getBlobInfo(path string) (digest.Digest, int64, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return "", -1, fmt.Errorf("opening %q for reading: %w", path, err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// TODO: Instead of writing the tar file to disk, and reading
|
||||
// it here again, stream the tar file to a pipe and
|
||||
// compute the digest while writing it to disk.
|
||||
logrus.Debugf("Computing a digest of the SIF conversion output...")
|
||||
digester := digest.Canonical.Digester()
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
size, err := io.Copy(digester.Hash(), f)
|
||||
if err != nil {
|
||||
return "", -1, fmt.Errorf("reading %q: %w", path, err)
|
||||
}
|
||||
digest := digester.Digest()
|
||||
logrus.Debugf("... finished computing the digest of the SIF conversion output")
|
||||
|
||||
return digest, size, nil
|
||||
}
|
||||
|
||||
// newImageSource returns an ImageSource for reading from an existing directory.
|
||||
// newImageSource extracts SIF objects and saves them in a temp directory.
|
||||
func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifReference) (types.ImageSource, error) {
|
||||
sifImg, err := sif.LoadContainerFromPath(ref.file, sif.OptLoadWithFlag(os.O_RDONLY))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("loading SIF file: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
_ = sifImg.UnloadContainer()
|
||||
}()
|
||||
|
||||
workDir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("creating temp directory: %w", err)
|
||||
}
|
||||
succeeded := false
|
||||
defer func() {
|
||||
if !succeeded {
|
||||
os.RemoveAll(workDir)
|
||||
}
|
||||
}()
|
||||
|
||||
layerPath, commandLine, err := convertSIFToElements(ctx, sifImg, workDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("converting rootfs from SquashFS to Tarball: %w", err)
|
||||
}
|
||||
|
||||
layerDigest, layerSize, err := getBlobInfo(layerPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("gathering blob information: %w", err)
|
||||
}
|
||||
|
||||
created := sifImg.ModifiedAt()
|
||||
config := imgspecv1.Image{
|
||||
Created: &created,
|
||||
Architecture: sifImg.PrimaryArch(),
|
||||
OS: "linux",
|
||||
Config: imgspecv1.ImageConfig{
|
||||
Cmd: commandLine,
|
||||
},
|
||||
RootFS: imgspecv1.RootFS{
|
||||
Type: "layers",
|
||||
DiffIDs: []digest.Digest{layerDigest},
|
||||
},
|
||||
History: []imgspecv1.History{
|
||||
{
|
||||
Created: &created,
|
||||
CreatedBy: fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", layerDigest.Hex(), os.PathSeparator),
|
||||
Comment: "imported from SIF, uuid: " + sifImg.ID(),
|
||||
},
|
||||
{
|
||||
Created: &created,
|
||||
CreatedBy: "/bin/sh -c #(nop) CMD [\"bash\"]",
|
||||
EmptyLayer: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
configBytes, err := json.Marshal(&config)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("generating configuration blob for %q: %w", ref.resolvedFile, err)
|
||||
}
|
||||
configDigest := digest.Canonical.FromBytes(configBytes)
|
||||
|
||||
manifest := imgspecv1.Manifest{
|
||||
Versioned: imgspecs.Versioned{SchemaVersion: 2},
|
||||
MediaType: imgspecv1.MediaTypeImageManifest,
|
||||
Config: imgspecv1.Descriptor{
|
||||
Digest: configDigest,
|
||||
Size: int64(len(configBytes)),
|
||||
MediaType: imgspecv1.MediaTypeImageConfig,
|
||||
},
|
||||
Layers: []imgspecv1.Descriptor{{
|
||||
Digest: layerDigest,
|
||||
Size: layerSize,
|
||||
MediaType: imgspecv1.MediaTypeImageLayer,
|
||||
}},
|
||||
}
|
||||
manifestBytes, err := json.Marshal(&manifest)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("generating manifest for %q: %w", ref.resolvedFile, err)
|
||||
}
|
||||
|
||||
succeeded = true
|
||||
return &sifImageSource{
|
||||
ref: ref,
|
||||
workDir: workDir,
|
||||
layerDigest: layerDigest,
|
||||
layerSize: layerSize,
|
||||
layerFile: layerPath,
|
||||
config: configBytes,
|
||||
configDigest: configDigest,
|
||||
manifest: manifestBytes,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Reference returns the reference used to set up this source.
|
||||
func (s *sifImageSource) Reference() types.ImageReference {
|
||||
return s.ref
|
||||
}
|
||||
|
||||
// Close removes resources associated with an initialized ImageSource, if any.
|
||||
func (s *sifImageSource) Close() error {
|
||||
return os.RemoveAll(s.workDir)
|
||||
}
|
||||
|
||||
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
||||
func (s *sifImageSource) HasThreadSafeGetBlob() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *sifImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
switch info.Digest {
|
||||
case s.configDigest:
|
||||
return ioutil.NopCloser(bytes.NewBuffer(s.config)), int64(len(s.config)), nil
|
||||
case s.layerDigest:
|
||||
reader, err := os.Open(s.layerFile)
|
||||
if err != nil {
|
||||
return nil, -1, fmt.Errorf("opening %q: %w", s.layerFile, err)
|
||||
}
|
||||
return reader, s.layerSize, nil
|
||||
default:
|
||||
return nil, -1, fmt.Errorf("no blob with digest %q found", info.Digest.String())
|
||||
}
|
||||
}
|
||||
|
||||
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
|
||||
// It may use a remote (= slow) service.
|
||||
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
|
||||
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
|
||||
func (s *sifImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
|
||||
if instanceDigest != nil {
|
||||
return nil, "", errors.New("manifest lists are not supported by the sif transport")
|
||||
}
|
||||
return s.manifest, imgspecv1.MediaTypeImageManifest, nil
|
||||
}
|
||||
|
||||
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
|
||||
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
|
||||
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
|
||||
// (e.g. if the source never returns manifest lists).
|
||||
func (s *sifImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
|
||||
if instanceDigest != nil {
|
||||
return nil, errors.New("manifest lists are not supported by the sif transport")
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
|
||||
// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
|
||||
// to read the image's layers.
|
||||
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
|
||||
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
|
||||
// (e.g. if the source never returns manifest lists).
|
||||
// The Digest field is guaranteed to be provided; Size may be -1.
|
||||
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
||||
func (s *sifImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
|
||||
return nil, nil
|
||||
}
|
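The TODO in getBlobInfo above suggests computing the digest while the tar stream is written, instead of re-reading the file afterwards. A hedged sketch of that approach; writeAndDigest is a hypothetical helper, not part of the vendored code:

package sif

import (
    "io"
    "os"

    "github.com/opencontainers/go-digest"
)

// writeAndDigest writes tarStream to path while computing its canonical
// digest, so the file never has to be read back just for hashing.
func writeAndDigest(tarStream io.Reader, path string) (digest.Digest, int64, error) {
    f, err := os.Create(path)
    if err != nil {
        return "", -1, err
    }
    defer f.Close()

    digester := digest.Canonical.Digester()
    // Every byte written to the file is also fed to the digester.
    n, err := io.Copy(io.MultiWriter(f, digester.Hash()), tarStream)
    if err != nil {
        return "", -1, err
    }
    return digester.Digest(), n, nil
}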
164  vendor/github.com/containers/image/v5/sif/transport.go (new file, generated, vendored)
@@ -0,0 +1,164 @@
|
||||
package sif
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/v5/directory/explicitfilepath"
|
||||
"github.com/containers/image/v5/docker/reference"
|
||||
"github.com/containers/image/v5/image"
|
||||
"github.com/containers/image/v5/transports"
|
||||
"github.com/containers/image/v5/types"
|
||||
)
|
||||
|
||||
func init() {
|
||||
transports.Register(Transport)
|
||||
}
|
||||
|
||||
// Transport is an ImageTransport for SIF images.
|
||||
var Transport = sifTransport{}
|
||||
|
||||
type sifTransport struct{}
|
||||
|
||||
func (t sifTransport) Name() string {
|
||||
return "sif"
|
||||
}
|
||||
|
||||
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
|
||||
func (t sifTransport) ParseReference(reference string) (types.ImageReference, error) {
|
||||
return NewReference(reference)
|
||||
}
|
||||
|
||||
// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
|
||||
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
|
||||
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
|
||||
// scope passed to this function will not be "", that value is always allowed.
|
||||
func (t sifTransport) ValidatePolicyConfigurationScope(scope string) error {
|
||||
if !strings.HasPrefix(scope, "/") {
|
||||
return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
|
||||
}
|
||||
// Refuse also "/", otherwise "/" and "" would have the same semantics,
|
||||
// and "" could be unexpectedly shadowed by the "/" entry.
|
||||
if scope == "/" {
|
||||
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
|
||||
}
|
||||
cleaned := filepath.Clean(scope)
|
||||
if cleaned != scope {
|
||||
return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sifReference is an ImageReference for SIF images.
|
||||
type sifReference struct {
|
||||
// Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
|
||||
// Either of the paths may point to a different, or no, inode over time. resolvedFile may contain symbolic links, and so on.
|
||||
|
||||
// Generally we follow the intent of the user, and use the "file" member for filesystem operations (e.g. the user can use a relative path to avoid
|
||||
// being exposed to symlinks and renames in the parent directories to the working directory).
|
||||
// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
|
||||
file string // As specified by the user. May be relative, contain symlinks, etc.
|
||||
resolvedFile string // Absolute file path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
|
||||
}
|
||||
|
||||
// There is no sif.ParseReference because it is rather pointless.
|
||||
// Callers who need a transport-independent interface will go through
|
||||
// sifTransport.ParseReference; callers who intentionally deal with SIF files
|
||||
// can use sif.NewReference.
|
||||
|
||||
// NewReference returns an image file reference for a specified path.
|
||||
func NewReference(file string) (types.ImageReference, error) {
|
||||
// We do not expose an API supplying the resolvedFile; we could, but recomputing it
|
||||
// is generally cheap enough that we prefer being confident about the properties of resolvedFile.
|
||||
resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sifReference{file: file, resolvedFile: resolved}, nil
|
||||
}
|
||||
|
||||
func (ref sifReference) Transport() types.ImageTransport {
|
||||
return Transport
|
||||
}
|
||||
|
||||
// StringWithinTransport returns a string representation of the reference, which MUST be such that
|
||||
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
|
||||
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
|
||||
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
|
||||
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
|
||||
// instead, see transports.ImageName().
|
||||
func (ref sifReference) StringWithinTransport() string {
|
||||
return ref.file
|
||||
}
|
||||
|
||||
// DockerReference returns a Docker reference associated with this reference
|
||||
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
|
||||
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
|
||||
func (ref sifReference) DockerReference() reference.Named {
|
||||
return nil
|
||||
}
|
||||
|
||||
// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
|
||||
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
|
||||
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
|
||||
// (i.e. various references with exactly the same semantics should return the same configuration identity)
|
||||
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
|
||||
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
|
||||
// Returns "" if configuration identities for these references are not supported.
|
||||
func (ref sifReference) PolicyConfigurationIdentity() string {
|
||||
return ref.resolvedFile
|
||||
}
|
||||
|
||||
// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
|
||||
// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
|
||||
// in order, terminating on first match, and an implicit "" is always checked at the end.
|
||||
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
|
||||
// and each following element to be a prefix of the element preceding it.
|
||||
func (ref sifReference) PolicyConfigurationNamespaces() []string {
|
||||
res := []string{}
|
||||
path := ref.resolvedFile
|
||||
for {
|
||||
lastSlash := strings.LastIndex(path, "/")
|
||||
if lastSlash == -1 || lastSlash == 0 {
|
||||
break
|
||||
}
|
||||
path = path[:lastSlash]
|
||||
res = append(res, path)
|
||||
}
|
||||
// Note that we do not include "/"; it is redundant with the default "" global default,
|
||||
// and rejected by sifTransport.ValidatePolicyConfigurationScope above.
|
||||
return res
|
||||
}
|
||||
|
||||
// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
|
||||
// The caller must call .Close() on the returned ImageCloser.
|
||||
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
|
||||
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
|
||||
// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
|
||||
func (ref sifReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) {
|
||||
src, err := newImageSource(ctx, sys, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return image.FromSource(ctx, sys, src)
|
||||
}
|
||||
|
||||
// NewImageSource returns a types.ImageSource for this reference.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func (ref sifReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) {
|
||||
return newImageSource(ctx, sys, ref)
|
||||
}
|
||||
|
||||
// NewImageDestination returns a types.ImageDestination for this reference.
|
||||
// The caller must call .Close() on the returned ImageDestination.
|
||||
func (ref sifReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
|
||||
return nil, errors.New(`"sif:" locations can only be read from, not written to`)
|
||||
}
|
||||
|
||||
// DeleteImage deletes the named image from the registry, if supported.
|
||||
func (ref sifReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
|
||||
return errors.New("Deleting images not implemented for sif: images")
|
||||
}
|
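A minimal usage sketch (not part of this commit) of the new read-only sif: transport. It assumes an example.sif exists locally and that the conversion prerequisites described in load.go (fakeroot, unsquashfs) are installed:

package main

import (
    "context"
    "fmt"
    "log"

    "github.com/containers/image/v5/sif"
    "github.com/containers/image/v5/types"
)

func main() {
    ref, err := sif.NewReference("./example.sif")
    if err != nil {
        log.Fatal(err)
    }

    ctx := context.Background()
    src, err := ref.NewImageSource(ctx, &types.SystemContext{})
    if err != nil {
        log.Fatal(err)
    }
    defer src.Close()

    // The manifest is synthesized from the converted SIF contents.
    manifest, mimeType, err := src.GetManifest(ctx, nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s\n%s\n", mimeType, manifest)
}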
30  vendor/github.com/containers/image/v5/signature/docker.go (generated, vendored)

@@ -3,22 +3,46 @@
package signature

import (
    "errors"
    "fmt"
    "strings"

    "github.com/containers/image/v5/docker/reference"
    "github.com/containers/image/v5/manifest"
    "github.com/opencontainers/go-digest"
)

// SignOptions includes optional parameters for signing container images.
type SignOptions struct {
    // Passphrase to use when signing with the key identity.
    Passphrase string
}

// SignDockerManifest returns a signature for manifest as the specified dockerReference,
// using mech and keyIdentity.
func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
// using mech and keyIdentity, and the specified options.
func SignDockerManifestWithOptions(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string, options *SignOptions) ([]byte, error) {
    manifestDigest, err := manifest.Digest(m)
    if err != nil {
        return nil, err
    }
    sig := newUntrustedSignature(manifestDigest, dockerReference)
    return sig.sign(mech, keyIdentity)

    var passphrase string
    if options != nil {
        passphrase = options.Passphrase
        // The gpgme implementation can’t use passphrase with \n; reject it here for consistent behavior.
        if strings.Contains(passphrase, "\n") {
            return nil, errors.New("invalid passphrase: must not contain a line break")
        }
    }

    return sig.sign(mech, keyIdentity, passphrase)
}

// SignDockerManifest returns a signature for manifest as the specified dockerReference,
// using mech and keyIdentity.
func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
    return SignDockerManifestWithOptions(m, dockerReference, mech, keyIdentity, nil)
}

// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
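A minimal sketch of calling the new API directly; the manifest path, image reference, key fingerprint, and passphrase are placeholders:

package main

import (
    "fmt"
    "io/ioutil"
    "log"

    "github.com/containers/image/v5/signature"
)

func main() {
    manifest, err := ioutil.ReadFile("manifest.json")
    if err != nil {
        log.Fatal(err)
    }

    mech, err := signature.NewGPGSigningMechanism()
    if err != nil {
        log.Fatal(err)
    }
    defer mech.Close()

    // Sign with a passphrase-protected key without any pinentry prompt.
    sig, err := signature.SignDockerManifestWithOptions(manifest, "docker.io/library/busybox:latest", mech,
        "<key fingerprint>", &signature.SignOptions{Passphrase: "s3cret"})
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("created a %d-byte signature\n", len(sig))
}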
11  vendor/github.com/containers/image/v5/signature/mechanism.go (generated, vendored)

@@ -18,8 +18,6 @@ import (

// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// Each mechanism should eventually be closed by calling Close().
// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
// eliminate ambiguities, support CA signatures and perhaps other key properties)
type SigningMechanism interface {
    // Close removes resources associated with the mechanism, if any.
    Close() error
@@ -38,6 +36,15 @@ type SigningMechanism interface {
    UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
}

// signingMechanismWithPassphrase is an internal extension of SigningMechanism.
type signingMechanismWithPassphrase interface {
    SigningMechanism

    // Sign creates a (non-detached) signature of input using keyIdentity and passphrase.
    // Fails with a SigningNotSupportedError if the mechanism does not support signing.
    SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error)
}

// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
type SigningNotSupportedError string
37  vendor/github.com/containers/image/v5/signature/mechanism_gpgme.go (generated, vendored)

@@ -5,11 +5,12 @@ package signature

import (
    "bytes"
    "errors"
    "fmt"
    "io/ioutil"
    "os"

    "github.com/mtrmac/gpgme"
    "github.com/proglottis/gpgme"
)

// A GPG/OpenPGP signing mechanism, implemented using gpgme.
@@ -20,7 +21,7 @@ type gpgmeSigningMechanism struct {

// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
// The caller must call .Close() on the returned SigningMechanism.
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
    ctx, err := newGPGMEContext(optionalDir)
    if err != nil {
        return nil, err
@@ -35,7 +36,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
// recognizes _only_ public keys from the supplied blob, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
    dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
    if err != nil {
        return nil, nil, err
@@ -117,9 +118,9 @@ func (m *gpgmeSigningMechanism) SupportsSigning() error {
    return nil
}

// Sign creates a (non-detached) signature of input using keyIdentity.
// Sign creates a (non-detached) signature of input using keyIdentity and passphrase.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
func (m *gpgmeSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
    key, err := m.ctx.GetKey(keyIdentity, true)
    if err != nil {
        return nil, err
@@ -133,12 +134,38 @@ func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte,
    if err != nil {
        return nil, err
    }

    if passphrase != "" {
        // Callback to write the passphrase to the specified file descriptor.
        callback := func(uidHint string, prevWasBad bool, gpgmeFD *os.File) error {
            if prevWasBad {
                return errors.New("bad passphrase")
            }
            _, err := gpgmeFD.WriteString(passphrase + "\n")
            return err
        }
        if err := m.ctx.SetCallback(callback); err != nil {
            return nil, fmt.Errorf("setting gpgme passphrase callback: %w", err)
        }

        // Loopback mode will use the callback instead of prompting the user.
        if err := m.ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
            return nil, fmt.Errorf("setting gpgme pinentry mode: %w", err)
        }
    }

    if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
        return nil, err
    }
    return sigBuffer.Bytes(), nil
}

// Sign creates a (non-detached) signature of input using keyIdentity.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
    return m.SignWithPassphrase(input, keyIdentity, "")
}

// Verify parses unverifiedSignature and returns the content and the signer's identity
func (m *gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
    signedBuffer := bytes.Buffer{}
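For reference, the loopback-pinentry pattern used by SignWithPassphrase above, shown standalone against the proglottis/gpgme API. This is a hedged sketch; depending on the GnuPG version, gpg-agent may need allow-loopback-pinentry enabled, and the passphrase value is a placeholder:

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/proglottis/gpgme"
)

func main() {
    ctx, err := gpgme.New()
    if err != nil {
        log.Fatal(err)
    }

    passphrase := "s3cret" // placeholder
    // The callback writes the passphrase to the file descriptor gpgme provides,
    // instead of gpgme asking an interactive pinentry program.
    callback := func(uidHint string, prevWasBad bool, f *os.File) error {
        if prevWasBad {
            return fmt.Errorf("bad passphrase")
        }
        _, err := f.WriteString(passphrase + "\n")
        return err
    }
    if err := ctx.SetCallback(callback); err != nil {
        log.Fatal(err)
    }
    if err := ctx.SetPinEntryMode(gpgme.PinEntryLoopback); err != nil {
        log.Fatal(err)
    }
    // ctx can now be used for signing without an interactive prompt.
}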
12  vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go (generated, vendored)

@@ -30,7 +30,7 @@ type openpgpSigningMechanism struct {

// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
// The caller must call .Close() on the returned SigningMechanism.
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
func newGPGSigningMechanismInDirectory(optionalDir string) (signingMechanismWithPassphrase, error) {
    m := &openpgpSigningMechanism{
        keyring: openpgp.EntityList{},
    }
@@ -61,7 +61,7 @@ func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, er
// recognizes _only_ public keys from the supplied blob, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
func newEphemeralGPGSigningMechanism(blob []byte) (signingMechanismWithPassphrase, []string, error) {
    m := &openpgpSigningMechanism{
        keyring: openpgp.EntityList{},
    }
@@ -110,10 +110,16 @@ func (m *openpgpSigningMechanism) SupportsSigning() error {

// Sign creates a (non-detached) signature of input using keyIdentity.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
func (m *openpgpSigningMechanism) SignWithPassphrase(input []byte, keyIdentity string, passphrase string) ([]byte, error) {
    return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
}

// Sign creates a (non-detached) signature of input using keyIdentity.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
    return m.SignWithPassphrase(input, keyIdentity, "")
}

// Verify parses unverifiedSignature and returns the content and the signer's identity
func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
    md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
10  vendor/github.com/containers/image/v5/signature/signature.go (generated, vendored)

@@ -190,12 +190,20 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
// of the system just because it is a private key — actually the presence of a private key
// on the system increases the likelihood of an a successful attack on that private key
// on that particular system.)
func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string, passphrase string) ([]byte, error) {
    json, err := json.Marshal(s)
    if err != nil {
        return nil, err
    }

    if newMech, ok := mech.(signingMechanismWithPassphrase); ok {
        return newMech.SignWithPassphrase(json, keyIdentity, passphrase)
    }

    if passphrase != "" {
        return nil, errors.New("signing mechanism does not support passphrases")
    }

    return mech.Sign(json, keyIdentity)
}
2  vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go (generated, vendored)

@@ -12,7 +12,9 @@ import (
    _ "github.com/containers/image/v5/oci/archive"
    _ "github.com/containers/image/v5/oci/layout"
    _ "github.com/containers/image/v5/openshift"
    _ "github.com/containers/image/v5/sif"
    _ "github.com/containers/image/v5/tarball"

    // The ostree transport is registered by ostree*.go
    // The storage transport is registered by storage*.go
    "github.com/containers/image/v5/transports"
2  vendor/github.com/containers/image/v5/version/version.go (generated, vendored)

@@ -6,7 +6,7 @@ const (
    // VersionMajor is for an API incompatible changes
    VersionMajor = 5
    // VersionMinor is for functionality in a backwards-compatible manner
    VersionMinor = 18
    VersionMinor = 19
    // VersionPatch is for backwards-compatible bug fixes
    VersionPatch = 0
3  vendor/github.com/mtrmac/gpgme/go.mod (generated, vendored)

@@ -1,3 +0,0 @@
module github.com/mtrmac/gpgme

go 1.11
0  vendor/github.com/mtrmac/gpgme/LICENSE → vendor/github.com/proglottis/gpgme/LICENSE (generated, vendored)

0  vendor/github.com/mtrmac/gpgme/data.go → vendor/github.com/proglottis/gpgme/data.go (generated, vendored)

3  vendor/github.com/proglottis/gpgme/go.mod (new file, generated, vendored)

@@ -0,0 +1,3 @@
module github.com/proglottis/gpgme

go 1.11
@@ -6,11 +6,6 @@

#include <gpgme.h>

/* GPGME_VERSION_NUMBER was introduced in 1.4.0 */
#if !defined(GPGME_VERSION_NUMBER) || GPGME_VERSION_NUMBER < 0x010402
typedef off_t gpgme_off_t; /* Introduced in 1.4.2 */
#endif

extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size);
extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size);
extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence);
36  vendor/github.com/mtrmac/gpgme/gpgme.go → vendor/github.com/proglottis/gpgme/gpgme.go (generated, vendored)

@@ -53,13 +53,13 @@ const (

type PinEntryMode int

// const ( // Unavailable in 1.3.2
//     PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT
//     PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK
//     PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL
//     PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR
//     PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK
// )
const (
    PinEntryDefault  PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT
    PinEntryAsk      PinEntryMode = C.GPGME_PINENTRY_MODE_ASK
    PinEntryCancel   PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL
    PinEntryError    PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR
    PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK
)

type EncryptFlag uint

@@ -348,19 +348,17 @@ func (c *Context) KeyListMode() KeyListMode {
    return res
}

// Unavailable in 1.3.2:
// func (c *Context) SetPinEntryMode(m PinEntryMode) error {
//     err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
//     runtime.KeepAlive(c)
//     return err
// }
func (c *Context) SetPinEntryMode(m PinEntryMode) error {
    err := handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m)))
    runtime.KeepAlive(c)
    return err
}

// Unavailable in 1.3.2:
// func (c *Context) PinEntryMode() PinEntryMode {
//     res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
//     runtime.KeepAlive(c)
//     return res
// }
func (c *Context) PinEntryMode() PinEntryMode {
    res := PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx))
    runtime.KeepAlive(c)
    return res
}

func (c *Context) SetCallback(callback Callback) error {
    var err error
29  vendor/github.com/sylabs/sif/v2/LICENSE.md (new file, generated, vendored)
@@ -0,0 +1,29 @@
|
||||
# LICENSE
|
||||
|
||||
Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice,
|
||||
this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its
|
||||
contributors may be used to endorse or promote products derived from this
|
||||
software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
|
||||
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
||||
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
||||
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGE.
|
69  vendor/github.com/sylabs/sif/v2/pkg/sif/arch.go (new file, generated, vendored)
@@ -0,0 +1,69 @@
|
||||
// Copyright (c) 2021, Sylabs Inc. All rights reserved.
|
||||
// This software is licensed under a 3-clause BSD license. Please consult the
|
||||
// LICENSE file distributed with the sources of this project regarding your
|
||||
// rights to use or distribute this software.
|
||||
|
||||
package sif
|
||||
|
||||
var (
|
||||
hdrArchUnknown archType = [...]byte{'0', '0', '\x00'}
|
||||
hdrArch386 archType = [...]byte{'0', '1', '\x00'}
|
||||
hdrArchAMD64 archType = [...]byte{'0', '2', '\x00'}
|
||||
hdrArchARM archType = [...]byte{'0', '3', '\x00'}
|
||||
hdrArchARM64 archType = [...]byte{'0', '4', '\x00'}
|
||||
hdrArchPPC64 archType = [...]byte{'0', '5', '\x00'}
|
||||
hdrArchPPC64le archType = [...]byte{'0', '6', '\x00'}
|
||||
hdrArchMIPS archType = [...]byte{'0', '7', '\x00'}
|
||||
hdrArchMIPSle archType = [...]byte{'0', '8', '\x00'}
|
||||
hdrArchMIPS64 archType = [...]byte{'0', '9', '\x00'}
|
||||
hdrArchMIPS64le archType = [...]byte{'1', '0', '\x00'}
|
||||
hdrArchS390x archType = [...]byte{'1', '1', '\x00'}
|
||||
)
|
||||
|
||||
type archType [3]byte
|
||||
|
||||
// getSIFArch returns the archType corresponding to go runtime arch.
|
||||
func getSIFArch(arch string) archType {
|
||||
archMap := map[string]archType{
|
||||
"386": hdrArch386,
|
||||
"amd64": hdrArchAMD64,
|
||||
"arm": hdrArchARM,
|
||||
"arm64": hdrArchARM64,
|
||||
"ppc64": hdrArchPPC64,
|
||||
"ppc64le": hdrArchPPC64le,
|
||||
"mips": hdrArchMIPS,
|
||||
"mipsle": hdrArchMIPSle,
|
||||
"mips64": hdrArchMIPS64,
|
||||
"mips64le": hdrArchMIPS64le,
|
||||
"s390x": hdrArchS390x,
|
||||
}
|
||||
|
||||
t, ok := archMap[arch]
|
||||
if !ok {
|
||||
return hdrArchUnknown
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// GoArch returns the go runtime arch corresponding to t.
|
||||
func (t archType) GoArch() string {
|
||||
archMap := map[archType]string{
|
||||
hdrArch386: "386",
|
||||
hdrArchAMD64: "amd64",
|
||||
hdrArchARM: "arm",
|
||||
hdrArchARM64: "arm64",
|
||||
hdrArchPPC64: "ppc64",
|
||||
hdrArchPPC64le: "ppc64le",
|
||||
hdrArchMIPS: "mips",
|
||||
hdrArchMIPSle: "mipsle",
|
||||
hdrArchMIPS64: "mips64",
|
||||
hdrArchMIPS64le: "mips64le",
|
||||
hdrArchS390x: "s390x",
|
||||
}
|
||||
|
||||
arch, ok := archMap[t]
|
||||
if !ok {
|
||||
arch = "unknown"
|
||||
}
|
||||
return arch
|
||||
}
|
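A tiny, hypothetical in-package example of the mapping above; unknown GOARCH values round-trip to "unknown":

package sif

import "fmt"

func ExampleArchRoundTrip() {
    fmt.Println(getSIFArch("amd64").GoArch())   // known architecture
    fmt.Println(getSIFArch("riscv64").GoArch()) // not in the table, falls back
    // Output:
    // amd64
    // unknown
}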
103  vendor/github.com/sylabs/sif/v2/pkg/sif/buffer.go (new file, generated, vendored)
@@ -0,0 +1,103 @@
|
||||
// Copyright (c) 2021, Sylabs Inc. All rights reserved.
|
||||
// This software is licensed under a 3-clause BSD license. Please consult the
|
||||
// LICENSE file distributed with the sources of this project regarding your
|
||||
// rights to use or distribute this software.
|
||||
|
||||
package sif
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
// A Buffer is a variable-sized buffer of bytes that implements the sif.ReadWriter interface. The
|
||||
// zero value for Buffer is an empty buffer ready to use.
|
||||
type Buffer struct {
|
||||
buf []byte
|
||||
pos int64
|
||||
}
|
||||
|
||||
// NewBuffer creates and initializes a new Buffer using buf as its initial contents.
|
||||
func NewBuffer(buf []byte) *Buffer {
|
||||
return &Buffer{buf: buf}
|
||||
}
|
||||
|
||||
var errNegativeOffset = errors.New("negative offset")
|
||||
|
||||
// ReadAt implements the io.ReaderAt interface.
|
||||
func (b *Buffer) ReadAt(p []byte, off int64) (n int, err error) {
|
||||
if off < 0 {
|
||||
return 0, errNegativeOffset
|
||||
}
|
||||
|
||||
if off >= int64(len(b.buf)) {
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
n = copy(p, b.buf[off:])
|
||||
if n < len(p) {
|
||||
err = io.EOF
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
var errNegativePosition = errors.New("negative position")
|
||||
|
||||
// Write implements the io.Writer interface.
|
||||
func (b *Buffer) Write(p []byte) (n int, err error) {
|
||||
if b.pos < 0 {
|
||||
return 0, errNegativePosition
|
||||
}
|
||||
|
||||
if have, need := int64(len(b.buf))-b.pos, int64(len(p)); have < need {
|
||||
b.buf = append(b.buf, make([]byte, need-have)...)
|
||||
}
|
||||
|
||||
n = copy(b.buf[b.pos:], p)
|
||||
b.pos += int64(n)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
var errInvalidWhence = errors.New("invalid whence")
|
||||
|
||||
// Seek implements the io.Seeker interface.
|
||||
func (b *Buffer) Seek(offset int64, whence int) (int64, error) {
|
||||
var abs int64
|
||||
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
abs = offset
|
||||
case io.SeekCurrent:
|
||||
abs = b.pos + offset
|
||||
case io.SeekEnd:
|
||||
abs = int64(len(b.buf)) + offset
|
||||
default:
|
||||
return 0, errInvalidWhence
|
||||
}
|
||||
|
||||
if abs < 0 {
|
||||
return 0, errNegativePosition
|
||||
}
|
||||
|
||||
b.pos = abs
|
||||
return abs, nil
|
||||
}
|
||||
|
||||
var errTruncateRange = errors.New("truncation out of range")
|
||||
|
||||
// Truncate discards all but the first n bytes from the buffer.
|
||||
func (b *Buffer) Truncate(n int64) error {
|
||||
if n < 0 || n > int64(len(b.buf)) {
|
||||
return errTruncateRange
|
||||
}
|
||||
|
||||
b.buf = b.buf[:n]
|
||||
return nil
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the buffer. The slice is valid for use only until the next buffer
|
||||
// modification (that is, only until the next call to a method like ReadAt, Write, or Truncate).
|
||||
func (b *Buffer) Bytes() []byte { return b.buf }
|
||||
|
||||
// Len returns the number of bytes in the buffer.
|
||||
func (b *Buffer) Len() int64 { return int64(len(b.buf)) }
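Buffer satisfies the sif.ReadWriter interface expected by CreateContainer (added later in this change), so an image can be built entirely in memory. A minimal sketch, assuming only the APIs vendored here:

package main

import (
	"fmt"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	// Back the image with an in-memory buffer instead of a file on disk.
	buf := sif.NewBuffer(nil)

	f, err := sif.CreateContainer(buf, sif.OptCreateDeterministic())
	if err != nil {
		panic(err)
	}
	if err := f.UnloadContainer(); err != nil {
		panic(err)
	}

	// buf.Bytes() now holds the raw SIF image (global header plus descriptor area).
	fmt.Println("image size:", len(buf.Bytes()))
}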
|
680
vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
generated
vendored
Normal file
680
vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
generated
vendored
Normal file
@@ -0,0 +1,680 @@
|
||||
// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
|
||||
// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
|
||||
// Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
|
||||
// This software is licensed under a 3-clause BSD license. Please consult the
|
||||
// LICENSE file distributed with the sources of this project regarding your
|
||||
// rights to use or distribute this software.
|
||||
|
||||
package sif
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// nextAligned finds the next offset that satisfies alignment.
|
||||
func nextAligned(offset int64, alignment int) int64 {
|
||||
align64 := uint64(alignment)
|
||||
offset64 := uint64(offset)
|
||||
|
||||
if align64 != 0 && offset64%align64 != 0 {
|
||||
offset64 = (offset64 & ^(align64 - 1)) + align64
|
||||
}
|
||||
|
||||
return int64(offset64)
|
||||
}
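As a worked example of the rounding above (assuming a power-of-two alignment, which the mask arithmetic requires and which the default alignment of os.Getpagesize() always provides): nextAligned(4097, 4096) rounds down with the mask and adds one alignment unit, (4097 &^ 4095) + 4096 = 8192, while an already-aligned offset such as nextAligned(8192, 4096) is returned unchanged.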
|
||||
|
||||
// writeDataObjectAt writes the data object described by di to ws, using time t, recording details
|
||||
// in d. The object is written at the first position that satisfies the alignment requirements
|
||||
// described by di following offsetUnaligned.
|
||||
func writeDataObjectAt(ws io.WriteSeeker, offsetUnaligned int64, di DescriptorInput, t time.Time, d *rawDescriptor) error { //nolint:lll
|
||||
offset, err := ws.Seek(nextAligned(offsetUnaligned, di.opts.alignment), io.SeekStart)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
n, err := io.Copy(ws, di.r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := di.fillDescriptor(t, d); err != nil {
|
||||
return err
|
||||
}
|
||||
d.Used = true
|
||||
d.Offset = offset
|
||||
d.Size = n
|
||||
d.SizeWithPadding = offset - offsetUnaligned + n
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
errInsufficientCapacity = errors.New("insufficient descriptor capacity to add data object(s) to image")
|
||||
errPrimaryPartition = errors.New("image already contains a primary partition")
|
||||
)
|
||||
|
||||
// writeDataObject writes the data object described by di to f, using time t, recording details in
|
||||
// the descriptor at index i.
|
||||
func (f *FileImage) writeDataObject(i int, di DescriptorInput, t time.Time) error {
|
||||
if i >= len(f.rds) {
|
||||
return errInsufficientCapacity
|
||||
}
|
||||
|
||||
// If this is a primary partition, verify there isn't another primary partition, and update the
|
||||
// architecture in the global header.
|
||||
if p, ok := di.opts.extra.(partition); ok && p.Parttype == PartPrimSys {
|
||||
if ds, err := f.GetDescriptors(WithPartitionType(PartPrimSys)); err == nil && len(ds) > 0 {
|
||||
return errPrimaryPartition
|
||||
}
|
||||
|
||||
f.h.Arch = p.Arch
|
||||
}
|
||||
|
||||
d := &f.rds[i]
|
||||
d.ID = uint32(i) + 1
|
||||
|
||||
if err := writeDataObjectAt(f.rw, f.h.DataOffset+f.h.DataSize, di, t, d); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update minimum object ID map.
|
||||
if minID, ok := f.minIDs[d.GroupID]; !ok || d.ID < minID {
|
||||
f.minIDs[d.GroupID] = d.ID
|
||||
}
|
||||
|
||||
f.h.DescriptorsFree--
|
||||
f.h.DataSize += d.SizeWithPadding
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeDescriptors writes the descriptors in f to backing storage.
|
||||
func (f *FileImage) writeDescriptors() error {
|
||||
if _, err := f.rw.Seek(f.h.DescriptorsOffset, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return binary.Write(f.rw, binary.LittleEndian, f.rds)
|
||||
}
|
||||
|
||||
// writeHeader writes the global header in f to backing storage.
|
||||
func (f *FileImage) writeHeader() error {
|
||||
if _, err := f.rw.Seek(0, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return binary.Write(f.rw, binary.LittleEndian, f.h)
|
||||
}
|
||||
|
||||
// createOpts accumulates container creation options.
|
||||
type createOpts struct {
|
||||
launchScript [hdrLaunchLen]byte
|
||||
id uuid.UUID
|
||||
descriptorsOffset int64
|
||||
descriptorCapacity int64
|
||||
dis []DescriptorInput
|
||||
t time.Time
|
||||
closeOnUnload bool
|
||||
}
|
||||
|
||||
// CreateOpt are used to specify container creation options.
|
||||
type CreateOpt func(*createOpts) error
|
||||
|
||||
var errLaunchScriptLen = errors.New("launch script too large")
|
||||
|
||||
// OptCreateWithLaunchScript specifies s as the launch script.
|
||||
func OptCreateWithLaunchScript(s string) CreateOpt {
|
||||
return func(co *createOpts) error {
|
||||
b := []byte(s)
|
||||
|
||||
if len(b) >= len(co.launchScript) {
|
||||
return errLaunchScriptLen
|
||||
}
|
||||
|
||||
copy(co.launchScript[:], b)
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptCreateDeterministic sets header/descriptor fields to values that support deterministic
|
||||
// creation of images.
|
||||
func OptCreateDeterministic() CreateOpt {
|
||||
return func(co *createOpts) error {
|
||||
co.id = uuid.Nil
|
||||
co.t = time.Time{}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptCreateWithID specifies id as the unique ID.
|
||||
func OptCreateWithID(id string) CreateOpt {
|
||||
return func(co *createOpts) error {
|
||||
id, err := uuid.Parse(id)
|
||||
co.id = id
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// OptCreateWithDescriptorCapacity specifies that the created image should have the capacity for a
|
||||
// maximum of n descriptors.
|
||||
func OptCreateWithDescriptorCapacity(n int64) CreateOpt {
|
||||
return func(co *createOpts) error {
|
||||
co.descriptorCapacity = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptCreateWithDescriptors appends dis to the list of descriptors.
|
||||
func OptCreateWithDescriptors(dis ...DescriptorInput) CreateOpt {
|
||||
return func(co *createOpts) error {
|
||||
co.dis = append(co.dis, dis...)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptCreateWithTime specifies t as the image creation time.
|
||||
func OptCreateWithTime(t time.Time) CreateOpt {
|
||||
return func(co *createOpts) error {
|
||||
co.t = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptCreateWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer.
|
||||
// By default, the ReadWriter will be closed if it implements the io.Closer interface.
|
||||
func OptCreateWithCloseOnUnload(b bool) CreateOpt {
|
||||
return func(co *createOpts) error {
|
||||
co.closeOnUnload = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// createContainer creates a new SIF container file in rw, according to opts.
|
||||
func createContainer(rw ReadWriter, co createOpts) (*FileImage, error) {
|
||||
rds := make([]rawDescriptor, co.descriptorCapacity)
|
||||
rdsSize := int64(binary.Size(rds))
|
||||
|
||||
h := header{
|
||||
LaunchScript: co.launchScript,
|
||||
Magic: hdrMagic,
|
||||
Version: CurrentVersion.bytes(),
|
||||
Arch: hdrArchUnknown,
|
||||
ID: co.id,
|
||||
CreatedAt: co.t.Unix(),
|
||||
ModifiedAt: co.t.Unix(),
|
||||
DescriptorsFree: co.descriptorCapacity,
|
||||
DescriptorsTotal: co.descriptorCapacity,
|
||||
DescriptorsOffset: co.descriptorsOffset,
|
||||
DescriptorsSize: rdsSize,
|
||||
DataOffset: co.descriptorsOffset + rdsSize,
|
||||
}
|
||||
|
||||
f := &FileImage{
|
||||
rw: rw,
|
||||
h: h,
|
||||
rds: rds,
|
||||
minIDs: make(map[uint32]uint32),
|
||||
}
|
||||
|
||||
for i, di := range co.dis {
|
||||
if err := f.writeDataObject(i, di, co.t); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := f.writeDescriptors(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := f.writeHeader(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// CreateContainer creates a new SIF container in rw, according to opts. One or more data objects
|
||||
// can optionally be specified using OptCreateWithDescriptors.
|
||||
//
|
||||
// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
|
||||
// are released. By default, UnloadContainer will close rw if it implements the io.Closer
|
||||
// interface. To change this behavior, consider using OptCreateWithCloseOnUnload.
|
||||
//
|
||||
// By default, the image ID is set to a randomly generated value. To override this, consider using
|
||||
// OptCreateDeterministic or OptCreateWithID.
|
||||
//
|
||||
// By default, the image creation time is set to time.Now(). To override this, consider using
|
||||
// OptCreateDeterministic or OptCreateWithTime.
|
||||
//
|
||||
// By default, the image will support a maximum of 48 descriptors. To change this, consider using
|
||||
// OptCreateWithDescriptorCapacity.
|
||||
//
|
||||
// A launch script can optionally be set using OptCreateWithLaunchScript.
|
||||
func CreateContainer(rw ReadWriter, opts ...CreateOpt) (*FileImage, error) {
|
||||
id, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
co := createOpts{
|
||||
id: id,
|
||||
descriptorsOffset: 4096,
|
||||
descriptorCapacity: 48,
|
||||
t: time.Now(),
|
||||
closeOnUnload: true,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(&co); err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
}
|
||||
|
||||
f, err := createContainer(rw, co)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
f.closeOnUnload = co.closeOnUnload
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// CreateContainerAtPath creates a new SIF container file at path, according to opts. One or more
|
||||
// data objects can optionally be specified using OptCreateWithDescriptors.
|
||||
//
|
||||
// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
|
||||
// are released.
|
||||
//
|
||||
// By default, the image ID is set to a randomly generated value. To override this, consider using
|
||||
// OptCreateDeterministic or OptCreateWithID.
|
||||
//
|
||||
// By default, the image creation time is set to time.Now(). To override this, consider using
|
||||
// OptCreateDeterministic or OptCreateWithTime.
|
||||
//
|
||||
// By default, the image will support a maximum of 48 descriptors. To change this, consider using
|
||||
// OptCreateWithDescriptorCapacity.
|
||||
//
|
||||
// A launch script can optionally be set using OptCreateWithLaunchScript.
|
||||
func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) {
|
||||
fp, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o755)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
f, err := CreateContainer(fp, opts...)
|
||||
if err != nil {
|
||||
fp.Close()
|
||||
os.Remove(fp.Name())
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f.closeOnUnload = true
|
||||
return f, nil
|
||||
}
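A sketch of typical use of the two creation functions above: wrap a root filesystem in a primary system partition descriptor and write a new image. The FsSquash constant and the input file name are assumptions for illustration; FSType constants are defined elsewhere in this package.

package main

import (
	"os"

	"github.com/sylabs/sif/v2/pkg/sif"
)

func main() {
	// Root filesystem image that will become the primary system partition.
	rootfs, err := os.Open("rootfs.squashfs")
	if err != nil {
		panic(err)
	}
	defer rootfs.Close()

	di, err := sif.NewDescriptorInput(sif.DataPartition, rootfs,
		sif.OptPartitionMetadata(sif.FsSquash, sif.PartPrimSys, "amd64"),
		sif.OptObjectName("rootfs"),
	)
	if err != nil {
		panic(err)
	}

	f, err := sif.CreateContainerAtPath("image.sif", sif.OptCreateWithDescriptors(di))
	if err != nil {
		panic(err)
	}
	defer f.UnloadContainer()
}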
|
||||
|
||||
func zeroData(fimg *FileImage, descr *rawDescriptor) error {
|
||||
// first, move to data object offset
|
||||
if _, err := fimg.rw.Seek(descr.Offset, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var zero [4096]byte
|
||||
n := descr.Size
|
||||
upbound := int64(4096)
|
||||
for {
|
||||
if n < 4096 {
|
||||
upbound = n
|
||||
}
|
||||
|
||||
if _, err := fimg.rw.Write(zero[:upbound]); err != nil {
|
||||
return err
|
||||
}
|
||||
n -= 4096
|
||||
if n <= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resetDescriptor(fimg *FileImage, index int) error {
|
||||
// If we remove the primary partition, set the global header Arch field to HdrArchUnknown
|
||||
// to indicate that the SIF file doesn't include a primary partition and no dependency
|
||||
// on any architecture exists.
|
||||
if fimg.rds[index].isPartitionOfType(PartPrimSys) {
|
||||
fimg.h.Arch = hdrArchUnknown
|
||||
}
|
||||
|
||||
offset := fimg.h.DescriptorsOffset + int64(index)*int64(binary.Size(fimg.rds[0]))
|
||||
|
||||
// first, move to descriptor offset
|
||||
if _, err := fimg.rw.Seek(offset, io.SeekStart); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var emptyDesc rawDescriptor
|
||||
return binary.Write(fimg.rw, binary.LittleEndian, emptyDesc)
|
||||
}
|
||||
|
||||
// addOpts accumulates object add options.
|
||||
type addOpts struct {
|
||||
t time.Time
|
||||
}
|
||||
|
||||
// AddOpt are used to specify object add options.
|
||||
type AddOpt func(*addOpts) error
|
||||
|
||||
// OptAddDeterministic sets header/descriptor fields to values that support deterministic
|
||||
// modification of images.
|
||||
func OptAddDeterministic() AddOpt {
|
||||
return func(ao *addOpts) error {
|
||||
ao.t = time.Time{}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptAddWithTime specifies t as the image modification time.
|
||||
func OptAddWithTime(t time.Time) AddOpt {
|
||||
return func(ao *addOpts) error {
|
||||
ao.t = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// AddObject adds a new data object and its descriptor into the specified SIF file.
|
||||
//
|
||||
// By default, the image modification time is set to the current time. To override this, consider
|
||||
// using OptAddDeterministic or OptAddWithTime.
|
||||
func (f *FileImage) AddObject(di DescriptorInput, opts ...AddOpt) error {
|
||||
ao := addOpts{
|
||||
t: time.Now(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(&ao); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Find an unused descriptor.
|
||||
i := 0
|
||||
for _, rd := range f.rds {
|
||||
if !rd.Used {
|
||||
break
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
if err := f.writeDataObject(i, di, ao.t); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
if err := f.writeDescriptors(); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
f.h.ModifiedAt = ao.t.Unix()
|
||||
|
||||
if err := f.writeHeader(); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
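For illustration, a sketch of AddObject appending a generic JSON object to an existing image; the function name, path, and payload are hypothetical, and the sif and strings packages are assumed to be imported.

func addLabels(path string) error {
	f, err := sif.LoadContainerFromPath(path)
	if err != nil {
		return err
	}
	defer f.UnloadContainer()

	di, err := sif.NewDescriptorInput(sif.DataGenericJSON,
		strings.NewReader(`{"maintainer":"demo"}`),
		sif.OptObjectName("labels.json"),
	)
	if err != nil {
		return err
	}

	// Fills the next unused descriptor, rewrites the descriptor area, and updates ModifiedAt.
	return f.AddObject(di)
}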
|
||||
|
||||
// isLast returns true if the data object associated with d is the last in f.
|
||||
func (f *FileImage) isLast(d *rawDescriptor) bool {
|
||||
isLast := true
|
||||
|
||||
end := d.Offset + d.Size
|
||||
f.WithDescriptors(func(d Descriptor) bool {
|
||||
isLast = d.Offset()+d.Size() <= end
|
||||
return !isLast
|
||||
})
|
||||
|
||||
return isLast
|
||||
}
|
||||
|
||||
// truncateAt truncates f at the start of the padded data object described by d.
|
||||
func (f *FileImage) truncateAt(d *rawDescriptor) error {
|
||||
start := d.Offset + d.Size - d.SizeWithPadding
|
||||
|
||||
if err := f.rw.Truncate(start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteOpts accumulates object deletion options.
|
||||
type deleteOpts struct {
|
||||
zero bool
|
||||
compact bool
|
||||
t time.Time
|
||||
}
|
||||
|
||||
// DeleteOpt are used to specify object deletion options.
|
||||
type DeleteOpt func(*deleteOpts) error
|
||||
|
||||
// OptDeleteZero specifies whether the deleted object should be zeroed.
|
||||
func OptDeleteZero(b bool) DeleteOpt {
|
||||
return func(do *deleteOpts) error {
|
||||
do.zero = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptDeleteCompact specifies whether the image should be compacted following object deletion.
|
||||
func OptDeleteCompact(b bool) DeleteOpt {
|
||||
return func(do *deleteOpts) error {
|
||||
do.compact = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptDeleteDeterministic sets header/descriptor fields to values that support deterministic
|
||||
// modification of images.
|
||||
func OptDeleteDeterministic() DeleteOpt {
|
||||
return func(do *deleteOpts) error {
|
||||
do.t = time.Time{}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptDeleteWithTime specifies t as the image modification time.
|
||||
func OptDeleteWithTime(t time.Time) DeleteOpt {
|
||||
return func(do *deleteOpts) error {
|
||||
do.t = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var errCompactNotImplemented = errors.New("compact not implemented for non-last object")
|
||||
|
||||
// DeleteObject deletes the data object with id, according to opts.
|
||||
//
|
||||
// To zero the data region of the deleted object, use OptDeleteZero. To compact the file following
|
||||
// object deletion, use OptDeleteCompact.
|
||||
//
|
||||
// By default, the image modification time is set to time.Now(). To override this, consider using
|
||||
// OptDeleteDeterministic or OptDeleteWithTime.
|
||||
func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
|
||||
do := deleteOpts{
|
||||
t: time.Now(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(&do); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
}
|
||||
|
||||
d, err := f.getDescriptor(WithID(id))
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
if do.compact && !f.isLast(d) {
|
||||
return fmt.Errorf("%w", errCompactNotImplemented)
|
||||
}
|
||||
|
||||
if do.zero {
|
||||
if err := zeroData(f, d); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if do.compact {
|
||||
if err := f.truncateAt(d); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
f.h.DataSize -= d.SizeWithPadding
|
||||
}
|
||||
|
||||
f.h.DescriptorsFree++
|
||||
f.h.ModifiedAt = do.t.Unix()
|
||||
|
||||
index := 0
|
||||
for i, od := range f.rds {
|
||||
if od.ID == id {
|
||||
index = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := resetDescriptor(f, index); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
if err := f.writeHeader(); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
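A short sketch of the deletion options described above, run inside a function returning error; f is a *sif.FileImage opened read/write, and object ID 3 is illustrative:

// Zero the data region and truncate the file; compaction only succeeds
// when the object is the last one in the image.
if err := f.DeleteObject(3, sif.OptDeleteZero(true), sif.OptDeleteCompact(true)); err != nil {
	return err
}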
|
||||
|
||||
// setOpts accumulates object set options.
|
||||
type setOpts struct {
|
||||
t time.Time
|
||||
}
|
||||
|
||||
// SetOpt are used to specify object set options.
|
||||
type SetOpt func(*setOpts) error
|
||||
|
||||
// OptSetDeterministic sets header/descriptor fields to values that support deterministic
|
||||
// modification of images.
|
||||
func OptSetDeterministic() SetOpt {
|
||||
return func(so *setOpts) error {
|
||||
so.t = time.Time{}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptSetWithTime specifies t as the image/object modification time.
|
||||
func OptSetWithTime(t time.Time) SetOpt {
|
||||
return func(so *setOpts) error {
|
||||
so.t = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
errNotPartition = errors.New("data object not a partition")
|
||||
errNotSystem = errors.New("data object not a system partition")
|
||||
)
|
||||
|
||||
// SetPrimPart sets the specified system partition to be the primary one.
|
||||
//
|
||||
// By default, the image/object modification times are set to time.Now(). To override this,
|
||||
// consider using OptSetDeterministic or OptSetWithTime.
|
||||
func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error {
|
||||
so := setOpts{
|
||||
t: time.Now(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(&so); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
}
|
||||
|
||||
descr, err := f.getDescriptor(WithID(id))
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
if descr.DataType != DataPartition {
|
||||
return fmt.Errorf("%w", errNotPartition)
|
||||
}
|
||||
|
||||
fs, pt, arch, err := descr.getPartitionMetadata()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
// if already primary system partition, nothing to do
|
||||
if pt == PartPrimSys {
|
||||
return nil
|
||||
}
|
||||
|
||||
if pt != PartSystem {
|
||||
return fmt.Errorf("%w", errNotSystem)
|
||||
}
|
||||
|
||||
olddescr, err := f.getDescriptor(WithPartitionType(PartPrimSys))
|
||||
if err != nil && !errors.Is(err, ErrObjectNotFound) {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
f.h.Arch = getSIFArch(arch)
|
||||
|
||||
extra := partition{
|
||||
Fstype: fs,
|
||||
Parttype: PartPrimSys,
|
||||
}
|
||||
copy(extra.Arch[:], arch)
|
||||
|
||||
if err := descr.setExtra(extra); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
if olddescr != nil {
|
||||
oldfs, _, oldarch, err := olddescr.getPartitionMetadata()
|
||||
if err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
oldextra := partition{
|
||||
Fstype: oldfs,
|
||||
Parttype: PartSystem,
|
||||
Arch: getSIFArch(oldarch),
|
||||
}
|
||||
|
||||
if err := olddescr.setExtra(oldextra); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := f.writeDescriptors(); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
f.h.ModifiedAt = so.t.Unix()
|
||||
|
||||
if err := f.writeHeader(); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
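Promoting an existing system partition to primary is then a single call (object ID 2 is illustrative):

// Rewrites both partition descriptors and the global header Arch field.
if err := f.SetPrimPart(2); err != nil {
	return err
}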
|
267
vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go
generated
vendored
Normal file
267
vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go
generated
vendored
Normal file
@@ -0,0 +1,267 @@
|
||||
// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
|
||||
// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
|
||||
// Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
|
||||
// This software is licensed under a 3-clause BSD license. Please consult the
|
||||
// LICENSE file distributed with the sources of this project regarding your
|
||||
// rights to use or distribute this software.
|
||||
|
||||
package sif
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// rawDescriptor represents an on-disk object descriptor.
|
||||
type rawDescriptor struct {
|
||||
DataType DataType
|
||||
Used bool
|
||||
ID uint32
|
||||
GroupID uint32
|
||||
LinkedID uint32
|
||||
Offset int64
|
||||
Size int64
|
||||
SizeWithPadding int64
|
||||
|
||||
CreatedAt int64
|
||||
ModifiedAt int64
|
||||
UID int64 // Deprecated: UID exists for historical compatibility and should not be used.
|
||||
GID int64 // Deprecated: GID exists for historical compatibility and should not be used.
|
||||
Name [descrNameLen]byte
|
||||
Extra [descrMaxPrivLen]byte
|
||||
}
|
||||
|
||||
// partition represents the SIF partition data object descriptor.
|
||||
type partition struct {
|
||||
Fstype FSType
|
||||
Parttype PartType
|
||||
Arch archType
|
||||
}
|
||||
|
||||
// signature represents the SIF signature data object descriptor.
|
||||
type signature struct {
|
||||
Hashtype hashType
|
||||
Entity [descrEntityLen]byte
|
||||
}
|
||||
|
||||
// cryptoMessage represents the SIF crypto message object descriptor.
|
||||
type cryptoMessage struct {
|
||||
Formattype FormatType
|
||||
Messagetype MessageType
|
||||
}
|
||||
|
||||
var errNameTooLarge = errors.New("name value too large")
|
||||
|
||||
// setName encodes name into the name field of d.
|
||||
func (d *rawDescriptor) setName(name string) error {
|
||||
if len(name) > len(d.Name) {
|
||||
return errNameTooLarge
|
||||
}
|
||||
|
||||
for i := copy(d.Name[:], name); i < len(d.Name); i++ {
|
||||
d.Name[i] = 0
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var errExtraTooLarge = errors.New("extra value too large")
|
||||
|
||||
// setExtra encodes v into the extra field of d.
|
||||
func (d *rawDescriptor) setExtra(v interface{}) error {
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if binary.Size(v) > len(d.Extra) {
|
||||
return errExtraTooLarge
|
||||
}
|
||||
|
||||
b := new(bytes.Buffer)
|
||||
if err := binary.Write(b, binary.LittleEndian, v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := copy(d.Extra[:], b.Bytes()); i < len(d.Extra); i++ {
|
||||
d.Extra[i] = 0
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getPartitionMetadata gets metadata for a partition data object.
|
||||
func (d rawDescriptor) getPartitionMetadata() (fs FSType, pt PartType, arch string, err error) {
|
||||
if got, want := d.DataType, DataPartition; got != want {
|
||||
return 0, 0, "", &unexpectedDataTypeError{got, []DataType{want}}
|
||||
}
|
||||
|
||||
var p partition
|
||||
|
||||
b := bytes.NewReader(d.Extra[:])
|
||||
if err := binary.Read(b, binary.LittleEndian, &p); err != nil {
|
||||
return 0, 0, "", fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
return p.Fstype, p.Parttype, p.Arch.GoArch(), nil
|
||||
}
|
||||
|
||||
// isPartitionOfType returns true if d is a partition data object of type pt.
|
||||
func (d rawDescriptor) isPartitionOfType(pt PartType) bool {
|
||||
_, t, _, err := d.getPartitionMetadata()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return t == pt
|
||||
}
|
||||
|
||||
// Descriptor represents the SIF descriptor type.
|
||||
type Descriptor struct {
|
||||
r io.ReaderAt // Backing storage.
|
||||
|
||||
raw rawDescriptor // Raw descriptor from image.
|
||||
|
||||
relativeID uint32 // ID relative to minimum ID of object group.
|
||||
}
|
||||
|
||||
// DataType returns the type of data object.
|
||||
func (d Descriptor) DataType() DataType { return d.raw.DataType }
|
||||
|
||||
// ID returns the data object ID of d.
|
||||
func (d Descriptor) ID() uint32 { return d.raw.ID }
|
||||
|
||||
// GroupID returns the data object group ID of d, or zero if d is not part of a data object
|
||||
// group.
|
||||
func (d Descriptor) GroupID() uint32 { return d.raw.GroupID &^ descrGroupMask }
|
||||
|
||||
// LinkedID returns the object/group ID d is linked to, or zero if d does not contain a linked
|
||||
// ID. If isGroup is true, the returned id is an object group ID. Otherwise, the returned id is a
|
||||
// data object ID.
|
||||
func (d Descriptor) LinkedID() (id uint32, isGroup bool) {
|
||||
return d.raw.LinkedID &^ descrGroupMask, d.raw.LinkedID&descrGroupMask == descrGroupMask
|
||||
}
|
||||
|
||||
// Offset returns the offset of the data object.
|
||||
func (d Descriptor) Offset() int64 { return d.raw.Offset }
|
||||
|
||||
// Size returns the data object size.
|
||||
func (d Descriptor) Size() int64 { return d.raw.Size }
|
||||
|
||||
// CreatedAt returns the creation time of the data object.
|
||||
func (d Descriptor) CreatedAt() time.Time { return time.Unix(d.raw.CreatedAt, 0) }
|
||||
|
||||
// ModifiedAt returns the modification time of the data object.
|
||||
func (d Descriptor) ModifiedAt() time.Time { return time.Unix(d.raw.ModifiedAt, 0) }
|
||||
|
||||
// Name returns the name of the data object.
|
||||
func (d Descriptor) Name() string { return strings.TrimRight(string(d.raw.Name[:]), "\000") }
|
||||
|
||||
// PartitionMetadata gets metadata for a partition data object.
|
||||
func (d Descriptor) PartitionMetadata() (fs FSType, pt PartType, arch string, err error) {
|
||||
return d.raw.getPartitionMetadata()
|
||||
}
|
||||
|
||||
var errHashUnsupported = errors.New("hash algorithm unsupported")
|
||||
|
||||
// getHashType converts ht into a crypto.Hash.
|
||||
func getHashType(ht hashType) (crypto.Hash, error) {
|
||||
switch ht {
|
||||
case hashSHA256:
|
||||
return crypto.SHA256, nil
|
||||
case hashSHA384:
|
||||
return crypto.SHA384, nil
|
||||
case hashSHA512:
|
||||
return crypto.SHA512, nil
|
||||
case hashBLAKE2S:
|
||||
return crypto.BLAKE2s_256, nil
|
||||
case hashBLAKE2B:
|
||||
return crypto.BLAKE2b_256, nil
|
||||
}
|
||||
return 0, errHashUnsupported
|
||||
}
|
||||
|
||||
// SignatureMetadata gets metadata for a signature data object.
|
||||
func (d Descriptor) SignatureMetadata() (ht crypto.Hash, fp []byte, err error) {
|
||||
if got, want := d.raw.DataType, DataSignature; got != want {
|
||||
return ht, fp, &unexpectedDataTypeError{got, []DataType{want}}
|
||||
}
|
||||
|
||||
var s signature
|
||||
|
||||
b := bytes.NewReader(d.raw.Extra[:])
|
||||
if err := binary.Read(b, binary.LittleEndian, &s); err != nil {
|
||||
return ht, fp, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
if ht, err = getHashType(s.Hashtype); err != nil {
|
||||
return ht, fp, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
fp = make([]byte, 20)
|
||||
copy(fp, s.Entity[:])
|
||||
|
||||
return ht, fp, nil
|
||||
}
|
||||
|
||||
// CryptoMessageMetadata gets metadata for a crypto message data object.
|
||||
func (d Descriptor) CryptoMessageMetadata() (FormatType, MessageType, error) {
|
||||
if got, want := d.raw.DataType, DataCryptoMessage; got != want {
|
||||
return 0, 0, &unexpectedDataTypeError{got, []DataType{want}}
|
||||
}
|
||||
|
||||
var m cryptoMessage
|
||||
|
||||
b := bytes.NewReader(d.raw.Extra[:])
|
||||
if err := binary.Read(b, binary.LittleEndian, &m); err != nil {
|
||||
return 0, 0, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
return m.Formattype, m.Messagetype, nil
|
||||
}
|
||||
|
||||
// GetData returns the data object associated with descriptor d.
|
||||
func (d Descriptor) GetData() ([]byte, error) {
|
||||
b := make([]byte, d.raw.Size)
|
||||
if _, err := io.ReadFull(d.GetReader(), b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// GetReader returns a io.Reader that reads the data object associated with descriptor d.
|
||||
func (d Descriptor) GetReader() io.Reader {
|
||||
return io.NewSectionReader(d.r, d.raw.Offset, d.raw.Size)
|
||||
}
|
||||
|
||||
// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from d.
|
||||
func (d Descriptor) GetIntegrityReader() io.Reader {
|
||||
fields := []interface{}{
|
||||
d.raw.DataType,
|
||||
d.raw.Used,
|
||||
d.relativeID,
|
||||
d.raw.LinkedID,
|
||||
d.raw.Size,
|
||||
d.raw.CreatedAt,
|
||||
d.raw.UID,
|
||||
d.raw.GID,
|
||||
}
|
||||
|
||||
// Encode endian-sensitive fields.
|
||||
data := bytes.Buffer{}
|
||||
for _, f := range fields {
|
||||
if err := binary.Write(&data, binary.LittleEndian, f); err != nil {
|
||||
panic(err) // (*bytes.Buffer).Write() is documented as always returning a nil error.
|
||||
}
|
||||
}
|
||||
|
||||
return io.MultiReader(
|
||||
&data,
|
||||
bytes.NewReader(d.raw.Name[:]),
|
||||
bytes.NewReader(d.raw.Extra[:]),
|
||||
)
|
||||
}
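A sketch tying the accessors above together: locate the primary system partition of a loaded image and read its metadata and contents. The selector functions come from select.go later in this change; fmt, io, and os are assumed imports.

func dumpPrimaryPartition(f *sif.FileImage) error {
	d, err := f.GetDescriptor(sif.WithPartitionType(sif.PartPrimSys))
	if err != nil {
		return err
	}

	fs, pt, arch, err := d.PartitionMetadata()
	if err != nil {
		return err
	}
	fmt.Printf("object %d (%q): fs=%v part=%v arch=%s size=%d\n",
		d.ID(), d.Name(), fs, pt, arch, d.Size())

	// Stream the partition contents rather than loading them all via GetData.
	_, err = io.Copy(os.Stdout, d.GetReader())
	return err
}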
|
300
vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go
generated
vendored
Normal file
300
vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go
generated
vendored
Normal file
@@ -0,0 +1,300 @@
|
||||
// Copyright (c) 2021, Sylabs Inc. All rights reserved.
|
||||
// This software is licensed under a 3-clause BSD license. Please consult the
|
||||
// LICENSE file distributed with the sources of this project regarding your
|
||||
// rights to use or distribute this software.
|
||||
|
||||
package sif
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// descriptorOpts accumulates data object options.
|
||||
type descriptorOpts struct {
|
||||
groupID uint32
|
||||
linkID uint32
|
||||
alignment int
|
||||
name string
|
||||
extra interface{}
|
||||
t time.Time
|
||||
}
|
||||
|
||||
// DescriptorInputOpt are used to specify data object options.
|
||||
type DescriptorInputOpt func(DataType, *descriptorOpts) error
|
||||
|
||||
// OptNoGroup specifies the data object is not contained within a data object group.
|
||||
func OptNoGroup() DescriptorInputOpt {
|
||||
return func(_ DataType, opts *descriptorOpts) error {
|
||||
opts.groupID = 0
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptGroupID specifies groupID as data object group ID.
|
||||
func OptGroupID(groupID uint32) DescriptorInputOpt {
|
||||
return func(_ DataType, opts *descriptorOpts) error {
|
||||
if groupID == 0 {
|
||||
return ErrInvalidGroupID
|
||||
}
|
||||
opts.groupID = groupID
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptLinkedID specifies that the data object is linked to the data object with the specified ID.
|
||||
func OptLinkedID(id uint32) DescriptorInputOpt {
|
||||
return func(_ DataType, opts *descriptorOpts) error {
|
||||
if id == 0 {
|
||||
return ErrInvalidObjectID
|
||||
}
|
||||
opts.linkID = id
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptLinkedGroupID specifies that the data object is linked to the data object group with the
|
||||
// specified groupID.
|
||||
func OptLinkedGroupID(groupID uint32) DescriptorInputOpt {
|
||||
return func(_ DataType, opts *descriptorOpts) error {
|
||||
if groupID == 0 {
|
||||
return ErrInvalidGroupID
|
||||
}
|
||||
opts.linkID = groupID | descrGroupMask
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptObjectAlignment specifies n as the data alignment requirement.
|
||||
func OptObjectAlignment(n int) DescriptorInputOpt {
|
||||
return func(_ DataType, opts *descriptorOpts) error {
|
||||
opts.alignment = n
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptObjectName specifies name as the data object name.
|
||||
func OptObjectName(name string) DescriptorInputOpt {
|
||||
return func(_ DataType, opts *descriptorOpts) error {
|
||||
opts.name = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptObjectTime specifies t as the data object creation time.
|
||||
func OptObjectTime(t time.Time) DescriptorInputOpt {
|
||||
return func(_ DataType, opts *descriptorOpts) error {
|
||||
opts.t = t
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type unexpectedDataTypeError struct {
|
||||
got DataType
|
||||
want []DataType
|
||||
}
|
||||
|
||||
func (e *unexpectedDataTypeError) Error() string {
|
||||
return fmt.Sprintf("unexpected data type %v, expected one of: %v", e.got, e.want)
|
||||
}
|
||||
|
||||
func (e *unexpectedDataTypeError) Is(target error) bool {
|
||||
//nolint:errorlint // don't compare wrapped errors in Is()
|
||||
t, ok := target.(*unexpectedDataTypeError)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(t.want) > 0 {
|
||||
// Use a map to check that the "want" errors in e and t contain the same values, ignoring
|
||||
// any ordering differences.
|
||||
acc := make(map[DataType]int, len(t.want))
|
||||
|
||||
// Increment counter for each data type in e.
|
||||
for _, dt := range e.want {
|
||||
if _, ok := acc[dt]; !ok {
|
||||
acc[dt] = 0
|
||||
}
|
||||
acc[dt]++
|
||||
}
|
||||
|
||||
// Decrement counter for each data type in t.
|
||||
for _, dt := range t.want {
|
||||
if _, ok := acc[dt]; !ok {
|
||||
return false
|
||||
}
|
||||
acc[dt]--
|
||||
}
|
||||
|
||||
// If the "want" errors in e and t are equivalent, all counters should be zero.
|
||||
for _, n := range acc {
|
||||
if n != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return (e.got == t.got || t.got == 0)
|
||||
}
|
||||
|
||||
// OptCryptoMessageMetadata sets metadata for a crypto message data object. The format type is set
|
||||
// to ft, and the message type is set to mt.
|
||||
//
|
||||
// If this option is applied to a data object with an incompatible type, an error is returned.
|
||||
func OptCryptoMessageMetadata(ft FormatType, mt MessageType) DescriptorInputOpt {
|
||||
return func(t DataType, opts *descriptorOpts) error {
|
||||
if got, want := t, DataCryptoMessage; got != want {
|
||||
return &unexpectedDataTypeError{got, []DataType{want}}
|
||||
}
|
||||
|
||||
m := cryptoMessage{
|
||||
Formattype: ft,
|
||||
Messagetype: mt,
|
||||
}
|
||||
|
||||
opts.extra = m
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
var errUnknownArchitcture = errors.New("unknown architecture")
|
||||
|
||||
// OptPartitionMetadata sets metadata for a partition data object. The filesystem type is set to
|
||||
// fs, the partition type is set to pt, and the CPU architecture is set to arch. The value of arch
|
||||
// should be the architecture as represented by the Go runtime.
|
||||
//
|
||||
// If this option is applied to a data object with an incompatible type, an error is returned.
|
||||
func OptPartitionMetadata(fs FSType, pt PartType, arch string) DescriptorInputOpt {
|
||||
return func(t DataType, opts *descriptorOpts) error {
|
||||
if got, want := t, DataPartition; got != want {
|
||||
return &unexpectedDataTypeError{got, []DataType{want}}
|
||||
}
|
||||
|
||||
sifarch := getSIFArch(arch)
|
||||
if sifarch == hdrArchUnknown {
|
||||
return fmt.Errorf("%w: %v", errUnknownArchitcture, arch)
|
||||
}
|
||||
|
||||
p := partition{
|
||||
Fstype: fs,
|
||||
Parttype: pt,
|
||||
Arch: sifarch,
|
||||
}
|
||||
|
||||
opts.extra = p
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// sifHashType converts h into a hashType.
|
||||
func sifHashType(h crypto.Hash) hashType {
|
||||
switch h {
|
||||
case crypto.SHA256:
|
||||
return hashSHA256
|
||||
case crypto.SHA384:
|
||||
return hashSHA384
|
||||
case crypto.SHA512:
|
||||
return hashSHA512
|
||||
case crypto.BLAKE2s_256:
|
||||
return hashBLAKE2S
|
||||
case crypto.BLAKE2b_256:
|
||||
return hashBLAKE2B
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// OptSignatureMetadata sets metadata for a signature data object. The hash type is set to ht, and
|
||||
// the signing entity fingerprint is set to fp.
|
||||
//
|
||||
// If this option is applied to a data object with an incompatible type, an error is returned.
|
||||
func OptSignatureMetadata(ht crypto.Hash, fp []byte) DescriptorInputOpt {
|
||||
return func(t DataType, opts *descriptorOpts) error {
|
||||
if got, want := t, DataSignature; got != want {
|
||||
return &unexpectedDataTypeError{got, []DataType{want}}
|
||||
}
|
||||
|
||||
s := signature{
|
||||
Hashtype: sifHashType(ht),
|
||||
}
|
||||
copy(s.Entity[:], fp)
|
||||
|
||||
opts.extra = s
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DescriptorInput describes a new data object.
|
||||
type DescriptorInput struct {
|
||||
dt DataType
|
||||
r io.Reader
|
||||
opts descriptorOpts
|
||||
}
|
||||
|
||||
// DefaultObjectGroup is the default group that data objects are placed in.
|
||||
const DefaultObjectGroup = 1
|
||||
|
||||
// NewDescriptorInput returns a DescriptorInput representing a data object of type t, with contents
|
||||
// read from r, configured according to opts.
|
||||
//
|
||||
// It is possible (and often necessary) to store additional metadata related to certain types of
|
||||
// data objects. Consider supplying options such as OptCryptoMessageMetadata, OptPartitionMetadata,
|
||||
// and OptSignatureMetadata for this purpose.
|
||||
//
|
||||
// By default, the data object will be placed in the default data object group (1). To override
|
||||
// this behavior, use OptNoGroup or OptGroupID. To link this data object, use OptLinkedID or
|
||||
// OptLinkedGroupID.
|
||||
//
|
||||
// By default, the data object will be aligned according to the system's memory page size. To
|
||||
// override this behavior, consider using OptObjectAlignment.
|
||||
//
|
||||
// By default, no name is set for data object. To set a name, use OptObjectName.
|
||||
//
|
||||
// When creating a new image, data object creation/modification times are set to the image creation
|
||||
// time. When modifying an existing image, the data object creation/modification time is set to the
|
||||
// image modification time. To override this behavior, consider using OptObjectTime.
|
||||
func NewDescriptorInput(t DataType, r io.Reader, opts ...DescriptorInputOpt) (DescriptorInput, error) {
|
||||
dopts := descriptorOpts{
|
||||
groupID: DefaultObjectGroup,
|
||||
alignment: os.Getpagesize(),
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(t, &dopts); err != nil {
|
||||
return DescriptorInput{}, fmt.Errorf("%w", err)
|
||||
}
|
||||
}
|
||||
|
||||
di := DescriptorInput{
|
||||
dt: t,
|
||||
r: r,
|
||||
opts: dopts,
|
||||
}
|
||||
|
||||
return di, nil
|
||||
}
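As a second sketch of NewDescriptorInput, describing a signature object that covers data object group 1; the fingerprint and signature bytes are illustrative, and bytes, crypto, and sif are assumed imports.

fp := make([]byte, 20)          // fingerprint of the signing entity (illustrative)
sigData := []byte("signature")  // encoded signature material (illustrative)

di, err := sif.NewDescriptorInput(sif.DataSignature, bytes.NewReader(sigData),
	sif.OptSignatureMetadata(crypto.SHA256, fp),
	sif.OptLinkedGroupID(1), // the signature covers object group 1
	sif.OptNoGroup(),        // the signature object itself is not grouped
)
if err != nil {
	return err
}
_ = di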
|
||||
|
||||
// fillDescriptor fills d according to di. If di does not explicitly specify a time value, use t.
|
||||
func (di DescriptorInput) fillDescriptor(t time.Time, d *rawDescriptor) error {
|
||||
d.DataType = di.dt
|
||||
d.GroupID = di.opts.groupID | descrGroupMask
|
||||
d.LinkedID = di.opts.linkID
|
||||
|
||||
if !di.opts.t.IsZero() {
|
||||
t = di.opts.t
|
||||
}
|
||||
d.CreatedAt = t.Unix()
|
||||
d.ModifiedAt = t.Unix()
|
||||
|
||||
d.UID = 0
|
||||
d.GID = 0
|
||||
|
||||
if err := d.setName(di.opts.name); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return d.setExtra(di.opts.extra)
|
||||
}
|
174
vendor/github.com/sylabs/sif/v2/pkg/sif/load.go
generated
vendored
Normal file
174
vendor/github.com/sylabs/sif/v2/pkg/sif/load.go
generated
vendored
Normal file
@@ -0,0 +1,174 @@
|
||||
// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
|
||||
// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
|
||||
// Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
|
||||
// This software is licensed under a 3-clause BSD license. Please consult the
|
||||
// LICENSE file distributed with the sources of this project regarding your
|
||||
// rights to use or distribute this software.
|
||||
|
||||
package sif
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidMagic = errors.New("invalid SIF magic")
|
||||
errIncompatibleVersion = errors.New("incompatible SIF version")
|
||||
)
|
||||
|
||||
// isValidSif looks at key fields from the global header to assess SIF validity.
|
||||
func isValidSif(f *FileImage) error {
|
||||
if f.h.Magic != hdrMagic {
|
||||
return errInvalidMagic
|
||||
}
|
||||
|
||||
if f.h.Version != CurrentVersion.bytes() {
|
||||
return errIncompatibleVersion
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// populateMinIDs populates the minIDs field of f.
|
||||
func (f *FileImage) populateMinIDs() {
|
||||
f.minIDs = make(map[uint32]uint32)
|
||||
f.WithDescriptors(func(d Descriptor) bool {
|
||||
if minID, ok := f.minIDs[d.raw.GroupID]; !ok || d.ID() < minID {
|
||||
f.minIDs[d.raw.GroupID] = d.ID()
|
||||
}
|
||||
return false
|
||||
})
|
||||
}
|
||||
|
||||
// loadContainer loads a SIF image from rw.
|
||||
func loadContainer(rw ReadWriter) (*FileImage, error) {
|
||||
f := FileImage{rw: rw}
|
||||
|
||||
// Read global header.
|
||||
err := binary.Read(
|
||||
io.NewSectionReader(rw, 0, int64(binary.Size(f.h))),
|
||||
binary.LittleEndian,
|
||||
&f.h,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading global header: %w", err)
|
||||
}
|
||||
|
||||
if err := isValidSif(&f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read descriptors.
|
||||
f.rds = make([]rawDescriptor, f.h.DescriptorsTotal)
|
||||
err = binary.Read(
|
||||
io.NewSectionReader(rw, f.h.DescriptorsOffset, f.h.DescriptorsSize),
|
||||
binary.LittleEndian,
|
||||
&f.rds,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading descriptors: %w", err)
|
||||
}
|
||||
|
||||
f.populateMinIDs()
|
||||
|
||||
return &f, nil
|
||||
}
|
||||
|
||||
// loadOpts accumulates container loading options.
|
||||
type loadOpts struct {
|
||||
flag int
|
||||
closeOnUnload bool
|
||||
}
|
||||
|
||||
// LoadOpt are used to specify container loading options.
|
||||
type LoadOpt func(*loadOpts) error
|
||||
|
||||
// OptLoadWithFlag specifies flag (os.O_RDONLY etc.) to be used when opening the container file.
|
||||
func OptLoadWithFlag(flag int) LoadOpt {
|
||||
return func(lo *loadOpts) error {
|
||||
lo.flag = flag
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// OptLoadWithCloseOnUnload specifies whether the ReadWriter should be closed by UnloadContainer.
|
||||
// By default, the ReadWriter will be closed if it implements the io.Closer interface.
|
||||
func OptLoadWithCloseOnUnload(b bool) LoadOpt {
|
||||
return func(lo *loadOpts) error {
|
||||
lo.closeOnUnload = b
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// LoadContainerFromPath loads a new SIF container from path, according to opts.
|
||||
//
|
||||
// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
|
||||
// are released.
|
||||
//
|
||||
// By default, the file is opened for read and write access. To change this behavior, consider
|
||||
// using OptLoadWithFlag.
|
||||
func LoadContainerFromPath(path string, opts ...LoadOpt) (*FileImage, error) {
|
||||
lo := loadOpts{
|
||||
flag: os.O_RDWR,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(&lo); err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
}
|
||||
|
||||
fp, err := os.OpenFile(path, lo.flag, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
f, err := loadContainer(fp)
|
||||
if err != nil {
|
||||
fp.Close()
|
||||
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
f.closeOnUnload = true
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// LoadContainer loads a new SIF container from rw, according to opts.
|
||||
//
|
||||
// On success, a FileImage is returned. The caller must call UnloadContainer to ensure resources
|
||||
// are released. By default, UnloadContainer will close rw if it implements the io.Closer
|
||||
// interface. To change this behavior, consider using OptLoadWithCloseOnUnload.
|
||||
func LoadContainer(rw ReadWriter, opts ...LoadOpt) (*FileImage, error) {
|
||||
lo := loadOpts{
|
||||
closeOnUnload: true,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
if err := opt(&lo); err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
}
|
||||
|
||||
f, err := loadContainer(rw)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
f.closeOnUnload = lo.closeOnUnload
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// UnloadContainer unloads f, releasing associated resources.
|
||||
func (f *FileImage) UnloadContainer() error {
|
||||
if c, ok := f.rw.(io.Closer); ok && f.closeOnUnload {
|
||||
if err := c.Close(); err != nil {
|
||||
return fmt.Errorf("%w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
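A read-only load using the flag option described above, inside a function returning error:

f, err := sif.LoadContainerFromPath("image.sif", sif.OptLoadWithFlag(os.O_RDONLY))
if err != nil {
	return err
}
defer f.UnloadContainer()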
|
210
vendor/github.com/sylabs/sif/v2/pkg/sif/select.go
generated
vendored
Normal file
210
vendor/github.com/sylabs/sif/v2/pkg/sif/select.go
generated
vendored
Normal file
@@ -0,0 +1,210 @@
|
||||
// Copyright (c) 2021, Sylabs Inc. All rights reserved.
|
||||
// This software is licensed under a 3-clause BSD license. Please consult the
|
||||
// LICENSE file distributed with the sources of this project regarding your
|
||||
// rights to use or distribute this software.
|
||||
|
||||
package sif
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ErrNoObjects is the error returned when an image contains no data objects.
|
||||
var ErrNoObjects = errors.New("no objects in image")
|
||||
|
||||
// ErrObjectNotFound is the error returned when a data object is not found.
|
||||
var ErrObjectNotFound = errors.New("object not found")
|
||||
|
||||
// ErrMultipleObjectsFound is the error returned when multiple data objects are found.
|
||||
var ErrMultipleObjectsFound = errors.New("multiple objects found")
|
||||
|
||||
// ErrInvalidObjectID is the error returned when an invalid object ID is supplied.
|
||||
var ErrInvalidObjectID = errors.New("invalid object ID")
|
||||
|
||||
// ErrInvalidGroupID is the error returned when an invalid group ID is supplied.
|
||||
var ErrInvalidGroupID = errors.New("invalid group ID")
|
||||
|
||||
// DescriptorSelectorFunc returns true if d matches, and false otherwise.
|
||||
type DescriptorSelectorFunc func(d Descriptor) (bool, error)
|
||||
|
||||
// WithDataType selects descriptors that have data type dt.
|
||||
func WithDataType(dt DataType) DescriptorSelectorFunc {
|
||||
return func(d Descriptor) (bool, error) {
|
||||
return d.DataType() == dt, nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithID selects descriptors with a matching ID.
|
||||
func WithID(id uint32) DescriptorSelectorFunc {
|
||||
return func(d Descriptor) (bool, error) {
|
||||
if id == 0 {
|
||||
return false, ErrInvalidObjectID
|
||||
}
|
||||
return d.ID() == id, nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithNoGroup selects descriptors that are not contained within an object group.
|
||||
func WithNoGroup() DescriptorSelectorFunc {
|
||||
return func(d Descriptor) (bool, error) {
|
||||
return d.GroupID() == 0, nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithGroupID returns a selector func that selects descriptors with a matching groupID.
|
||||
func WithGroupID(groupID uint32) DescriptorSelectorFunc {
|
||||
return func(d Descriptor) (bool, error) {
|
||||
if groupID == 0 {
|
||||
return false, ErrInvalidGroupID
|
||||
}
|
||||
return d.GroupID() == groupID, nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithLinkedID selects descriptors that are linked to the data object with specified ID.
|
||||
func WithLinkedID(id uint32) DescriptorSelectorFunc {
|
||||
return func(d Descriptor) (bool, error) {
|
||||
if id == 0 {
|
||||
return false, ErrInvalidObjectID
|
||||
}
|
||||
linkedID, isGroup := d.LinkedID()
|
||||
return !isGroup && linkedID == id, nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithLinkedGroupID selects descriptors that are linked to the data object group with specified
|
||||
// ID.
|
||||
func WithLinkedGroupID(groupID uint32) DescriptorSelectorFunc {
|
||||
return func(d Descriptor) (bool, error) {
|
||||
if groupID == 0 {
|
||||
return false, ErrInvalidGroupID
|
||||
}
|
||||
linkedID, isGroup := d.LinkedID()
|
||||
return isGroup && linkedID == groupID, nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithPartitionType selects descriptors containing a partition of type pt.
|
||||
func WithPartitionType(pt PartType) DescriptorSelectorFunc {
|
||||
return func(d Descriptor) (bool, error) {
|
||||
return d.raw.isPartitionOfType(pt), nil
|
||||
}
|
||||
}
|
||||
|
||||
// descriptorFromRaw populates a Descriptor from rd.
|
||||
func (f *FileImage) descriptorFromRaw(rd *rawDescriptor) Descriptor {
|
||||
return Descriptor{
|
||||
raw: *rd,
|
||||
r: f.rw,
|
||||
relativeID: rd.ID - f.minIDs[rd.GroupID],
|
||||
}
|
||||
}
|
||||
|
||||
// GetDescriptors returns a slice of in-use descriptors for which all selector funcs return true.
|
||||
// If the image contains no data objects, an error wrapping ErrNoObjects is returned.
|
||||
func (f *FileImage) GetDescriptors(fns ...DescriptorSelectorFunc) ([]Descriptor, error) {
|
||||
if f.DescriptorsFree() == f.DescriptorsTotal() {
|
||||
return nil, fmt.Errorf("%w", ErrNoObjects)
|
||||
}
|
||||
|
||||
var ds []Descriptor
|
||||
|
||||
err := f.withDescriptors(multiSelectorFunc(fns...), func(d *rawDescriptor) error {
|
||||
ds = append(ds, f.descriptorFromRaw(d))
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
return ds, nil
|
||||
}
|
||||
|
||||
// getDescriptor returns a pointer to the in-use descriptor selected by fns. If no descriptor is
|
||||
// selected by fns, ErrObjectNotFound is returned. If multiple descriptors are selected by fns,
|
||||
// ErrMultipleObjectsFound is returned.
|
||||
func (f *FileImage) getDescriptor(fns ...DescriptorSelectorFunc) (*rawDescriptor, error) {
|
||||
var d *rawDescriptor
|
||||
|
||||
err := f.withDescriptors(multiSelectorFunc(fns...), func(found *rawDescriptor) error {
|
||||
if d != nil {
|
||||
return ErrMultipleObjectsFound
|
||||
}
|
||||
d = found
|
||||
return nil
|
||||
})
|
||||
|
||||
if err == nil && d == nil {
|
||||
err = ErrObjectNotFound
|
||||
}
|
||||
|
||||
return d, err
|
||||
}
|
||||
|
||||
// GetDescriptor returns the in-use descriptor selected by fns. If the image contains no data
|
||||
// objects, an error wrapping ErrNoObjects is returned. If no descriptor is selected by fns, an
|
||||
// error wrapping ErrObjectNotFound is returned. If multiple descriptors are selected by fns, an
|
||||
// error wrapping ErrMultipleObjectsFound is returned.
|
||||
func (f *FileImage) GetDescriptor(fns ...DescriptorSelectorFunc) (Descriptor, error) {
|
||||
if f.DescriptorsFree() == f.DescriptorsTotal() {
|
||||
return Descriptor{}, fmt.Errorf("%w", ErrNoObjects)
|
||||
}
|
||||
|
||||
d, err := f.getDescriptor(fns...)
|
||||
if err != nil {
|
||||
return Descriptor{}, fmt.Errorf("%w", err)
|
||||
}
|
||||
|
||||
return f.descriptorFromRaw(d), nil
|
||||
}
|
||||
|
||||
// multiSelectorFunc returns a DescriptorSelectorFunc that selects a descriptor iff all of fns
|
||||
// select the descriptor.
|
||||
func multiSelectorFunc(fns ...DescriptorSelectorFunc) DescriptorSelectorFunc {
|
||||
return func(d Descriptor) (bool, error) {
|
||||
for _, fn := range fns {
|
||||
if ok, err := fn(d); !ok || err != nil {
|
||||
return ok, err
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// withDescriptors calls onMatchFn with each in-use descriptor in f for which selectFn returns
|
||||
// true. If selectFn or onMatchFn return a non-nil error, the iteration halts, and the error is
|
||||
// returned to the caller.
|
||||
func (f *FileImage) withDescriptors(selectFn DescriptorSelectorFunc, onMatchFn func(*rawDescriptor) error) error {
|
||||
for i, d := range f.rds {
|
||||
if !d.Used {
|
||||
continue
|
||||
}
|
||||
|
||||
if ok, err := selectFn(f.descriptorFromRaw(&f.rds[i])); err != nil {
|
||||
return err
|
||||
} else if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := onMatchFn(&f.rds[i]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var errAbort = errors.New("abort")
|
||||
|
||||
// abortOnMatch is a semantic convenience function that always returns a non-nil error, which can
|
||||
// be used as a no-op matchFn.
|
||||
func abortOnMatch(*rawDescriptor) error { return errAbort }
|
||||
|
||||
// WithDescriptors calls fn with each in-use descriptor in f, until fn returns true.
|
||||
func (f *FileImage) WithDescriptors(fn func(d Descriptor) bool) {
|
||||
selectFn := func(d Descriptor) (bool, error) {
|
||||
return fn(d), nil
|
||||
}
|
||||
_ = f.withDescriptors(selectFn, abortOnMatch)
|
||||
}
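A sketch of the two enumeration styles above, with f a loaded *sif.FileImage and fmt assumed imported:

// Selector style: all signature objects linked to data object group 1.
sigs, err := f.GetDescriptors(
	sif.WithDataType(sif.DataSignature),
	sif.WithLinkedGroupID(1),
)
if err != nil {
	return err
}
fmt.Println("signatures:", len(sigs))

// Callback style: walk every in-use descriptor; returning true stops the walk.
f.WithDescriptors(func(d sif.Descriptor) bool {
	fmt.Println(d.ID(), d.DataType(), d.Name())
	return false
})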
|
364
vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go
generated
vendored
Normal file
364
vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go
generated
vendored
Normal file
@@ -0,0 +1,364 @@
|
||||
// Copyright (c) 2018-2021, Sylabs Inc. All rights reserved.
|
||||
// Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
|
||||
// Copyright (c) 2017, Yannick Cote <yhcote@gmail.com> All rights reserved.
|
||||
// This software is licensed under a 3-clause BSD license. Please consult the
|
||||
// LICENSE file distributed with the sources of this project regarding your
|
||||
// rights to use or distribute this software.
|
||||
|
||||
// Package sif implements data structures and routines to create
|
||||
// and access SIF files.
|
||||
//
|
||||
// Layout of a SIF file (example):
|
||||
//
|
||||
// .================================================.
|
||||
// | GLOBAL HEADER: Sifheader |
|
||||
// | - launch: "#!/usr/bin/env..." |
|
||||
// | - magic: "SIF_MAGIC" |
|
||||
// | - version: "1" |
|
||||
// | - arch: "4" |
|
||||
// | - uuid: b2659d4e-bd50-4ea5-bd17-eec5e54f918e |
|
||||
// | - ctime: 1504657553 |
|
||||
// | - mtime: 1504657653 |
|
||||
// | - ndescr: 3 |
|
||||
// | - descroff: 120 | --.
|
||||
// | - descrlen: 432 | |
|
||||
// | - dataoff: 4096 | |
|
||||
// | - datalen: 619362 | |
|
||||
// |------------------------------------------------| <-'
|
||||
// | DESCR[0]: Sifdeffile |
|
||||
// | - Sifcommon |
|
||||
// | - datatype: DATA_DEFFILE |
|
||||
// | - id: 1 |
|
||||
// | - groupid: 1 |
|
||||
// | - link: NONE |
|
||||
// | - fileoff: 4096 | --.
|
||||
// | - filelen: 222 | |
|
||||
// |------------------------------------------------| <-----.
|
||||
// | DESCR[1]: Sifpartition | | |
|
||||
// | - Sifcommon | | |
|
||||
// | - datatype: DATA_PARTITION | | |
|
||||
// | - id: 2 | | |
|
||||
// | - groupid: 1 | | |
|
||||
// | - link: NONE | | |
|
||||
// | - fileoff: 4318 | ----. |
|
||||
// | - filelen: 618496 | | | |
|
||||
// | - fstype: Squashfs | | | |
|
||||
// | - parttype: System | | | |
|
||||
// | - content: Linux | | | |
|
||||
// |------------------------------------------------| | | |
|
||||
// | DESCR[2]: Sifsignature | | | |
|
||||
// | - Sifcommon | | | |
|
||||
// | - datatype: DATA_SIGNATURE | | | |
|
||||
// | - id: 3 | | | |
|
||||
// | - groupid: NONE | | | |
|
||||
// | - link: 2 | ------'
|
||||
// | - fileoff: 622814 | ------.
|
||||
// | - filelen: 644 | | | |
|
||||
// | - hashtype: SHA384 | | | |
|
||||
// | - entity: @ | | | |
|
||||
// |------------------------------------------------| <-' | |
|
||||
// | Definition file data | | |
|
||||
// | . | | |
|
||||
// | . | | |
|
||||
// | . | | |
|
||||
// |------------------------------------------------| <---' |
|
||||
// | File system partition image | |
|
||||
// | . | |
|
||||
// | . | |
|
||||
// | . | |
|
||||
// |------------------------------------------------| <-----'
|
||||
// | Signed verification data |
|
||||
// | . |
|
||||
// | . |
|
||||
// | . |
|
||||
// `================================================'
|
||||
//
|
||||
package sif
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
// SIF header constants and quantities.
|
||||
const (
|
||||
hdrLaunchLen = 32 // len("#!/usr/bin/env... ")
|
||||
hdrMagicLen = 10 // len("SIF_MAGIC")
|
||||
hdrVersionLen = 3 // len("99")
|
||||
)
|
||||
|
||||
var hdrMagic = [...]byte{'S', 'I', 'F', '_', 'M', 'A', 'G', 'I', 'C', '\x00'}
|
||||
|
||||
// SpecVersion specifies a SIF specification version.
|
||||
type SpecVersion uint8
|
||||
|
||||
func (v SpecVersion) String() string { return fmt.Sprintf("%02d", v) }
|
||||
|
||||
// bytes returns the value of b, formatted for direct inclusion in a SIF header.
|
||||
func (v SpecVersion) bytes() [hdrVersionLen]byte {
|
||||
var b [3]byte
|
||||
copy(b[:], fmt.Sprintf("%02d", v))
|
||||
return b
|
||||
}
|
||||
|
||||
// SIF specification versions.
|
||||
const (
|
||||
version01 SpecVersion = iota + 1
|
||||
)
|
||||
|
||||
// CurrentVersion specifies the current SIF specification version.
|
||||
const CurrentVersion = version01
|
||||
|
||||
const (
|
||||
descrGroupMask = 0xf0000000 // groups start at that offset
|
||||
descrEntityLen = 256 // len("Joe Bloe <jbloe@gmail.com>...")
|
||||
descrNameLen = 128 // descriptor name (string identifier)
|
||||
descrMaxPrivLen = 384 // size reserved for descriptor specific data
|
||||
)
|
||||
|
||||
// DataType represents the different SIF data object types stored in the image.
|
||||
type DataType int32
|
||||
|
||||
// List of supported SIF data types.
|
||||
const (
|
||||
DataDeffile DataType = iota + 0x4001 // definition file data object
|
||||
DataEnvVar // environment variables data object
|
||||
DataLabels // JSON labels data object
|
||||
DataPartition // file system data object
|
||||
DataSignature // signing/verification data object
|
||||
DataGenericJSON // generic JSON meta-data
|
||||
DataGeneric // generic / raw data
|
||||
DataCryptoMessage // cryptographic message data object
|
||||
)
|
||||
|
||||
// String returns a human-readable representation of t.
|
||||
func (t DataType) String() string {
|
||||
switch t {
|
||||
case DataDeffile:
|
||||
return "Def.FILE"
|
||||
case DataEnvVar:
|
||||
return "Env.Vars"
|
||||
case DataLabels:
|
||||
return "JSON.Labels"
|
||||
case DataPartition:
|
||||
return "FS"
|
||||
case DataSignature:
|
||||
return "Signature"
|
||||
case DataGenericJSON:
|
||||
return "JSON.Generic"
|
||||
case DataGeneric:
|
||||
return "Generic/Raw"
|
||||
case DataCryptoMessage:
|
||||
return "Cryptographic Message"
|
||||
}
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
// FSType represents the different SIF file system types found in partition data objects.
|
||||
type FSType int32
|
||||
|
||||
// List of supported file systems.
|
||||
const (
|
||||
FsSquash FSType = iota + 1 // Squashfs file system, RDONLY
|
||||
FsExt3 // EXT3 file system, RDWR (deprecated)
|
||||
FsImmuObj // immutable data object archive
|
||||
FsRaw // raw data
|
||||
FsEncryptedSquashfs // Encrypted Squashfs file system, RDONLY
|
||||
)
|
||||
|
||||
// String returns a human-readable representation of t.
|
||||
func (t FSType) String() string {
|
||||
switch t {
|
||||
case FsSquash:
|
||||
return "Squashfs"
|
||||
case FsExt3:
|
||||
return "Ext3"
|
||||
case FsImmuObj:
|
||||
return "Archive"
|
||||
case FsRaw:
|
||||
return "Raw"
|
||||
case FsEncryptedSquashfs:
|
||||
return "Encrypted squashfs"
|
||||
}
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
// PartType represents the different SIF container partition types (system and data).
|
||||
type PartType int32
|
||||
|
||||
// List of supported partition types.
|
||||
const (
|
||||
PartSystem PartType = iota + 1 // partition hosts an operating system
|
||||
PartPrimSys // partition hosts the primary operating system
|
||||
PartData // partition hosts data only
|
||||
PartOverlay // partition hosts an overlay
|
||||
)
|
||||
|
||||
// String returns a human-readable representation of t.
|
||||
func (t PartType) String() string {
|
||||
switch t {
|
||||
case PartSystem:
|
||||
return "System"
|
||||
case PartPrimSys:
|
||||
return "*System"
|
||||
case PartData:
|
||||
return "Data"
|
||||
case PartOverlay:
|
||||
return "Overlay"
|
||||
}
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
// hashType represents the different SIF hashing function types used to fingerprint data objects.
|
||||
type hashType int32
|
||||
|
||||
// List of supported hash functions.
|
||||
const (
|
||||
hashSHA256 hashType = iota + 1
|
||||
hashSHA384
|
||||
hashSHA512
|
||||
hashBLAKE2S
|
||||
hashBLAKE2B
|
||||
)
|
||||
|
||||
// FormatType represents the different formats used to store cryptographic message objects.
|
||||
type FormatType int32
|
||||
|
||||
// List of supported cryptographic message formats.
|
||||
const (
|
||||
FormatOpenPGP FormatType = iota + 1
|
||||
FormatPEM
|
||||
)
|
||||
|
||||
// String returns a human-readable representation of t.
|
||||
func (t FormatType) String() string {
|
||||
switch t {
|
||||
case FormatOpenPGP:
|
||||
return "OpenPGP"
|
||||
case FormatPEM:
|
||||
return "PEM"
|
||||
}
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
// MessageType represents the different messages stored within cryptographic message objects.
|
||||
type MessageType int32
|
||||
|
||||
// List of supported cryptographic message formats.
|
||||
const (
|
||||
// openPGP formatted messages.
|
||||
MessageClearSignature MessageType = 0x100
|
||||
|
||||
// PEM formatted messages.
|
||||
MessageRSAOAEP MessageType = 0x200
|
||||
)
|
||||
|
||||
// String returns a human-readable representation of t.
|
||||
func (t MessageType) String() string {
|
||||
switch t {
|
||||
case MessageClearSignature:
|
||||
return "Clear Signature"
|
||||
case MessageRSAOAEP:
|
||||
return "RSA-OAEP"
|
||||
}
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
// header describes a loaded SIF file.
|
||||
type header struct {
|
||||
LaunchScript [hdrLaunchLen]byte
|
||||
|
||||
Magic [hdrMagicLen]byte
|
||||
Version [hdrVersionLen]byte
|
||||
Arch archType
|
||||
ID uuid.UUID
|
||||
|
||||
CreatedAt int64
|
||||
ModifiedAt int64
|
||||
|
||||
DescriptorsFree int64
|
||||
DescriptorsTotal int64
|
||||
DescriptorsOffset int64
|
||||
DescriptorsSize int64
|
||||
DataOffset int64
|
||||
DataSize int64
|
||||
}
|
||||
|
||||
// GetIntegrityReader returns an io.Reader that reads the integrity-protected fields from h.
|
||||
func (h header) GetIntegrityReader() io.Reader {
|
||||
return io.MultiReader(
|
||||
bytes.NewReader(h.LaunchScript[:]),
|
||||
bytes.NewReader(h.Magic[:]),
|
||||
bytes.NewReader(h.Version[:]),
|
||||
bytes.NewReader(h.ID[:]),
|
||||
)
|
||||
}
|
||||
|
||||
// ReadWriter describes the interface required to read and write SIF images.
|
||||
type ReadWriter interface {
|
||||
io.ReaderAt
|
||||
io.WriteSeeker
|
||||
Truncate(int64) error
|
||||
}
|
||||
|
||||
// FileImage describes the representation of a SIF file in memory.
|
||||
type FileImage struct {
|
||||
rw ReadWriter // Backing storage for image.
|
||||
|
||||
h header // Raw global header from image.
|
||||
rds []rawDescriptor // Raw descriptors from image.
|
||||
|
||||
closeOnUnload bool // Close rw on Unload.
|
||||
minIDs map[uint32]uint32 // Minimum object IDs for each group ID.
|
||||
}
|
||||
|
||||
// LaunchScript returns the image launch script.
|
||||
func (f *FileImage) LaunchScript() string {
|
||||
return string(bytes.TrimRight(f.h.LaunchScript[:], "\x00"))
|
||||
}
|
||||
|
||||
// Version returns the SIF specification version of the image.
|
||||
func (f *FileImage) Version() string {
|
||||
return string(bytes.TrimRight(f.h.Version[:], "\x00"))
|
||||
}
|
||||
|
||||
// PrimaryArch returns the primary CPU architecture of the image, or "unknown" if the primary CPU
|
||||
// architecture cannot be determined.
|
||||
func (f *FileImage) PrimaryArch() string { return f.h.Arch.GoArch() }
|
||||
|
||||
// ID returns the ID of the image.
|
||||
func (f *FileImage) ID() string { return f.h.ID.String() }
|
||||
|
||||
// CreatedAt returns the creation time of the image.
|
||||
func (f *FileImage) CreatedAt() time.Time { return time.Unix(f.h.CreatedAt, 0) }
|
||||
|
||||
// ModifiedAt returns the last modification time of the image.
|
||||
func (f *FileImage) ModifiedAt() time.Time { return time.Unix(f.h.ModifiedAt, 0) }
|
||||
|
||||
// DescriptorsFree returns the number of free descriptors in the image.
|
||||
func (f *FileImage) DescriptorsFree() int64 { return f.h.DescriptorsFree }
|
||||
|
||||
// DescriptorsTotal returns the total number of descriptors in the image.
|
||||
func (f *FileImage) DescriptorsTotal() int64 { return f.h.DescriptorsTotal }
|
||||
|
||||
// DescriptorsOffset returns the offset (in bytes) of the descriptors section in the image.
|
||||
func (f *FileImage) DescriptorsOffset() int64 { return f.h.DescriptorsOffset }
|
||||
|
||||
// DescriptorsSize returns the size (in bytes) of the descriptors section in the image.
|
||||
func (f *FileImage) DescriptorsSize() int64 { return f.h.DescriptorsSize }
|
||||
|
||||
// DataOffset returns the offset (in bytes) of the data section in the image.
|
||||
func (f *FileImage) DataOffset() int64 { return f.h.DataOffset }
|
||||
|
||||
// DataSize returns the size (in bytes) of the data section in the image.
|
||||
func (f *FileImage) DataSize() int64 { return f.h.DataSize }
|
||||
|
||||
// GetHeaderIntegrityReader returns an io.Reader that reads the integrity-protected fields from the
|
||||
// header of the image.
|
||||
func (f *FileImage) GetHeaderIntegrityReader() io.Reader {
|
||||
return f.h.GetIntegrityReader()
|
||||
}
|
93
vendor/github.com/vbauerster/mpb/v7/bar.go
generated
vendored
93
vendor/github.com/vbauerster/mpb/v7/bar.go
generated
vendored
@@ -5,7 +5,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"sync"
|
||||
@@ -36,7 +35,6 @@ type Bar struct {
|
||||
cacheState *bState
|
||||
|
||||
container *Progress
|
||||
dlogger *log.Logger
|
||||
recoveredPanic interface{}
|
||||
}
|
||||
|
||||
@@ -64,7 +62,7 @@ type bState struct {
|
||||
averageDecorators []decor.AverageDecorator
|
||||
ewmaDecorators []decor.EwmaDecorator
|
||||
shutdownListeners []decor.ShutdownListener
|
||||
bufP, bufB, bufA *bytes.Buffer
|
||||
buffers [3]*bytes.Buffer
|
||||
filler BarFiller
|
||||
middleware func(BarFiller) BarFiller
|
||||
extender extenderFunc
|
||||
@@ -81,7 +79,6 @@ type frame struct {
|
||||
}
|
||||
|
||||
func newBar(container *Progress, bs *bState) *Bar {
|
||||
logPrefix := fmt.Sprintf("%sbar#%02d ", container.dlogger.Prefix(), bs.id)
|
||||
ctx, cancel := context.WithCancel(container.ctx)
|
||||
|
||||
bar := &Bar{
|
||||
@@ -93,7 +90,6 @@ func newBar(container *Progress, bs *bState) *Bar {
|
||||
frameCh: make(chan *frame, 1),
|
||||
done: make(chan struct{}),
|
||||
cancel: cancel,
|
||||
dlogger: log.New(bs.debugOut, logPrefix, log.Lshortfile),
|
||||
}
|
||||
|
||||
go bar.serve(ctx, bs)
|
||||
@@ -106,7 +102,7 @@ func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
|
||||
if r == nil {
|
||||
panic("expected non nil io.Reader")
|
||||
}
|
||||
return newProxyReader(r, b)
|
||||
return b.newProxyReader(r)
|
||||
}
|
||||
|
||||
// ID returs id of the bar.
|
||||
@@ -279,7 +275,7 @@ func (b *Bar) Abort(drop bool) {
|
||||
done := make(chan struct{})
|
||||
select {
|
||||
case b.operateState <- func(s *bState) {
|
||||
if s.completed == true {
|
||||
if s.completed {
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
@@ -346,13 +342,16 @@ func (b *Bar) render(tw int) {
|
||||
// recovering if user defined decorator panics for example
|
||||
if p := recover(); p != nil {
|
||||
if b.recoveredPanic == nil {
|
||||
if s.debugOut != nil {
|
||||
fmt.Fprintln(s.debugOut, p)
|
||||
_, _ = s.debugOut.Write(debug.Stack())
|
||||
}
|
||||
s.extender = makePanicExtender(p)
|
||||
b.toShutdown = !b.toShutdown
|
||||
b.recoveredPanic = p
|
||||
}
|
||||
reader, lines := s.extender(nil, s.reqWidth, stat)
|
||||
b.frameCh <- &frame{reader, lines + 1}
|
||||
b.dlogger.Println(p)
|
||||
}
|
||||
s.completeFlushed = s.completed
|
||||
}()
|
||||
@@ -429,40 +428,41 @@ func (b *Bar) wSyncTable() [][]chan int {
|
||||
}
|
||||
|
||||
func (s *bState) draw(stat decor.Statistics) io.Reader {
|
||||
bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2]
|
||||
nlr := strings.NewReader("\n")
|
||||
tw := stat.AvailableWidth
|
||||
for _, d := range s.pDecorators {
|
||||
str := d.Decor(stat)
|
||||
stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
|
||||
s.bufP.WriteString(str)
|
||||
bufP.WriteString(str)
|
||||
}
|
||||
if stat.AvailableWidth < 1 {
|
||||
trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufP.String()), tw, "…"))
|
||||
s.bufP.Reset()
|
||||
trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufP.String()), tw, "…"))
|
||||
bufP.Reset()
|
||||
return io.MultiReader(trunc, nlr)
|
||||
}
|
||||
|
||||
if !s.trimSpace && stat.AvailableWidth > 1 {
|
||||
stat.AvailableWidth -= 2
|
||||
s.bufB.WriteByte(' ')
|
||||
defer s.bufB.WriteByte(' ')
|
||||
bufB.WriteByte(' ')
|
||||
defer bufB.WriteByte(' ')
|
||||
}
|
||||
|
||||
tw = stat.AvailableWidth
|
||||
for _, d := range s.aDecorators {
|
||||
str := d.Decor(stat)
|
||||
stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str))
|
||||
s.bufA.WriteString(str)
|
||||
bufA.WriteString(str)
|
||||
}
|
||||
if stat.AvailableWidth < 1 {
|
||||
trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(s.bufA.String()), tw, "…"))
|
||||
s.bufA.Reset()
|
||||
return io.MultiReader(s.bufP, s.bufB, trunc, nlr)
|
||||
trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufA.String()), tw, "…"))
|
||||
bufA.Reset()
|
||||
return io.MultiReader(bufP, bufB, trunc, nlr)
|
||||
}
|
||||
|
||||
s.filler.Fill(s.bufB, s.reqWidth, stat)
|
||||
s.filler.Fill(bufB, s.reqWidth, stat)
|
||||
|
||||
return io.MultiReader(s.bufP, s.bufB, s.bufA, nlr)
|
||||
return io.MultiReader(bufP, bufB, bufA, nlr)
|
||||
}
|
||||
|
||||
func (s *bState) wSyncTable() [][]chan int {
|
||||
@@ -489,39 +489,51 @@ func (s *bState) wSyncTable() [][]chan int {
|
||||
|
||||
func (s bState) decoratorEwmaUpdate(dur time.Duration) {
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(len(s.ewmaDecorators))
|
||||
for _, d := range s.ewmaDecorators {
|
||||
d := d
|
||||
go func() {
|
||||
for i := 0; i < len(s.ewmaDecorators); i++ {
|
||||
switch d := s.ewmaDecorators[i]; i {
|
||||
case len(s.ewmaDecorators) - 1:
|
||||
d.EwmaUpdate(s.lastIncrement, dur)
|
||||
wg.Done()
|
||||
}()
|
||||
default:
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
d.EwmaUpdate(s.lastIncrement, dur)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (s bState) decoratorAverageAdjust(start time.Time) {
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(len(s.averageDecorators))
|
||||
for _, d := range s.averageDecorators {
|
||||
d := d
|
||||
go func() {
|
||||
for i := 0; i < len(s.averageDecorators); i++ {
|
||||
switch d := s.averageDecorators[i]; i {
|
||||
case len(s.averageDecorators) - 1:
|
||||
d.AverageAdjust(start)
|
||||
wg.Done()
|
||||
}()
|
||||
default:
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
d.AverageAdjust(start)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func (s bState) decoratorShutdownNotify() {
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(len(s.shutdownListeners))
|
||||
for _, d := range s.shutdownListeners {
|
||||
d := d
|
||||
go func() {
|
||||
for i := 0; i < len(s.shutdownListeners); i++ {
|
||||
switch d := s.shutdownListeners[i]; i {
|
||||
case len(s.shutdownListeners) - 1:
|
||||
d.Shutdown()
|
||||
wg.Done()
|
||||
}()
|
||||
default:
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
d.Shutdown()
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
@@ -547,14 +559,11 @@ func extractBaseDecorator(d decor.Decorator) decor.Decorator {
|
||||
|
||||
func makePanicExtender(p interface{}) extenderFunc {
|
||||
pstr := fmt.Sprint(p)
|
||||
stack := debug.Stack()
|
||||
stackLines := bytes.Count(stack, []byte("\n"))
|
||||
return func(_ io.Reader, _ int, st decor.Statistics) (io.Reader, int) {
|
||||
mr := io.MultiReader(
|
||||
strings.NewReader(runewidth.Truncate(pstr, st.AvailableWidth, "…")),
|
||||
strings.NewReader(fmt.Sprintf("\n%#v\n", st)),
|
||||
bytes.NewReader(stack),
|
||||
strings.NewReader("\n"),
|
||||
)
|
||||
return mr, stackLines + 1
|
||||
return mr, 0
|
||||
}
|
||||
}
|
||||
|
37
vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
generated
vendored
37
vendor/github.com/vbauerster/mpb/v7/bar_filler_bar.go
generated
vendored
@@ -32,13 +32,13 @@ type BarStyleComposer interface {
|
||||
}
|
||||
|
||||
type bFiller struct {
|
||||
rev bool
|
||||
components [components]*component
|
||||
tip struct {
|
||||
count uint
|
||||
onComplete *component
|
||||
frames []*component
|
||||
}
|
||||
flush func(dst io.Writer, filling, padding [][]byte)
|
||||
}
|
||||
|
||||
type component struct {
|
||||
@@ -113,14 +113,7 @@ func (s *barStyle) Reverse() BarStyleComposer {
|
||||
}
|
||||
|
||||
func (s *barStyle) Build() BarFiller {
|
||||
bf := new(bFiller)
|
||||
if s.rev {
|
||||
bf.flush = func(dst io.Writer, filling, padding [][]byte) {
|
||||
flush(dst, padding, filling)
|
||||
}
|
||||
} else {
|
||||
bf.flush = flush
|
||||
}
|
||||
bf := &bFiller{rev: s.rev}
|
||||
bf.components[iLbound] = &component{
|
||||
width: runewidth.StringWidth(stripansi.Strip(s.lbound)),
|
||||
bytes: []byte(s.lbound),
|
||||
@@ -164,8 +157,9 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
|
||||
return
|
||||
}
|
||||
|
||||
w.Write(s.components[iLbound].bytes)
|
||||
defer w.Write(s.components[iRbound].bytes)
|
||||
ow := optimisticWriter(w)
|
||||
ow(s.components[iLbound].bytes)
|
||||
defer ow(s.components[iRbound].bytes)
|
||||
|
||||
if width == 0 {
|
||||
return
|
||||
@@ -236,14 +230,27 @@ func (s *bFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
|
||||
}
|
||||
}
|
||||
|
||||
s.flush(w, filling, padding)
|
||||
if s.rev {
|
||||
flush(ow, padding, filling)
|
||||
} else {
|
||||
flush(ow, filling, padding)
|
||||
}
|
||||
}
|
||||
|
||||
func flush(dst io.Writer, filling, padding [][]byte) {
|
||||
func flush(ow func([]byte), filling, padding [][]byte) {
|
||||
for i := len(filling) - 1; i >= 0; i-- {
|
||||
dst.Write(filling[i])
|
||||
ow(filling[i])
|
||||
}
|
||||
for i := 0; i < len(padding); i++ {
|
||||
dst.Write(padding[i])
|
||||
ow(padding[i])
|
||||
}
|
||||
}
|
||||
|
||||
func optimisticWriter(w io.Writer) func([]byte) {
|
||||
return func(p []byte) {
|
||||
_, err := w.Write(p)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
10
vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
generated
vendored
10
vendor/github.com/vbauerster/mpb/v7/bar_filler_spinner.go
generated
vendored
@@ -73,15 +73,19 @@ func (s *sFiller) Fill(w io.Writer, width int, stat decor.Statistics) {
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
rest := width - frameWidth
|
||||
switch s.position {
|
||||
case positionLeft:
|
||||
io.WriteString(w, frame+strings.Repeat(" ", rest))
|
||||
_, err = io.WriteString(w, frame+strings.Repeat(" ", rest))
|
||||
case positionRight:
|
||||
io.WriteString(w, strings.Repeat(" ", rest)+frame)
|
||||
_, err = io.WriteString(w, strings.Repeat(" ", rest)+frame)
|
||||
default:
|
||||
str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2)
|
||||
io.WriteString(w, str)
|
||||
_, err = io.WriteString(w, str)
|
||||
}
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
s.count++
|
||||
}
|
||||
|
5
vendor/github.com/vbauerster/mpb/v7/bar_option.go
generated
vendored
5
vendor/github.com/vbauerster/mpb/v7/bar_option.go
generated
vendored
@@ -89,7 +89,10 @@ func BarFillerOnComplete(message string) BarOption {
|
||||
return BarFillerMiddleware(func(base BarFiller) BarFiller {
|
||||
return BarFillerFunc(func(w io.Writer, reqWidth int, st decor.Statistics) {
|
||||
if st.Completed {
|
||||
io.WriteString(w, message)
|
||||
_, err := io.WriteString(w, message)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
base.Fill(w, reqWidth, st)
|
||||
}
|
||||
|
6
vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
generated
vendored
6
vendor/github.com/vbauerster/mpb/v7/cwriter/writer.go
generated
vendored
@@ -76,9 +76,9 @@ func (w *Writer) GetWidth() (int, error) {
|
||||
return tw, err
|
||||
}
|
||||
|
||||
func (w *Writer) ansiCuuAndEd() (err error) {
|
||||
func (w *Writer) ansiCuuAndEd() error {
|
||||
buf := make([]byte, 8)
|
||||
buf = strconv.AppendInt(buf[:copy(buf, escOpen)], int64(w.lines), 10)
|
||||
_, err = w.out.Write(append(buf, cuuAndEd...))
|
||||
return
|
||||
_, err := w.out.Write(append(buf, cuuAndEd...))
|
||||
return err
|
||||
}
|
||||
|
12
vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go
generated
vendored
Normal file
12
vendor/github.com/vbauerster/mpb/v7/decor/optimistic_string_writer.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
package decor
|
||||
|
||||
import "io"
|
||||
|
||||
func optimisticStringWriter(w io.Writer) func(string) {
|
||||
return func(s string) {
|
||||
_, err := io.WriteString(w, s)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
9
vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
generated
vendored
9
vendor/github.com/vbauerster/mpb/v7/decor/percentage.go
generated
vendored
@@ -2,7 +2,6 @@ package decor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"github.com/vbauerster/mpb/v7/internal"
|
||||
@@ -24,12 +23,12 @@ func (s percentageType) Format(st fmt.State, verb rune) {
|
||||
}
|
||||
}
|
||||
|
||||
io.WriteString(st, strconv.FormatFloat(float64(s), 'f', prec, 64))
|
||||
|
||||
osw := optimisticStringWriter(st)
|
||||
osw(strconv.FormatFloat(float64(s), 'f', prec, 64))
|
||||
if st.Flag(' ') {
|
||||
io.WriteString(st, " ")
|
||||
osw(" ")
|
||||
}
|
||||
io.WriteString(st, "%")
|
||||
osw("%")
|
||||
}
|
||||
|
||||
// Percentage returns percentage decorator. It's a wrapper of NewPercentage.
|
||||
|
22
vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
generated
vendored
22
vendor/github.com/vbauerster/mpb/v7/decor/size_type.go
generated
vendored
@@ -2,8 +2,6 @@ package decor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
@@ -47,16 +45,16 @@ func (self SizeB1024) Format(st fmt.State, verb rune) {
|
||||
unit = _iMiB
|
||||
case self < _iTiB:
|
||||
unit = _iGiB
|
||||
case self <= math.MaxInt64:
|
||||
default:
|
||||
unit = _iTiB
|
||||
}
|
||||
|
||||
io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
|
||||
|
||||
osw := optimisticStringWriter(st)
|
||||
osw(strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
|
||||
if st.Flag(' ') {
|
||||
io.WriteString(st, " ")
|
||||
osw(" ")
|
||||
}
|
||||
io.WriteString(st, unit.String())
|
||||
osw(unit.String())
|
||||
}
|
||||
|
||||
const (
|
||||
@@ -96,14 +94,14 @@ func (self SizeB1000) Format(st fmt.State, verb rune) {
|
||||
unit = _MB
|
||||
case self < _TB:
|
||||
unit = _GB
|
||||
case self <= math.MaxInt64:
|
||||
default:
|
||||
unit = _TB
|
||||
}
|
||||
|
||||
io.WriteString(st, strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
|
||||
|
||||
osw := optimisticStringWriter(st)
|
||||
osw(strconv.FormatFloat(float64(self)/float64(unit), 'f', prec, 64))
|
||||
if st.Flag(' ') {
|
||||
io.WriteString(st, " ")
|
||||
osw(" ")
|
||||
}
|
||||
io.WriteString(st, unit.String())
|
||||
osw(unit.String())
|
||||
}
|
||||
|
3
vendor/github.com/vbauerster/mpb/v7/decor/speed.go
generated
vendored
3
vendor/github.com/vbauerster/mpb/v7/decor/speed.go
generated
vendored
@@ -2,7 +2,6 @@ package decor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
@@ -24,7 +23,7 @@ type speedFormatter struct {
|
||||
|
||||
func (self *speedFormatter) Format(st fmt.State, verb rune) {
|
||||
self.Formatter.Format(st, verb)
|
||||
io.WriteString(st, "/s")
|
||||
optimisticStringWriter(st)("/s")
|
||||
}
|
||||
|
||||
// EwmaSpeed exponential-weighted-moving-average based speed decorator.
|
||||
|
2
vendor/github.com/vbauerster/mpb/v7/go.mod
generated
vendored
2
vendor/github.com/vbauerster/mpb/v7/go.mod
generated
vendored
@@ -4,7 +4,7 @@ require (
|
||||
github.com/VividCortex/ewma v1.2.0
|
||||
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d
|
||||
github.com/mattn/go-runewidth v0.0.13
|
||||
golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
|
||||
)
|
||||
|
||||
go 1.14
|
||||
|
4
vendor/github.com/vbauerster/mpb/v7/go.sum
generated
vendored
4
vendor/github.com/vbauerster/mpb/v7/go.sum
generated
vendored
@@ -6,5 +6,5 @@ github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4
|
||||
github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d h1:1oIt9o40TWWI9FUaveVpUvBe13FNqBNVXy3ue2fcfkw=
|
||||
golang.org/x/sys v0.0.0-20211214234402-4825e8c3871d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
|
34
vendor/github.com/vbauerster/mpb/v7/progress.go
generated
vendored
34
vendor/github.com/vbauerster/mpb/v7/progress.go
generated
vendored
@@ -6,8 +6,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"os"
|
||||
"sync"
|
||||
@@ -33,7 +31,6 @@ type Progress struct {
|
||||
done chan struct{}
|
||||
refreshCh chan time.Time
|
||||
once sync.Once
|
||||
dlogger *log.Logger
|
||||
}
|
||||
|
||||
// pState holds bars in its priorityQueue. It gets passed to
|
||||
@@ -75,7 +72,6 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
|
||||
rr: prr,
|
||||
parkedBars: make(map[*Bar]*Bar),
|
||||
output: os.Stdout,
|
||||
debugOut: ioutil.Discard,
|
||||
}
|
||||
|
||||
for _, opt := range options {
|
||||
@@ -91,7 +87,6 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
|
||||
bwg: new(sync.WaitGroup),
|
||||
operateState: make(chan func(*pState)),
|
||||
done: make(chan struct{}),
|
||||
dlogger: log.New(s.debugOut, "[mpb] ", log.Lshortfile),
|
||||
}
|
||||
|
||||
p.cwg.Add(1)
|
||||
@@ -234,12 +229,26 @@ func (p *Progress) serve(s *pState, cw *cwriter.Writer) {
|
||||
op(s)
|
||||
case <-p.refreshCh:
|
||||
if err := s.render(cw); err != nil {
|
||||
p.dlogger.Println(err)
|
||||
if s.debugOut != nil {
|
||||
_, e := fmt.Fprintln(s.debugOut, err)
|
||||
if e != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
case <-s.shutdownNotifier:
|
||||
for s.heapUpdated {
|
||||
if err := s.render(cw); err != nil {
|
||||
p.dlogger.Println(err)
|
||||
if s.debugOut != nil {
|
||||
_, e := fmt.Fprintln(s.debugOut, err)
|
||||
if e != nil {
|
||||
panic(err)
|
||||
}
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -311,7 +320,10 @@ func (s *pState) flush(cw *cwriter.Writer) error {
|
||||
for s.bHeap.Len() > 0 {
|
||||
b := heap.Pop(&s.bHeap).(*Bar)
|
||||
frame := <-b.frameCh
|
||||
cw.ReadFrom(frame.reader)
|
||||
_, err := cw.ReadFrom(frame.reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b.toShutdown {
|
||||
if b.recoveredPanic != nil {
|
||||
s.barShutdownQueue = append(s.barShutdownQueue, b)
|
||||
@@ -402,9 +414,9 @@ func (s *pState) makeBarState(total int64, filler BarFiller, options ...BarOptio
|
||||
bs.priority = -(math.MaxInt32 - s.idCount)
|
||||
}
|
||||
|
||||
bs.bufP = bytes.NewBuffer(make([]byte, 0, 128))
|
||||
bs.bufB = bytes.NewBuffer(make([]byte, 0, 256))
|
||||
bs.bufA = bytes.NewBuffer(make([]byte, 0, 128))
|
||||
for i := 0; i < len(bs.buffers); i++ {
|
||||
bs.buffers[i] = bytes.NewBuffer(make([]byte, 0, 512))
|
||||
}
|
||||
|
||||
return bs
|
||||
}
|
||||
|
45
vendor/github.com/vbauerster/mpb/v7/proxyreader.go
generated
vendored
45
vendor/github.com/vbauerster/mpb/v7/proxyreader.go
generated
vendored
@@ -11,7 +11,7 @@ type proxyReader struct {
|
||||
bar *Bar
|
||||
}
|
||||
|
||||
func (x *proxyReader) Read(p []byte) (int, error) {
|
||||
func (x proxyReader) Read(p []byte) (int, error) {
|
||||
n, err := x.ReadCloser.Read(p)
|
||||
x.bar.IncrBy(n)
|
||||
if err == io.EOF {
|
||||
@@ -21,12 +21,11 @@ func (x *proxyReader) Read(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
type proxyWriterTo struct {
|
||||
io.ReadCloser // *proxyReader
|
||||
wt io.WriterTo
|
||||
bar *Bar
|
||||
proxyReader
|
||||
wt io.WriterTo
|
||||
}
|
||||
|
||||
func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
|
||||
func (x proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
|
||||
n, err := x.wt.WriteTo(w)
|
||||
x.bar.IncrInt64(n)
|
||||
if err == io.EOF {
|
||||
@@ -36,13 +35,12 @@ func (x *proxyWriterTo) WriteTo(w io.Writer) (int64, error) {
|
||||
}
|
||||
|
||||
type ewmaProxyReader struct {
|
||||
io.ReadCloser // *proxyReader
|
||||
bar *Bar
|
||||
proxyReader
|
||||
}
|
||||
|
||||
func (x *ewmaProxyReader) Read(p []byte) (int, error) {
|
||||
func (x ewmaProxyReader) Read(p []byte) (int, error) {
|
||||
start := time.Now()
|
||||
n, err := x.ReadCloser.Read(p)
|
||||
n, err := x.proxyReader.Read(p)
|
||||
if n > 0 {
|
||||
x.bar.DecoratorEwmaUpdate(time.Since(start))
|
||||
}
|
||||
@@ -50,12 +48,11 @@ func (x *ewmaProxyReader) Read(p []byte) (int, error) {
|
||||
}
|
||||
|
||||
type ewmaProxyWriterTo struct {
|
||||
io.ReadCloser // *ewmaProxyReader
|
||||
wt io.WriterTo // *proxyWriterTo
|
||||
bar *Bar
|
||||
ewmaProxyReader
|
||||
wt proxyWriterTo
|
||||
}
|
||||
|
||||
func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
|
||||
func (x ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
|
||||
start := time.Now()
|
||||
n, err := x.wt.WriteTo(w)
|
||||
if n > 0 {
|
||||
@@ -64,17 +61,19 @@ func (x *ewmaProxyWriterTo) WriteTo(w io.Writer) (int64, error) {
|
||||
return n, err
|
||||
}
|
||||
|
||||
func newProxyReader(r io.Reader, bar *Bar) io.ReadCloser {
|
||||
rc := toReadCloser(r)
|
||||
rc = &proxyReader{rc, bar}
|
||||
|
||||
if wt, isWriterTo := r.(io.WriterTo); bar.hasEwmaDecorators {
|
||||
rc = &ewmaProxyReader{rc, bar}
|
||||
if isWriterTo {
|
||||
rc = &ewmaProxyWriterTo{rc, wt, bar}
|
||||
func (b *Bar) newProxyReader(r io.Reader) (rc io.ReadCloser) {
|
||||
pr := proxyReader{toReadCloser(r), b}
|
||||
if wt, ok := r.(io.WriterTo); ok {
|
||||
pw := proxyWriterTo{pr, wt}
|
||||
if b.hasEwmaDecorators {
|
||||
rc = ewmaProxyWriterTo{ewmaProxyReader{pr}, pw}
|
||||
} else {
|
||||
rc = pw
|
||||
}
|
||||
} else if isWriterTo {
|
||||
rc = &proxyWriterTo{rc, wt, bar}
|
||||
} else if b.hasEwmaDecorators {
|
||||
rc = ewmaProxyReader{pr}
|
||||
} else {
|
||||
rc = pr
|
||||
}
|
||||
return rc
|
||||
}
|
||||
|
Reference in New Issue
Block a user