Mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 05:49:52 +00:00)

Compare commits: release-1.… ... v1.10.0 (43 commits)
Commit SHA1s in this range (author and date columns not preserved):

- ee60474d5a
- ff2a361a0a
- 7ebff0f533
- 787e10873c
- a2f29acc7d
- ee84302b60
- a169ccf8f3
- 89ae387d7b
- 66fe7af769
- feabfac2a7
- cf354b7abd
- 18a95f947e
- 07da29fd37
- 9b40f0be2f
- 869d496f18
- 166b587a77
- 0a42c33af9
- 90c5033886
- d3ff6e2635
- 06cf25fb53
- 3a05dca94e
- 7bbaffc4f4
- 2b948c177a
- d9dfc44888
- ba23a9162f
- 1eada90813
- 4b9ffac0cc
- a81460437a
- f36752a279
- 4e2dee4362
- d58e59a57d
- 3450c11a0d
- b2e7139331
- 3a808c2ed5
- 04169cac6e
- a99bd0c9e3
- 97c3eabacf
- fa2b15ff76
- 9e79da5e33
- 97cb423b52
- 32f24e8870
- a863a0dccb
- 67a4e04471
**.github/workflows/check_cirrus_cron.yml** (vendored; 112 lines changed)
```diff
@@ -1,105 +1,17 @@
 ---
 
 # See also:
 # https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml
 
-# Format Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions
-
-# Required to un-FUBAR default ${{github.workflow}} value
-name: check_cirrus_cron
-
 on:
     # Note: This only applies to the default branch.
     schedule:
         # N/B: This should correspond to a period slightly after
         # the last job finishes running. See job defs. at:
         # https://cirrus-ci.com/settings/repository/6706677464432640
         - cron: '59 23 * * 1-5'
     # Debug: Allow triggering job manually in github-actions WebUI
     workflow_dispatch: {}
 
-permissions:
-    contents: read
-
-env:
-    # Debug-mode can reveal secrets, only enable by a secret value.
-    # Ref: https://help.github.com/en/actions/configuring-and-managing-workflows/managing-a-workflow-run#enabling-step-debug-logging
-    ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}'
-    # CSV listing of e-mail addresses for delivery failure or error notices
-    RCPTCSV: rh.container.bot@gmail.com,podman-monitor@lists.podman.io
-    # Filename for table of cron-name to build-id data
-    # (must be in $GITHUB_WORKSPACE/artifacts/)
-    NAME_ID_FILEPATH: './artifacts/name_id.txt'
-
 jobs:
-    cron_failures:
-        runs-on: ubuntu-latest
-        steps:
-            - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
-              with:
-                  persist-credentials: false
-
-            # Avoid duplicating cron_failures.sh in skopeo repo.
-            - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
-              with:
-                  repository: containers/podman
-                  path: '_podman'
-                  persist-credentials: false
-
-            - name: Get failed cron names and Build IDs
-              id: cron
-              run: './_podman/.github/actions/${{ github.workflow }}/${{ github.job }}.sh'
-
-            - if: steps.cron.outputs.failures > 0
-              shell: bash
-              # Must be inline, since context expressions are used.
-              # Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions
-              run: |
-                  set -eo pipefail
-                  (
-                      echo "Detected one or more Cirrus-CI cron-triggered jobs have failed recently:"
-                      echo ""
-
-                      while read -r NAME BID; do
-                          echo "Cron build '$NAME' Failed: https://cirrus-ci.com/build/$BID"
-                      done < "$NAME_ID_FILEPATH"
-
-                      echo ""
-                      echo "# Source: ${{ github.workflow }} workflow on ${{ github.repository }}."
-                      # Separate content from sendgrid.com automatic footer.
-                      echo ""
-                      echo ""
-                  ) > ./artifacts/email_body.txt
-
-            - if: steps.cron.outputs.failures > 0
-              name: Send failure notification e-mail
-              # Ref: https://github.com/dawidd6/action-send-mail
-              uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
-              with:
-                  server_address: ${{secrets.ACTION_MAIL_SERVER}}
-                  server_port: 465
-                  username: ${{secrets.ACTION_MAIL_USERNAME}}
-                  password: ${{secrets.ACTION_MAIL_PASSWORD}}
-                  subject: Cirrus-CI cron build failures on ${{github.repository}}
-                  to: ${{env.RCPTCSV}}
-                  from: ${{secrets.ACTION_MAIL_SENDER}}
-                  body: file://./artifacts/email_body.txt
-
-            - if: always()
-              uses: actions/upload-artifact@82c141cc518b40d92cc801eee768e7aafc9c2fa2 # v2
-              with:
-                  name: ${{ github.job }}_artifacts
-                  path: artifacts/*
-
-            - if: failure()
-              name: Send error notification e-mail
-              uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
-              with:
-                  server_address: ${{secrets.ACTION_MAIL_SERVER}}
-                  server_port: 465
-                  username: ${{secrets.ACTION_MAIL_USERNAME}}
-                  password: ${{secrets.ACTION_MAIL_PASSWORD}}
-                  subject: Github workflow error on ${{github.repository}}
-                  to: ${{env.RCPTCSV}}
-                  from: ${{secrets.ACTION_MAIL_SENDER}}
-                  body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}"
+    # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
+    call_cron_failures:
+        uses: containers/buildah/.github/workflows/check_cirrus_cron.yml@main
+        secrets: inherit
```
**Makefile** (19 lines changed)
```diff
@@ -28,10 +28,15 @@ ifeq ($(GOBIN),)
 GOBIN := $(GOPATH)/bin
 endif
 
-# Multiple scripts are sensitive to this value, make sure it's exported/available
-# N/B: Need to use 'command -v' here for compatibility with MacOS.
-export CONTAINER_RUNTIME ?= $(if $(shell command -v podman),podman,docker)
-GOMD2MAN ?= $(if $(shell command -v go-md2man),go-md2man,$(GOBIN)/go-md2man)
+# Scripts may also use CONTAINER_RUNTIME, so we need to export it.
+# Note possibly non-obvious aspects of this:
+# - We need to use 'command -v' here, not 'which', for compatibility with MacOS.
+# - GNU Make 4.2.1 (included in Ubuntu 20.04) incorrectly tries to avoid invoking
+#   a shell, and fails because there is no /usr/bin/command. The trailing ';' in
+#   $(shell … ;) defeats that heuristic (recommended in
+#   https://savannah.gnu.org/bugs/index.php?57625 ).
+export CONTAINER_RUNTIME ?= $(if $(shell command -v podman ;),podman,docker)
+GOMD2MAN ?= $(if $(shell command -v go-md2man ;),go-md2man,$(GOBIN)/go-md2man)
 
 # Go module support: set `-mod=vendor` to use the vendored sources.
 # See also hack/make.sh.
@@ -50,8 +55,6 @@ ifeq ($(GOOS), linux)
 endif
 endif
 
-GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-
 # If $TESTFLAGS is set, it is passed as extra arguments to 'go test'.
 # You can increase test output verbosity with the option '-test.vv'.
 # You can select certain tests to run, with `-test.run <regex>` for example:
@@ -87,7 +90,7 @@ endif
 CONTAINER_GOSRC = /src/github.com/containers/skopeo
 CONTAINER_RUN ?= $(CONTAINER_CMD) --security-opt label=disable -v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) $(SKOPEO_CIDEV_CONTAINER_FQIN)
 
-GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
+GIT_COMMIT := $(shell GIT_CEILING_DIRECTORIES=$$(cd ..; pwd) git rev-parse HEAD 2> /dev/null || true)
 
 EXTRA_LDFLAGS ?=
 SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
@@ -247,7 +250,7 @@ vendor:
 	$(GO) mod verify
 
 vendor-in-container:
-	podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src quay.io/libpod/golang:1.16 $(MAKE) vendor
+	podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src golang $(MAKE) vendor
 
 # CAUTION: This is not a replacement for RPMs provided by your distro.
 # Only intended to build and test the latest unreleased changes.
```
```diff
@@ -207,7 +207,7 @@ Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate
 | [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
 | [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
 | [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
-| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |
+| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between registry repositories and local directories. |
 
 License
 -
```
```diff
@@ -260,6 +260,8 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
 		}
 	}
 
+	opts.destImage.warnAboutIneffectiveOptions(destRef.Transport())
+
 	return retry.IfNecessary(ctx, func() error {
 		manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
 			RemoveSignatures: opts.removeSignatures,
```
```diff
@@ -186,6 +186,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 		Architecture: imgInspect.Architecture,
 		Os:           imgInspect.Os,
 		Layers:       imgInspect.Layers,
+		LayersData:   imgInspect.LayersData,
 		Env:          imgInspect.Env,
 	}
 	outputData.Digest, err = manifest.Digest(rawManifest)
```
```diff
@@ -3,6 +3,7 @@ package inspect
 import (
 	"time"
 
+	"github.com/containers/image/v5/types"
 	digest "github.com/opencontainers/go-digest"
 )
 
@@ -19,5 +20,6 @@ type Output struct {
 	Architecture string
 	Os           string
 	Layers       []string
+	LayersData   []types.ImageInspectLayer
 	Env          []string
 }
```
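The new `LayersData` field surfaces per-layer metadata in `skopeo inspect` output. Below is a minimal sketch of consuming it from the command's JSON; the local `layerData` struct is an assumption standing in for `types.ImageInspectLayer` (whose full definition lives in containers/image), and the image reference is just a placeholder.

```go
// Sketch: decode `skopeo inspect` JSON including the LayersData field added
// in the diff above. Requires a local `skopeo` binary and registry access.
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

// layerData approximates types.ImageInspectLayer; the field names here follow
// containers/image conventions and are an assumption, not the full type.
type layerData struct {
	MIMEType string
	Digest   string
	Size     int64
}

type inspectOutput struct {
	Architecture string
	Os           string
	Layers       []string
	LayersData   []layerData
	Env          []string
}

func main() {
	out, err := exec.Command("skopeo", "inspect", "docker://quay.io/libpod/busybox:latest").Output()
	if err != nil {
		log.Fatal(err)
	}
	var info inspectOutput
	if err := json.Unmarshal(out, &info); err != nil {
		log.Fatal(err)
	}
	for _, l := range info.LayersData {
		fmt.Printf("%s  %d bytes  %s\n", l.Digest, l.Size, l.MIMEType)
	}
}
```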
```diff
@@ -244,7 +244,11 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
 	for _, tag := range tags {
 		taggedRef, err := reference.WithTag(repoRef, tag)
 		if err != nil {
-			return nil, fmt.Errorf("Error creating a reference for repository %s and tag %q: %w", repoRef.Name(), tag, err)
+			logrus.WithFields(logrus.Fields{
+				"repo": repoRef.Name(),
+				"tag":  tag,
+			}).Errorf("Error creating a tagged reference from registry tag list: %v", err)
+			continue
 		}
 		ref, err := docker.NewReference(taggedRef)
 		if err != nil {
@@ -548,6 +552,8 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
 		return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
 	}
 
+	opts.destImage.warnAboutIneffectiveOptions(transports.Get(opts.destination))
+
 	imageListSelection := copy.CopySystemImage
 	if opts.all {
 		imageListSelection = copy.CopyAllImages
```
```diff
@@ -10,6 +10,7 @@ import (
 
 	commonFlag "github.com/containers/common/pkg/flag"
 	"github.com/containers/common/pkg/retry"
+	"github.com/containers/image/v5/directory"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/compression"
 	"github.com/containers/image/v5/transports/alltransports"
@@ -30,11 +31,11 @@ type errorShouldDisplayUsage struct {
 // The error for closeErr is annotated with description (which is not a format string)
 // Typical usage:
 //
-// defer func() {
-//     if err := something.Close(); err != nil {
-//         returnedErr = noteCloseFailure(returnedErr, "closing something", err)
-//     }
-// }
+//	defer func() {
+//		if err := something.Close(); err != nil {
+//			returnedErr = noteCloseFailure(returnedErr, "closing something", err)
+//		}
+//	}
 func noteCloseFailure(err error, description string, closeErr error) error {
 	// We don’t accept a Closer() and close it ourselves because signature.PolicyContext has .Destroy(), not .Close().
 	// This also makes it harder for a caller to do
@@ -244,6 +245,7 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
 }
 
 // imageDestOptions is a superset of imageOptions specialized for image destinations.
+// Every user should call imageDestOptions.warnAboutIneffectiveOptions() as part of handling the CLI
 type imageDestOptions struct {
 	*imageOptions
 	dirForceCompression bool // Compress layers when saving to the dir: transport
@@ -252,12 +254,13 @@ type imageDestOptions struct {
 	compressionFormat   string                 // Format to use for the compression
 	compressionLevel    commonFlag.OptionalInt // Level to use for the compression
 	precomputeDigests   bool                   // Precompute digests to dedup layers when saving to the docker: transport
+	imageDestFlagPrefix string
 }
 
 // imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
 func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
 	genericFlags, genericOptions := imageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
-	opts := imageDestOptions{imageOptions: genericOptions}
+	opts := imageDestOptions{imageOptions: genericOptions, imageDestFlagPrefix: flagPrefix}
 	fs := pflag.FlagSet{}
 	fs.AddFlagSet(&genericFlags)
 	fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
@@ -295,6 +298,19 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
 	return ctx, err
 }
 
+// warnAboutIneffectiveOptions warns if any ineffective option was set by the user
+// Every user should call this as part of handling the CLI
+func (opts *imageDestOptions) warnAboutIneffectiveOptions(destTransport types.ImageTransport) {
+	if destTransport.Name() != directory.Transport.Name() {
+		if opts.dirForceCompression {
+			logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"compress")
+		}
+		if opts.dirForceDecompression {
+			logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"decompress")
+		}
+	}
+}
+
 func parseCreds(creds string) (string, string, error) {
 	if creds == "" {
 		return "", "", errors.New("credentials can't be empty")
```
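The reflowed doc comment above describes skopeo's close-failure pattern: a `defer` that folds a `Close()` error into the function's named return value. Here is a self-contained sketch of that pattern; `noteCloseFailure` keeps the signature from the diff, but its body is an illustrative stand-in, not skopeo's exact implementation.

```go
// Minimal sketch of the defer/close-error pattern from the comment above.
package main

import (
	"fmt"
	"os"
)

// noteCloseFailure mirrors the signature in the diff; the wrapping details
// here are an assumption for illustration.
func noteCloseFailure(err error, description string, closeErr error) error {
	if err == nil {
		return fmt.Errorf("%s: %w", description, closeErr)
	}
	// Keep the primary error, but record the close failure alongside it.
	return fmt.Errorf("%w (also failed: %s: %v)", err, description, closeErr)
}

func writeGreeting(path string) (returnedErr error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		if err := f.Close(); err != nil {
			returnedErr = noteCloseFailure(returnedErr, "closing output file", err)
		}
	}()
	_, err = f.WriteString("hello\n")
	return err
}

func main() {
	if err := writeGreeting("/tmp/greeting.txt"); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```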
```diff
@@ -1,3 +1,16 @@
+[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
+[comment]: <> ()
+[comment]: <> (ANY changes made to this file, once commited/merged must)
+[comment]: <> (be manually copy/pasted -in markdown- into the description)
+[comment]: <> (field on Quay at the following locations:)
+[comment]: <> ()
+[comment]: <> (https://quay.io/repository/containers/skopeo)
+[comment]: <> (https://quay.io/repository/skopeo/stable)
+[comment]: <> (https://quay.io/repository/skopeo/testing)
+[comment]: <> (https://quay.io/repository/skopeo/upstream)
+[comment]: <> ()
+[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
+
 <img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
 
 ----
```
```diff
@@ -16,27 +16,11 @@ FROM registry.fedoraproject.org/fedora:latest
 # https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
 RUN dnf -y update && \
     rpm --setcaps shadow-utils 2>/dev/null && \
-    dnf -y --enablerepo updates-testing --exclude container-selinux install \
-        make \
-        golang \
-        git \
-        go-md2man \
-        fuse-overlayfs \
-        fuse3 \
-        containers-common \
-        gpgme-devel \
-        libassuan-devel \
-        btrfs-progs-devel \
-        device-mapper-devel && \
-    mkdir /root/skopeo && \
-    git clone https://github.com/containers/skopeo \
-        /root/skopeo/src/github.com/containers/skopeo && \
-    export GOPATH=/root/skopeo && \
-    cd /root/skopeo/src/github.com/containers/skopeo && \
-    make bin/skopeo && \
-    make PREFIX=/usr install && \
-    rm -rf /root/skopeo/* && \
-    dnf -y remove git golang go-md2man make && \
+    dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \
+    dnf -y copr enable rhcontainerbot/podman-next && \
+    dnf -y install skopeo \
+        --exclude container-selinux \
+        --enablerepo=updates-testing && \
     dnf clean all && \
     rm -rf /var/cache /var/log/dnf* /var/log/yum.*
```
**default.yaml** (14 lines changed)
```diff
@@ -1,8 +1,8 @@
 # This is a default registries.d configuration file. You may
 # add to this file or create additional files in registries.d/.
 #
-# lookaside: indicates a location that is read and write
-# lookaside-staging: indicates a location that is only for write
+# lookaside: for reading/writing simple signing signatures
+# lookaside-staging: for writing simple signing signatures, preferred over lookaside
 #
 # lookaside and lookaside-staging take a value of the following:
 #  lookaside: {schema}://location
@@ -10,10 +10,12 @@
 # For reading signatures, schema may be http, https, or file.
 # For writing signatures, schema may only be file.
 
-# This is the default signature write location for docker registries.
+# The default locations are built-in, for both reading and writing:
+#   /var/lib/containers/sigstore for root, or
+#   ~/.local/share/containers/sigstore for non-root users.
 default-docker:
-#  lookaside: file:///var/lib/containers/sigstore
-  lookaside-staging: file:///var/lib/containers/sigstore
+#  lookaside: https://…
+#  lookaside-staging: file:///…
 
 # The 'docker' indicator here is the start of the configuration
 # for docker registries.
@@ -21,6 +23,6 @@ default-docker:
 #  docker:
 #
 #    privateregistry.com:
-#      lookaside: http://privateregistry.com/sigstore/
+#      lookaside: https://privateregistry.com/sigstore/
 #      lookaside-staging: /mnt/nfs/privateregistry/sigstore
```
```diff
@@ -56,7 +56,7 @@ After copying the image, write the digest of the resulting image to the file.
 
 **--preserve-digests**
 
-Preserve the digests during copying. Fail if the digest cannot be preserved.
+Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
 
 **--encrypt-layer** _ints_
```
```diff
@@ -1,14 +1,14 @@
 % skopeo-sync(1)
 
 ## NAME
-skopeo\-sync - Synchronize images between container registries and local directories.
+skopeo\-sync - Synchronize images between registry repositories and local directories.
 
 ## SYNOPSIS
 **skopeo sync** [*options*] --src _transport_ --dest _transport_ _source_ _destination_
 
 ## DESCRIPTION
-Synchronize images between container registries and local directories.
+Synchronize images between registry repositories and local directories.
 The synchronization is achieved by copying all the images found at _source_ to _destination_.
 
 Useful to synchronize a local container registry mirror, and to populate registries running inside of air-gapped environments.
@@ -66,7 +66,7 @@ Print usage statement.
 
 **--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.
 
-**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved.
+**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
 
 **--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.
 
@@ -108,7 +108,7 @@ Print the version number
 | [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
 | [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
 | [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image signature. |
-| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between container registries and local directories. |
+| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between registry repositories and local directories. |
 
 ## FILES
 **/etc/containers/policy.json**
```
**go.mod** (68 lines changed)
```diff
@@ -3,20 +3,19 @@ module github.com/containers/skopeo
 go 1.17
 
 require (
-	github.com/containers/common v0.49.1
-	github.com/containers/image/v5 v5.22.0
+	github.com/containers/common v0.50.1
+	github.com/containers/image/v5 v5.23.0
 	github.com/containers/ocicrypt v1.1.5
-	github.com/containers/storage v1.42.0
-	github.com/docker/docker v20.10.17+incompatible
+	github.com/containers/storage v1.43.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
+	github.com/opencontainers/image-spec v1.1.0-rc1
 	github.com/opencontainers/image-tools v1.0.0-rc3
 	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.5.0
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.8.0
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
+	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/yaml.v2 v2.4.0
 )
@@ -24,85 +23,78 @@ require (
 require (
 	github.com/BurntSushi/toml v1.2.0 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/Microsoft/hcsshim v0.9.3 // indirect
+	github.com/Microsoft/hcsshim v0.9.4 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/containerd/cgroups v1.0.3 // indirect
+	github.com/containerd/cgroups v1.0.4 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
 	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/docker/distribution v2.8.1+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.6.4 // indirect
+	github.com/docker/docker v20.10.18+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.7.0 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
-	github.com/docker/go-units v0.4.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/go-containerregistry v0.10.0 // indirect
+	github.com/google/go-containerregistry v0.11.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/honeycombio/libhoney-go v1.15.8 // indirect
 	github.com/imdario/mergo v0.3.13 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.15.9 // indirect
-	github.com/klauspost/pgzip v1.2.5 // indirect
+	github.com/klauspost/compress v1.15.11 // indirect
+	github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
 	github.com/kr/pretty v0.2.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
+	github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
-	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
+	github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
 	github.com/moby/sys/mountinfo v0.6.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/opencontainers/runc v1.1.3 // indirect
+	github.com/opencontainers/runc v1.1.4 // indirect
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
-	github.com/opencontainers/selinux v1.10.1 // indirect
+	github.com/opencontainers/selinux v1.10.2 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
 	github.com/prometheus/client_golang v1.12.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
-	github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 // indirect
+	github.com/sigstore/sigstore v1.4.2 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.7.1 // indirect
+	github.com/sylabs/sif/v2 v2.8.0 // indirect
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
-	github.com/theupdateframework/go-tuf v0.3.1 // indirect
+	github.com/theupdateframework/go-tuf v0.5.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
 	github.com/vbatts/tar-split v0.11.2 // indirect
-	github.com/vbauerster/mpb/v7 v7.4.2 // indirect
+	github.com/vbauerster/mpb/v7 v7.5.3 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
-	go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
+	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.23.0 // indirect
 	go.opentelemetry.io/otel/trace v1.3.0 // indirect
-	golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 // indirect
-	golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
-	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
-	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
+	golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 // indirect
+	golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
+	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
+	golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect
 	golang.org/x/text v0.3.7 // indirect
-	google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
-	google.golang.org/grpc v1.47.0 // indirect
-	google.golang.org/protobuf v1.28.0 // indirect
+	google.golang.org/genproto v0.0.0-20220720214146-176da50484ac // indirect
+	google.golang.org/grpc v1.48.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
```
```diff
@@ -34,7 +34,7 @@ if [[ "$SKOPEO_CONTAINER_TESTS" == "0" ]] && [[ "$CI" != "true" ]]; then
     echo "    the Makefile targets WITHOUT the '-local' suffix."
     echo "***************************************************************"
     ) > /dev/stderr
-    sleep 5s
+    sleep 5
 fi
 
 echo
```
```diff
@@ -11,7 +11,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/docker/docker/pkg/homedir"
+	"github.com/containers/storage/pkg/homedir"
 	"gopkg.in/check.v1"
 )
```
```diff
@@ -15,11 +15,15 @@ TestRunShell is not really a test; it is a convenient way to use the registry se
 in openshift.go and CopySuite to get an interactive environment for experimentation.
 
 To use it, run:
 
+	sudo make shell
+
+to start a container, then within the container:
+
 	SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'
 
 An example of what can be done within the container:
 
+	cd ..; make bin/skopeo PREFIX=/usr install
 	./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
 	oc get istag personal:personal -o json
```
```diff
@@ -50,7 +50,7 @@ function setup() {
 
     local dir=$TESTDIR/dir
 
-    run_skopeo copy --dest-compress --dest-compress-format=zstd $remote_image oci:$dir:latest
+    run_skopeo copy --dest-compress-format=zstd $remote_image oci:$dir:latest
 
     # zstd magic number
     local magic=$(printf "\x28\xb5\x2f\xfd")
```
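The test above verifies compression by checking for the zstd frame magic number, the four bytes `0x28 0xB5 0x2F 0xFD`. A small Go sketch of the same check (the file path is a placeholder for a layer blob from the OCI layout):

```go
// Sketch: detect a zstd-compressed blob by its frame magic number,
// the same 4 bytes the bats test greps for.
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"os"
)

var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

func isZstd(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	header := make([]byte, 4)
	if _, err := io.ReadFull(f, header); err != nil {
		return false, err
	}
	return bytes.Equal(header, zstdMagic), nil
}

func main() {
	// Placeholder path: point this at a blob under oci:<dir>/blobs/sha256/.
	ok, err := isZstd("blob")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("zstd:", ok)
}
```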
**vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go** (generated, vendored; 2 lines changed)
```diff
@@ -57,7 +57,7 @@ func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
 			}).Warn("failed to parse job object message")
 			continue
 		}
-		if err := msq.Write(notification); err == queue.ErrQueueClosed {
+		if err := msq.Enqueue(notification); err == queue.ErrQueueClosed {
 			// Write will only return an error when the queue is closed.
 			// The only time a queue would ever be closed is when we call `Close` on
 			// the job it belongs to which also removes it from the jobMap, so something
```
**vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go** (generated, vendored; 55 lines changed)
```diff
@@ -68,6 +68,9 @@ type Options struct {
 	// `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
 	// Defaults to false.
 	UseNTVariant bool
+	// `IOTracking` enables tracking I/O statistics on the job object. More specifically this
+	// calls SetInformationJobObject with the JobObjectIoAttribution class.
+	EnableIOTracking bool
 }
 
 // Create creates a job object.
@@ -134,6 +137,12 @@ func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
 		job.mq = mq
 	}
 
+	if options.EnableIOTracking {
+		if err := enableIOTracking(jobHandle); err != nil {
+			return nil, err
+		}
+	}
+
 	return job, nil
 }
 
@@ -235,7 +244,7 @@ func (job *JobObject) PollNotification() (interface{}, error) {
 	if job.mq == nil {
 		return nil, ErrNotRegistered
 	}
-	return job.mq.ReadOrWait()
+	return job.mq.Dequeue()
 }
 
 // UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to
@@ -330,7 +339,7 @@ func (job *JobObject) Pids() ([]uint32, error) {
 	err := winapi.QueryInformationJobObject(
 		job.handle,
 		winapi.JobObjectBasicProcessIdList,
-		uintptr(unsafe.Pointer(&info)),
+		unsafe.Pointer(&info),
 		uint32(unsafe.Sizeof(info)),
 		nil,
 	)
@@ -356,7 +365,7 @@ func (job *JobObject) Pids() ([]uint32, error) {
 	if err = winapi.QueryInformationJobObject(
 		job.handle,
 		winapi.JobObjectBasicProcessIdList,
-		uintptr(unsafe.Pointer(&buf[0])),
+		unsafe.Pointer(&buf[0]),
 		uint32(len(buf)),
 		nil,
 	); err != nil {
@@ -384,7 +393,7 @@ func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMA
 	if err := winapi.QueryInformationJobObject(
 		job.handle,
 		winapi.JobObjectMemoryUsageInformation,
-		uintptr(unsafe.Pointer(&info)),
+		unsafe.Pointer(&info),
 		uint32(unsafe.Sizeof(info)),
 		nil,
 	); err != nil {
@@ -406,7 +415,7 @@ func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_
 	if err := winapi.QueryInformationJobObject(
 		job.handle,
 		winapi.JobObjectBasicAccountingInformation,
-		uintptr(unsafe.Pointer(&info)),
+		unsafe.Pointer(&info),
 		uint32(unsafe.Sizeof(info)),
 		nil,
 	); err != nil {
@@ -415,7 +424,9 @@ func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_
 	return &info, nil
 }
 
-// QueryStorageStats gets the storage (I/O) stats for the job object.
+// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error
+// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking()
+// hasn't been called since creation of the job.
 func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
 	job.handleLock.RLock()
 	defer job.handleLock.RUnlock()
@@ -430,7 +441,7 @@ func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFO
 	if err := winapi.QueryInformationJobObject(
 		job.handle,
 		winapi.JobObjectIoAttribution,
-		uintptr(unsafe.Pointer(&info)),
+		unsafe.Pointer(&info),
 		uint32(unsafe.Sizeof(info)),
 		nil,
 	); err != nil {
@@ -476,7 +487,7 @@ func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
 	status := winapi.NtQueryInformationProcess(
 		h,
 		winapi.ProcessVmCounters,
-		uintptr(unsafe.Pointer(&vmCounters)),
+		unsafe.Pointer(&vmCounters),
 		uint32(unsafe.Sizeof(vmCounters)),
 		nil,
 	)
@@ -497,3 +508,31 @@ func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
 
 	return jobWorkingSetSize, nil
 }
+
+// SetIOTracking enables IO tracking for processes in the job object.
+// This enables use of the QueryStorageStats method.
+func (job *JobObject) SetIOTracking() error {
+	job.handleLock.RLock()
+	defer job.handleLock.RUnlock()
+
+	if job.handle == 0 {
+		return ErrAlreadyClosed
+	}
+
+	return enableIOTracking(job.handle)
+}
+
+func enableIOTracking(job windows.Handle) error {
+	info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
+		ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
+	}
+	if _, err := windows.SetInformationJobObject(
+		job,
+		winapi.JobObjectIoAttribution,
+		uintptr(unsafe.Pointer(&info)),
+		uint32(unsafe.Sizeof(info)),
+	); err != nil {
+		return fmt.Errorf("failed to enable IO tracking on job object: %w", err)
+	}
+	return nil
+}
```
**vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go** (generated, vendored; 4 lines changed)
```diff
@@ -202,7 +202,7 @@ func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMI
 	if err := winapi.QueryInformationJobObject(
 		job.handle,
 		windows.JobObjectExtendedLimitInformation,
-		uintptr(unsafe.Pointer(&info)),
+		unsafe.Pointer(&info),
 		uint32(unsafe.Sizeof(info)),
 		nil,
 	); err != nil {
@@ -224,7 +224,7 @@ func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE
 	if err := winapi.QueryInformationJobObject(
 		job.handle,
 		windows.JobObjectCpuRateControlInformation,
-		uintptr(unsafe.Pointer(&info)),
+		unsafe.Pointer(&info),
 		uint32(unsafe.Sizeof(info)),
 		nil,
 	); err != nil {
```
**vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go** (generated, vendored; 65 lines changed)
```diff
@@ -5,10 +5,7 @@ import (
 	"sync"
 )
 
-var (
-	ErrQueueClosed = errors.New("the queue is closed for reading and writing")
-	ErrQueueEmpty  = errors.New("the queue is empty")
-)
+var ErrQueueClosed = errors.New("the queue is closed for reading and writing")
 
 // MessageQueue represents a threadsafe message queue to be used to retrieve or
 // write messages to.
@@ -29,8 +26,8 @@ func NewMessageQueue() *MessageQueue {
 	}
 }
 
-// Write writes `msg` to the queue.
-func (mq *MessageQueue) Write(msg interface{}) error {
+// Enqueue writes `msg` to the queue.
+func (mq *MessageQueue) Enqueue(msg interface{}) error {
 	mq.m.Lock()
 	defer mq.m.Unlock()
 
@@ -43,55 +40,37 @@ func (mq *MessageQueue) Write(msg interface{}) error {
 	return nil
 }
 
-// Read will read a value from the queue if available, otherwise return an error.
-func (mq *MessageQueue) Read() (interface{}, error) {
+// Dequeue will read a value from the queue and remove it. If the queue
+// is empty, this will block until the queue is closed or a value gets enqueued.
+func (mq *MessageQueue) Dequeue() (interface{}, error) {
 	mq.m.Lock()
 	defer mq.m.Unlock()
 
+	for !mq.closed && mq.size() == 0 {
+		mq.c.Wait()
+	}
+
+	// We got woken up, check if it's because the queue got closed.
 	if mq.closed {
 		return nil, ErrQueueClosed
 	}
-	if mq.isEmpty() {
-		return nil, ErrQueueEmpty
-	}
+
 	val := mq.messages[0]
 	mq.messages[0] = nil
 	mq.messages = mq.messages[1:]
 	return val, nil
 }
 
-// ReadOrWait will read a value from the queue if available, else it will wait for a
-// value to become available. This will block forever if nothing gets written or until
-// the queue gets closed.
-func (mq *MessageQueue) ReadOrWait() (interface{}, error) {
-	mq.m.Lock()
-	if mq.closed {
-		mq.m.Unlock()
-		return nil, ErrQueueClosed
-	}
-	if mq.isEmpty() {
-		for !mq.closed && mq.isEmpty() {
-			mq.c.Wait()
-		}
-		mq.m.Unlock()
-		return mq.Read()
-	}
-	val := mq.messages[0]
-	mq.messages[0] = nil
-	mq.messages = mq.messages[1:]
-	mq.m.Unlock()
-	return val, nil
-}
-
-// IsEmpty returns if the queue is empty
-func (mq *MessageQueue) IsEmpty() bool {
+// Size returns the size of the queue.
+func (mq *MessageQueue) Size() int {
 	mq.m.RLock()
 	defer mq.m.RUnlock()
-	return len(mq.messages) == 0
+	return mq.size()
 }
 
-// Nonexported empty check that doesn't lock so we can call this in Read and Write.
-func (mq *MessageQueue) isEmpty() bool {
-	return len(mq.messages) == 0
+// Nonexported size check to check if the queue is empty inside already locked functions.
+func (mq *MessageQueue) size() int {
+	return len(mq.messages)
 }
 
 // Close closes the queue for future writes or reads. Any attempts to read or write from the
@@ -99,13 +78,15 @@ func (mq *MessageQueue) isEmpty() bool {
 func (mq *MessageQueue) Close() {
 	mq.m.Lock()
 	defer mq.m.Unlock()
-	// Already closed
+
+	// Already closed, noop
 	if mq.closed {
 		return
 	}
+
 	mq.messages = nil
 	mq.closed = true
-	// If there's anybody currently waiting on a value from ReadOrWait, we need to
+	// If there's anybody currently waiting on a value from Dequeue, we need to
 	// broadcast so the read(s) can return ErrQueueClosed.
 	mq.c.Broadcast()
}
```
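The rewrite above collapses the non-blocking `Read` and blocking `ReadOrWait` into a single `Dequeue` built on a `sync.Cond` wait loop. Since the vendored package is internal to hcsshim and not importable, here is a self-contained distillation of the same pattern for illustration, not the vendored code itself:

```go
// Distilled blocking-queue pattern: Dequeue waits on a sync.Cond until a
// value arrives or Close broadcasts to wake all waiters.
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errClosed = errors.New("queue closed")

type queue struct {
	m        sync.Mutex
	c        *sync.Cond
	messages []interface{}
	closed   bool
}

func newQueue() *queue {
	q := &queue{}
	q.c = sync.NewCond(&q.m)
	return q
}

func (q *queue) Enqueue(v interface{}) error {
	q.m.Lock()
	defer q.m.Unlock()
	if q.closed {
		return errClosed
	}
	q.messages = append(q.messages, v)
	q.c.Signal() // wake one blocked Dequeue
	return nil
}

// Dequeue blocks until a value is available or the queue is closed.
func (q *queue) Dequeue() (interface{}, error) {
	q.m.Lock()
	defer q.m.Unlock()
	for !q.closed && len(q.messages) == 0 {
		q.c.Wait()
	}
	// Closed wins: pending values are discarded, matching the vendored semantics.
	if q.closed {
		return nil, errClosed
	}
	v := q.messages[0]
	q.messages = q.messages[1:]
	return v, nil
}

func (q *queue) Close() {
	q.m.Lock()
	defer q.m.Unlock()
	if q.closed {
		return
	}
	q.closed = true
	q.c.Broadcast() // wake all waiters so they observe errClosed
}

func main() {
	q := newQueue()
	for i := 1; i <= 3; i++ {
		_ = q.Enqueue(i)
	}
	for i := 0; i < 3; i++ {
		v, _ := q.Dequeue()
		fmt.Println("got:", v)
	}
	q.Close()
	if _, err := q.Dequeue(); err != nil {
		fmt.Println("after close:", err)
	}
}
```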
**vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go** (generated, vendored; 2 lines changed)
```diff
@@ -175,7 +175,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
 //	LPDWORD lpReturnLength
 // );
 //
-//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
+//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
 
 // HANDLE OpenJobObjectW(
 //	DWORD dwDesiredAccess,
```
**vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go** (generated, vendored; 2 lines changed)
```diff
@@ -18,7 +18,7 @@ const ProcessVmCounters = 3
 //	[out, optional] PULONG ReturnLength
 // );
 //
-//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
+//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
 
 // typedef struct _VM_COUNTERS_EX
 // {
```
**vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go** (generated, vendored; 3 lines changed)
```diff
@@ -12,7 +12,8 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
 //	ULONG SystemInformationLength,
 //	PULONG ReturnLength
 // );
-//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
+//
+//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
 
 type SYSTEM_PROCESS_INFORMATION struct {
 	NextEntryOffset uint32 // ULONG
```
**vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go** (generated, vendored; 6 lines changed)
```diff
@@ -100,7 +100,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
 	return
 }
 
-func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
+func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) {
 	r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
 	status = uint32(r0)
 	return
@@ -152,7 +152,7 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result
 	return
 }
 
-func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
+func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
 	r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
 	if r1 == 0 {
 		if e1 != 0 {
@@ -244,7 +244,7 @@ func LocalFree(ptr uintptr) {
 	return
 }
 
-func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) {
+func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) {
 	r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0)
 	status = uint32(r0)
 	return
```
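These `uintptr` → `unsafe.Pointer` signature changes follow Go's unsafe rules: a pointer converted to `uintptr` is invisible to the garbage collector, so the conversion is only safe when `uintptr(unsafe.Pointer(p))` appears directly in the syscall call expression. A hedged, Windows-only sketch of the two shapes — `Beep` is just a stand-in procedure to make the file compile, and `callOld` exists only to show the unsafe shape:

```go
//go:build windows

// Sketch of why the vendored wrappers now take unsafe.Pointer. With a uintptr
// parameter, the pointer-to-integer conversion happens at the caller, and the
// GC may reclaim or move the pointed-to memory before the kernel reads it.
package main

import (
	"syscall"
	"unsafe"
)

var (
	kernel32 = syscall.NewLazyDLL("kernel32.dll")
	procBeep = kernel32.NewProc("Beep") // stand-in; any proc illustrates the pattern
)

type payload struct{ a, b uint32 }

// Old shape (unsafe): by the time SyscallN runs, no live pointer references
// the original object, so the GC is free to collect it.
func callOld(arg uintptr) {
	_, _, _ = syscall.SyscallN(procBeep.Addr(), arg)
}

// New shape (safe): the unsafe.Pointer parameter keeps the object alive, and
// the uintptr conversion happens inside the call expression, as the rules require.
func callNew(arg unsafe.Pointer) {
	_, _, _ = syscall.SyscallN(procBeep.Addr(), uintptr(arg))
}

func main() {
	var p payload
	// Beep rejects the out-of-range "frequency" and returns an error we ignore;
	// the point is only the lifetime of &p across the call.
	callNew(unsafe.Pointer(&p))
}
```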
**vendor/github.com/beorn7/perks/LICENSE** (generated, vendored; 20 lines removed)
```diff
@@ -1,20 +0,0 @@
-Copyright (C) 2013 Blake Mizerany
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
```
**vendor/github.com/beorn7/perks/quantile/exampledata.txt** (generated, vendored; 2388 lines removed)

File diff suppressed because it is too large.
**vendor/github.com/beorn7/perks/quantile/stream.go** (generated, vendored; 316 lines removed)
@@ -1,316 +0,0 @@
|
||||
// Package quantile computes approximate quantiles over an unbounded data
|
||||
// stream within low memory and CPU bounds.
|
||||
//
|
||||
// A small amount of accuracy is traded to achieve the above properties.
|
||||
//
|
||||
// Multiple streams can be merged before calling Query to generate a single set
|
||||
// of results. This is meaningful when the streams represent the same type of
|
||||
// data. See Merge and Samples.
|
||||
//
|
||||
// For more detailed information about the algorithm used, see:
|
||||
//
|
||||
// Effective Computation of Biased Quantiles over Data Streams
|
||||
//
|
||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Sample holds an observed value and meta information for compression. JSON
|
||||
// tags have been added for convenience.
|
||||
type Sample struct {
|
||||
Value float64 `json:",string"`
|
||||
Width float64 `json:",string"`
|
||||
Delta float64 `json:",string"`
|
||||
}
|
||||
|
||||
// Samples represents a slice of samples. It implements sort.Interface.
|
||||
type Samples []Sample
|
||||
|
||||
func (a Samples) Len() int { return len(a) }
|
||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
type invariant func(s *stream, r float64) float64
|
||||
|
||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the lower ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewLowBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * r
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the higher ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewHighBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * (s.n - r)
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
||||
// space and computation time. The targets map maps the desired quantiles to
|
||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
||||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targetMap map[float64]float64) *Stream {
|
||||
// Convert map to slice to avoid slow iterations on a map.
|
||||
// ƒ is called on the hot path, so converting the map to a slice
|
||||
// beforehand results in significant CPU savings.
|
||||
targets := targetMapToSlice(targetMap)
|
||||
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for _, t := range targets {
|
||||
if t.quantile*s.n <= r {
|
||||
f = (2 * t.epsilon * r) / t.quantile
|
||||
} else {
|
||||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
type target struct {
|
||||
quantile float64
|
||||
epsilon float64
|
||||
}
|
||||
|
||||
func targetMapToSlice(targetMap map[float64]float64) []target {
|
||||
	targets := make([]target, 0, len(targetMap))

	for quantile, epsilon := range targetMap {
		t := target{
			quantile: quantile,
			epsilon:  epsilon,
		}
		targets = append(targets, t)
	}
	return targets
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentile value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying stream's samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
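To make the Stream API above concrete, here is a minimal usage sketch. It assumes this is the beorn7/perks quantile package; the `NewTargeted` constructor is part of that package but does not appear in this diff, so treat it as an assumption:

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile" // assumed import path for the Stream shown above
)

func main() {
	// Track the 50th, 90th and 99th percentiles with per-quantile error bounds.
	// NewTargeted builds the targeted invariant that Query relies on.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})
	for i := 1; i <= 10000; i++ {
		s.Insert(float64(i)) // Insert buffers samples and flushes when the buffer fills.
	}
	// Query only returns meaningful values for the quantiles registered above.
	fmt.Println("p50:", s.Query(0.50))
	fmt.Println("p99:", s.Query(0.99))
}
```

Note that, per the ATTENTION comment above, `Merge` should not be relied on for combining streams.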
22 vendor/github.com/cespare/xxhash/v2/LICENSE.txt generated vendored
@@ -1,22 +0,0 @@
-Copyright (c) 2016 Caleb Spare
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
69 vendor/github.com/cespare/xxhash/v2/README.md generated vendored
@@ -1,69 +0,0 @@
-# xxhash
-
-[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
-[](https://github.com/cespare/xxhash/actions/workflows/test.yml)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-This package provides a straightforward API:
-
-```
-func Sum64(b []byte) uint64
-func Sum64String(s string) uint64
-type Digest struct{ ... }
-func New() *Digest
-```
-
-The `Digest` type implements hash.Hash64. Its key methods are:
-
-```
-func (*Digest) Write([]byte) (int, error)
-func (*Digest) WriteString(string) (int, error)
-func (*Digest) Sum64() uint64
-```
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
-
-## Compatibility
-
-This package is in a module and the latest code is in version 2 of the module.
-You need a version of Go with at least "minimal module compatibility" to use
-github.com/cespare/xxhash/v2:
-
-* 1.9.7+ for Go 1.9
-* 1.10.3+ for Go 1.10
-* Go 1.11 or later
-
-I recommend using the latest release of Go.
-
-## Benchmarks
-
-Here are some quick benchmarks comparing the pure-Go and assembly
-implementations of Sum64.
-
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
-
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
-
-```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
-```
-
-## Projects using this package
-
-- [InfluxDB](https://github.com/influxdata/influxdb)
-- [Prometheus](https://github.com/prometheus/prometheus)
-- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
-- [FreeCache](https://github.com/coocood/freecache)
-- [FastCache](https://github.com/VictoriaMetrics/fastcache)
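The removed README documents the package's public API. A short usage sketch of that documented surface (one-shot hashing plus the streaming `Digest`):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	// Streaming: Digest implements hash.Hash64, so data can arrive in chunks.
	d := xxhash.New()
	d.WriteString("hello, ") // always returns len(s), nil per the docs above
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}
```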
235 vendor/github.com/cespare/xxhash/v2/xxhash.go generated vendored
@@ -1,235 +0,0 @@
-// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-// at http://cyan4973.github.io/xxHash/.
-package xxhash
-
-import (
-	"encoding/binary"
-	"errors"
-	"math/bits"
-)
-
-const (
-	prime1 uint64 = 11400714785074694791
-	prime2 uint64 = 14029467366897019727
-	prime3 uint64 = 1609587929392839161
-	prime4 uint64 = 9650029242287828579
-	prime5 uint64 = 2870177450012600261
-)
-
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
-	prime1v = prime1
-	prime2v = prime2
-	prime3v = prime3
-	prime4v = prime4
-	prime5v = prime5
-)
-
-// Digest implements hash.Hash64.
-type Digest struct {
-	v1    uint64
-	v2    uint64
-	v3    uint64
-	v4    uint64
-	total uint64
-	mem   [32]byte
-	n     int // how much of mem is used
-}
-
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
-func New() *Digest {
-	var d Digest
-	d.Reset()
-	return &d
-}
-
-// Reset clears the Digest's state so that it can be reused.
-func (d *Digest) Reset() {
-	d.v1 = prime1v + prime2
-	d.v2 = prime2
-	d.v3 = 0
-	d.v4 = -prime1v
-	d.total = 0
-	d.n = 0
-}
-
-// Size always returns 8 bytes.
-func (d *Digest) Size() int { return 8 }
-
-// BlockSize always returns 32 bytes.
-func (d *Digest) BlockSize() int { return 32 }
-
-// Write adds more data to d. It always returns len(b), nil.
-func (d *Digest) Write(b []byte) (n int, err error) {
-	n = len(b)
-	d.total += uint64(n)
-
-	if d.n+n < 32 {
-		// This new data doesn't even fill the current block.
-		copy(d.mem[d.n:], b)
-		d.n += n
-		return
-	}
-
-	if d.n > 0 {
-		// Finish off the partial block.
-		copy(d.mem[d.n:], b)
-		d.v1 = round(d.v1, u64(d.mem[0:8]))
-		d.v2 = round(d.v2, u64(d.mem[8:16]))
-		d.v3 = round(d.v3, u64(d.mem[16:24]))
-		d.v4 = round(d.v4, u64(d.mem[24:32]))
-		b = b[32-d.n:]
-		d.n = 0
-	}
-
-	if len(b) >= 32 {
-		// One or more full blocks left.
-		nw := writeBlocks(d, b)
-		b = b[nw:]
-	}
-
-	// Store any remaining partial block.
-	copy(d.mem[:], b)
-	d.n = len(b)
-
-	return
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-func (d *Digest) Sum(b []byte) []byte {
-	s := d.Sum64()
-	return append(
-		b,
-		byte(s>>56),
-		byte(s>>48),
-		byte(s>>40),
-		byte(s>>32),
-		byte(s>>24),
-		byte(s>>16),
-		byte(s>>8),
-		byte(s),
-	)
-}
-
-// Sum64 returns the current hash.
-func (d *Digest) Sum64() uint64 {
-	var h uint64
-
-	if d.total >= 32 {
-		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
-		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
-		h = mergeRound(h, v1)
-		h = mergeRound(h, v2)
-		h = mergeRound(h, v3)
-		h = mergeRound(h, v4)
-	} else {
-		h = d.v3 + prime5
-	}
-
-	h += d.total
-
-	i, end := 0, d.n
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(d.mem[i:i+8]))
-		h ^= k1
-		h = rol27(h)*prime1 + prime4
-	}
-	if i+4 <= end {
-		h ^= uint64(u32(d.mem[i:i+4])) * prime1
-		h = rol23(h)*prime2 + prime3
-		i += 4
-	}
-	for i < end {
-		h ^= uint64(d.mem[i]) * prime5
-		h = rol11(h) * prime1
-		i++
-	}
-
-	h ^= h >> 33
-	h *= prime2
-	h ^= h >> 29
-	h *= prime3
-	h ^= h >> 32
-
-	return h
-}
-
-const (
-	magic         = "xxh\x06"
-	marshaledSize = len(magic) + 8*5 + 32
-)
-
-// MarshalBinary implements the encoding.BinaryMarshaler interface.
-func (d *Digest) MarshalBinary() ([]byte, error) {
-	b := make([]byte, 0, marshaledSize)
-	b = append(b, magic...)
-	b = appendUint64(b, d.v1)
-	b = appendUint64(b, d.v2)
-	b = appendUint64(b, d.v3)
-	b = appendUint64(b, d.v4)
-	b = appendUint64(b, d.total)
-	b = append(b, d.mem[:d.n]...)
-	b = b[:len(b)+len(d.mem)-d.n]
-	return b, nil
-}
-
-// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
-func (d *Digest) UnmarshalBinary(b []byte) error {
-	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
-		return errors.New("xxhash: invalid hash state identifier")
-	}
-	if len(b) != marshaledSize {
-		return errors.New("xxhash: invalid hash state size")
-	}
-	b = b[len(magic):]
-	b, d.v1 = consumeUint64(b)
-	b, d.v2 = consumeUint64(b)
-	b, d.v3 = consumeUint64(b)
-	b, d.v4 = consumeUint64(b)
-	b, d.total = consumeUint64(b)
-	copy(d.mem[:], b)
-	d.n = int(d.total % uint64(len(d.mem)))
-	return nil
-}
-
-func appendUint64(b []byte, x uint64) []byte {
-	var a [8]byte
-	binary.LittleEndian.PutUint64(a[:], x)
-	return append(b, a[:]...)
-}
-
-func consumeUint64(b []byte) ([]byte, uint64) {
-	x := u64(b)
-	return b[8:], x
-}
-
-func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
-func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
-
-func round(acc, input uint64) uint64 {
-	acc += input * prime2
-	acc = rol31(acc)
-	acc *= prime1
-	return acc
-}
-
-func mergeRound(acc, val uint64) uint64 {
-	val = round(0, val)
-	acc ^= val
-	acc = acc*prime1 + prime4
-	return acc
-}
-
-func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
-func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
-func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
-func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
-func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
-func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
-func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
-func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
13 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go generated vendored
@@ -1,13 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-//
-//go:noescape
-func Sum64(b []byte) uint64
-
-//go:noescape
-func writeBlocks(d *Digest, b []byte) int
215 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s generated vendored
@@ -1,215 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-#include "textflag.h"
-
-// Register allocation:
-// AX	h
-// SI	pointer to advance through b
-// DX	n
-// BX	loop end
-// R8	v1, k1
-// R9	v2
-// R10	v3
-// R11	v4
-// R12	tmp
-// R13	prime1v
-// R14	prime2v
-// DI	prime4v
-
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
-	MOVQ  (SI), R12 \
-	ADDQ  $8, SI    \
-	IMULQ R14, R12  \
-	ADDQ  R12, r    \
-	ROLQ  $31, r    \
-	IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
-	IMULQ R14, val \
-	ROLQ  $31, val \
-	IMULQ R13, val \
-	XORQ  val, acc \
-	IMULQ R13, acc \
-	ADDQ  DI, acc
-
-// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
-	// Load fixed primes.
-	MOVQ ·prime1v(SB), R13
-	MOVQ ·prime2v(SB), R14
-	MOVQ ·prime4v(SB), DI
-
-	// Load slice.
-	MOVQ b_base+0(FP), SI
-	MOVQ b_len+8(FP), DX
-	LEAQ (SI)(DX*1), BX
-
-	// The first loop limit will be len(b)-32.
-	SUBQ $32, BX
-
-	// Check whether we have at least one block.
-	CMPQ DX, $32
-	JLT  noBlocks
-
-	// Set up initial state (v1, v2, v3, v4).
-	MOVQ R13, R8
-	ADDQ R14, R8
-	MOVQ R14, R9
-	XORQ R10, R10
-	XORQ R11, R11
-	SUBQ R13, R11
-
-	// Loop until SI > BX.
-blockLoop:
-	round(R8)
-	round(R9)
-	round(R10)
-	round(R11)
-
-	CMPQ SI, BX
-	JLE  blockLoop
-
-	MOVQ R8, AX
-	ROLQ $1, AX
-	MOVQ R9, R12
-	ROLQ $7, R12
-	ADDQ R12, AX
-	MOVQ R10, R12
-	ROLQ $12, R12
-	ADDQ R12, AX
-	MOVQ R11, R12
-	ROLQ $18, R12
-	ADDQ R12, AX
-
-	mergeRound(AX, R8)
-	mergeRound(AX, R9)
-	mergeRound(AX, R10)
-	mergeRound(AX, R11)
-
-	JMP afterBlocks
-
-noBlocks:
-	MOVQ ·prime5v(SB), AX
-
-afterBlocks:
-	ADDQ DX, AX
-
-	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
-	ADDQ $24, BX
-
-	CMPQ SI, BX
-	JG   fourByte
-
-wordLoop:
-	// Calculate k1.
-	MOVQ  (SI), R8
-	ADDQ  $8, SI
-	IMULQ R14, R8
-	ROLQ  $31, R8
-	IMULQ R13, R8
-
-	XORQ  R8, AX
-	ROLQ  $27, AX
-	IMULQ R13, AX
-	ADDQ  DI, AX
-
-	CMPQ SI, BX
-	JLE  wordLoop
-
-fourByte:
-	ADDQ $4, BX
-	CMPQ SI, BX
-	JG   singles
-
-	MOVL  (SI), R8
-	ADDQ  $4, SI
-	IMULQ R13, R8
-	XORQ  R8, AX
-
-	ROLQ  $23, AX
-	IMULQ R14, AX
-	ADDQ  ·prime3v(SB), AX
-
-singles:
-	ADDQ $4, BX
-	CMPQ SI, BX
-	JGE  finalize
-
-singlesLoop:
-	MOVBQZX (SI), R12
-	ADDQ    $1, SI
-	IMULQ   ·prime5v(SB), R12
-	XORQ    R12, AX
-
-	ROLQ  $11, AX
-	IMULQ R13, AX
-
-	CMPQ SI, BX
-	JL   singlesLoop
-
-finalize:
-	MOVQ  AX, R12
-	SHRQ  $33, R12
-	XORQ  R12, AX
-	IMULQ R14, AX
-	MOVQ  AX, R12
-	SHRQ  $29, R12
-	XORQ  R12, AX
-	IMULQ ·prime3v(SB), AX
-	MOVQ  AX, R12
-	SHRQ  $32, R12
-	XORQ  R12, AX
-
-	MOVQ AX, ret+24(FP)
-	RET
-
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
-// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
-	// Load fixed primes needed for round.
-	MOVQ ·prime1v(SB), R13
-	MOVQ ·prime2v(SB), R14
-
-	// Load slice.
-	MOVQ b_base+8(FP), SI
-	MOVQ b_len+16(FP), DX
-	LEAQ (SI)(DX*1), BX
-	SUBQ $32, BX
-
-	// Load vN from d.
-	MOVQ d+0(FP), AX
-	MOVQ 0(AX), R8   // v1
-	MOVQ 8(AX), R9   // v2
-	MOVQ 16(AX), R10 // v3
-	MOVQ 24(AX), R11 // v4
-
-	// We don't need to check the loop condition here; this function is
-	// always called with at least one block of data to process.
-blockLoop:
-	round(R8)
-	round(R9)
-	round(R10)
-	round(R11)
-
-	CMPQ SI, BX
-	JLE  blockLoop
-
-	// Copy vN back to d.
-	MOVQ R8, 0(AX)
-	MOVQ R9, 8(AX)
-	MOVQ R10, 16(AX)
-	MOVQ R11, 24(AX)
-
-	// The number of bytes written is SI minus the old base pointer.
-	SUBQ b_base+8(FP), SI
-	MOVQ SI, ret+32(FP)
-
-	RET
76 vendor/github.com/cespare/xxhash/v2/xxhash_other.go generated vendored
@@ -1,76 +0,0 @@
-// +build !amd64 appengine !gc purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-func Sum64(b []byte) uint64 {
-	// A simpler version would be
-	//   d := New()
-	//   d.Write(b)
-	//   return d.Sum64()
-	// but this is faster, particularly for small inputs.
-
-	n := len(b)
-	var h uint64
-
-	if n >= 32 {
-		v1 := prime1v + prime2
-		v2 := prime2
-		v3 := uint64(0)
-		v4 := -prime1v
-		for len(b) >= 32 {
-			v1 = round(v1, u64(b[0:8:len(b)]))
-			v2 = round(v2, u64(b[8:16:len(b)]))
-			v3 = round(v3, u64(b[16:24:len(b)]))
-			v4 = round(v4, u64(b[24:32:len(b)]))
-			b = b[32:len(b):len(b)]
-		}
-		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
-		h = mergeRound(h, v1)
-		h = mergeRound(h, v2)
-		h = mergeRound(h, v3)
-		h = mergeRound(h, v4)
-	} else {
-		h = prime5
-	}
-
-	h += uint64(n)
-
-	i, end := 0, len(b)
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(b[i:i+8:len(b)]))
-		h ^= k1
-		h = rol27(h)*prime1 + prime4
-	}
-	if i+4 <= end {
-		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
-		h = rol23(h)*prime2 + prime3
-		i += 4
-	}
-	for ; i < end; i++ {
-		h ^= uint64(b[i]) * prime5
-		h = rol11(h) * prime1
-	}
-
-	h ^= h >> 33
-	h *= prime2
-	h ^= h >> 29
-	h *= prime3
-	h ^= h >> 32
-
-	return h
-}
-
-func writeBlocks(d *Digest, b []byte) int {
-	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
-	n := len(b)
-	for len(b) >= 32 {
-		v1 = round(v1, u64(b[0:8:len(b)]))
-		v2 = round(v2, u64(b[8:16:len(b)]))
-		v3 = round(v3, u64(b[16:24:len(b)]))
-		v4 = round(v4, u64(b[24:32:len(b)]))
-		b = b[32:len(b):len(b)]
-	}
-	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
-	return n - len(b)
-}
15 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go generated vendored
@@ -1,15 +0,0 @@
-// +build appengine
-
-// This file contains the safe implementations of otherwise unsafe-using code.
-
-package xxhash
-
-// Sum64String computes the 64-bit xxHash digest of s.
-func Sum64String(s string) uint64 {
-	return Sum64([]byte(s))
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-func (d *Digest) WriteString(s string) (n int, err error) {
-	return d.Write([]byte(s))
-}
57 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go generated vendored
@@ -1,57 +0,0 @@
-// +build !appengine
-
-// This file encapsulates usage of unsafe.
-// xxhash_safe.go contains the safe implementations.
-
-package xxhash
-
-import (
-	"unsafe"
-)
-
-// In the future it's possible that compiler optimizations will make these
-// XxxString functions unnecessary by realizing that calls such as
-// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
-// If that happens, even if we keep these functions they can be replaced with
-// the trivial safe code.
-
-// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
-//
-//   var b []byte
-//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-//   bh.Len = len(s)
-//   bh.Cap = len(s)
-//
-// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
-// weight to this sequence of expressions that any function that uses it will
-// not be inlined. Instead, the functions below use a different unsafe
-// conversion designed to minimize the inliner weight and allow both to be
-// inlined. There is also a test (TestInlining) which verifies that these are
-// inlined.
-//
-// See https://github.com/golang/go/issues/42739 for discussion.
-
-// Sum64String computes the 64-bit xxHash digest of s.
-// It may be faster than Sum64([]byte(s)) by avoiding a copy.
-func Sum64String(s string) uint64 {
-	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
-	return Sum64(b)
-}
-
-// WriteString adds more data to d. It always returns len(s), nil.
-// It may be faster than Write([]byte(s)) by avoiding a copy.
-func (d *Digest) WriteString(s string) (n int, err error) {
-	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
-	// d.Write always returns len(s), nil.
-	// Ignoring the return output and returning these fixed values buys a
-	// savings of 6 in the inliner's cost model.
-	return len(s), nil
-}
-
-// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
-// of the first two words is the same as the layout of a string.
-type sliceHeader struct {
-	s   string
-	cap int
-}
2 vendor/github.com/containers/common/pkg/auth/auth.go generated vendored
@@ -158,7 +158,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
 	}
 	if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
 		logrus.Debugf("error logging into %q: %v", key, unauthorized)
-		return fmt.Errorf("error logging into %q: invalid username/password", key)
+		return fmt.Errorf("logging into %q: invalid username/password", key)
 	}
 	return fmt.Errorf("authenticating creds for %q: %w", key, err)
 }
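The hunk above singles out bad credentials via `docker.ErrUnauthorizedForCredentials`. A minimal sketch of the same pattern from the caller's side; only the `ErrUnauthorizedForCredentials` type and its `Err` field come from this diff, and the `login` helper is hypothetical:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/containers/image/v5/docker"
)

// login is a hypothetical stand-in for a registry login attempt.
func login() error {
	return docker.ErrUnauthorizedForCredentials{Err: errors.New("401 Unauthorized")}
}

func main() {
	if err := login(); err != nil {
		var unauthorized docker.ErrUnauthorizedForCredentials
		if errors.As(err, &unauthorized) {
			// Bad username/password: prompt the user again rather than retrying blindly.
			fmt.Println("invalid username/password:", unauthorized)
			return
		}
		fmt.Println("unexpected error:", err)
	}
}
```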
20 vendor/github.com/containers/common/pkg/report/formatter.go generated vendored
@@ -59,16 +59,22 @@ type Formatter struct {
 func (f *Formatter) Parse(origin Origin, text string) (*Formatter, error) {
 	f.Origin = origin
 
+	// docker tries to be smart and replaces \n with the actual newline character.
+	// For compat we do the same but this will break formats such as '{{printf "\n"}}'
+	// To be backwards compatible with the previous behavior we try to replace and
+	// parse the template. If it fails use the original text and parse again.
+	var normText string
 	switch {
 	case strings.HasPrefix(text, "table "):
 		f.RenderTable = true
-		text = "{{range .}}" + NormalizeFormat(text) + "{{end -}}"
+		normText = "{{range .}}" + NormalizeFormat(text) + "{{end -}}"
+		text = "{{range .}}" + text + "{{end -}}"
 	case OriginUser == origin:
-		text = EnforceRange(NormalizeFormat(text))
+		normText = EnforceRange(NormalizeFormat(text))
+		text = EnforceRange(text)
 	default:
-		text = NormalizeFormat(text)
+		normText = NormalizeFormat(text)
 	}
-	f.text = text
 
 	if f.RenderTable || origin == OriginPodman {
 		tw := tabwriter.NewWriter(f.writer, 12, 2, 2, ' ', tabwriter.StripEscape)
@@ -77,10 +83,14 @@ func (f *Formatter) Parse(origin Origin, text string) (*Formatter, error) {
 		f.RenderHeaders = true
 	}
 
-	tmpl, err := f.template.Funcs(template.FuncMap(DefaultFuncs)).Parse(text)
+	tmpl, err := f.template.Funcs(template.FuncMap(DefaultFuncs)).Parse(normText)
 	if err != nil {
+		tmpl, err = f.template.Funcs(template.FuncMap(DefaultFuncs)).Parse(text)
 		f.template = tmpl
+		f.text = text
 		return f, err
 	}
+	f.text = normText
 	f.template = tmpl
 	return f, nil
 }
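The change above parses the normalized template first and only falls back to the raw text when normalization broke it. A self-contained sketch of that fallback strategy using only the standard library; `NormalizeFormat` is not reproduced here, a plain newline replacement stands in for it:

```go
package main

import (
	"fmt"
	"os"
	"strings"
	"text/template"
)

// parseWithFallback prefers the normalized text, mirroring the hunk above:
// if the normalized form fails to parse, retry with the original text.
func parseWithFallback(text string) (*template.Template, error) {
	normText := strings.ReplaceAll(text, `\n`, "\n") // stand-in for NormalizeFormat
	tmpl, err := template.New("fmt").Parse(normText)
	if err != nil {
		return template.New("fmt").Parse(text)
	}
	return tmpl, nil
}

func main() {
	// `{{printf "\n"}}` is exactly the case the comment warns about: naive
	// replacement corrupts the quoted string, so the fallback path keeps it working.
	tmpl, err := parseWithFallback(`{{printf "\n"}}ok`)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	_ = tmpl.Execute(os.Stdout, nil)
}
```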
39 vendor/github.com/containers/image/v5/docker/archive/dest.go generated vendored
@@ -3,7 +3,6 @@ package archive
 import (
 	"context"
 	"fmt"
-	"io"
 
 	"github.com/containers/image/v5/docker/internal/tarfile"
 	"github.com/containers/image/v5/internal/private"
@@ -13,8 +12,8 @@ import (
 type archiveImageDestination struct {
 	*tarfile.Destination // Implements most of types.ImageDestination
 	ref         archiveReference
-	archive     *tarfile.Writer // Should only be closed if writer != nil
-	writer      io.Closer       // May be nil if the archive is shared
+	writer      *Writer // Should be closed if closeWriter
+	closeWriter bool
 }
 
 func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) {
@@ -22,29 +21,28 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (privat
 		return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
 	}
 
-	var archive *tarfile.Writer
-	var writer io.Closer
-	if ref.archiveWriter != nil {
-		archive = ref.archiveWriter
-		writer = nil
+	var writer *Writer
+	var closeWriter bool
+	if ref.writer != nil {
+		writer = ref.writer
+		closeWriter = false
 	} else {
-		fh, err := openArchiveForWriting(ref.path)
+		w, err := NewWriter(sys, ref.path)
 		if err != nil {
 			return nil, err
 		}
-
-		archive = tarfile.NewWriter(fh)
-		writer = fh
+		writer = w
+		closeWriter = true
 	}
-	tarDest := tarfile.NewDestination(sys, archive, ref.Transport().Name(), ref.ref)
+	tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref)
 	if sys != nil && sys.DockerArchiveAdditionalTags != nil {
 		tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
 	}
 	return &archiveImageDestination{
 		Destination: tarDest,
 		ref:         ref,
-		archive:     archive,
 		writer:      writer,
+		closeWriter: closeWriter,
 	}, nil
 }
 
@@ -56,7 +54,7 @@ func (d *archiveImageDestination) Reference() types.ImageReference {
 
 // Close removes resources associated with an initialized ImageDestination, if any.
 func (d *archiveImageDestination) Close() error {
-	if d.writer != nil {
+	if d.closeWriter {
 		return d.writer.Close()
 	}
 	return nil
@@ -70,8 +68,15 @@ func (d *archiveImageDestination) Close() error {
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
 func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
-	if d.writer != nil {
-		return d.archive.Close()
+	d.writer.imageCommitted()
+	if d.closeWriter {
+		// We could do this only in .Close(), but failures in .Close() are much more likely to be
+		// ignored by callers that use defer. So, in single-image destinations, try to complete
+		// the archive here.
+		// But if Commit() is never called, let .Close() clean up.
+		err := d.writer.Close()
+		d.closeWriter = false
+		return err
	}
 	return nil
 }
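The Commit change above moves the fallible archive finalization out of Close, because errors returned from a deferred Close are commonly dropped. A generic sketch of that design; every name here is illustrative, not part of the vendored API:

```go
package main

import (
	"fmt"
	"os"
)

// sink is an illustrative single-image destination: Commit finalizes eagerly
// so the caller sees the error, and Close becomes a harmless no-op afterwards.
type sink struct {
	f         *os.File
	needClose bool
}

func (s *sink) Commit() error {
	if !s.needClose {
		return nil
	}
	s.needClose = false
	// Finalize here, where the caller checks the error, instead of in Close,
	// which is typically invoked via defer with its error ignored.
	return s.f.Close()
}

func (s *sink) Close() error {
	if s.needClose {
		s.needClose = false
		return s.f.Close() // cleanup path when Commit was never called
	}
	return nil
}

func main() {
	f, _ := os.CreateTemp("", "archive-*.tar")
	s := &sink{f: f, needClose: true}
	defer s.Close() // error intentionally ignored, as with most deferred Closes

	if err := s.Commit(); err != nil {
		fmt.Fprintln(os.Stderr, "commit failed:", err)
	}
}
```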
6 vendor/github.com/containers/image/v5/docker/archive/transport.go generated vendored
@@ -53,7 +53,7 @@ type archiveReference struct {
 	// file, not necessarily path precisely).
 	archiveReader *tarfile.Reader
 	// If not nil, must have been created for path
-	archiveWriter *tarfile.Writer
+	writer *Writer
 }
 
 // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
@@ -108,7 +108,7 @@ func NewIndexReference(path string, sourceIndex int) (types.ImageReference, erro
 // newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
 // and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
 func newReference(path string, ref reference.NamedTagged, sourceIndex int,
-	archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
+	archiveReader *tarfile.Reader, writer *Writer) (types.ImageReference, error) {
 	if strings.Contains(path, ":") {
 		return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
 	}
@@ -126,7 +126,7 @@ func newReference(path string, ref reference.NamedTagged, sourceIndex int,
 		ref:           ref,
 		sourceIndex:   sourceIndex,
 		archiveReader: archiveReader,
-		archiveWriter: archiveWriter,
+		writer:        writer,
 	}, nil
 }
98 vendor/github.com/containers/image/v5/docker/archive/writer.go generated vendored
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"sync"
 
 	"github.com/containers/image/v5/docker/internal/tarfile"
 	"github.com/containers/image/v5/docker/reference"
@@ -13,47 +14,19 @@ import (
 
 // Writer manages a single in-progress Docker archive and allows adding images to it.
 type Writer struct {
-	path    string // The original, user-specified path; not the maintained temporary file, if any
-	archive *tarfile.Writer
-	writer  io.Closer
+	path        string // The original, user-specified path; not the maintained temporary file, if any
+	regularFile bool   // path refers to a regular file (e.g. not a pipe)
+	archive     *tarfile.Writer
+	writer      io.Closer
+
+	// The following state can only be accessed with the mutex held.
+	mutex     sync.Mutex
+	hadCommit bool // At least one successful commit has happened
 }
 
 // NewWriter returns a Writer for path.
 // The caller should call .Close() on the returned object.
 func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
-	fh, err := openArchiveForWriting(path)
-	if err != nil {
-		return nil, err
-	}
-	archive := tarfile.NewWriter(fh)
-
-	return &Writer{
-		path:    path,
-		archive: archive,
-		writer:  fh,
-	}, nil
-}
-
-// Close writes all outstanding data about images to the archive, and
-// releases state associated with the Writer, if any.
-// No more images can be added after this is called.
-func (w *Writer) Close() error {
-	err := w.archive.Close()
-	if err2 := w.writer.Close(); err2 != nil && err == nil {
-		err = err2
-	}
-	return err
-}
-
-// NewReference returns an ImageReference that allows adding an image to Writer,
-// with an optional reference.
-func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
-	return newReference(w.path, destinationRef, -1, nil, w.archive)
-}
-
-// openArchiveForWriting opens path for writing a tar archive,
-// making a few sanity checks.
-func openArchiveForWriting(path string) (*os.File, error) {
 	// path can be either a pipe or a regular file
 	// in the case of a pipe, we require that we can open it for write
 	// in the case of a regular file, we don't want to overwrite any pre-existing file
@@ -69,15 +42,62 @@ func openArchiveForWriting(path string) (*os.File, error) {
 			fh.Close()
 		}
 	}()
 
 	fhStat, err := fh.Stat()
 	if err != nil {
 		return nil, fmt.Errorf("statting file %q: %w", path, err)
 	}
 
-	if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
+	regularFile := fhStat.Mode().IsRegular()
+	if regularFile && fhStat.Size() != 0 {
 		return nil, errors.New("docker-archive doesn't support modifying existing images")
 	}
 
+	archive := tarfile.NewWriter(fh)
+
 	succeeded = true
-	return fh, nil
+	return &Writer{
+		path:        path,
+		regularFile: regularFile,
+		archive:     archive,
+		writer:      fh,
+		hadCommit:   false,
+	}, nil
 }
+
+// imageCommitted notifies the Writer that at least one image was successfully committed to the stream.
+func (w *Writer) imageCommitted() {
+	w.mutex.Lock()
+	defer w.mutex.Unlock()
+	w.hadCommit = true
+}
+
+// Close writes all outstanding data about images to the archive, and
+// releases state associated with the Writer, if any.
+// No more images can be added after this is called.
+func (w *Writer) Close() error {
+	err := w.archive.Close()
+	if err2 := w.writer.Close(); err2 != nil && err == nil {
+		err = err2
+	}
+	if err == nil && w.regularFile && !w.hadCommit {
+		// Writing to the destination never had a success; delete the destination if we created it.
+		// This is done primarily because we don’t implement adding another image to a pre-existing image, so if we
+		// left a partial archive around (notably because reading from the _source_ has failed), we couldn’t retry without
+		// the caller manually deleting the partial archive. So, delete it instead.
+		//
+		// Archives with at least one successfully created image are left around; they might still be valuable.
+		//
+		// Note a corner case: If there _originally_ was an empty file (which is not a valid archive anyway), this deletes it.
+		// Ideally, if w.regularFile, we should write the full contents to a temporary file and use os.Rename here, only on success.
+		if err2 := os.Remove(w.path); err2 != nil {
+			err = err2
+		}
+	}
+	return err
+}
+
+// NewReference returns an ImageReference that allows adding an image to Writer,
+// with an optional reference.
+func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
+	return newReference(w.path, destinationRef, -1, nil, w)
+}
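Writer.Close above deletes a regular-file archive when no image was ever committed, so a failed copy can simply be retried. A standalone sketch of that cleanup rule; the names are illustrative:

```go
package main

import "os"

// closeOutput finalizes an output file and removes it again if nothing was
// ever successfully committed, mirroring the Close logic in the hunk above.
func closeOutput(f *os.File, path string, regularFile, hadCommit bool) error {
	err := f.Close()
	if err == nil && regularFile && !hadCommit {
		// Leaving a partial file around would block a retry, because the
		// format does not support appending to a pre-existing archive.
		if err2 := os.Remove(path); err2 != nil {
			err = err2
		}
	}
	return err
}

func main() {
	f, err := os.Create("/tmp/example-output.tar")
	if err != nil {
		panic(err)
	}
	// No commit happened, so the empty partial file is removed on close.
	_ = closeOutput(f, "/tmp/example-output.tar", true, false)
}
```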
2 vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go generated vendored
@@ -53,7 +53,7 @@ func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
 // For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
 // For daemonImageDestination, it must be a ref, which is NamedTagged.
 // (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest.
-//  Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
+// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
 type daemonReference struct {
 	id  digest.Digest
 	ref reference.Named // !reference.IsNameOnly
@@ -1,46 +1,60 @@
-package client
+// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go
+// Copyright 2022 github.com/distribution/distribution authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package docker
 
 import (
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net/http"
 
 	"github.com/docker/distribution/registry/api/errcode"
-	"github.com/docker/distribution/registry/client/auth/challenge"
+	dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
 )
 
-// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
+// errNoErrorsInBody is returned when an HTTP response body parses to an empty
 // errcode.Errors slice.
-var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
+var errNoErrorsInBody = errors.New("no error details found in HTTP response body")
 
-// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
+// unexpectedHTTPStatusError is returned when an unexpected HTTP status is
 // returned when making a registry api call.
-type UnexpectedHTTPStatusError struct {
+type unexpectedHTTPStatusError struct {
 	Status string
 }
 
-func (e *UnexpectedHTTPStatusError) Error() string {
+func (e *unexpectedHTTPStatusError) Error() string {
 	return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
 }
 
-// UnexpectedHTTPResponseError is returned when an expected HTTP status code
+// unexpectedHTTPResponseError is returned when an expected HTTP status code
 // is returned, but the content was unexpected and failed to be parsed.
-type UnexpectedHTTPResponseError struct {
+type unexpectedHTTPResponseError struct {
 	ParseErr   error
 	StatusCode int
 	Response   []byte
 }
 
-func (e *UnexpectedHTTPResponseError) Error() string {
+func (e *unexpectedHTTPResponseError) Error() string {
 	return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
 }
 
 func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
 	var errors errcode.Errors
-	body, err := ioutil.ReadAll(r)
+	body, err := io.ReadAll(r)
 	if err != nil {
 		return err
 	}
@@ -63,7 +77,7 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
 	}
 
 	if err := json.Unmarshal(body, &errors); err != nil {
-		return &UnexpectedHTTPResponseError{
+		return &unexpectedHTTPResponseError{
 			ParseErr:   err,
 			StatusCode: statusCode,
 			Response:   body,
@@ -73,8 +87,8 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
 	if len(errors) == 0 {
 		// If there was no error specified in the body, return
 		// UnexpectedHTTPResponseError.
-		return &UnexpectedHTTPResponseError{
-			ParseErr:   ErrNoErrorsInBody,
+		return &unexpectedHTTPResponseError{
+			ParseErr:   errNoErrorsInBody,
 			StatusCode: statusCode,
 			Response:   body,
 		}
@@ -94,15 +108,15 @@ func mergeErrors(err1, err2 error) error {
 	return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
 }
 
-// HandleErrorResponse returns error parsed from HTTP response for an
+// handleErrorResponse returns error parsed from HTTP response for an
 // unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
 // UnexpectedHTTPStatusError returned for response code outside of expected
 // range.
-func HandleErrorResponse(resp *http.Response) error {
+func handleErrorResponse(resp *http.Response) error {
 	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
 		// Check for OAuth errors within the `WWW-Authenticate` header first
 		// See https://tools.ietf.org/html/rfc6750#section-3
-		for _, c := range challenge.ResponseChallenges(resp) {
+		for _, c := range dockerChallenge.ResponseChallenges(resp) {
 			if c.Scheme == "bearer" {
 				var err errcode.Error
 				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
@@ -124,16 +138,10 @@ func handleErrorResponse(resp *http.Response) error {
 		}
 	}
 	err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
-	if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
+	if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
 		return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
 	}
 	return err
 	}
-	return &UnexpectedHTTPStatusError{Status: resp.Status}
-}
-
-// SuccessStatus returns true if the argument is a successful HTTP response
-// code (in the range 200 - 399 inclusive).
-func SuccessStatus(status int) bool {
-	return status >= 200 && status <= 399
+	return &unexpectedHTTPStatusError{Status: resp.Status}
 }
82 vendor/github.com/containers/image/v5/docker/docker_client.go generated vendored
@@ -27,7 +27,6 @@ import (
 	"github.com/containers/storage/pkg/homedir"
 	"github.com/docker/distribution/registry/api/errcode"
 	v2 "github.com/docker/distribution/registry/api/v2"
-	clientLib "github.com/docker/distribution/registry/client"
 	"github.com/docker/go-connections/tlsconfig"
 	digest "github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -126,8 +125,9 @@ type dockerClient struct {
 }
 
 type authScope struct {
-	remoteName string
-	actions    string
+	resourceType string
+	remoteName   string
+	actions      string
 }
 
 // sendAuth determines whether we need authentication for v2 or v1 endpoint.
@@ -236,6 +236,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, regis
 	}
 	client.signatureBase = sigBase
 	client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref)
+	client.scope.resourceType = "repository"
 	client.scope.actions = actions
 	client.scope.remoteName = reference.Path(ref.ref)
 	return client, nil
@@ -474,6 +475,33 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea
 	return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
 }
 
+// Checks if the auth headers in the response contain an indication of a failed
+// authorization because of an "insufficient_scope" error. If that's the case,
+// returns the required scope to be used for fetching a new token.
+func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) {
+	if err == nil && res.StatusCode == http.StatusUnauthorized {
+		challenges := parseAuthHeader(res.Header)
+		for _, challenge := range challenges {
+			if challenge.Scheme == "bearer" {
+				if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
+					if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
+						if newScope, err := parseAuthScope(scope); err == nil {
+							return true, newScope
+						} else {
+							logrus.WithFields(logrus.Fields{
+								"error":     err,
+								"scope":     scope,
+								"challenge": challenge,
+							}).Error("Failed to parse the authentication scope from the given challenge")
+						}
+					}
+				}
+			}
+		}
+	}
+	return false, nil
+}
+
 // parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it,
 // silently falling back to fallbackDelay if the header is missing or invalid.
 func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration {
@@ -513,6 +541,29 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
 	for {
 		res, err := c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
 		attempts++
+
+		// By default we use pre-defined scopes per operation. In
+		// certain cases, this can fail when our authentication is
+		// insufficient, then we might be getting an error back with a
+		// Www-Authenticate Header indicating an insufficient scope.
+		//
+		// Check for that and update the client challenges to retry after
+		// requesting a new token
+		//
+		// We only try this on the first attempt, to not overload an
+		// already struggling server.
+		// We also cannot retry with a body (stream != nil) as stream
+		// was already read
+		if attempts == 1 && stream == nil && auth != noAuth {
+			if retry, newScope := needsRetryWithUpdatedScope(err, res); retry {
+				logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
+				// Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently
+				// expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
+				// for more than one extra scope.
+				res, err = c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, newScope)
+				extraScope = newScope
+			}
+		}
 		if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
 			stream != nil || // We can't retry with a body (which is not restartable in the general case)
 			attempts == backoffNumIterations {
@@ -592,8 +643,18 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
 	cacheKey := ""
 	scopes := []authScope{c.scope}
 	if extraScope != nil {
-		// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
-		cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
+		// Using ':' as a separator here is unambiguous because getBearerToken below
+		// uses the same separator when formatting a remote request (and because
+		// repository names that we create can't contain colons, and extraScope values
+		// coming from a server come from `parseAuthScope`, which also splits on colons).
+		cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions)
+		if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 {
+			return fmt.Errorf(
+				"Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d",
+				cacheKey,
+				colonCount,
+			)
+		}
 		scopes = append(scopes, *extraScope)
 	}
 	var token bearerToken
@@ -648,9 +709,10 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
 	if service, ok := challenge.Parameters["service"]; ok && service != "" {
 		params.Add("service", service)
 	}
+
 	for _, scope := range scopes {
-		if scope.remoteName != "" && scope.actions != "" {
-			params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+		if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
+			params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
 		}
 	}
 	params.Add("grant_type", "refresh_token")
@@ -700,8 +762,8 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
 	}
 
 	for _, scope := range scopes {
-		if scope.remoteName != "" && scope.actions != "" {
-			params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
+		if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
+			params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
 		}
 	}
 
@@ -977,7 +1039,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
 	defer res.Body.Close()
 
 	if res.StatusCode != http.StatusOK {
-		return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), clientLib.HandleErrorResponse(res))
+		return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), handleErrorResponse(res))
 	}
 
 	body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)
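To make the retry logic above tangible: an insufficient_scope failure arrives as a 401 with a Bearer challenge in the WWW-Authenticate header (RFC 6750 §3, cited in the code). A simplified, standard-library-only sketch of detecting it; the vendored code uses its internal parseAuthHeader instead of this string matching:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// scopeFromChallenge pulls the server-requested scope out of a Bearer
// challenge that reports error="insufficient_scope". This is a simplified
// stand-in for the vendored parseAuthHeader/needsRetryWithUpdatedScope pair.
func scopeFromChallenge(h http.Header) (string, bool) {
	c := h.Get("Www-Authenticate")
	if !strings.HasPrefix(c, "Bearer ") || !strings.Contains(c, `error="insufficient_scope"`) {
		return "", false
	}
	for _, part := range strings.Split(strings.TrimPrefix(c, "Bearer "), ",") {
		if v, ok := strings.CutPrefix(strings.TrimSpace(part), `scope="`); ok {
			return strings.TrimSuffix(v, `"`), true
		}
	}
	return "", false
}

func main() {
	h := http.Header{}
	h.Set("Www-Authenticate",
		`Bearer realm="https://auth.example.com/token",error="insufficient_scope",scope="repository:library/busybox:pull"`)
	if scope, ok := scopeFromChallenge(h); ok {
		fmt.Println("retry with scope:", scope) // repository:library/busybox:pull
	}
}
```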
5 vendor/github.com/containers/image/v5/docker/docker_image_dest.go generated vendored
@@ -358,8 +358,9 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		// Checking candidateRepo, and mounting from it, requires an
 		// expanded token scope.
 		extraScope := &authScope{
-			remoteName: reference.Path(candidateRepo),
-			actions:    "pull",
+			resourceType: "repository",
+			remoteName:   reference.Path(candidateRepo),
+			actions:      "pull",
 		}
 		// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
 		// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
8 vendor/github.com/containers/image/v5/docker/errors.go generated vendored
@@ -4,8 +4,6 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
-
-	"github.com/docker/distribution/registry/client"
 )
 
 var (
@@ -35,7 +33,7 @@ func httpResponseToError(res *http.Response, context string) error {
 	case http.StatusTooManyRequests:
 		return ErrTooManyRequests
 	case http.StatusUnauthorized:
-		err := client.HandleErrorResponse(res)
+		err := handleErrorResponse(res)
 		return ErrUnauthorizedForCredentials{Err: err}
 	default:
 		if context != "" {
@@ -48,8 +46,8 @@ func httpResponseToError(res *http.Response, context string) error {
 // registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
 // registry
 func registryHTTPResponseToError(res *http.Response) error {
-	err := client.HandleErrorResponse(res)
-	if e, ok := err.(*client.UnexpectedHTTPResponseError); ok {
+	err := handleErrorResponse(res)
+	if e, ok := err.(*unexpectedHTTPResponseError); ok {
 		response := string(e.Response)
 		if len(response) > 50 {
 			response = response[:50] + "..."
4 vendor/github.com/containers/image/v5/docker/reference/reference.go generated vendored
@@ -3,13 +3,13 @@
 //
 // Grammar
 //
-// 	reference                       := name [ ":" tag ] [ "@" digest ]
+//	reference                       := name [ ":" tag ] [ "@" digest ]
 //	name                            := [domain '/'] path-component ['/' path-component]*
 //	domain                          := domain-component ['.' domain-component]* [':' port-number]
 //	domain-component                := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
 //	port-number                     := /[0-9]+/
 //	path-component                  := alpha-numeric [separator alpha-numeric]*
-// 	alpha-numeric                   := /[a-z0-9]+/
+//	alpha-numeric                   := /[a-z0-9]+/
 //	separator                       := /[_.]|__|[-]*/
 //
 //	tag                             := /[\w][\w.-]{0,127}/
13 vendor/github.com/containers/image/v5/docker/wwwauthenticate.go generated vendored
@@ -3,6 +3,7 @@ package docker
 // Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
 
 import (
+	"fmt"
 	"net/http"
 	"strings"
 )
@@ -70,6 +71,18 @@ func parseAuthHeader(header http.Header) []challenge {
 	return challenges
 }
 
+// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions`
+func parseAuthScope(scopeStr string) (*authScope, error) {
+	if parts := strings.Split(scopeStr, ":"); len(parts) == 3 {
+		return &authScope{
+			resourceType: parts[0],
+			remoteName:   parts[1],
+			actions:      parts[2],
+		}, nil
+	}
+	return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr)
+}
+
 // NOTE: This is not a fully compliant parser per RFC 7235:
 // Most notably it does not support more than one challenge within a single header
 // Some of the whitespace parsing also seems noncompliant.
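parseAuthScope above is the inverse of the `$resource:$remote:$actions` formatting used by getBearerToken, and the same three fields feed the cache key in setupRequestAuth. A small standalone sketch of that round trip:

```go
package main

import (
	"fmt"
	"strings"
)

// splitScope mirrors parseAuthScope: a token scope such as
// "repository:library/busybox:pull" splits into exactly three fields
// (repository names cannot contain colons, so the split is unambiguous).
func splitScope(s string) (resourceType, remoteName, actions string, err error) {
	parts := strings.Split(s, ":")
	if len(parts) != 3 {
		return "", "", "", fmt.Errorf("error parsing auth scope: '%s'", s)
	}
	return parts[0], parts[1], parts[2], nil
}

func main() {
	rt, name, actions, err := splitScope("repository:library/busybox:pull")
	if err != nil {
		panic(err)
	}
	fmt.Println(rt, name, actions)
	// The fields are rejoined with the same separator for the token request
	// and for the cache key ("%s:%s:%s") in setupRequestAuth.
	fmt.Println("cacheKey:", fmt.Sprintf("%s:%s:%s", rt, name, actions))
}
```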
13 vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go generated vendored
@@ -22,13 +22,14 @@ type Compat struct {
|
||||
// for implementations of private.ImageDestination.
|
||||
//
|
||||
// Use it like this:
|
||||
// type yourDestination struct {
|
||||
// impl.Compat
|
||||
// …
|
||||
// }
|
||||
// dest := &yourDestination{…}
|
||||
// dest.Compat = impl.AddCompat(dest)
|
||||
//
|
||||
// type yourDestination struct {
|
||||
// impl.Compat
|
||||
// …
|
||||
// }
|
||||
//
|
||||
// dest := &yourDestination{…}
|
||||
// dest.Compat = impl.AddCompat(dest)
|
||||
func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
|
||||
return Compat{dest}
|
||||
}
|
||||
|
||||
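The embedding pattern that doc comment describes, condensed into a runnable sketch. The interface and types here are stand-ins invented for the example; the real private.ImageDestinationInternalOnly interface lives in containers/image's internal packages and is not importable from outside that module:

```go
package main

import "fmt"

// internalOnly stands in for private.ImageDestinationInternalOnly.
type internalOnly interface{ Name() string }

// Compat wraps an implementation, as impl.Compat does above.
type Compat struct{ dest internalOnly }

func AddCompat(dest internalOnly) Compat { return Compat{dest} }

// yourDestination embeds Compat so the compatibility wrappers are promoted
// onto the destination type, exactly as the comment recommends.
type yourDestination struct {
	Compat
	name string
}

func (d *yourDestination) Name() string { return d.name }

func main() {
	dest := &yourDestination{name: "example"}
	dest.Compat = AddCompat(dest) // wire the wrapper back to the implementation
	fmt.Println(dest.Compat.dest.Name())
}
```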
vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go (generated, vendored): 34 changes
@@ -3,23 +3,25 @@
 // Compare with imagedestination/impl, which might require non-trivial implementation work.
 //
 // There are two kinds of stubs:
-// - Pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
-//   implementation:
-//
-//   type yourDestination struct {
-//       stubs.ImplementsPutBlobPartial
-//       …
-//   }
-// - Stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
-//   means that a constructor must be called:
-//   type yourDestination struct {
-//       stubs.NoPutBlobPartialInitialize
-//       …
-//   }
-//
-//   dest := &yourDestination{
-//       …
-//       NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
-//   }
+//
+// First, there are pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
+// implementation:
+//
+//	type yourDestination struct {
+//		stubs.ImplementsPutBlobPartial
+//		…
+//	}
+//
+// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+//	type yourDestination struct {
+//		stubs.NoPutBlobPartialInitialize
+//		…
+//	}
+//
+//	dest := &yourDestination{
+//		…
+//		NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+//	}
 package stubs

vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go (generated, vendored): 13 changes
@@ -19,13 +19,14 @@ type Compat struct {
 // for implementations of private.ImageSource.
 //
 // Use it like this:
-// type yourSource struct {
-//     impl.Compat
-//     …
-// }
-// src := &yourSource{…}
-// src.Compat = impl.AddCompat(src)
+//
+//	type yourSource struct {
+//		impl.Compat
+//		…
+//	}
+//
+//	src := &yourSource{…}
+//	src.Compat = impl.AddCompat(src)
 func AddCompat(src private.ImageSourceInternalOnly) Compat {
 	return Compat{src}
 }

vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go (generated, vendored): 35 changes
@@ -3,23 +3,26 @@
 // Compare with imagesource/impl, which might require non-trivial implementation work.
 //
 // There are two kinds of stubs:
-// - Pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
-//   implementation:
-//
-//   type yourSource struct {
-//       stubs.ImplementsGetBlobAt
-//       …
-//   }
-// - Stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
-//   means that a constructor must be called:
-//   type yourSource struct {
-//       stubs.NoGetBlobAtInitialize
-//       …
-//   }
-//
-//   dest := &yourSource{
-//       …
-//       NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
-//   }
+//
+// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
+// implementation:
+//
+//	type yourSource struct {
+//		stubs.ImplementsGetBlobAt
+//		…
+//	}
+//
+// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+//	type yourSource struct {
+//		stubs.NoGetBlobAtInitialize
+//		…
+//	}
+//
+//	dest := &yourSource{
+//		…
+//		NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+//	}
 package stubs

vendor/github.com/containers/image/v5/manifest/common.go (generated, vendored): 13 changes
@@ -228,3 +228,16 @@ func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet,
 	variants := findCompressionMIMETypeSet(variantTable, mimeType)
 	return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm.
 }
+
+// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer {
+	layers := make([]types.ImageInspectLayer, len(infos))
+	for i, info := range infos {
+		layers[i].MIMEType = info.MediaType
+		layers[i].Digest = info.Digest
+		layers[i].Size = info.Size
+		layers[i].Annotations = info.Annotations
+	}
+	return layers
+}

vendor/github.com/containers/image/v5/manifest/docker_schema1.go (generated, vendored): 5 changes
@@ -221,13 +221,16 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI
 	if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil {
 		return nil, err
 	}
+	layerInfos := m.LayerInfos()
 	i := &types.ImageInspectInfo{
 		Tag:           m.Tag,
 		Created:       &s1.Created,
 		DockerVersion: s1.DockerVersion,
 		Architecture:  s1.Architecture,
 		Os:            s1.OS,
-		Layers:        layerInfosToStrings(m.LayerInfos()),
+		Layers:        layerInfosToStrings(layerInfos),
+		LayersData:    imgInspectLayersFromLayerInfos(layerInfos),
 		Author:        s1.Author,
 	}
 	if s1.Config != nil {
 		i.Labels = s1.Config.Labels

vendor/github.com/containers/image/v5/manifest/docker_schema2.go (generated, vendored): 5 changes
@@ -271,6 +271,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
 	if err := json.Unmarshal(config, s2); err != nil {
 		return nil, err
 	}
+	layerInfos := m.LayerInfos()
 	i := &types.ImageInspectInfo{
 		Tag:     "",
 		Created: &s2.Created,
@@ -278,7 +279,9 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
 		Architecture: s2.Architecture,
 		Variant:      s2.Variant,
 		Os:           s2.OS,
-		Layers:       layerInfosToStrings(m.LayerInfos()),
+		Layers:       layerInfosToStrings(layerInfos),
+		LayersData:   imgInspectLayersFromLayerInfos(layerInfos),
 		Author:       s2.Author,
 	}
 	if s2.Config != nil {
 		i.Labels = s2.Config.Labels

vendor/github.com/containers/image/v5/manifest/oci.go (generated, vendored): 5 changes
@@ -212,6 +212,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
 	if err := json.Unmarshal(config, d1); err != nil {
 		return nil, err
 	}
+	layerInfos := m.LayerInfos()
 	i := &types.ImageInspectInfo{
 		Tag:     "",
 		Created: v1.Created,
@@ -219,8 +220,10 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
 		Labels:       v1.Config.Labels,
 		Architecture: v1.Architecture,
 		Os:           v1.OS,
-		Layers:       layerInfosToStrings(m.LayerInfos()),
+		Layers:       layerInfosToStrings(layerInfos),
+		LayersData:   imgInspectLayersFromLayerInfos(layerInfos),
 		Env:          v1.Config.Env,
 		Author:       v1.Author,
 	}
 	return i, nil
 }

vendor/github.com/containers/image/v5/openshift/openshift-copies.go (generated, vendored): 8 changes
@@ -332,7 +332,7 @@ var (
 	errEmptyCluster = errors.New("cluster has no server defined")
 )
 
-//helper for checking certificate/key/CA
+// helper for checking certificate/key/CA
 func validateFileIsReadable(name string) error {
 	answer, err := os.Open(name)
 	defer func() {
@@ -545,8 +545,10 @@ type clientConfigLoadingRules struct {
 // Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
 // Load starts by running the MigrationRules and then
 // takes the loading rules and returns a Config object based on following rules.
-// if the ExplicitPath, return the unmerged explicit file
-// Otherwise, return a merged config based on the Precedence slice
+//
+//   - if the ExplicitPath, return the unmerged explicit file
+//   - Otherwise, return a merged config based on the Precedence slice
+//
 // A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
 // Read errors or files with non-deserializable content produce errors.
 // The first file to set a particular map key wins and map key's value is never changed.

vendor/github.com/containers/image/v5/pkg/docker/config/config.go (generated, vendored): 15 changes
@@ -683,7 +683,7 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
 	// keys we prefer exact matches as well.
 	for _, key := range keys {
 		if val, exists := auths.AuthConfigs[key]; exists {
-			return decodeDockerAuth(val)
+			return decodeDockerAuth(path, key, val)
 		}
 	}
 
@@ -698,7 +698,7 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
 	registry = normalizeRegistry(registry)
 	for k, v := range auths.AuthConfigs {
 		if normalizeAuthFileKey(k, legacyFormat) == registry {
-			return decodeDockerAuth(v)
+			return decodeDockerAuth(path, k, v)
 		}
 	}
 
@@ -729,9 +729,9 @@ func authKeysForKey(key string) (res []string) {
 	return res
 }
 
-// decodeDockerAuth decodes the username and password, which is
-// encoded in base64.
-func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
+// decodeDockerAuth decodes the username and password from conf,
+// which is entry key in path.
+func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) {
 	decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
 	if err != nil {
 		return types.DockerAuthConfig{}, err
@@ -740,6 +740,11 @@ func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
 	parts := strings.SplitN(string(decoded), ":", 2)
 	if len(parts) != 2 {
 		// if it's invalid just skip, as docker does
+		if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those
+			logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing semicolon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log.
+		} else {
+			logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path)
+		}
 		return types.DockerAuthConfig{}, nil
 	}

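The `auth` field being decoded above is simply base64 of `username:password`. A small self-contained sketch of the same decode logic:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// A docker config.json "auth" value is base64("username:password").
	auth := base64.StdEncoding.EncodeToString([]byte("alice:s3cret"))

	decoded, err := base64.StdEncoding.DecodeString(auth)
	if err != nil {
		panic(err)
	}
	// SplitN(..., 2) keeps any ":" inside the password intact.
	parts := strings.SplitN(string(decoded), ":", 2)
	if len(parts) != 2 {
		fmt.Println("malformed entry, skipping as the code above does")
		return
	}
	fmt.Println("user:", parts[0], "password:", parts[1])
}
```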
vendor/github.com/containers/image/v5/signature/policy_eval.go (generated, vendored): 8 changes
@@ -172,10 +172,10 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
 // but it does not necessarily mean that the contents of the signature are
 // consistent with local policy.
 // For example:
-// - Do not use a an existence of an accepted signature to determine whether to run
-// a container based on this image; use IsRunningImageAllowed instead.
-// - Just because a signature is accepted does not automatically mean the contents of the
-// signature are authorized to run code as root, or to affect system or cluster configuration.
+//   - Do not use a an existence of an accepted signature to determine whether to run
+//     a container based on this image; use IsRunningImageAllowed instead.
+//   - Just because a signature is accepted does not automatically mean the contents of the
+//     signature are authorized to run code as root, or to affect system or cluster configuration.
 func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
 	if err := pc.changeState(pcReady, pcInUse); err != nil {
 		return nil, err

vendor/github.com/containers/image/v5/tarball/doc.go (generated, vendored): 1 change
@@ -2,6 +2,7 @@
 // tarballs and an optional template configuration.
 //
 // An example:
+//
 //	package main
 //
 //	import (

vendor/github.com/containers/image/v5/types/types.go (generated, vendored): 39 changes
@@ -177,24 +177,25 @@ type BICReplacementCandidate struct {
 // BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
 //
 // It records two kinds of data:
-// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
-//   One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
-//   This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
-//   or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/
-//
-//   It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
-//   to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
-//
-//   This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
-//   compress/decompress blobs for their own purposes.
-// - Known blob locations, managed by individual transports:
-//   The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
-//   recording transport-specific information that allows the transport to reuse the blob in the future;
-//   then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
-//
-//   Each transport defines its own “scopes” within which blob reuse is possible (e.g. in, the docker/distribution case, blobs
-//   can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+//   - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+//     One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+//     This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+//     or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload)/
+//
+//     It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+//     to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+//     This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+//     compress/decompress blobs for their own purposes.
+//
+//   - Known blob locations, managed by individual transports:
+//     The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+//     recording transport-specific information that allows the transport to reuse the blob in the future;
+//     then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+//     Each transport defines its own “scopes” within which blob reuse is possible (e.g. in, the docker/distribution case, blobs
+//     can be directly reused within a registry, or mounted across registries within a registry server.)
 //
 // None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
 // users of the cache should just fall back to copying the blobs the usual way.
@@ -465,7 +466,17 @@ type ImageInspectInfo struct {
 	Variant       string
 	Os            string
 	Layers        []string
+	LayersData    []ImageInspectLayer
 	Env           []string
 	Author        string
 }
 
+// ImageInspectLayer is a set of metadata describing an image layers' detail
+type ImageInspectLayer struct {
+	MIMEType    string // "" if unknown.
+	Digest      digest.Digest
+	Size        int64 // -1 if unknown.
+	Annotations map[string]string
+}
+
 // DockerAuthConfig contains authorization information for connecting to a registry.

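A short sketch of the new LayersData field in use, populated the same way imgInspectLayersFromLayerInfos does above. It assumes the github.com/containers/image/v5/types and go-digest modules are on the module path:

```go
package main

import (
	"fmt"

	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

func main() {
	layer := types.ImageInspectLayer{
		MIMEType:    "application/vnd.oci.image.layer.v1.tar+gzip",
		Digest:      digest.FromString("example layer"),
		Size:        -1, // -1 if unknown, per the struct comment
		Annotations: map[string]string{"org.example.note": "demo"},
	}
	info := types.ImageInspectInfo{
		Layers:     []string{layer.Digest.String()},
		LayersData: []types.ImageInspectLayer{layer},
	}
	fmt.Printf("%+v\n", info.LayersData[0])
}
```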
vendor/github.com/containers/image/v5/version/version.go (generated, vendored): 2 changes
@@ -6,7 +6,7 @@ const (
 	// VersionMajor is for an API incompatible changes
 	VersionMajor = 5
 	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 22
+	VersionMinor = 23
 	// VersionPatch is for backwards-compatible bug fixes
 	VersionPatch = 0

vendor/github.com/containers/storage/.cirrus.yml (generated, vendored): 2 changes
@@ -132,7 +132,7 @@ lint_task:
 meta_task:
 
     container:
-        image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}"
+        image: "quay.io/libpod/imgts:latest"
         cpu: 1
         memory: 1

vendor/github.com/containers/storage/VERSION (generated, vendored): 2 changes
@@ -1 +1 @@
-1.42.0
+1.43.0

vendor/github.com/containers/storage/containers.go (generated, vendored): 19 changes
@@ -3,7 +3,6 @@ package storage
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sync"
@@ -144,26 +143,26 @@ func copyContainer(c *Container) *Container {
 }
 
 func (c *Container) MountLabel() string {
-	if label, ok := c.Flags["MountLabel"].(string); ok {
+	if label, ok := c.Flags[mountLabelFlag].(string); ok {
 		return label
 	}
 	return ""
 }
 
 func (c *Container) ProcessLabel() string {
-	if label, ok := c.Flags["ProcessLabel"].(string); ok {
+	if label, ok := c.Flags[processLabelFlag].(string); ok {
 		return label
 	}
 	return ""
 }
 
 func (c *Container) MountOpts() []string {
-	switch c.Flags["MountOpts"].(type) {
+	switch c.Flags[mountOptsFlag].(type) {
 	case []string:
-		return c.Flags["MountOpts"].([]string)
+		return c.Flags[mountOptsFlag].([]string)
 	case []interface{}:
 		var mountOpts []string
-		for _, v := range c.Flags["MountOpts"].([]interface{}) {
+		for _, v := range c.Flags[mountOptsFlag].([]interface{}) {
 			if flag, ok := v.(string); ok {
 				mountOpts = append(mountOpts, flag)
 			}
@@ -197,7 +196,7 @@ func (r *containerStore) datapath(id, key string) string {
 func (r *containerStore) Load() error {
 	needSave := false
 	rpath := r.containerspath()
-	data, err := ioutil.ReadFile(rpath)
+	data, err := os.ReadFile(rpath)
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -321,10 +320,10 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
 		return nil, ErrDuplicateID
 	}
 	if options.MountOpts != nil {
-		options.Flags["MountOpts"] = append([]string{}, options.MountOpts...)
+		options.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
 	}
 	if options.Volatile {
-		options.Flags["Volatile"] = true
+		options.Flags[volatileFlag] = true
 	}
 	names = dedupeNames(names)
 	for _, name := range names {
@@ -484,7 +483,7 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
 	if !ok {
 		return nil, ErrContainerUnknown
 	}
-	return ioutil.ReadFile(r.datapath(c.ID, key))
+	return os.ReadFile(r.datapath(c.ID, key))
 }
 
 func (r *containerStore) BigDataSize(id, key string) (int64, error) {

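The string literals become named flag-key constants here. Those constants are presumably declared once elsewhere in the package; a hypothetical reconstruction of what such declarations look like (the names come from the diff, the values are assumed to match the old literals so existing containers.json files keep parsing identically):

```go
package storage

// Hypothetical reconstruction of the flag-key constants used above; the
// values must equal the strings previously written to disk to preserve the
// on-disk JSON format.
const (
	mountLabelFlag   = "MountLabel"
	processLabelFlag = "ProcessLabel"
	mountOptsFlag    = "MountOpts"
	volatileFlag     = "Volatile"
)
```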
vendor/github.com/containers/storage/drivers/aufs/aufs.go (generated, vendored): 11 changes
@@ -29,7 +29,6 @@ import (
 	"fmt"
 	"io"
 	"io/fs"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path"
@@ -47,7 +46,7 @@ import (
 	mountpk "github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"
 	"github.com/containers/storage/pkg/system"
-	"github.com/opencontainers/runc/libcontainer/userns"
+	"github.com/containers/storage/pkg/unshare"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/sirupsen/logrus"
 	"github.com/vbatts/tar-split/tar/storage"
@@ -170,7 +169,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
 
 	for _, path := range []string{"mnt", "diff"} {
 		p := filepath.Join(home, path)
-		entries, err := ioutil.ReadDir(p)
+		entries, err := os.ReadDir(p)
 		if err != nil {
 			logger.WithError(err).WithField("dir", p).Error("error reading dir entries")
 			continue
@@ -200,7 +199,7 @@ func supportsAufs() error {
 	// proc/filesystems for when aufs is supported
 	exec.Command("modprobe", "aufs").Run()
 
-	if userns.RunningInUserNS() {
+	if unshare.IsRootless() {
 		return ErrAufsNested
 	}
 
@@ -730,14 +729,14 @@ func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.M
 // version of aufs.
 func useDirperm() bool {
 	enableDirpermLock.Do(func() {
-		base, err := ioutil.TempDir("", "storage-aufs-base")
+		base, err := os.MkdirTemp("", "storage-aufs-base")
 		if err != nil {
 			logrus.Errorf("Checking dirperm1: %v", err)
 			return
 		}
 		defer os.RemoveAll(base)
 
-		union, err := ioutil.TempDir("", "storage-aufs-union")
+		union, err := os.MkdirTemp("", "storage-aufs-union")
 		if err != nil {
 			logrus.Errorf("Checking dirperm1: %v", err)
 			return

vendor/github.com/containers/storage/drivers/aufs/dirs.go (generated, vendored): 4 changes
@@ -1,17 +1,17 @@
+//go:build linux
 // +build linux
 
 package aufs
 
 import (
 	"bufio"
-	"io/ioutil"
 	"os"
 	"path"
 )
 
 // Return all the directories
 func loadIds(root string) ([]string, error) {
-	dirs, err := ioutil.ReadDir(root)
+	dirs, err := os.ReadDir(root)
 	if err != nil {
 		return nil, err
 	}

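Most of the containers/storage changes in this diff follow the same Go 1.16 io/ioutil deprecation: ioutil.ReadFile/WriteFile/TempDir/TempFile/ReadDir become os.ReadFile/WriteFile/MkdirTemp/CreateTemp/ReadDir. One behavioral difference worth noting: os.ReadDir returns []fs.DirEntry rather than []fs.FileInfo, which is why dir.Mode().IsDir() becomes dir.IsDir() in overlay.go further down. A compact demonstration:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.MkdirTemp replaces ioutil.TempDir.
	dir, err := os.MkdirTemp("", "demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// os.WriteFile / os.ReadFile replace ioutil.WriteFile / ioutil.ReadFile.
	if err := os.WriteFile(dir+"/f", []byte("hi"), 0o600); err != nil {
		panic(err)
	}
	data, err := os.ReadFile(dir + "/f")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))

	// os.ReadDir replaces ioutil.ReadDir and yields fs.DirEntry values,
	// which expose IsDir() directly instead of via Mode().
	entries, err := os.ReadDir(dir)
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir())
	}
}
```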
vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go (generated, vendored): 12 changes (file deleted)
@@ -1,12 +0,0 @@
-// +build !linux
-
-package aufs
-
-import "errors"
-
-// MsRemount declared to specify a non-linux system mount.
-const MsRemount = 0
-
-func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
-	return errors.New("mount is not implemented on this platform")
-}

vendor/github.com/containers/storage/drivers/btrfs/btrfs.go (generated, vendored): 5 changes
@@ -18,7 +18,6 @@ import "C"
 import (
 	"fmt"
 	"io/fs"
-	"io/ioutil"
 	"math"
 	"os"
 	"path"
@@ -524,7 +523,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
 		if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil {
 			return err
 		}
-		if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
+		if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
 			return err
 		}
 	}
@@ -643,7 +642,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
 		return "", fmt.Errorf("%s: not a directory", dir)
 	}
 
-	if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
+	if quota, err := os.ReadFile(d.quotasDirID(id)); err == nil {
 		if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
 			if err := d.enableQuota(); err != nil {
 				return "", err

vendor/github.com/containers/storage/drivers/btrfs/version_none.go (generated, vendored): 3 changes
@@ -1,4 +1,5 @@
-// +build !linux btrfs_noversion !cgo
+//go:build linux && btrfs_noversion && cgo
+// +build linux,btrfs_noversion,cgo
 
 package btrfs

vendor/github.com/containers/storage/drivers/chown.go (generated, vendored): 3 changes
@@ -2,6 +2,7 @@ package graphdriver
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"os"
 
@@ -93,7 +94,7 @@ func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error
 		return err
 	}
 	if len(output) > 0 {
-		return fmt.Errorf(string(output))
+		return errors.New(string(output))
 	}
 
 	return nil

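Passing command output to fmt.Errorf as the format string misinterprets any '%' in the text (and go vet flags the non-constant format string); errors.New treats the text literally. A quick demonstration:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Output from a helper process can legitimately contain '%' characters.
	output := "disk 100% full"

	bad := fmt.Errorf(output)           // vet warning; prints "disk 100%!f(MISSING)ull"
	good := errors.New(output)          // treats the text literally
	wrapped := fmt.Errorf("%s", output) // the other safe spelling

	fmt.Println(bad)
	fmt.Println(good)
	fmt.Println(wrapped)
}
```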
vendor/github.com/containers/storage/drivers/copy/copy_linux.go (generated, vendored): 3 changes
@@ -27,7 +27,6 @@ import (
 	"github.com/containers/storage/pkg/pools"
 	"github.com/containers/storage/pkg/system"
+	"github.com/containers/storage/pkg/unshare"
-	"github.com/opencontainers/runc/libcontainer/userns"
 	"golang.org/x/sys/unix"
 )
 
@@ -207,7 +206,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
 		s.Close()
 
 	case mode&os.ModeDevice != 0:
-		if userns.RunningInUserNS() {
+		if unshare.IsRootless() {
 			// cannot create a device if running in user namespace
 			return nil
 		}

vendor/github.com/containers/storage/drivers/devmapper/device_setup.go (generated, vendored): 14 changes
@@ -8,7 +8,6 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -154,7 +153,7 @@ func readLVMConfig(root string) (directLVMConfig, error) {
 	var cfg directLVMConfig
 
 	p := filepath.Join(root, "setup-config.json")
-	b, err := ioutil.ReadFile(p)
+	b, err := os.ReadFile(p)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return cfg, nil
@@ -166,9 +165,10 @@ func readLVMConfig(root string) (directLVMConfig, error) {
 	if len(b) == 0 {
 		return cfg, nil
 	}
 
-	err = json.Unmarshal(b, &cfg)
-	return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err)
+	if err := json.Unmarshal(b, &cfg); err != nil {
+		return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err)
+	}
+	return cfg, nil
 }
 
 func writeLVMConfig(root string, cfg directLVMConfig) error {
@@ -177,7 +177,7 @@ func writeLVMConfig(root string, cfg directLVMConfig) error {
 	if err != nil {
 		return fmt.Errorf("marshalling direct lvm config: %w", err)
 	}
-	if err := ioutil.WriteFile(p, b, 0600); err != nil {
+	if err := os.WriteFile(p, b, 0600); err != nil {
 		return fmt.Errorf("writing direct lvm config to file: %w", err)
 	}
 	return nil
@@ -241,7 +241,7 @@ func setupDirectLVM(cfg directLVMConfig) error {
 	}
 
 	profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
-	err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
+	err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
 	if err != nil {
 		return fmt.Errorf("writing storage thinp autoextend profile: %w", err)
 	}

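The readLVMConfig change above is a genuine bugfix, not just style: the old code wrapped the json.Unmarshal error unconditionally, and fmt.Errorf("...: %w", nil) still yields a non-nil error, so a successful parse was reported as a failure. A demonstration of why the guard matters:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var v map[string]int
	err := json.Unmarshal([]byte(`{"a":1}`), &v) // succeeds, err == nil

	// Old pattern: always wraps, so callers see a non-nil error on success.
	old := fmt.Errorf("unmarshaling previous device setup config: %w", err)
	fmt.Println(old != nil, old) // true unmarshaling previous device setup config: %!w(<nil>)

	// Fixed pattern: wrap only when err != nil.
	if err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println("ok:", v)
}
```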
vendor/github.com/containers/storage/drivers/devmapper/deviceset.go (generated, vendored): 21 changes
@@ -9,7 +9,6 @@ import (
 	"fmt"
 	"io"
 	"io/fs"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path"
@@ -331,7 +330,7 @@ func (devices *DeviceSet) removeMetadata(info *devInfo) error {
 
 // Given json data and file path, write it to disk
 func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error {
-	tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp")
+	tmpFile, err := os.CreateTemp(devices.metadataDir(), ".tmp")
 	if err != nil {
 		return fmt.Errorf("devmapper: Error creating metadata file: %s", err)
 	}
@@ -630,7 +629,7 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
 
 func (devices *DeviceSet) migrateOldMetaData() error {
 	// Migrate old metadata file
-	jsonData, err := ioutil.ReadFile(devices.oldMetadataFile())
+	jsonData, err := os.ReadFile(devices.oldMetadataFile())
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -955,7 +954,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
 func (devices *DeviceSet) loadMetadata(hash string) *devInfo {
 	info := &devInfo{Hash: hash, devices: devices}
 
-	jsonData, err := ioutil.ReadFile(devices.metadataFile(info))
+	jsonData, err := os.ReadFile(devices.metadataFile(info))
 	if err != nil {
 		logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err)
 		return nil
@@ -1276,11 +1275,11 @@ func (devices *DeviceSet) setupBaseImage() error {
 }
 
 func setCloseOnExec(name string) {
-	fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
-	for _, i := range fileInfos {
-		link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
+	fileEntries, _ := os.ReadDir("/proc/self/fd")
+	for _, e := range fileEntries {
+		link, _ := os.Readlink(filepath.Join("/proc/self/fd", e.Name()))
 		if link == name {
-			fd, err := strconv.Atoi(i.Name())
+			fd, err := strconv.Atoi(e.Name())
 			if err == nil {
 				unix.CloseOnExec(fd)
 			}
@@ -1370,7 +1369,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
 }
 
 func (devices *DeviceSet) loadTransactionMetaData() error {
-	jsonData, err := ioutil.ReadFile(devices.transactionMetaFile())
+	jsonData, err := os.ReadFile(devices.transactionMetaFile())
 	if err != nil {
 		// There is no active transaction. This will be the case
 		// during upgrade.
@@ -1451,7 +1450,7 @@ func (devices *DeviceSet) processPendingTransaction() error {
 }
 
 func (devices *DeviceSet) loadDeviceSetMetaData() error {
-	jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile())
+	jsonData, err := os.ReadFile(devices.deviceSetMetaFile())
 	if err != nil {
 		// For backward compatibility return success if file does
 		// not exist.
@@ -2258,7 +2257,7 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
 }
 
 func (devices *DeviceSet) unmountAndDeactivateAll(dir string) {
-	files, err := ioutil.ReadDir(dir)
+	files, err := os.ReadDir(dir)
 	if err != nil {
 		logrus.Warnf("devmapper: unmountAndDeactivate: %s", err)
 		return

vendor/github.com/containers/storage/drivers/devmapper/driver.go (generated, vendored): 4 changes
@@ -1,10 +1,10 @@
 //go:build linux && cgo
 // +build linux,cgo
 
 package devmapper
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"strconv"
@@ -227,7 +227,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
 	if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
 		// Create an "id" file with the container/image id in it to help reconstruct this in case
 		// of later problems
-		if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
+		if err := os.WriteFile(idFile, []byte(id), 0600); err != nil {
 			d.ctr.Decrement(mp)
 			d.DeviceSet.UnmountDevice(id, mp)
 			return "", err

vendor/github.com/containers/storage/drivers/devmapper/jsoniter.go (generated, vendored): 3 changes
@@ -1,3 +1,6 @@
+//go:build linux && cgo
+// +build linux,cgo
+
 package devmapper
 
 import jsoniter "github.com/json-iterator/go"

vendor/github.com/containers/storage/drivers/driver.go (generated, vendored): 2 changes
@@ -48,7 +48,7 @@ type CreateOpts struct {
 	ignoreChownErrors bool
 }
 
-// MountOpts contains optional arguments for LayerStope.Mount() methods.
+// MountOpts contains optional arguments for Driver.Get() methods.
 type MountOpts struct {
 	// Mount label is the MAC Labels to assign to mount point (SELINUX)
 	MountLabel string

vendor/github.com/containers/storage/drivers/driver_linux.go (generated, vendored): 8 changes
@@ -7,6 +7,7 @@ import (
 	"path/filepath"
 
 	"github.com/containers/storage/pkg/mount"
+	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
@@ -127,9 +128,14 @@ var (
 // GetFSMagic returns the filesystem id given the path.
 func GetFSMagic(rootpath string) (FsMagic, error) {
 	var buf unix.Statfs_t
-	if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+	path := filepath.Dir(rootpath)
+	if err := unix.Statfs(path, &buf); err != nil {
 		return 0, err
 	}
+
+	if _, ok := FsNames[FsMagic(buf.Type)]; !ok {
+		logrus.Debugf("Unknown filesystem type %#x reported for %s", buf.Type, path)
+	}
 	return FsMagic(buf.Type), nil
 }

vendor/github.com/containers/storage/drivers/fsdiff.go (generated, vendored): 15 changes
@@ -10,7 +10,7 @@ import (
 	"github.com/containers/storage/pkg/chrootarchive"
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/ioutils"
-	"github.com/opencontainers/runc/libcontainer/userns"
+	"github.com/containers/storage/pkg/unshare"
 	"github.com/sirupsen/logrus"
 )
 
@@ -33,10 +33,11 @@ type NaiveDiffDriver struct {
 // NewNaiveDiffDriver returns a fully functional driver that wraps the
 // given ProtoDriver and adds the capability of the following methods which
 // it may or may not support on its own:
-// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
-// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
-// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
-// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
+//
+//	Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
+//	Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
+//	ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
+//	DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
 func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver {
 	return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater}
 }
@@ -109,7 +110,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
 		// are extracted from tar's with full second precision on modified time.
 		// We need this hack here to make sure calls within same second receive
 		// correct result.
-		time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
+		time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
 		return err
 	}), nil
 }
@@ -179,7 +180,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
 	}
 
 	tarOptions := &archive.TarOptions{
-		InUserNS:          userns.RunningInUserNS(),
+		InUserNS:          unshare.IsRootless(),
 		IgnoreChownErrors: options.IgnoreChownErrors,
 		ForceMask:         forceMask,
 	}

vendor/github.com/containers/storage/drivers/overlay/check.go (generated, vendored): 13 changes
@@ -6,7 +6,6 @@ package overlay
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path"
 	"path/filepath"
@@ -27,7 +26,7 @@ import (
 // directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR.
 // When these exist naive diff should be used.
 func doesSupportNativeDiff(d, mountOpts string) error {
-	td, err := ioutil.TempDir(d, "opaque-bug-check")
+	td, err := os.MkdirTemp(d, "opaque-bug-check")
 	if err != nil {
 		return err
 	}
@@ -82,7 +81,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
 	}()
 
 	// Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
-	if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
+	if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
 		return fmt.Errorf("failed to write to merged directory: %w", err)
 	}
 
@@ -121,7 +120,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
 // copying up a file from a lower layer unless/until its contents are being
 // modified
 func doesMetacopy(d, mountOpts string) (bool, error) {
-	td, err := ioutil.TempDir(d, "metacopy-check")
+	td, err := os.MkdirTemp(d, "metacopy-check")
 	if err != nil {
 		return false, err
 	}
@@ -158,7 +157,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
 	}
 	if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
 		if errors.Is(err, unix.EINVAL) {
-			logrus.Info("metacopy option not supported on this kernel", mountOpts)
+			logrus.Infof("overlay: metacopy option not supported on this kernel, checked using options %q", mountOpts)
 			return false, nil
 		}
 		return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err)
@@ -186,7 +185,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
 
 // doesVolatile checks if the filesystem supports the "volatile" mount option
 func doesVolatile(d string) (bool, error) {
-	td, err := ioutil.TempDir(d, "volatile-check")
+	td, err := os.MkdirTemp(d, "volatile-check")
 	if err != nil {
 		return false, err
 	}
@@ -224,7 +223,7 @@ func doesVolatile(d string) (bool, error) {
 // supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of
 // a idmapped lower layer.
 func supportsIdmappedLowerLayers(home string) (bool, error) {
-	layerDir, err := ioutil.TempDir(home, "compat")
+	layerDir, err := os.MkdirTemp(home, "compat")
 	if err != nil {
 		return false, err
 	}

vendor/github.com/containers/storage/drivers/overlay/check_116.go (generated, vendored): 7 changes
@@ -1,14 +1,17 @@
-// +build go1.16
+//go:build linux
+// +build linux
 
 package overlay
 
 import (
+	"errors"
 	"io/fs"
 	"path/filepath"
 	"strings"
 
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/system"
+	"golang.org/x/sys/unix"
 )
 
 func scanForMountProgramIndicators(home string) (detected bool, err error) {
@@ -26,7 +29,7 @@ func scanForMountProgramIndicators(home string) (detected bool, err error) {
 	}
 	if d.IsDir() {
 		xattrs, err := system.Llistxattr(path)
-		if err != nil {
+		if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
 			return err
 		}
 		for _, xattr := range xattrs {

vendor/github.com/containers/storage/drivers/overlay/idmapped_utils.go (generated, vendored): 20 changes
@@ -5,7 +5,6 @@ package overlay
 
 import (
 	"fmt"
-	"io/ioutil"
 	"os"
 	"syscall"
 	"unsafe"
@@ -21,17 +20,6 @@ type attr struct {
 	userNs uint64
 }
 
-const (
-	// _MOUNT_ATTR_IDMAP - Idmap mount to @userns_fd in struct mount_attr
-	_MOUNT_ATTR_IDMAP = 0x00100000 //nolint:golint
-
-	// _OPEN_TREE_CLONE - Clone the source path mount
-	_OPEN_TREE_CLONE = 0x00000001 //nolint:golint
-
-	// _MOVE_MOUNT_F_EMPTY_PATH - Move the path referenced by the fd
-	_MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 //nolint:golint
-)
-
 // openTree is a wrapper for the open_tree syscall
 func openTree(path string, flags int) (fd int, err error) {
 	var _p0 *byte
@@ -61,7 +49,7 @@ func moveMount(fdTree int, target string) (err error) {
 		return err
 	}
 
-	flags := _MOVE_MOUNT_F_EMPTY_PATH
+	flags := unix.MOVE_MOUNT_F_EMPTY_PATH
 
 	_, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT),
 		uintptr(fdTree), uintptr(unsafe.Pointer(_p1)),
@@ -98,14 +86,14 @@ func createIDMappedMount(source, target string, pid int) error {
 	}
 
 	var attr attr
-	attr.attrSet = _MOUNT_ATTR_IDMAP
+	attr.attrSet = unix.MOUNT_ATTR_IDMAP
 	attr.attrClr = 0
 	attr.propagation = 0
 	attr.userNs = uint64(userNsFile.Fd())
 
 	defer userNsFile.Close()
 
-	targetDirFd, err := openTree(source, _OPEN_TREE_CLONE|unix.AT_RECURSIVE)
+	targetDirFd, err := openTree(source, unix.OPEN_TREE_CLONE)
 	if err != nil {
 		return err
 	}
@@ -144,7 +132,7 @@ func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int,
 		for _, m := range idmap {
 			mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
 		}
-		return ioutil.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
+		return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
 	}
 	if err := writeMappings("uid_map", uidMaps); err != nil {
 		cleanupFunc()

vendor/github.com/containers/storage/drivers/overlay/jsoniter.go (generated, vendored): 3 changes
@@ -1,3 +1,6 @@
+//go:build linux
+// +build linux
+
 package overlay
 
 import jsoniter "github.com/json-iterator/go"

79
vendor/github.com/containers/storage/drivers/overlay/overlay.go
generated
vendored
79
vendor/github.com/containers/storage/drivers/overlay/overlay.go
generated
vendored
@@ -9,7 +9,6 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
@@ -34,7 +33,6 @@ import (
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/runc/libcontainer/userns"
|
||||
"github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -347,7 +345,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
|
||||
logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
|
||||
if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
@@ -580,11 +578,11 @@ func cachedFeatureSet(feature string, set bool) string {
|
||||
}
|
||||
|
||||
func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) {
|
||||
content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
|
||||
content, err := os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
|
||||
if err == nil {
|
||||
return true, string(content), nil
|
||||
}
|
||||
content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
|
||||
content, err = os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
|
||||
if err == nil {
|
||||
return false, string(content), nil
|
||||
}
|
||||
@@ -608,7 +606,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
|
||||
}
|
||||
|
||||
var contents string
|
||||
flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home))
|
||||
flagContent, err := os.ReadFile(getMountProgramFlagFile(home))
|
||||
if err == nil {
|
||||
contents = strings.TrimSpace(string(flagContent))
|
||||
}
|
||||
@@ -621,7 +619,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return false, err
|
||||
}
|
||||
if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
|
||||
if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
|
||||
return false, err
|
||||
}
|
||||
if needsMountProgram {
|
||||
@@ -657,7 +655,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
|
||||
logLevel = logrus.DebugLevel
|
||||
}
|
||||
|
||||
layerDir, err := ioutil.TempDir(home, "compat")
|
||||
layerDir, err := os.MkdirTemp(home, "compat")
|
||||
if err != nil {
|
||||
patherr, ok := err.(*os.PathError)
|
||||
if ok && patherr.Err == syscall.ENOSPC {
|
||||
@@ -716,7 +714,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
|
||||
logrus.Debugf("overlay: test mount with multiple lowers succeeded")
|
||||
return supportsDType, nil
|
||||
}
|
||||
logrus.Debugf("overlay: test mount with multiple lowers failed %v", err)
|
||||
logrus.Debugf("overlay: test mount with multiple lowers failed: %v", err)
|
||||
}
|
||||
flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir)
|
||||
if selinux.GetEnabled() {
|
||||
@@ -728,7 +726,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
|
||||
logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower")
|
||||
return supportsDType, fmt.Errorf("kernel too old to provide multiple lowers feature for overlay: %w", graphdriver.ErrNotSupported)
|
||||
}
|
||||
logrus.Debugf("overlay: test mount with a single lower failed %v", err)
|
||||
logrus.Debugf("overlay: test mount with a single lower failed: %v", err)
|
||||
}
|
||||
logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home)
|
||||
return supportsDType, fmt.Errorf("'overlay' is not supported over %s at %q: %w", backingFs, home, graphdriver.ErrIncompatibleFS)
|
||||
@@ -1009,7 +1007,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
||||
}
|
||||
|
||||
// Write link id to link file
|
||||
if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
|
||||
if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -1030,7 +1028,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
|
||||
return err
|
||||
}
|
||||
if lower != "" {
|
||||
if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
|
||||
if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -1073,7 +1071,7 @@ func (d *Driver) getLower(parent string) (string, error) {
|
||||
}
|
||||
|
||||
// Read Parent link fileA
|
||||
parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
|
||||
parentLink, err := os.ReadFile(path.Join(parentDir, "link"))
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return "", err
|
||||
@@ -1082,14 +1080,14 @@ func (d *Driver) getLower(parent string) (string, error) {
|
||||
if err := d.recreateSymlinks(); err != nil {
|
||||
return "", fmt.Errorf("recreating the links: %w", err)
|
||||
}
|
||||
parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link"))
|
||||
parentLink, err = os.ReadFile(path.Join(parentDir, "link"))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
lowers := []string{path.Join(linkDir, string(parentLink))}
|
||||
|
||||
parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
|
||||
parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile))
|
||||
if err == nil {
|
||||
parentLowers := strings.Split(string(parentLower), ":")
|
||||
lowers = append(lowers, parentLowers...)
|
||||
@@ -1118,7 +1116,7 @@ func (d *Driver) dir2(id string) (string, bool) {
|
||||
|
||||
func (d *Driver) getLowerDirs(id string) ([]string, error) {
|
||||
var lowersArray []string
|
||||
lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
|
||||
lowers, err := os.ReadFile(path.Join(d.dir(id), lowerFile))
|
||||
if err == nil {
|
||||
for _, s := range strings.Split(string(lowers), ":") {
|
||||
lower := d.dir(s)
|
||||
@@ -1187,7 +1185,7 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa
|
||||
// Remove cleans the directories that are created for this id.
|
||||
func (d *Driver) Remove(id string) error {
|
||||
dir := d.dir(id)
|
||||
lid, err := ioutil.ReadFile(path.Join(dir, "link"))
|
||||
lid, err := os.ReadFile(path.Join(dir, "link"))
|
||||
if err == nil {
|
||||
if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
|
||||
logrus.Debugf("Failed to remove link: %v", err)
|
||||
@@ -1210,7 +1208,7 @@ func (d *Driver) recreateSymlinks() error {
|
||||
const maxIterations = 10
|
||||
|
||||
// List all the directories under the home directory
|
||||
dirs, err := ioutil.ReadDir(d.home)
|
||||
dirs, err := os.ReadDir(d.home)
|
||||
if err != nil {
|
||||
return fmt.Errorf("reading driver home directory %q: %w", d.home, err)
|
||||
}
|
||||
@@ -1229,11 +1227,11 @@ func (d *Driver) recreateSymlinks() error {
|
||||
// the layer's "link" file that points to the layer's "diff" directory.
|
||||
for _, dir := range dirs {
|
||||
// Skip over the linkDir and anything that is not a directory
|
||||
if dir.Name() == linkDir || !dir.Mode().IsDir() {
|
||||
if dir.Name() == linkDir || !dir.IsDir() {
|
||||
continue
|
||||
}
|
||||
// Read the "link" file under each layer to get the name of the symlink
|
||||
data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link"))
|
||||
data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link"))
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
|
||||
continue
|
||||
@@ -1258,7 +1256,7 @@ func (d *Driver) recreateSymlinks() error {
|
||||
linkDirFullPath := filepath.Join(d.home, "l")
|
||||
// Now check if we somehow lost a "link" file, by making sure
|
||||
// that each symlink we have corresponds to one.
|
||||
links, err := ioutil.ReadDir(linkDirFullPath)
|
||||
links, err := os.ReadDir(linkDirFullPath)
|
||||
if err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
continue
|
||||
@@ -1288,11 +1286,11 @@ func (d *Driver) recreateSymlinks() error {
|
||||
// it has the basename of our symlink in it.
|
||||
targetID := targetComponents[1]
|
||||
linkFile := filepath.Join(d.dir(targetID), "link")
|
||||
data, err := ioutil.ReadFile(linkFile)
|
||||
data, err := os.ReadFile(linkFile)
|
||||
if err != nil || string(data) != link.Name() {
|
||||
// NOTE: If two or more links point to the same target, we will update linkFile
|
||||
// with every value of link.Name(), and set madeProgress = true every time.
|
||||
if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
|
||||
if err := os.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
|
||||
continue
|
||||
}
|
||||
@@ -1312,7 +1310,7 @@ func (d *Driver) recreateSymlinks() error {
|
||||
}
|
||||
|
||||
// Get creates and mounts the required file system for the given id and returns the mount path.
|
||||
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
|
||||
func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
|
||||
return d.get(id, false, options)
|
||||
}
|
||||
|
||||
@@ -1346,7 +1344,16 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 	}
 	if !d.usingMetacopy {
 		if hasMetacopyOption(optsList) {
-			logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel")
+			if d.options.mountProgram == "" {
+				release := ""
+				var uts unix.Utsname
+				if err := unix.Uname(&uts); err == nil {
+					release = " " + string(uts.Release[:]) + " " + string(uts.Version[:])
+				}
+				logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel"+release)
+			} else {
+				logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it")
+			}
 		}
 		optsList = stripOption(optsList, "metacopy=on")
 	}
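The widened metacopy hunk now distinguishes the native-kernel case from the mount-program case and appends the running kernel release to the warning. A hedged sketch of reading that release with unix.Uname (Linux-only): Utsname fields are fixed-size, NUL-padded byte arrays, which this sketch trims; the vendored one-liner concatenates them raw, so its log line can carry trailing NUL bytes.

    //go:build linux

    package main

    import (
    	"bytes"
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	var uts unix.Utsname
    	if err := unix.Uname(&uts); err != nil {
    		return
    	}
    	// Trim the NUL padding before building a printable string.
    	release := string(bytes.TrimRight(uts.Release[:], "\x00"))
    	version := string(bytes.TrimRight(uts.Version[:], "\x00"))
    	fmt.Println(release, version)
    }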
@@ -1358,7 +1365,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		}
 	}

-	lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
+	lowers, err := os.ReadFile(path.Join(dir, lowerFile))
 	if err != nil && !os.IsNotExist(err) {
 		return "", err
 	}
@@ -1374,7 +1381,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO

 	// Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers
 	// lists that we're building. "diff" itself is the upper, so it won't be in the lists.
-	link, err := ioutil.ReadFile(path.Join(dir, "link"))
+	link, err := os.ReadFile(path.Join(dir, "link"))
 	if err != nil {
 		if !os.IsNotExist(err) {
 			return "", err
@@ -1383,7 +1390,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		if err := d.recreateSymlinks(); err != nil {
 			return "", fmt.Errorf("recreating the links: %w", err)
 		}
-		link, err = ioutil.ReadFile(path.Join(dir, "link"))
+		link, err = os.ReadFile(path.Join(dir, "link"))
 		if err != nil {
 			return "", err
 		}
@@ -1608,7 +1615,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
 		diffDir := path.Join(id, "diff")
 		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir)
 	} else {
-		opts = fmt.Sprintf("lowerdir=%s", strings.Join(relLowers, ":"))
+		opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(relLowers, ":"))
 	}
 	if len(optsList) > 0 {
 		opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
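The lowerdir hunk above changes what a read-only mount sees: overlayfs treats the leftmost lowerdir entry as the topmost layer, so prepending the layer's own diff directory makes its content shadow the parent layers when no upperdir/workdir is supplied. A standalone illustration of the option string (directory names hypothetical):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	diffDir := "abc/diff"                   // hypothetical layer diff dir
    	relLowers := []string{"l/AAA", "l/BBB"} // hypothetical parent links
    	// overlayfs applies lowerdir entries left to right, leftmost on top,
    	// so putting diffDir first lets this layer's files win over its
    	// parents' on a read-only mount.
    	opts := fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(relLowers, ":"))
    	fmt.Println(opts) // lowerdir=abc/diff:l/AAA:l/BBB
    }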
@@ -1649,7 +1656,7 @@ func (d *Driver) Put(id string) error {
 	if count := d.ctr.Decrement(mountpoint); count > 0 {
 		return nil
 	}
-	if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
+	if _, err := os.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
 		return err
 	}

@@ -1658,7 +1665,7 @@ func (d *Driver) Put(id string) error {
 	mappedRoot := filepath.Join(d.home, id, "mapped")
 	// It should not happen, but cleanup any mapped mount if it was leaked.
 	if _, err := os.Stat(mappedRoot); err == nil {
-		mounts, err := ioutil.ReadDir(mappedRoot)
+		mounts, err := os.ReadDir(mappedRoot)
 		if err == nil {
 			// Go through all of the mapped mounts.
 			for _, m := range mounts {
@@ -1806,7 +1813,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
 	if err != nil && !os.IsExist(err) {
 		return graphdriver.DriverWithDifferOutput{}, err
 	}
-	applyDir, err = ioutil.TempDir(d.getStagingDir(), "")
+	applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
 	if err != nil {
 		return graphdriver.DriverWithDifferOutput{}, err
 	}
@@ -1826,7 +1833,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
 		GIDMaps:           idMappings.GIDs(),
 		IgnoreChownErrors: d.options.ignoreChownErrors,
 		WhiteoutFormat:    d.getWhiteoutFormat(),
-		InUserNS:          userns.RunningInUserNS(),
+		InUserNS:          unshare.IsRootless(),
 	})
 	out.Target = applyDir
 	return out, err
@@ -1884,7 +1891,7 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
 		IgnoreChownErrors: d.options.ignoreChownErrors,
 		ForceMask:         d.options.forceMask,
 		WhiteoutFormat:    d.getWhiteoutFormat(),
-		InUserNS:          userns.RunningInUserNS(),
+		InUserNS:          unshare.IsRootless(),
 	}); err != nil {
 		return 0, err
 	}
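The recurring InUserNS hunks swap userns.RunningInUserNS(), which only reports whether the process is inside a user namespace, for unshare.IsRootless() from containers/storage, which also counts running as a non-root user before any namespace is created. A rough standalone approximation of the broader check (the euid test is an assumption for illustration; the real helper also consults containers/storage's user-namespace environment markers):

    package main

    import (
    	"fmt"
    	"os"
    )

    // isRootlessLike sketches the broader predicate: a non-root effective
    // uid counts as rootless even before any user namespace is entered.
    // (Assumption for illustration; not the vendored implementation.)
    func isRootlessLike() bool {
    	return os.Geteuid() != 0
    }

    func main() {
    	fmt.Println("rootless:", isRootlessLike())
    }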
@@ -2167,7 +2174,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
 	}
-	// tell the additional layer store that we use this layer.
-	if err := ioutil.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
+	// mark this layer as "additional layer"
+	if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
 		return err
 	}
 	notifyUseAdditionalLayer(al.path)
@@ -2175,7 +2182,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
 }

 func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) {
-	al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer"))
+	al, err := os.ReadFile(path.Join(d.dir(id), "additionallayer"))
 	if err != nil {
 		return "", err
 	}
5 vendor/github.com/containers/storage/drivers/quota/projectquota.go (generated, vendored)
@@ -52,7 +52,6 @@ struct fsxattr {
 import "C"
 import (
 	"fmt"
-	"io/ioutil"
 	"math"
 	"os"
 	"path"
@@ -123,11 +122,9 @@ func generateUniqueProjectID(path string) (uint32, error) {
 // This is a way to prevent xfs_quota management from conflicting with
 // containers/storage.
-
+//
 // Then try to create a test directory with the next project id and set a quota
 // on it. If that works, continue to scan existing containers to map allocated
 // project ids.
-//
 func NewControl(basePath string) (*Control, error) {
 	//
 	// Get project id of parent dir as minimal id to be used by driver
@@ -336,7 +333,7 @@ func setProjectID(targetPath string, projectID uint32) error {
 // findNextProjectID - find the next project id to be used for containers
 // by scanning driver home directory to find used project ids
 func (q *Control) findNextProjectID(home string) error {
-	files, err := ioutil.ReadDir(home)
+	files, err := os.ReadDir(home)
 	if err != nil {
 		return fmt.Errorf("read directory failed : %s", home)
 	}
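Besides dropping io/ioutil, the comment churn in the NewControl hunk is the Go 1.19 gofmt doc-comment reflow: the separator line inside the doc comment becomes "//" so both paragraphs stay attached to the declaration. A small illustration (stub declaration, hypothetical):

    package quota

    // NewControl initializes project-quota support (illustrative stub only).
    //
    // Keeping the separator line as "//" rather than fully blank keeps both
    // paragraphs inside one doc comment; a blank line would end the comment
    // group and detach the first paragraph from the declaration. This is
    // the Go 1.19 gofmt doc-comment style seen in the hunk above.
    func NewControl(basePath string) error {
    	_ = basePath
    	return nil
    }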
13 vendor/github.com/containers/storage/drivers/windows/windows.go (generated, vendored)
@@ -10,7 +10,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 	"path/filepath"
@@ -24,7 +23,7 @@ import (
 	"github.com/Microsoft/go-winio"
 	"github.com/Microsoft/go-winio/backuptar"
 	"github.com/Microsoft/hcsshim"
-	"github.com/containers/storage/drivers"
+	graphdriver "github.com/containers/storage/drivers"
 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/directory"
 	"github.com/containers/storage/pkg/idtools"
@@ -475,7 +474,7 @@ func (d *Driver) Put(id string) error {
 // We use this opportunity to cleanup any -removing folders which may be
 // still left if the daemon was killed while it was removing a layer.
 func (d *Driver) Cleanup() error {
-	items, err := ioutil.ReadDir(d.info.HomeDir)
+	items, err := os.ReadDir(d.info.HomeDir)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil
@@ -870,7 +869,7 @@ func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ..

 // resolveID computes the layerID information based on the given id.
 func (d *Driver) resolveID(id string) (string, error) {
-	content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
+	content, err := os.ReadFile(filepath.Join(d.dir(id), "layerID"))
 	if os.IsNotExist(err) {
 		return id, nil
 	} else if err != nil {
@@ -881,13 +880,13 @@ func (d *Driver) resolveID(id string) (string, error) {

 // setID stores the layerId in disk.
 func (d *Driver) setID(id, altID string) error {
-	return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
+	return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
 }

 // getLayerChain returns the layer chain information.
 func (d *Driver) getLayerChain(id string) ([]string, error) {
 	jPath := filepath.Join(d.dir(id), "layerchain.json")
-	content, err := ioutil.ReadFile(jPath)
+	content, err := os.ReadFile(jPath)
 	if os.IsNotExist(err) {
 		return nil, nil
 	} else if err != nil {
@@ -911,7 +910,7 @@ func (d *Driver) setLayerChain(id string, chain []string) error {
 	}

 	jPath := filepath.Join(d.dir(id), "layerchain.json")
-	err = ioutil.WriteFile(jPath, content, 0600)
+	err = os.WriteFile(jPath, content, 0600)
 	if err != nil {
 		return fmt.Errorf("unable to write layerchain file - %s", err)
 	}
2 vendor/github.com/containers/storage/drivers/zfs/zfs.go (generated, vendored)
@@ -18,7 +18,7 @@ import (
 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/mount"
 	"github.com/containers/storage/pkg/parsers"
-	"github.com/mistifyio/go-zfs"
+	zfs "github.com/mistifyio/go-zfs/v3"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
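The single zfs.go hunk is Go semantic import versioning at work: from major version 2 onward, a module's import path carries the version suffix, and the explicit alias keeps the package name at call sites unchanged. A minimal illustration:

    package main

    // Semantic import versioning: major versions >= 2 of a Go module carry
    // the version in the import path, so v3 of go-zfs lives under ".../v3".
    // The explicit "zfs" alias keeps the identifier at call sites identical
    // to the pre-module import, so only the import line changes.
    import (
    	"fmt"

    	zfs "github.com/mistifyio/go-zfs/v3"
    )

    // Referencing an exported type shows the aliased package is the same
    // "zfs" identifier the driver code already uses.
    var _ *zfs.Dataset

    func main() {
    	fmt.Println("go-zfs imported via its /v3 module path")
    }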
9 vendor/github.com/containers/storage/images.go (generated, vendored)
@@ -3,7 +3,6 @@ package storage
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -242,8 +241,8 @@ func (i *Image) recomputeDigests() error {
 		if !bigDataNameIsManifest(name) {
 			continue
 		}
-		if digest.Validate() != nil {
-			return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, digest.Validate())
+		if err := digest.Validate(); err != nil {
+			return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, err)
 		}
 		// Deduplicate the digest values.
 		if _, known := digests[digest]; !known {
@@ -261,7 +260,7 @@ func (i *Image) recomputeDigests() error {
 func (r *imageStore) Load() error {
 	shouldSave := false
 	rpath := r.imagespath()
-	data, err := ioutil.ReadFile(rpath)
+	data, err := os.ReadFile(rpath)
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -636,7 +635,7 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) {
 	if !ok {
 		return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
 	}
-	return ioutil.ReadFile(r.datapath(image.ID, key))
+	return os.ReadFile(r.datapath(image.ID, key))
 }

 func (r *imageStore) BigDataSize(id, key string) (int64, error) {
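The recomputeDigests hunk fixes a subtle error-handling bug on top of the ioutil swap: the old code invoked digest.Validate() twice, so the value interpolated via %w was the result of a second, independent call rather than the error that had just been tested. Capturing the error once avoids both the repeated work and the mismatch. A minimal standalone sketch of the pattern:

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errBad = errors.New("bad digest")

    func validate() error { return errBad } // stand-in for digest.Validate()

    func main() {
    	// Capture the error once; do not call validate() again inside Errorf.
    	// Re-invoking the producer repeats the work and could in principle
    	// wrap a different value than the one just tested.
    	if err := validate(); err != nil {
    		fmt.Println(fmt.Errorf("validating: %w", err))
    	}
    }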
25 vendor/github.com/containers/storage/layers.go (generated, vendored)
@@ -5,7 +5,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 	"path/filepath"
@@ -344,7 +343,15 @@ func (r *layerStore) layerspath() string {
 func (r *layerStore) Load() error {
 	shouldSave := false
 	rpath := r.layerspath()
-	data, err := ioutil.ReadFile(rpath)
+	info, err := os.Stat(rpath)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+	} else {
+		r.layerspathModified = info.ModTime()
+	}
+	data, err := os.ReadFile(rpath)
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -435,7 +442,7 @@ func (r *layerStore) LoadLocked() error {
 func (r *layerStore) loadMounts() error {
 	mounts := make(map[string]*Layer)
 	mpath := r.mountspath()
-	data, err := ioutil.ReadFile(mpath)
+	data, err := os.ReadFile(mpath)
 	if err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -556,6 +563,8 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
 		uidMap:   copyIDMap(s.uidMap),
 		gidMap:   copyIDMap(s.gidMap),
 	}
+	rlstore.Lock()
+	defer rlstore.Unlock()
 	if err := rlstore.Load(); err != nil {
 		return nil, err
 	}
@@ -577,6 +586,8 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL
 		bymount: make(map[string]*Layer),
 		byname:  make(map[string]*Layer),
 	}
+	rlstore.RLock()
+	defer rlstore.Unlock()
 	if err := rlstore.Load(); err != nil {
 		return nil, err
 	}
@@ -746,7 +757,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
 		templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
 		templateCompressionType = templateLayer.CompressionType
 		templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
-		templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID))
+		templateTSdata, tserr = os.ReadFile(r.tspath(templateLayer.ID))
 		if tserr != nil && !os.IsNotExist(tserr) {
 			return nil, -1, tserr
 		}
@@ -1381,6 +1392,9 @@ func (r *layerStore) Wipe() error {
 	for id := range r.byid {
 		ids = append(ids, id)
 	}
+	sort.Slice(ids, func(i, j int) bool {
+		return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created)
+	})
 	for _, id := range ids {
 		if err := r.Delete(id); err != nil {
 			return err
@@ -1660,7 +1674,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
 	if compressedDigester != nil {
 		compressedWriter = compressedDigester.Hash()
 	} else {
-		compressedWriter = ioutil.Discard
+		compressedWriter = io.Discard
 	}
 	compressedCounter := ioutils.NewWriteCounter(compressedWriter)
 	defragmented = io.TeeReader(defragmented, compressedCounter)
@@ -1924,7 +1938,6 @@ func (r *layerStore) Modified() (bool, error) {
 	}
 	if info != nil {
 		tmodified = info.ModTime() != r.layerspathModified
-		r.layerspathModified = info.ModTime()
 	}

 	return tmodified, nil
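Two layers.go hunks go beyond the ioutil migration: Load() now stamps the store file's mtime itself (so the Modified() hunk at the end stops re-stamping it and only compares), and Wipe() sorts layer IDs newest-first before deleting. Since child layers are always created after their parents, newest-first deletion removes children before their parents. A standalone sketch of that ordering (names hypothetical):

    package main

    import (
    	"fmt"
    	"sort"
    	"time"
    )

    func main() {
    	created := map[string]time.Time{
    		"parent": time.Now().Add(-2 * time.Hour),
    		"child":  time.Now().Add(-1 * time.Hour),
    	}
    	ids := []string{"parent", "child"}
    	// Newest first: children were created after their parents, so they
    	// are deleted first and no layer is removed while still referenced.
    	sort.Slice(ids, func(i, j int) bool {
    		return created[ids[i]].After(created[ids[j]])
    	})
    	fmt.Println(ids) // [child parent]
    }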
23 vendor/github.com/containers/storage/pkg/archive/archive.go (generated, vendored)
@@ -9,7 +9,6 @@ import (
 	"fmt"
 	"io"
 	"io/fs"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -25,7 +24,6 @@ import (
 	"github.com/containers/storage/pkg/system"
 	"github.com/containers/storage/pkg/unshare"
 	gzip "github.com/klauspost/pgzip"
-	"github.com/opencontainers/runc/libcontainer/userns"
 	"github.com/sirupsen/logrus"
 	"github.com/ulikunitz/xz"
 )
@@ -77,6 +75,7 @@ const (
 	solaris = "solaris"
 	windows = "windows"
 	darwin  = "darwin"
+	freebsd = "freebsd"
 )

 var xattrsToIgnore = map[string]interface{}{
@@ -484,7 +483,7 @@ func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *
 }

 // canonicalTarName provides a platform-independent and consistent posix-style
-//path for files and directories to be archived regardless of the platform.
+// path for files and directories to be archived regardless of the platform.
 func canonicalTarName(name string, isDir bool) (string, error) {
 	name, err := CanonicalTarNameForPath(name)
 	if err != nil {
@@ -673,7 +672,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
 		if !strings.HasPrefix(targetPath, extractDir) {
 			return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
 		}
-		if err := os.Link(targetPath, path); err != nil {
+		if err := handleLLink(targetPath, path); err != nil {
 			return err
 		}
@@ -1107,7 +1106,9 @@ loop:
 // Untar reads a stream of bytes from `archive`, parses it as a tar archive,
 // and unpacks it into the directory at `dest`.
 // The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
+//
+//	identity (uncompressed), gzip, bzip2, xz.
+//
 // FIXME: specify behavior when target path exists vs. doesn't exist.
 func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
 	return untarHandler(tarArchive, dest, options, true)
@@ -1159,7 +1160,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
 		GIDMaps:     tarMappings.GIDs(),
 		Compression: Uncompressed,
 		CopyPass:    true,
-		InUserNS:    userns.RunningInUserNS(),
+		InUserNS:    unshare.IsRootless(),
 	}
 	archive, err := TarWithOptions(src, options)
 	if err != nil {
@@ -1174,7 +1175,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
 		UIDMaps:   untarMappings.UIDs(),
 		GIDMaps:   untarMappings.GIDs(),
 		ChownOpts: archiver.ChownOpts,
-		InUserNS:  userns.RunningInUserNS(),
+		InUserNS:  unshare.IsRootless(),
 	}
 	return archiver.Untar(archive, dst, options)
 }
@@ -1194,7 +1195,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
 		UIDMaps:   untarMappings.UIDs(),
 		GIDMaps:   untarMappings.GIDs(),
 		ChownOpts: archiver.ChownOpts,
-		InUserNS:  userns.RunningInUserNS(),
+		InUserNS:  unshare.IsRootless(),
 	}
 	return archiver.Untar(archive, dst, options)
 }
@@ -1294,7 +1295,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
 		UIDMaps:              archiver.UntarIDMappings.UIDs(),
 		GIDMaps:              archiver.UntarIDMappings.GIDs(),
 		ChownOpts:            archiver.ChownOpts,
-		InUserNS:             userns.RunningInUserNS(),
+		InUserNS:             unshare.IsRootless(),
 		NoOverwriteDirNonDir: true,
 	}
 	err = archiver.Untar(r, filepath.Dir(dst), options)
@@ -1348,7 +1349,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
 // of that file as an archive. The archive can only be read once - as soon as reading completes,
 // the file will be deleted.
 func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
-	f, err := ioutil.TempFile(dir, "")
+	f, err := os.CreateTemp(dir, "")
 	if err != nil {
 		return nil, err
 	}
@@ -1472,7 +1473,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
 		err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
 	}
 	hashWorker.Wait()
-	if err == nil {
+	if err == nil && hashError != nil {
 		err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
 	}
 	return err
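The final archive.go hunk is a genuine bug fix: with the old guard, a fully successful copy (err == nil, hashError == nil) still manufactured a non-nil error wrapping a nil hashError. The added hashError != nil condition keeps the success path clean. A minimal reproduction of the fixed guard:

    package main

    import "fmt"

    func wrap(err, hashError error) error {
    	// Fixed guard: only report a digest failure when one occurred.
    	// With the old `if err == nil` guard alone, a fully successful
    	// copy returned fmt.Errorf wrapping a nil hashError, i.e. a
    	// non-nil error for a successful operation.
    	if err == nil && hashError != nil {
    		err = fmt.Errorf("calculating digest while copying: %w", hashError)
    	}
    	return err
    }

    func main() {
    	fmt.Println(wrap(nil, nil)) // <nil>: the success path stays clean
    }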
28 vendor/github.com/containers/storage/pkg/archive/archive_freebsd.go (generated, vendored)
@@ -1,3 +1,4 @@
+//go:build freebsd
 // +build freebsd

 package archive
@@ -8,10 +9,11 @@ import (
 	"os"
 	"path/filepath"
 	"syscall"
+	"unsafe"

 	"github.com/containers/storage/pkg/idtools"
 	"github.com/containers/storage/pkg/system"
-	"github.com/opencontainers/runc/libcontainer/userns"
+	"github.com/containers/storage/pkg/unshare"
 	"golang.org/x/sys/unix"
 )

@@ -87,7 +89,7 @@ func minor(device uint64) uint64 {
 // handleTarTypeBlockCharFifo is an OS-specific helper function used by
 // createTarFile to handle the following types of header: Block; Char; Fifo
 func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
-	if userns.RunningInUserNS() {
+	if unshare.IsRootless() {
 		// cannot create a device if running in user namespace
 		return nil
 	}
@@ -110,16 +112,18 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *
 	if forceMask != nil {
 		permissionsMask = *forceMask
 	}
-	if hdr.Typeflag == tar.TypeLink {
-		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
-			if err := os.Chmod(path, permissionsMask); err != nil {
-				return err
-			}
-		}
-	} else if hdr.Typeflag != tar.TypeSymlink {
-		if err := os.Chmod(path, permissionsMask); err != nil {
-			return err
-		}
+	p, err := unix.BytePtrFromString(path)
+	if err != nil {
+		return err
+	}
+	_, _, e1 := unix.Syscall(unix.SYS_LCHMOD, uintptr(unsafe.Pointer(p)), uintptr(permissionsMask), 0)
+	if e1 != 0 {
+		return e1
 	}
 	return nil
 }
+
+// Hardlink without following symlinks
+func handleLLink(targetPath string, path string) error {
+	return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0)
+}
13 vendor/github.com/containers/storage/pkg/archive/archive_unix.go (generated, vendored)
@@ -1,3 +1,4 @@
+//go:build !windows && !freebsd
 // +build !windows,!freebsd

 package archive
@@ -97,7 +98,7 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
 		mode |= unix.S_IFIFO
 	}

-	return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
+	return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor))
 }

 func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
@@ -118,3 +119,13 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *
 	}
 	return nil
 }
+
+// Hardlink without symlinks
+func handleLLink(targetPath, path string) error {
+	// Note: on Linux, the link syscall will not follow symlinks.
+	// This behavior is implementation-dependent since
+	// POSIX.1-2008 so to make it clear that we need non-symlink
+	// following here we use the linkat syscall which has a flags
+	// field to select symlink following or not.
+	return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0)
+}
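The handleLLink helpers added across the platform files exist so hardlink extraction never follows a symlink at the target path: link(2)'s behavior on symlinks has been implementation-defined since POSIX.1-2008, so the Unix variants call linkat(2) with flags set to 0, which is specified not to follow. A Linux-only sketch (paths hypothetical):

    //go:build linux

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// flags == 0: if "target" is a symlink, hard-link the symlink itself,
    	// not what it points to. Passing unix.AT_SYMLINK_FOLLOW would instead
    	// link the resolved file, which is exactly what tar extraction must
    	// avoid to prevent link-following attacks.
    	err := unix.Linkat(unix.AT_FDCWD, "target", unix.AT_FDCWD, "newlink", 0)
    	fmt.Println(err)
    }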
5 vendor/github.com/containers/storage/pkg/archive/archive_windows.go (generated, vendored)
@@ -78,3 +78,8 @@ func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
 	// no notion of file ownership mapping yet on Windows
 	return idtools.IDPair{0, 0}, nil
 }
+
+// Hardlink without following symlinks
+func handleLLink(targetPath string, path string) error {
+	return os.Link(targetPath, path)
+}
3 vendor/github.com/containers/storage/pkg/archive/changes.go (generated, vendored)
@@ -5,7 +5,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -403,7 +402,7 @@ func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string,
 		oldRoot, newRoot *FileInfo
 	)
 	if oldDir == "" {
-		emptyDir, err := ioutil.TempDir("", "empty")
+		emptyDir, err := os.MkdirTemp("", "empty")
 		if err != nil {
 			return nil, err
 		}
3 vendor/github.com/containers/storage/pkg/archive/copy.go (generated, vendored)
@@ -4,7 +4,6 @@ import (
 	"archive/tar"
 	"errors"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"strings"
@@ -255,7 +254,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
 		// The destination exists as a directory. No alteration
 		// to srcContent is needed as its contents can be
 		// simply extracted to the destination directory.
-		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+		return dstInfo.Path, io.NopCloser(srcContent), nil
 	case dstInfo.Exists && srcInfo.IsDir:
 		// The destination exists as some type of file and the source
 		// content is a directory. This is an error condition since
3 vendor/github.com/containers/storage/pkg/archive/diff.go (generated, vendored)
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"io"
 	"io/fs"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -102,7 +101,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
 			basename := filepath.Base(hdr.Name)
 			aufsHardlinks[basename] = hdr
 			if aufsTempdir == "" {
-				if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil {
+				if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil {
 					return 0, err
 				}
 				defer os.RemoveAll(aufsTempdir)
12 vendor/github.com/containers/storage/pkg/chrootarchive/archive.go (generated, vendored)
@@ -4,14 +4,13 @@ import (
 	stdtar "archive/tar"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"sync"

 	"github.com/containers/storage/pkg/archive"
 	"github.com/containers/storage/pkg/idtools"
-	"github.com/opencontainers/runc/libcontainer/userns"
+	"github.com/containers/storage/pkg/unshare"
 )

 // NewArchiver returns a new Archiver which uses chrootarchive.Untar
@@ -31,7 +30,8 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.
 // Untar reads a stream of bytes from `archive`, parses it as a tar archive,
 // and unpacks it into the directory at `dest`.
 // The archive may be compressed with one of the following algorithms:
-// identity (uncompressed), gzip, bzip2, xz.
+//
+//	identity (uncompressed), gzip, bzip2, xz.
 func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
 	return untarHandler(tarArchive, dest, options, true, dest)
 }
@@ -66,7 +66,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
 	}
 	if options == nil {
 		options = &archive.TarOptions{}
-		options.InUserNS = userns.RunningInUserNS()
+		options.InUserNS = unshare.IsRootless()
 	}
 	if options.ExcludePatterns == nil {
 		options.ExcludePatterns = []string{}
@@ -82,7 +82,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
 		}
 	}

-	r := ioutil.NopCloser(tarArchive)
+	r := io.NopCloser(tarArchive)
 	if decompress {
 		decompressedArchive, err := archive.DecompressStream(tarArchive)
 		if err != nil {
@@ -136,7 +136,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
 		err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
 	}
 	hashWorker.Wait()
-	if err == nil {
+	if err == nil && hashError != nil {
 		err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
 	}
 	return err
Some files were not shown because too many files have changed in this diff.