Compare commits


3 Commits

89966c513c  Release 1.9.3  (Miloslav Trmač, 2022-10-19 19:11:14 +02:00)

    Signed-off-by: Miloslav Trmač <mitr@redhat.com>

2382f751cb  Update to c/image v5.22.1  (Miloslav Trmač, 2022-10-19 19:11:14 +02:00)

    > go get github.com/containers/image/v5@v5.22.1
    > make vendor

    Signed-off-by: Miloslav Trmač <mitr@redhat.com>

f24433e290  Pin Go to 1.18  (Miloslav Trmač, 2022-10-19 19:11:14 +02:00)

    1.19 has changed the expected gofmt format, and we don't want
    to follow such changes on the stable branch.

    go@1.18 is "keg-only", i.e. not installed by Brew to /usr/local/bin,
    so we need to change PATH to point at it (as the installation instructs
    us to).

    Signed-off-by: Miloslav Trmač <mitr@redhat.com>
704 changed files with 49578 additions and 13845 deletions


@@ -84,12 +84,13 @@ osx_task:
macos_instance:
image: catalina-xcode
setup_script: |
-export PATH=$GOPATH/bin:$PATH
+# /usr/local/opt/go@1.18 will be populated by (brew install go@1.18) below
+export PATH=$GOPATH/bin:/usr/local/opt/go@1.18/bin:$PATH
brew update
-brew install gpgme go go-md2man
+brew install gpgme go@1.18 go-md2man
go install golang.org/x/lint/golint@latest
test_script: |
-export PATH=$GOPATH/bin:$PATH
+export PATH=$GOPATH/bin:/usr/local/opt/go@1.18/bin:$PATH
go version
go env
make validate-local test-unit-local bin/skopeo
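Aside: the keg-only mechanics above are easy to check locally. A minimal sketch (paths assume Homebrew's default prefix; `brew --prefix go@1.18` resolves the keg without hard-coding it):

```
brew install go@1.18                              # keg-only: not linked into $(brew --prefix)/bin
export PATH="$(brew --prefix go@1.18)/bin:$PATH"  # put the keg's bin directory first
go version                                        # should now report go1.18.x
```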


@@ -1,17 +1,105 @@
---
-# See also:
-# https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml
+# Format Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions
+# Required to un-FUBAR default ${{github.workflow}} value
name: check_cirrus_cron
on:
-# Note: This only applies to the default branch.
-schedule:
-# N/B: This should correspond to a period slightly after
-# the last job finishes running. See job defs. at:
-# https://cirrus-ci.com/settings/repository/6706677464432640
-- cron: '59 23 * * 1-5'
-# Debug: Allow triggering job manually in github-actions WebUI
-workflow_dispatch: {}
+# Note: This only applies to the default branch.
+schedule:
+# N/B: This should correspond to a period slightly after
+# the last job finishes running. See job defs. at:
+# https://cirrus-ci.com/settings/repository/6706677464432640
+- cron: '59 23 * * 1-5'
+# Debug: Allow triggering job manually in github-actions WebUI
+workflow_dispatch: {}
permissions:
contents: read
+env:
+# Debug-mode can reveal secrets, only enable by a secret value.
+# Ref: https://help.github.com/en/actions/configuring-and-managing-workflows/managing-a-workflow-run#enabling-step-debug-logging
+ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}'
+# CSV listing of e-mail addresses for delivery failure or error notices
+RCPTCSV: rh.container.bot@gmail.com,podman-monitor@lists.podman.io
+# Filename for table of cron-name to build-id data
+# (must be in $GITHUB_WORKSPACE/artifacts/)
+NAME_ID_FILEPATH: './artifacts/name_id.txt'
jobs:
-# Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
-call_cron_failures:
-uses: containers/buildah/.github/workflows/check_cirrus_cron.yml@main
-secrets: inherit
+cron_failures:
+runs-on: ubuntu-latest
+steps:
+- uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
+with:
+persist-credentials: false
+# Avoid duplicating cron_failures.sh in skopeo repo.
+- uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
+with:
+repository: containers/podman
+path: '_podman'
+persist-credentials: false
+- name: Get failed cron names and Build IDs
+id: cron
+run: './_podman/.github/actions/${{ github.workflow }}/${{ github.job }}.sh'
+- if: steps.cron.outputs.failures > 0
+shell: bash
+# Must be inline, since context expressions are used.
+# Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions
+run: |
+set -eo pipefail
+(
+echo "Detected one or more Cirrus-CI cron-triggered jobs have failed recently:"
+echo ""
+while read -r NAME BID; do
+echo "Cron build '$NAME' Failed: https://cirrus-ci.com/build/$BID"
+done < "$NAME_ID_FILEPATH"
+echo ""
+echo "# Source: ${{ github.workflow }} workflow on ${{ github.repository }}."
+# Separate content from sendgrid.com automatic footer.
+echo ""
+echo ""
+) > ./artifacts/email_body.txt
+- if: steps.cron.outputs.failures > 0
+name: Send failure notification e-mail
+# Ref: https://github.com/dawidd6/action-send-mail
+uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
+with:
+server_address: ${{secrets.ACTION_MAIL_SERVER}}
+server_port: 465
+username: ${{secrets.ACTION_MAIL_USERNAME}}
+password: ${{secrets.ACTION_MAIL_PASSWORD}}
+subject: Cirrus-CI cron build failures on ${{github.repository}}
+to: ${{env.RCPTCSV}}
+from: ${{secrets.ACTION_MAIL_SENDER}}
+body: file://./artifacts/email_body.txt
+- if: always()
+uses: actions/upload-artifact@82c141cc518b40d92cc801eee768e7aafc9c2fa2 # v2
+with:
+name: ${{ github.job }}_artifacts
+path: artifacts/*
+- if: failure()
+name: Send error notification e-mail
+uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
+with:
+server_address: ${{secrets.ACTION_MAIL_SERVER}}
+server_port: 465
+username: ${{secrets.ACTION_MAIL_USERNAME}}
+password: ${{secrets.ACTION_MAIL_PASSWORD}}
+subject: Github workflow error on ${{github.repository}}
+to: ${{env.RCPTCSV}}
+from: ${{secrets.ACTION_MAIL_SENDER}}
+body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}"


@@ -28,15 +28,10 @@ ifeq ($(GOBIN),)
GOBIN := $(GOPATH)/bin
endif
-# Scripts may also use CONTAINER_RUNTIME, so we need to export it.
-# Note possibly non-obvious aspects of this:
-# - We need to use 'command -v' here, not 'which', for compatibility with MacOS.
-# - GNU Make 4.2.1 (included in Ubuntu 20.04) incorrectly tries to avoid invoking
-# a shell, and fails because there is no /usr/bin/command. The trailing ';' in
-# $(shell … ;) defeats that heuristic (recommended in
-# https://savannah.gnu.org/bugs/index.php?57625 ).
-export CONTAINER_RUNTIME ?= $(if $(shell command -v podman ;),podman,docker)
-GOMD2MAN ?= $(if $(shell command -v go-md2man ;),go-md2man,$(GOBIN)/go-md2man)
+# Multiple scripts are sensitive to this value, make sure it's exported/available
+# N/B: Need to use 'command -v' here for compatibility with MacOS.
+export CONTAINER_RUNTIME ?= $(if $(shell command -v podman),podman,docker)
+GOMD2MAN ?= $(if $(shell command -v go-md2man),go-md2man,$(GOBIN)/go-md2man)
# Go module support: set `-mod=vendor` to use the vendored sources.
# See also hack/make.sh.
@@ -55,6 +50,8 @@ ifeq ($(GOOS), linux)
endif
endif
+GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
# If $TESTFLAGS is set, it is passed as extra arguments to 'go test'.
# You can increase test output verbosity with the option '-test.vv'.
# You can select certain tests to run, with `-test.run <regex>` for example:
@@ -90,7 +87,7 @@ endif
CONTAINER_GOSRC = /src/github.com/containers/skopeo
CONTAINER_RUN ?= $(CONTAINER_CMD) --security-opt label=disable -v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) $(SKOPEO_CIDEV_CONTAINER_FQIN)
-GIT_COMMIT := $(shell GIT_CEILING_DIRECTORIES=$$(cd ..; pwd) git rev-parse HEAD 2> /dev/null || true)
+GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
EXTRA_LDFLAGS ?=
SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
@@ -250,7 +247,7 @@ vendor:
$(GO) mod verify
vendor-in-container:
-podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src golang $(MAKE) vendor
+podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src quay.io/libpod/golang:1.16 $(MAKE) vendor
# CAUTION: This is not a replacement for RPMs provided by your distro.
# Only intended to build and test the latest unreleased changes.
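The `command -v` fallback above is compact enough to demonstrate standalone. A minimal Makefile sketch (the trailing `;` is the GNU Make 4.2.1 workaround described in the removed comment; drop it if you need not support that version):

```
# Prefer podman when installed, otherwise fall back to docker.
export CONTAINER_RUNTIME ?= $(if $(shell command -v podman ;),podman,docker)

.PHONY: runtime
runtime:
	@echo "using $(CONTAINER_RUNTIME)"
```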


@@ -207,7 +207,7 @@ Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate
| [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
-| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between registry repositories and local directories. |
+| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |
License
-


@@ -260,8 +260,6 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
}
}
-opts.destImage.warnAboutIneffectiveOptions(destRef.Transport())
return retry.IfNecessary(ctx, func() error {
manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,


@@ -186,7 +186,6 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
Architecture: imgInspect.Architecture,
Os: imgInspect.Os,
Layers: imgInspect.Layers,
-LayersData: imgInspect.LayersData,
Env: imgInspect.Env,
}
outputData.Digest, err = manifest.Digest(rawManifest)


@@ -3,7 +3,6 @@ package inspect
import (
"time"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
)
@@ -20,6 +19,5 @@ type Output struct {
Architecture string
Os string
Layers []string
-LayersData []types.ImageInspectLayer
Env []string
}


@@ -244,11 +244,7 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
for _, tag := range tags {
taggedRef, err := reference.WithTag(repoRef, tag)
if err != nil {
-logrus.WithFields(logrus.Fields{
-"repo": repoRef.Name(),
-"tag": tag,
-}).Errorf("Error creating a tagged reference from registry tag list: %v", err)
-continue
+return nil, fmt.Errorf("Error creating a reference for repository %s and tag %q: %w", repoRef.Name(), tag, err)
}
ref, err := docker.NewReference(taggedRef)
if err != nil {
@@ -552,8 +548,6 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
}
-opts.destImage.warnAboutIneffectiveOptions(transports.Get(opts.destination))
imageListSelection := copy.CopySystemImage
if opts.all {
imageListSelection = copy.CopyAllImages
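The first sync.go hunk swaps error-handling styles: the removed side logs each bad tag and continues, while the added side fails fast and wraps the cause with `%w` so callers can still inspect it. A minimal runnable sketch of the fail-fast pattern (`makeRef` is a hypothetical stand-in for `reference.WithTag`):

```
package main

import (
	"errors"
	"fmt"
)

var errBadTag = errors.New("invalid tag")

// makeRef is a hypothetical stand-in for reference.WithTag in the real code.
func makeRef(tag string) (string, error) {
	if tag == "" {
		return "", errBadTag
	}
	return "example.com/repo:" + tag, nil
}

// refsFailFast stops at the first bad tag and wraps the cause with %w,
// instead of logging the error and skipping the tag.
func refsFailFast(tags []string) ([]string, error) {
	refs := []string{}
	for _, tag := range tags {
		ref, err := makeRef(tag)
		if err != nil {
			return nil, fmt.Errorf("creating a reference for tag %q: %w", tag, err)
		}
		refs = append(refs, ref)
	}
	return refs, nil
}

func main() {
	_, err := refsFailFast([]string{"latest", ""})
	fmt.Println(err)                       // creating a reference for tag "": invalid tag
	fmt.Println(errors.Is(err, errBadTag)) // true: %w preserves the cause
}
```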


@@ -10,7 +10,6 @@ import (
commonFlag "github.com/containers/common/pkg/flag"
"github.com/containers/common/pkg/retry"
"github.com/containers/image/v5/directory"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
"github.com/containers/image/v5/transports/alltransports"
@@ -31,11 +30,11 @@ type errorShouldDisplayUsage struct {
// The error for closeErr is annotated with description (which is not a format string)
// Typical usage:
//
-// defer func() {
-// if err := something.Close(); err != nil {
-// returnedErr = noteCloseFailure(returnedErr, "closing something", err)
-// }
-// }
+// defer func() {
+// if err := something.Close(); err != nil {
+// returnedErr = noteCloseFailure(returnedErr, "closing something", err)
+// }
+// }
func noteCloseFailure(err error, description string, closeErr error) error {
// We don't accept a Closer() and close it ourselves because signature.PolicyContext has .Destroy(), not .Close().
// This also makes it harder for a caller to do
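The doc comment above sketches the defer pattern in prose; here is a runnable sketch of the same idea (the helper body is an assumption, the real formatting may differ):

```
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// noteCloseFailure keeps the primary error while also recording closeErr.
// (Sketch only; the real helper's message format may differ.)
func noteCloseFailure(err error, description string, closeErr error) error {
	if err == nil {
		return fmt.Errorf("%s: %w", description, closeErr)
	}
	return fmt.Errorf("%w (also, %s: %v)", err, description, closeErr)
}

func writeFile(path string, src io.Reader) (retErr error) {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer func() {
		if err := f.Close(); err != nil {
			retErr = noteCloseFailure(retErr, "closing "+path, err)
		}
	}()
	_, err = io.Copy(f, src)
	return err
}

func main() {
	fmt.Println(writeFile(os.TempDir()+"/skopeo-sketch.txt", strings.NewReader("hello\n")))
}
```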
@@ -245,7 +244,6 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
}
// imageDestOptions is a superset of imageOptions specialized for image destinations.
-// Every user should call imageDestOptions.warnAboutIneffectiveOptions() as part of handling the CLI
type imageDestOptions struct {
*imageOptions
dirForceCompression bool // Compress layers when saving to the dir: transport
@@ -254,13 +252,12 @@ type imageDestOptions struct {
compressionFormat string // Format to use for the compression
compressionLevel commonFlag.OptionalInt // Level to use for the compression
precomputeDigests bool // Precompute digests to dedup layers when saving to the docker: transport
-imageDestFlagPrefix string
}
// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
genericFlags, genericOptions := imageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
-opts := imageDestOptions{imageOptions: genericOptions, imageDestFlagPrefix: flagPrefix}
+opts := imageDestOptions{imageOptions: genericOptions}
fs := pflag.FlagSet{}
fs.AddFlagSet(&genericFlags)
fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
@@ -298,19 +295,6 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
return ctx, err
}
-// warnAboutIneffectiveOptions warns if any ineffective option was set by the user
-// Every user should call this as part of handling the CLI
-func (opts *imageDestOptions) warnAboutIneffectiveOptions(destTransport types.ImageTransport) {
-if destTransport.Name() != directory.Transport.Name() {
-if opts.dirForceCompression {
-logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"compress")
-}
-if opts.dirForceDecompression {
-logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"decompress")
-}
-}
-}
func parseCreds(creds string) (string, string, error) {
if creds == "" {
return "", "", errors.New("credentials can't be empty")


@@ -1,16 +1,3 @@
-[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
-[comment]: <> ()
-[comment]: <> (ANY changes made to this file, once commited/merged must)
-[comment]: <> (be manually copy/pasted -in markdown- into the description)
-[comment]: <> (field on Quay at the following locations:)
-[comment]: <> ()
-[comment]: <> (https://quay.io/repository/containers/skopeo)
-[comment]: <> (https://quay.io/repository/skopeo/stable)
-[comment]: <> (https://quay.io/repository/skopeo/testing)
-[comment]: <> (https://quay.io/repository/skopeo/upstream)
-[comment]: <> ()
-[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
----


@@ -16,11 +16,27 @@ FROM registry.fedoraproject.org/fedora:latest
# https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
RUN dnf -y update && \
rpm --setcaps shadow-utils 2>/dev/null && \
-dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \
-dnf -y copr enable rhcontainerbot/podman-next && \
-dnf -y install skopeo \
---exclude container-selinux \
---enablerepo=updates-testing && \
+dnf -y --enablerepo updates-testing --exclude container-selinux install \
+make \
+golang \
+git \
+go-md2man \
+fuse-overlayfs \
+fuse3 \
+containers-common \
+gpgme-devel \
+libassuan-devel \
+btrfs-progs-devel \
+device-mapper-devel && \
+mkdir /root/skopeo && \
+git clone https://github.com/containers/skopeo \
+/root/skopeo/src/github.com/containers/skopeo && \
+export GOPATH=/root/skopeo && \
+cd /root/skopeo/src/github.com/containers/skopeo && \
+make bin/skopeo && \
+make PREFIX=/usr install && \
+rm -rf /root/skopeo/* && \
+dnf -y remove git golang go-md2man make && \
dnf clean all && \
rm -rf /var/cache /var/log/dnf* /var/log/yum.*


@@ -1,8 +1,8 @@
# This is a default registries.d configuration file. You may
# add to this file or create additional files in registries.d/.
#
-# lookaside: for reading/writing simple signing signatures
-# lookaside-staging: for writing simple signing signatures, preferred over lookaside
+# lookaside: indicates a location that is read and write
+# lookaside-staging: indicates a location that is only for write
#
# lookaside and lookaside-staging take a value of the following:
# lookaside: {schema}://location
@@ -10,12 +10,10 @@
# For reading signatures, schema may be http, https, or file.
# For writing signatures, schema may only be file.
-# The default locations are built-in, for both reading and writing:
-# /var/lib/containers/sigstore for root, or
-# ~/.local/share/containers/sigstore for non-root users.
+# This is the default signature write location for docker registries.
default-docker:
-# lookaside: https://…
-# lookaside-staging: file:///…
+# lookaside: file:///var/lib/containers/sigstore
+lookaside-staging: file:///var/lib/containers/sigstore
# The 'docker' indicator here is the start of the configuration
# for docker registries.
@@ -23,6 +21,6 @@ default-docker:
# docker:
#
# privateregistry.com:
-# lookaside: https://privateregistry.com/sigstore/
+# lookaside: http://privateregistry.com/sigstore/
# lookaside-staging: /mnt/nfs/privateregistry/sigstore


@@ -56,7 +56,7 @@ After copying the image, write the digest of the resulting image to the file.
**--preserve-digests**
-Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
+Preserve the digests during copying. Fail if the digest cannot be preserved.
**--encrypt-layer** _ints_


@@ -1,14 +1,14 @@
% skopeo-sync(1)
## NAME
-skopeo\-sync - Synchronize images between registry repositories and local directories.
+skopeo\-sync - Synchronize images between container registries and local directories.
## SYNOPSIS
**skopeo sync** [*options*] --src _transport_ --dest _transport_ _source_ _destination_
## DESCRIPTION
-Synchronize images between registry repoositories and local directories.
+Synchronize images between container registries and local directories.
The synchronization is achieved by copying all the images found at _source_ to _destination_.
Useful to synchronize a local container registry mirror, and to populate registries running inside of air-gapped environments.
@@ -66,7 +66,7 @@ Print usage statement.
**--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.
-**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
+**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved.
**--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.
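For orientation while reading these man-page hunks, a typical invocation of the command being documented (registry and destination path are illustrative):

```
skopeo sync --src docker --dest dir registry.example.com/busybox /media/usb
```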


@@ -108,7 +108,7 @@ Print the version number
| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image signature. |
-| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between registry repositories and local directories. |
+| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between container registries and local directories. |
## FILES
**/etc/containers/policy.json**

go.mod (68 changed lines)

@@ -3,19 +3,20 @@ module github.com/containers/skopeo
go 1.17
require (
-github.com/containers/common v0.50.1
-github.com/containers/image/v5 v5.23.0
+github.com/containers/common v0.49.1
+github.com/containers/image/v5 v5.22.1
github.com/containers/ocicrypt v1.1.5
-github.com/containers/storage v1.43.0
+github.com/containers/storage v1.42.0
+github.com/docker/docker v20.10.17+incompatible
github.com/opencontainers/go-digest v1.0.0
-github.com/opencontainers/image-spec v1.1.0-rc1
+github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
github.com/opencontainers/image-tools v1.0.0-rc3
github.com/sirupsen/logrus v1.9.0
github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.0
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/yaml.v2 v2.4.0
)
@@ -23,78 +24,85 @@ require (
require (
github.com/BurntSushi/toml v1.2.0 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
-github.com/Microsoft/hcsshim v0.9.4 // indirect
+github.com/Microsoft/hcsshim v0.9.3 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
-github.com/containerd/cgroups v1.0.4 // indirect
+github.com/beorn7/perks v1.0.1 // indirect
+github.com/cespare/xxhash/v2 v2.1.2 // indirect
+github.com/containerd/cgroups v1.0.3 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
-github.com/docker/docker v20.10.18+incompatible // indirect
-github.com/docker/docker-credential-helpers v0.7.0 // indirect
+github.com/docker/docker-credential-helpers v0.6.4 // indirect
github.com/docker/go-connections v0.4.0 // indirect
-github.com/docker/go-units v0.5.0 // indirect
+github.com/docker/go-metrics v0.0.1 // indirect
+github.com/docker/go-units v0.4.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
-github.com/google/go-containerregistry v0.11.0 // indirect
+github.com/google/go-containerregistry v0.10.0 // indirect
github.com/google/go-intervals v0.0.2 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/mux v1.8.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/honeycombio/libhoney-go v1.15.8 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
-github.com/klauspost/compress v1.15.11 // indirect
-github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
+github.com/klauspost/compress v1.15.9 // indirect
+github.com/klauspost/pgzip v1.2.5 // indirect
github.com/kr/pretty v0.2.1 // indirect
github.com/kr/text v0.2.0 // indirect
-github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e // indirect
+github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
-github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
github.com/moby/sys/mountinfo v0.6.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
-github.com/opencontainers/runc v1.1.4 // indirect
+github.com/opencontainers/runc v1.1.3 // indirect
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
-github.com/opencontainers/selinux v1.10.2 // indirect
+github.com/opencontainers/selinux v1.10.1 // indirect
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/proglottis/gpgme v0.1.3 // indirect
+github.com/prometheus/client_golang v1.12.1 // indirect
+github.com/prometheus/client_model v0.2.0 // indirect
+github.com/prometheus/common v0.32.1 // indirect
+github.com/prometheus/procfs v0.7.3 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/russross/blackfriday v2.0.0+incompatible // indirect
-github.com/sigstore/sigstore v1.4.2 // indirect
+github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-github.com/sylabs/sif/v2 v2.8.0 // indirect
+github.com/sylabs/sif/v2 v2.7.1 // indirect
github.com/tchap/go-patricia v2.3.0+incompatible // indirect
-github.com/theupdateframework/go-tuf v0.5.1 // indirect
+github.com/theupdateframework/go-tuf v0.3.1 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/vbatts/tar-split v0.11.2 // indirect
-github.com/vbauerster/mpb/v7 v7.5.3 // indirect
+github.com/vbauerster/mpb/v7 v7.4.2 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
-go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
go.opencensus.io v0.23.0 // indirect
-golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 // indirect
-golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
-golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
-golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect
+go.opentelemetry.io/otel/trace v1.3.0 // indirect
+golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 // indirect
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
golang.org/x/text v0.3.7 // indirect
-google.golang.org/genproto v0.0.0-20220720214146-176da50484ac // indirect
-google.golang.org/grpc v1.48.0 // indirect
-google.golang.org/protobuf v1.28.1 // indirect
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
+google.golang.org/grpc v1.47.0 // indirect
+google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum (348 changed lines; diff suppressed because it is too large)


@@ -34,7 +34,7 @@ if [[ "$SKOPEO_CONTAINER_TESTS" == "0" ]] && [[ "$CI" != "true" ]]; then
echo " the Makefile targets WITHOUT the '-local' suffix."
echo "***************************************************************"
) > /dev/stderr
-sleep 5
+sleep 5s
fi
echo


@@ -11,7 +11,7 @@ import (
"strings"
"time"
"github.com/containers/storage/pkg/homedir"
"github.com/docker/docker/pkg/homedir"
"gopkg.in/check.v1"
)


@@ -15,15 +15,11 @@ TestRunShell is not really a test; it is a convenient way to use the registry se
in openshift.go and CopySuite to get an interactive environment for experimentation.
To use it, run:
sudo make shell
to start a container, then within the container:
SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'
An example of what can be done within the container:
cd ..; make bin/skopeo PREFIX=/usr install
./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
oc get istag personal:personal -o json


@@ -50,7 +50,7 @@ function setup() {
local dir=$TESTDIR/dir
-run_skopeo copy --dest-compress-format=zstd $remote_image oci:$dir:latest
+run_skopeo copy --dest-compress --dest-compress-format=zstd $remote_image oci:$dir:latest
# zstd magic number
local magic=$(printf "\x28\xb5\x2f\xfd")


@@ -57,7 +57,7 @@ func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
}).Warn("failed to parse job object message")
continue
}
-if err := msq.Enqueue(notification); err == queue.ErrQueueClosed {
+if err := msq.Write(notification); err == queue.ErrQueueClosed {
// Write will only return an error when the queue is closed.
// The only time a queue would ever be closed is when we call `Close` on
// the job it belongs to which also removes it from the jobMap, so something


@@ -68,9 +68,6 @@ type Options struct {
// `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
// Defaults to false.
UseNTVariant bool
-// `IOTracking` enables tracking I/O statistics on the job object. More specifically this
-// calls SetInformationJobObject with the JobObjectIoAttribution class.
-EnableIOTracking bool
}
// Create creates a job object.
@@ -137,12 +134,6 @@ func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
job.mq = mq
}
-if options.EnableIOTracking {
-if err := enableIOTracking(jobHandle); err != nil {
-return nil, err
-}
-}
return job, nil
}
@@ -244,7 +235,7 @@ func (job *JobObject) PollNotification() (interface{}, error) {
if job.mq == nil {
return nil, ErrNotRegistered
}
-return job.mq.Dequeue()
+return job.mq.ReadOrWait()
}
// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to
@@ -339,7 +330,7 @@ func (job *JobObject) Pids() ([]uint32, error) {
err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectBasicProcessIdList,
-unsafe.Pointer(&info),
+uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
)
@@ -365,7 +356,7 @@ func (job *JobObject) Pids() ([]uint32, error) {
if err = winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectBasicProcessIdList,
-unsafe.Pointer(&buf[0]),
+uintptr(unsafe.Pointer(&buf[0])),
uint32(len(buf)),
nil,
); err != nil {
@@ -393,7 +384,7 @@ func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMA
if err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectMemoryUsageInformation,
-unsafe.Pointer(&info),
+uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
@@ -415,7 +406,7 @@ func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_
if err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectBasicAccountingInformation,
-unsafe.Pointer(&info),
+uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
@@ -424,9 +415,7 @@ func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_
return &info, nil
}
-// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error
-// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking()
-// hasn't been called since creation of the job.
+// QueryStorageStats gets the storage (I/O) stats for the job object.
func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
@@ -441,7 +430,7 @@ func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFO
if err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectIoAttribution,
-unsafe.Pointer(&info),
+uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
@@ -487,7 +476,7 @@ func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
status := winapi.NtQueryInformationProcess(
h,
winapi.ProcessVmCounters,
-unsafe.Pointer(&vmCounters),
+uintptr(unsafe.Pointer(&vmCounters)),
uint32(unsafe.Sizeof(vmCounters)),
nil,
)
@@ -508,31 +497,3 @@ func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
return jobWorkingSetSize, nil
}
-// SetIOTracking enables IO tracking for processes in the job object.
-// This enables use of the QueryStorageStats method.
-func (job *JobObject) SetIOTracking() error {
-job.handleLock.RLock()
-defer job.handleLock.RUnlock()
-if job.handle == 0 {
-return ErrAlreadyClosed
-}
-return enableIOTracking(job.handle)
-}
-func enableIOTracking(job windows.Handle) error {
-info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
-ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
-}
-if _, err := windows.SetInformationJobObject(
-job,
-winapi.JobObjectIoAttribution,
-uintptr(unsafe.Pointer(&info)),
-uint32(unsafe.Sizeof(info)),
-); err != nil {
-return fmt.Errorf("failed to enable IO tracking on job object: %w", err)
-}
-return nil
-}


@@ -202,7 +202,7 @@ func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMI
if err := winapi.QueryInformationJobObject(
job.handle,
windows.JobObjectExtendedLimitInformation,
-unsafe.Pointer(&info),
+uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
@@ -224,7 +224,7 @@ func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE
if err := winapi.QueryInformationJobObject(
job.handle,
windows.JobObjectCpuRateControlInformation,
-unsafe.Pointer(&info),
+uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {


@@ -5,7 +5,10 @@ import (
"sync"
)
-var ErrQueueClosed = errors.New("the queue is closed for reading and writing")
+var (
+ErrQueueClosed = errors.New("the queue is closed for reading and writing")
+ErrQueueEmpty = errors.New("the queue is empty")
+)
// MessageQueue represents a threadsafe message queue to be used to retrieve or
// write messages to.
@@ -26,8 +29,8 @@ func NewMessageQueue() *MessageQueue {
}
}
-// Enqueue writes `msg` to the queue.
-func (mq *MessageQueue) Enqueue(msg interface{}) error {
+// Write writes `msg` to the queue.
+func (mq *MessageQueue) Write(msg interface{}) error {
mq.m.Lock()
defer mq.m.Unlock()
@@ -40,37 +43,55 @@ func (mq *MessageQueue) Enqueue(msg interface{}) error {
return nil
}
-// Dequeue will read a value from the queue and remove it. If the queue
-// is empty, this will block until the queue is closed or a value gets enqueued.
-func (mq *MessageQueue) Dequeue() (interface{}, error) {
+// Read will read a value from the queue if available, otherwise return an error.
+func (mq *MessageQueue) Read() (interface{}, error) {
mq.m.Lock()
defer mq.m.Unlock()
-for !mq.closed && mq.size() == 0 {
-mq.c.Wait()
-}
-// We got woken up, check if it's because the queue got closed.
if mq.closed {
return nil, ErrQueueClosed
}
+if mq.isEmpty() {
+return nil, ErrQueueEmpty
+}
val := mq.messages[0]
mq.messages[0] = nil
mq.messages = mq.messages[1:]
return val, nil
}
-// Size returns the size of the queue.
-func (mq *MessageQueue) Size() int {
-mq.m.RLock()
-defer mq.m.RUnlock()
-return mq.size()
+// ReadOrWait will read a value from the queue if available, else it will wait for a
+// value to become available. This will block forever if nothing gets written or until
+// the queue gets closed.
+func (mq *MessageQueue) ReadOrWait() (interface{}, error) {
+mq.m.Lock()
+if mq.closed {
+mq.m.Unlock()
+return nil, ErrQueueClosed
+}
+if mq.isEmpty() {
+for !mq.closed && mq.isEmpty() {
+mq.c.Wait()
+}
+mq.m.Unlock()
+return mq.Read()
+}
+val := mq.messages[0]
+mq.messages[0] = nil
+mq.messages = mq.messages[1:]
+mq.m.Unlock()
+return val, nil
+}
-// Nonexported size check to check if the queue is empty inside already locked functions.
-func (mq *MessageQueue) size() int {
-return len(mq.messages)
+// IsEmpty returns if the queue is empty
+func (mq *MessageQueue) IsEmpty() bool {
+mq.m.RLock()
+defer mq.m.RUnlock()
+return len(mq.messages) == 0
+}
+// Nonexported empty check that doesn't lock so we can call this in Read and Write.
+func (mq *MessageQueue) isEmpty() bool {
+return len(mq.messages) == 0
+}
// Close closes the queue for future writes or reads. Any attempts to read or write from the
@@ -78,15 +99,13 @@ func (mq *MessageQueue) size() int {
func (mq *MessageQueue) Close() {
mq.m.Lock()
defer mq.m.Unlock()
-// Already closed, noop
+// Already closed
if mq.closed {
return
}
mq.messages = nil
mq.closed = true
-// If there's anybody currently waiting on a value from Dequeue, we need to
+// If there's anybody currently waiting on a value from ReadOrWait, we need to
// broadcast so the read(s) can return ErrQueueClosed.
mq.c.Broadcast()
}
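Since `internal/queue` cannot be imported from outside hcsshim, here is a minimal, self-contained sketch of the blocking read-or-wait pattern both versions share, built on `sync.Cond` (illustrative only; method names follow the older API on the + lines above):

```
package main

import (
	"errors"
	"fmt"
	"sync"
)

var ErrQueueClosed = errors.New("the queue is closed for reading and writing")

type Queue struct {
	m        sync.Mutex
	c        *sync.Cond
	messages []interface{}
	closed   bool
}

func NewQueue() *Queue {
	q := &Queue{}
	q.c = sync.NewCond(&q.m)
	return q
}

func (q *Queue) Write(msg interface{}) error {
	q.m.Lock()
	defer q.m.Unlock()
	if q.closed {
		return ErrQueueClosed
	}
	q.messages = append(q.messages, msg)
	q.c.Signal() // wake one waiting reader
	return nil
}

// ReadOrWait blocks until a value is available or the queue is closed.
func (q *Queue) ReadOrWait() (interface{}, error) {
	q.m.Lock()
	defer q.m.Unlock()
	for !q.closed && len(q.messages) == 0 {
		q.c.Wait() // releases the lock while waiting
	}
	if q.closed {
		return nil, ErrQueueClosed
	}
	val := q.messages[0]
	q.messages = q.messages[1:]
	return val, nil
}

func (q *Queue) Close() {
	q.m.Lock()
	defer q.m.Unlock()
	q.closed = true
	q.c.Broadcast() // release all blocked readers with ErrQueueClosed
}

func main() {
	q := NewQueue()
	go q.Write("hello")
	v, err := q.ReadOrWait()
	fmt.Println(v, err) // hello <nil>
}
```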


@@ -175,7 +175,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// LPDWORD lpReturnLength
// );
//
-//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
+//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
// HANDLE OpenJobObjectW(
// DWORD dwDesiredAccess,


@@ -18,7 +18,7 @@ const ProcessVmCounters = 3
// [out, optional] PULONG ReturnLength
// );
//
-//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
+//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
// typedef struct _VM_COUNTERS_EX
// {


@@ -12,8 +12,7 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
// ULONG SystemInformationLength,
// PULONG ReturnLength
// );
-//
-//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
+//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
type SYSTEM_PROCESS_INFORMATION struct {
NextEntryOffset uint32 // ULONG


@@ -100,7 +100,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
return
}
-func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) {
+func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
status = uint32(r0)
return
@@ -152,7 +152,7 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result
return
}
-func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
+func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
if r1 == 0 {
if e1 != 0 {
@@ -244,7 +244,7 @@ func LocalFree(ptr uintptr) {
return
}
-func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) {
+func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) {
r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0)
status = uint32(r0)
return
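The repeated `unsafe.Pointer` and `uintptr` churn in these signatures tracks a documented Go rule: a `uintptr` holding a pointer's address is invisible to the garbage collector, so the conversion is only valid when it appears inside the syscall expression itself. A sketch of the distinction (illustrative; not tied to a specific Windows API):

```
// Safe: the unsafe.Pointer -> uintptr conversion is part of the call
// expression, which the compiler treats specially (the referent is kept
// alive and unmoved for the duration of the call).
//   syscall.Syscall(addr, 1, uintptr(unsafe.Pointer(&info)), 0, 0)
//
// Risky: storing the uintptr first; nothing keeps info alive or pinned
// between the conversion and the call.
//   p := uintptr(unsafe.Pointer(&info)) // do not do this
//   syscall.Syscall(addr, 1, p, 0, 0)
```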

vendor/github.com/beorn7/perks/LICENSE (new vendored file, 20 lines)

@@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/beorn7/perks/quantile/exampledata.txt (new vendored file, 2388 lines; diff suppressed because it is too large)

vendor/github.com/beorn7/perks/quantile/stream.go (new vendored file, 316 lines)

@@ -0,0 +1,316 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
Value float64 `json:",string"`
Width float64 `json:",string"`
Delta float64 `json:",string"`
}
// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample
func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * r
}
return newStream(ƒ)
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * (s.n - r)
}
return newStream(ƒ)
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
// Convert map to slice to avoid slow iterations on a map.
// ƒ is called on the hot path, so converting the map to a slice
// beforehand results in significant CPU savings.
targets := targetMapToSlice(targetMap)
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for _, t := range targets {
if t.quantile*s.n <= r {
f = (2 * t.epsilon * r) / t.quantile
} else {
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
}
if f < m {
m = f
}
}
return m
}
return newStream(ƒ)
}
type target struct {
quantile float64
epsilon float64
}
func targetMapToSlice(targetMap map[float64]float64) []target {
targets := make([]target, 0, len(targetMap))
for quantile, epsilon := range targetMap {
t := target{
quantile: quantile,
epsilon: epsilon,
}
targets = append(targets, t)
}
return targets
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
*stream
b Samples
sorted bool
}
func newStream(ƒ invariant) *Stream {
x := &stream{ƒ: ƒ}
return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
s.insert(Sample{Value: v, Width: 1})
}
func (s *Stream) insert(sample Sample) {
s.b = append(s.b, sample)
s.sorted = false
if len(s.b) == cap(s.b) {
s.flush()
}
}
// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
if !s.flushed() {
// Fast path when there hasn't been enough data for a flush;
// this also yields better accuracy for small sets of data.
l := len(s.b)
if l == 0 {
return 0
}
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}
s.maybeSort()
return s.b[i].Value
}
s.flush()
return s.stream.query(q)
}
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
sort.Sort(samples)
s.stream.merge(samples)
}
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
s.stream.reset()
s.b = s.b[:0]
}
// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
if !s.flushed() {
return s.b
}
s.flush()
return s.stream.samples()
}
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
return len(s.b) + s.stream.count()
}
func (s *Stream) flush() {
s.maybeSort()
s.stream.merge(s.b)
s.b = s.b[:0]
}
func (s *Stream) maybeSort() {
if !s.sorted {
s.sorted = true
sort.Sort(s.b)
}
}
func (s *Stream) flushed() bool {
return len(s.stream.l) > 0
}
type stream struct {
n float64
l []Sample
ƒ invariant
}
func (s *stream) reset() {
s.l = s.l[:0]
s.n = 0
}
func (s *stream) insert(v float64) {
s.merge(Samples{{v, 1, 0}})
}
func (s *stream) merge(samples Samples) {
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
var r float64
i := 0
for _, sample := range samples {
for ; i < len(s.l); i++ {
c := s.l[i]
if c.Value > sample.Value {
// Insert at position i.
s.l = append(s.l, Sample{})
copy(s.l[i+1:], s.l[i:])
s.l[i] = Sample{
sample.Value,
sample.Width,
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
// TODO(beorn7): How to calculate delta correctly?
}
i++
goto inserted
}
r += c.Width
}
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
i++
inserted:
s.n += sample.Width
r += sample.Width
}
s.compress()
}
func (s *stream) count() int {
return int(s.n)
}
func (s *stream) query(q float64) float64 {
t := math.Ceil(q * s.n)
t += math.Ceil(s.ƒ(s, t) / 2)
p := s.l[0]
var r float64
for _, c := range s.l[1:] {
r += p.Width
if r+c.Width+c.Delta > t {
return p.Value
}
p = c
}
return p.Value
}
func (s *stream) compress() {
if len(s.l) < 2 {
return
}
x := s.l[len(s.l)-1]
xi := len(s.l) - 1
r := s.n - 1 - x.Width
for i := len(s.l) - 2; i >= 0; i-- {
c := s.l[i]
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
x.Width += c.Width
s.l[xi] = x
// Remove element at i.
copy(s.l[i:], s.l[i+1:])
s.l = s.l[:len(s.l)-1]
xi -= 1
} else {
x = c
xi = i
}
r -= c.Width
}
}
func (s *stream) samples() Samples {
samples := make(Samples, len(s.l))
copy(samples, s.l)
return samples
}
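The package comment at the top of stream.go reads more concretely with a small usage sketch against the API defined in this file (targets and sample data are arbitrary):

```
package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the median and 99th percentile with given absolute errors.
	q := quantile.NewTargeted(map[float64]float64{0.5: 0.005, 0.99: 0.001})
	for i := 0; i < 100000; i++ {
		q.Insert(rand.Float64())
	}
	fmt.Printf("p50=%.3f p99=%.3f over %d samples\n",
		q.Query(0.5), q.Query(0.99), q.Count())
}
```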

vendor/github.com/cespare/xxhash/v2/LICENSE.txt (new vendored file, 22 lines)

@@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/cespare/xxhash/v2/README.md (new vendored file, 69 lines)

@@ -0,0 +1,69 @@
# xxhash
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
This package provides a straightforward API:
```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```
The `Digest` type implements hash.Hash64. Its key methods are:
```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.
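A brief usage sketch of the API summarized above (both calls hash the same input, so they print the same 64-bit value):

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Println(xxhash.Sum64String("hello"))

	// Streaming via the Digest (implements hash.Hash64).
	d := xxhash.New()
	d.WriteString("he")
	d.WriteString("llo")
	fmt.Println(d.Sum64())
}
```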
## Compatibility
This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:
* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later
I recommend using the latest release of Go.
## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:
```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```
## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)

vendor/github.com/cespare/xxhash/v2/xxhash.go (new vendored file, 235 lines)

@@ -0,0 +1,235 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash
import (
"encoding/binary"
"errors"
"math/bits"
)
const (
prime1 uint64 = 11400714785074694791
prime2 uint64 = 14029467366897019727
prime3 uint64 = 1609587929392839161
prime4 uint64 = 9650029242287828579
prime5 uint64 = 2870177450012600261
)
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
// Digest implements hash.Hash64.
type Digest struct {
v1 uint64
v2 uint64
v3 uint64
v4 uint64
total uint64
mem [32]byte
n int // how much of mem is used
}
// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
var d Digest
d.Reset()
return &d
}
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
d.v1 = prime1v + prime2
d.v2 = prime2
d.v3 = 0
d.v4 = -prime1v
d.total = 0
d.n = 0
}
// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }
// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }
// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
if d.n+n < 32 {
// This new data doesn't even fill the current block.
copy(d.mem[d.n:], b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
copy(d.mem[d.n:], b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
b = b[32-d.n:]
d.n = 0
}
if len(b) >= 32 {
// One or more full blocks left.
nw := writeBlocks(d, b)
b = b[nw:]
}
// Store any remaining partial block.
copy(d.mem[:], b)
d.n = len(b)
return
}
// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
s := d.Sum64()
return append(
b,
byte(s>>56),
byte(s>>48),
byte(s>>40),
byte(s>>32),
byte(s>>24),
byte(s>>16),
byte(s>>8),
byte(s),
)
}
// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
var h uint64
if d.total >= 32 {
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = d.v3 + prime5
}
h += d.total
i, end := 0, d.n
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(d.mem[i:i+8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(d.mem[i:i+4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for i < end {
h ^= uint64(d.mem[i]) * prime5
h = rol11(h) * prime1
i++
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
const (
magic = "xxh\x06"
marshaledSize = len(magic) + 8*5 + 32
)
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaledSize)
b = append(b, magic...)
b = appendUint64(b, d.v1)
b = appendUint64(b, d.v2)
b = appendUint64(b, d.v3)
b = appendUint64(b, d.v4)
b = appendUint64(b, d.total)
b = append(b, d.mem[:d.n]...)
b = b[:len(b)+len(d.mem)-d.n]
return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
return errors.New("xxhash: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("xxhash: invalid hash state size")
}
b = b[len(magic):]
b, d.v1 = consumeUint64(b)
b, d.v2 = consumeUint64(b)
b, d.v3 = consumeUint64(b)
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
d.n = int(d.total % uint64(len(d.mem)))
return nil
}
func appendUint64(b []byte, x uint64) []byte {
var a [8]byte
binary.LittleEndian.PutUint64(a[:], x)
return append(b, a[:]...)
}
func consumeUint64(b []byte) ([]byte, uint64) {
x := u64(b)
return b[8:], x
}
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
func round(acc, input uint64) uint64 {
acc += input * prime2
acc = rol31(acc)
acc *= prime1
return acc
}
func mergeRound(acc, val uint64) uint64 {
val = round(0, val)
acc ^= val
acc = acc*prime1 + prime4
return acc
}
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }

vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go generated vendored Normal file

@@ -0,0 +1,13 @@
// +build !appengine
// +build gc
// +build !purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64
//go:noescape
func writeBlocks(d *Digest, b []byte) int

vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s generated vendored Normal file

@@ -0,0 +1,215 @@
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
// Register allocation:
// AX h
// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
// R9 v2
// R10 v3
// R11 v4
// R12 tmp
// R13 prime1v
// R14 prime2v
// DI prime4v
// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
MOVQ (SI), R12 \
ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
ADDQ DI, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·prime4v(SB), DI
// Load slice.
MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
LEAQ (SI)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
// Check whether we have at least one block.
CMPQ DX, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8
ADDQ R14, R8
MOVQ R14, R9
XORQ R10, R10
XORQ R11, R11
SUBQ R13, R11
// Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
MOVQ R8, AX
ROLQ $1, AX
MOVQ R9, R12
ROLQ $7, R12
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
JMP afterBlocks
noBlocks:
MOVQ ·prime5v(SB), AX
afterBlocks:
ADDQ DX, AX
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
CMPQ SI, BX
JG fourByte
wordLoop:
// Calculate k1.
MOVQ (SI), R8
ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
ADDQ DI, AX
CMPQ SI, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
CMPQ SI, BX
JG singles
MOVL (SI), R8
ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ SI, BX
JGE finalize
singlesLoop:
MOVBQZX (SI), R12
ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
CMPQ SI, BX
JL singlesLoop
finalize:
MOVQ AX, R12
SHRQ $33, R12
XORQ R12, AX
IMULQ R14, AX
MOVQ AX, R12
SHRQ $29, R12
XORQ R12, AX
IMULQ ·prime3v(SB), AX
MOVQ AX, R12
SHRQ $32, R12
XORQ R12, AX
MOVQ AX, ret+24(FP)
RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.
// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
// Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
// Load slice.
MOVQ b_base+8(FP), SI
MOVQ b_len+16(FP), DX
LEAQ (SI)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
MOVQ 24(AX), R11 // v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
// Copy vN back to d.
MOVQ R8, 0(AX)
MOVQ R9, 8(AX)
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
// The number of bytes written is SI minus the old base pointer.
SUBQ b_base+8(FP), SI
MOVQ SI, ret+32(FP)
RET

vendor/github.com/cespare/xxhash/v2/xxhash_other.go generated vendored Normal file

@@ -0,0 +1,76 @@
// +build !amd64 appengine !gc purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
// d.Write(b)
// return d.Sum64()
// but this is faster, particularly for small inputs.
n := len(b)
var h uint64
if n >= 32 {
v1 := prime1v + prime2
v2 := prime2
v3 := uint64(0)
v4 := -prime1v
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = prime5
}
h += uint64(n)
i, end := 0, len(b)
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(b[i:i+8:len(b)]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for ; i < end; i++ {
h ^= uint64(b[i]) * prime5
h = rol11(h) * prime1
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
func writeBlocks(d *Digest, b []byte) int {
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
n := len(b)
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
return n - len(b)
}

vendor/github.com/cespare/xxhash/v2/xxhash_safe.go generated vendored Normal file

@@ -0,0 +1,15 @@
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
package xxhash
// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
return d.Write([]byte(s))
}

vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go generated vendored Normal file

@@ -0,0 +1,57 @@
// +build !appengine
// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.
package xxhash
import (
"unsafe"
)
// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
// var b []byte
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
// bh.Len = len(s)
// bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
// d.Write always returns len(s), nil.
// Ignoring the return output and returning these fixed values buys a
// savings of 6 in the inliner's cost model.
return len(s), nil
}
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
s string
cap int
}
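The unsafe conversion changes only allocation behavior, never the result; a quick self-contained check of that invariant:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	s := "hello world"
	// Sum64String must match the copying form on both the safe (appengine)
	// and unsafe builds; only the []byte allocation is avoided.
	fmt.Println(xxhash.Sum64String(s) == xxhash.Sum64([]byte(s))) // true
}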


@@ -158,7 +158,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
}
if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
logrus.Debugf("error logging into %q: %v", key, unauthorized)
return fmt.Errorf("logging into %q: invalid username/password", key)
return fmt.Errorf("error logging into %q: invalid username/password", key)
}
return fmt.Errorf("authenticating creds for %q: %w", key, err)
}


@@ -59,22 +59,16 @@ type Formatter struct {
func (f *Formatter) Parse(origin Origin, text string) (*Formatter, error) {
f.Origin = origin
// docker tries to be smart and replaces \n with the actual newline character.
// For compat we do the same but this will break formats such as '{{printf "\n"}}'
// To be backwards compatible with the previous behavior we try to replace and
// parse the template. If it fails use the original text and parse again.
var normText string
switch {
case strings.HasPrefix(text, "table "):
f.RenderTable = true
normText = "{{range .}}" + NormalizeFormat(text) + "{{end -}}"
text = "{{range .}}" + text + "{{end -}}"
text = "{{range .}}" + NormalizeFormat(text) + "{{end -}}"
case OriginUser == origin:
normText = EnforceRange(NormalizeFormat(text))
text = EnforceRange(text)
text = EnforceRange(NormalizeFormat(text))
default:
normText = NormalizeFormat(text)
text = NormalizeFormat(text)
}
f.text = text
if f.RenderTable || origin == OriginPodman {
tw := tabwriter.NewWriter(f.writer, 12, 2, 2, ' ', tabwriter.StripEscape)
@@ -83,14 +77,10 @@ func (f *Formatter) Parse(origin Origin, text string) (*Formatter, error) {
f.RenderHeaders = true
}
tmpl, err := f.template.Funcs(template.FuncMap(DefaultFuncs)).Parse(normText)
tmpl, err := f.template.Funcs(template.FuncMap(DefaultFuncs)).Parse(text)
if err != nil {
tmpl, err = f.template.Funcs(template.FuncMap(DefaultFuncs)).Parse(text)
f.template = tmpl
f.text = text
return f, err
}
f.text = normText
f.template = tmpl
return f, nil
}
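The control flow above boils down to: parse the normalized template first, and only on failure re-parse the user's original text. A stripped-down sketch of that pattern using just the standard library text/template package (normText and text are the variables from Parse; the table/range handling is omitted):

	tmpl, err := template.New("format").Parse(normText)
	if err != nil {
		// Normalization broke a construct such as '{{printf "\n"}}';
		// fall back to the user's original, unmodified text.
		tmpl, err = template.New("format").Parse(text)
	}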


@@ -3,6 +3,7 @@ package archive
import (
"context"
"fmt"
"io"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/internal/private"
@@ -12,8 +13,8 @@ import (
type archiveImageDestination struct {
*tarfile.Destination // Implements most of types.ImageDestination
ref archiveReference
writer *Writer // Should be closed if closeWriter
closeWriter bool
archive *tarfile.Writer // Should only be closed if writer != nil
writer io.Closer // May be nil if the archive is shared
}
func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) {
@@ -21,28 +22,29 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (privat
return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
}
var writer *Writer
var closeWriter bool
if ref.writer != nil {
writer = ref.writer
closeWriter = false
var archive *tarfile.Writer
var writer io.Closer
if ref.archiveWriter != nil {
archive = ref.archiveWriter
writer = nil
} else {
w, err := NewWriter(sys, ref.path)
fh, err := openArchiveForWriting(ref.path)
if err != nil {
return nil, err
}
writer = w
closeWriter = true
archive = tarfile.NewWriter(fh)
writer = fh
}
tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref)
tarDest := tarfile.NewDestination(sys, archive, ref.Transport().Name(), ref.ref)
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
}
return &archiveImageDestination{
Destination: tarDest,
ref: ref,
archive: archive,
writer: writer,
closeWriter: closeWriter,
}, nil
}
@@ -54,7 +56,7 @@ func (d *archiveImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *archiveImageDestination) Close() error {
if d.closeWriter {
if d.writer != nil {
return d.writer.Close()
}
return nil
@@ -68,15 +70,8 @@ func (d *archiveImageDestination) Close() error {
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
d.writer.imageCommitted()
if d.closeWriter {
// We could do this only in .Close(), but failures in .Close() are much more likely to be
// ignored by callers that use defer. So, in single-image destinations, try to complete
// the archive here.
// But if Commit() is never called, let .Close() clean up.
err := d.writer.Close()
d.closeWriter = false
return err
if d.writer != nil {
return d.archive.Close()
}
return nil
}


@@ -53,7 +53,7 @@ type archiveReference struct {
// file, not necessarily path precisely).
archiveReader *tarfile.Reader
// If not nil, must have been created for path
writer *Writer
archiveWriter *tarfile.Writer
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
@@ -108,7 +108,7 @@ func NewIndexReference(path string, sourceIndex int) (types.ImageReference, erro
// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
func newReference(path string, ref reference.NamedTagged, sourceIndex int,
archiveReader *tarfile.Reader, writer *Writer) (types.ImageReference, error) {
archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
if strings.Contains(path, ":") {
return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
}
@@ -126,7 +126,7 @@ func newReference(path string, ref reference.NamedTagged, sourceIndex int,
ref: ref,
sourceIndex: sourceIndex,
archiveReader: archiveReader,
writer: writer,
archiveWriter: archiveWriter,
}, nil
}


@@ -5,7 +5,6 @@ import (
"fmt"
"io"
"os"
"sync"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
@@ -14,19 +13,47 @@ import (
// Writer manages a single in-progress Docker archive and allows adding images to it.
type Writer struct {
path string // The original, user-specified path; not the maintained temporary file, if any
regularFile bool // path refers to a regular file (e.g. not a pipe)
archive *tarfile.Writer
writer io.Closer
// The following state can only be accessed with the mutex held.
mutex sync.Mutex
hadCommit bool // At least one successful commit has happened
path string // The original, user-specified path; not the maintained temporary file, if any
archive *tarfile.Writer
writer io.Closer
}
// NewWriter returns a Writer for path.
// The caller should call .Close() on the returned object.
func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
fh, err := openArchiveForWriting(path)
if err != nil {
return nil, err
}
archive := tarfile.NewWriter(fh)
return &Writer{
path: path,
archive: archive,
writer: fh,
}, nil
}
// Close writes all outstanding data about images to the archive, and
// releases state associated with the Writer, if any.
// No more images can be added after this is called.
func (w *Writer) Close() error {
err := w.archive.Close()
if err2 := w.writer.Close(); err2 != nil && err == nil {
err = err2
}
return err
}
// NewReference returns an ImageReference that allows adding an image to Writer,
// with an optional reference.
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
return newReference(w.path, destinationRef, -1, nil, w.archive)
}
// openArchiveForWriting opens path for writing a tar archive,
// making a few sanity checks.
func openArchiveForWriting(path string) (*os.File, error) {
// path can be either a pipe or a regular file
// in the case of a pipe, we require that we can open it for write
// in the case of a regular file, we don't want to overwrite any pre-existing file
@@ -42,62 +69,15 @@ func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
fh.Close()
}
}()
fhStat, err := fh.Stat()
if err != nil {
return nil, fmt.Errorf("statting file %q: %w", path, err)
}
regularFile := fhStat.Mode().IsRegular()
if regularFile && fhStat.Size() != 0 {
if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
return nil, errors.New("docker-archive doesn't support modifying existing images")
}
archive := tarfile.NewWriter(fh)
succeeded = true
return &Writer{
path: path,
regularFile: regularFile,
archive: archive,
writer: fh,
hadCommit: false,
}, nil
}
// imageCommitted notifies the Writer that at least one image was successfully committed to the stream.
func (w *Writer) imageCommitted() {
w.mutex.Lock()
defer w.mutex.Unlock()
w.hadCommit = true
}
// Close writes all outstanding data about images to the archive, and
// releases state associated with the Writer, if any.
// No more images can be added after this is called.
func (w *Writer) Close() error {
err := w.archive.Close()
if err2 := w.writer.Close(); err2 != nil && err == nil {
err = err2
}
if err == nil && w.regularFile && !w.hadCommit {
// Writing to the destination never had a success; delete the destination if we created it.
// This is done primarily because we dont implement adding another image to a pre-existing image, so if we
// left a partial archive around (notably because reading from the _source_ has failed), we couldnt retry without
// the caller manually deleting the partial archive. So, delete it instead.
//
// Archives with at least one successfully created image are left around; they might still be valuable.
//
// Note a corner case: If there _originally_ was an empty file (which is not a valid archive anyway), this deletes it.
// Ideally, if w.regularFile, we should write the full contents to a temporary file and use os.Rename here, only on success.
if err2 := os.Remove(w.path); err2 != nil {
err = err2
}
}
return err
}
// NewReference returns an ImageReference that allows adding an image to Writer,
// with an optional reference.
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
return newReference(w.path, destinationRef, -1, nil, w)
return fh, nil
}
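Taken together, the Writer shown above has a simple open-append-close lifecycle. A hedged sketch of a caller (sys and taggedRef are assumed to be a *types.SystemContext and a reference.NamedTagged; error handling abbreviated):

	w, err := archive.NewWriter(sys, "/tmp/images.tar")
	if err != nil {
		return err
	}
	defer w.Close() // flushes the tar stream and closes the underlying file

	destRef, err := w.NewReference(taggedRef)
	if err != nil {
		return err
	}
	// destRef can now be handed to image-copy code to append one image
	// to the shared archive; more references may be created before Close.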


@@ -53,7 +53,7 @@ func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
// For daemonImageDestination, it must be a ref, which is NamedTagged.
// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest.
// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we dont bother.)
// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we dont bother.)
type daemonReference struct {
id digest.Digest
ref reference.Named // !reference.IsNameOnly


@@ -1,6 +1,7 @@
package docker
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
@@ -27,6 +28,7 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
clientLib "github.com/docker/distribution/registry/client"
"github.com/docker/go-connections/tlsconfig"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -125,9 +127,8 @@ type dockerClient struct {
}
type authScope struct {
resourceType string
remoteName string
actions string
remoteName string
actions string
}
// sendAuth determines whether we need authentication for v2 or v1 endpoint.
@@ -236,7 +237,6 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, regis
}
client.signatureBase = sigBase
client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref)
client.scope.resourceType = "repository"
client.scope.actions = actions
client.scope.remoteName = reference.Path(ref.ref)
return client, nil
@@ -475,33 +475,6 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
}
// Checks if the auth headers in the response contain an indication of a failed
// authorization because of an "insufficient_scope" error. If that's the case,
// returns the required scope to be used for fetching a new token.
func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) {
if err == nil && res.StatusCode == http.StatusUnauthorized {
challenges := parseAuthHeader(res.Header)
for _, challenge := range challenges {
if challenge.Scheme == "bearer" {
if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
if newScope, err := parseAuthScope(scope); err == nil {
return true, newScope
} else {
logrus.WithFields(logrus.Fields{
"error": err,
"scope": scope,
"challenge": challenge,
}).Error("Failed to parse the authentication scope from the given challenge")
}
}
}
}
}
}
return false, nil
}
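For context, the removed helper matched 401 responses whose bearer challenge carries an insufficient_scope error. A hypothetical header it would have acted on:

	// WWW-Authenticate: Bearer realm="https://auth.example.com/token",
	//     error="insufficient_scope",
	//     scope="repository:library/busybox:pull"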
// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it,
// silently falling back to fallbackDelay if the header is missing or invalid.
func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration {
@@ -541,29 +514,6 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
for {
res, err := c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
attempts++
// By default we use pre-defined scopes per operation. In
// certain cases, this can fail when our authentication is
// insufficient, then we might be getting an error back with a
// Www-Authenticate Header indicating an insufficient scope.
//
// Check for that and update the client challenges to retry after
// requesting a new token
//
// We only try this on the first attempt, to not overload an
// already struggling server.
// We also cannot retry with a body (stream != nil) as stream
// was already read
if attempts == 1 && stream == nil && auth != noAuth {
if retry, newScope := needsRetryWithUpdatedScope(err, res); retry {
logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
// Note: This retry ignores extraScope. Thats, strictly speaking, incorrect, but we dont currently
// expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
// for more than one extra scope.
res, err = c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, newScope)
extraScope = newScope
}
}
if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
stream != nil || // We can't retry with a body (which is not restartable in the general case)
attempts == backoffNumIterations {
@@ -643,18 +593,8 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
cacheKey := ""
scopes := []authScope{c.scope}
if extraScope != nil {
// Using ':' as a separator here is unambiguous because getBearerToken below
// uses the same separator when formatting a remote request (and because
// repository names that we create can't contain colons, and extraScope values
// coming from a server come from `parseAuthScope`, which also splits on colons).
cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions)
if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 {
return fmt.Errorf(
"Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d",
cacheKey,
colonCount,
)
}
// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
scopes = append(scopes, *extraScope)
}
var token bearerToken
@@ -709,10 +649,9 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
if service, ok := challenge.Parameters["service"]; ok && service != "" {
params.Add("service", service)
}
for _, scope := range scopes {
if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
if scope.remoteName != "" && scope.actions != "" {
params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
}
}
params.Add("grant_type", "refresh_token")
@@ -762,8 +701,8 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
}
for _, scope := range scopes {
if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
if scope.remoteName != "" && scope.actions != "" {
params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
}
}
@@ -983,15 +922,25 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error.
func isManifestUnknownError(err error) bool {
var errs errcode.Errors
if !errors.As(err, &errs) || len(errs) == 0 {
return false
if errors.As(err, &errs) && len(errs) != 0 {
firstErr := errs[0]
// docker/distribution, and as defined in the spec
var ec errcode.ErrorCoder
if errors.As(firstErr, &ec) && ec.ErrorCode() == v2.ErrorCodeManifestUnknown {
return true
}
// registry.redhat.io as of October 2022
var e errcode.Error
if errors.As(firstErr, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
return true
}
}
err = errs[0]
ec, ok := err.(errcode.ErrorCoder)
if !ok {
return false
// ALSO registry.redhat.io as of October 2022
var unexpected *clientLib.UnexpectedHTTPResponseError
if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound && bytes.Contains(unexpected.Response, []byte("Not found")) {
return true
}
return ec.ErrorCode() == v2.ErrorCodeManifestUnknown
return false
}
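Callers use this predicate to treat a missing manifest as an ordinary absent case rather than a failure; a hedged sketch (the exact fetchManifest call shape here is illustrative):

	manifest, err := c.fetchManifest(ctx, ref, tagOrDigest)
	if err != nil {
		if isManifestUnknownError(err) {
			return nil, nil // e.g. no sigstore attachment exists; not an error
		}
		return nil, err
	}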
// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for
@@ -1039,7 +988,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), handleErrorResponse(res))
return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), clientLib.HandleErrorResponse(res))
}
body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)


@@ -358,9 +358,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// Checking candidateRepo, and mounting from it, requires an
// expanded token scope.
extraScope := &authScope{
resourceType: "repository",
remoteName: reference.Path(candidateRepo),
actions: "pull",
remoteName: reference.Path(candidateRepo),
actions: "pull",
}
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
@@ -653,6 +652,7 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
Digest: "", // We will fill this in later.
Size: 0,
}, nil)
ociConfig.RootFS.Type = "layers"
} else {
logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String())
// We dont benefit from a real BlobInfoCache here because we never try to reuse/mount configs.


@@ -4,6 +4,8 @@ import (
"errors"
"fmt"
"net/http"
"github.com/docker/distribution/registry/client"
)
var (
@@ -33,7 +35,7 @@ func httpResponseToError(res *http.Response, context string) error {
case http.StatusTooManyRequests:
return ErrTooManyRequests
case http.StatusUnauthorized:
err := handleErrorResponse(res)
err := client.HandleErrorResponse(res)
return ErrUnauthorizedForCredentials{Err: err}
default:
if context != "" {
@@ -46,13 +48,14 @@ func httpResponseToError(res *http.Response, context string) error {
// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
// registry
func registryHTTPResponseToError(res *http.Response) error {
err := handleErrorResponse(res)
if e, ok := err.(*unexpectedHTTPResponseError); ok {
err := client.HandleErrorResponse(res)
if e, ok := err.(*client.UnexpectedHTTPResponseError); ok {
response := string(e.Response)
if len(response) > 50 {
response = response[:50] + "..."
}
err = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
// %.0w makes e visible to errors.Unwrap() without including any text
err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e)
}
return err
}
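The %.0w verb in the new error message deserves a note: width and precision on %w affect only the printed text (as with %v), not the wrapping itself, so a zero precision keeps the wrapped error reachable via errors.Is/errors.As while contributing no characters. A self-contained check:

package main

import (
	"errors"
	"fmt"
)

func main() {
	inner := errors.New("very long upstream response body")
	err := fmt.Errorf("StatusCode: 404, truncated...%.0w", inner)
	fmt.Println(err)                   // StatusCode: 404, truncated...
	fmt.Println(errors.Is(err, inner)) // true: the chain stays intact
}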


@@ -3,13 +3,13 @@
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/


@@ -3,7 +3,6 @@ package docker
// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
import (
"fmt"
"net/http"
"strings"
)
@@ -71,18 +70,6 @@ func parseAuthHeader(header http.Header) []challenge {
return challenges
}
// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions`
func parseAuthScope(scopeStr string) (*authScope, error) {
if parts := strings.Split(scopeStr, ":"); len(parts) == 3 {
return &authScope{
resourceType: parts[0],
remoteName: parts[1],
actions: parts[2],
}, nil
}
return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr)
}
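The scope strings this removed parser handled follow the three-field resource:remote:actions grammar of the registry token flow; remote names may contain slashes but never colons, which is why the plain three-way split above suffices. A fragment (assuming the standard strings package):

	parts := strings.Split("repository:library/busybox:pull", ":")
	// parts[0] == "repository"      (resource type)
	// parts[1] == "library/busybox" (remote name)
	// parts[2] == "pull"            (actions)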
// NOTE: This is not a fully compliant parser per RFC 7235:
// Most notably it does not support more than one challenge within a single header
// Some of the whitespace parsing also seems noncompliant.


@@ -22,14 +22,13 @@ type Compat struct {
// for implementations of private.ImageDestination.
//
// Use it like this:
// type yourDestination struct {
// impl.Compat
// …
// }
// dest := &yourDestination{…}
// dest.Compat = impl.AddCompat(dest)
//
// type yourDestination struct {
// impl.Compat
// …
// }
//
// dest := &yourDestination{…}
// dest.Compat = impl.AddCompat(dest)
func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
return Compat{dest}
}


@@ -3,25 +3,23 @@
// Compare with imagedestination/impl, which might require non-trivial implementation work.
//
// There are two kinds of stubs:
// - Pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
// implementation:
//
// First, there are pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
// implementation:
// type yourDestination struct {
// stubs.ImplementsPutBlobPartial
// …
// }
// - Stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
// means that a constructor must be called:
// type yourDestination struct {
// stubs.NoPutBlobPartialInitialize
// …
// }
//
// type yourDestination struct {
// stubs.ImplementsPutBlobPartial
//
// }
// dest := &yourDestination{
//
// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
// }
//
// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
// means that a constructor must be called:
//
// type yourDestination struct {
// stubs.NoPutBlobPartialInitialize
// …
// }
//
// dest := &yourDestination{
// …
// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
// }
package stubs


@@ -19,14 +19,13 @@ type Compat struct {
// for implementations of private.ImageSource.
//
// Use it like this:
// type yourSource struct {
// impl.Compat
// …
// }
// src := &yourSource{…}
// src.Compat = impl.AddCompat(src)
//
// type yourSource struct {
// impl.Compat
// …
// }
//
// src := &yourSource{…}
// src.Compat = impl.AddCompat(src)
func AddCompat(src private.ImageSourceInternalOnly) Compat {
return Compat{src}
}


@@ -3,26 +3,23 @@
// Compare with imagesource/impl, which might require non-trivial implementation work.
//
// There are two kinds of stubs:
// - Pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
// implementation:
//
// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
// type yourSource struct {
// stubs.ImplementsGetBlobAt
// …
// }
// - Stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
// means that a constructor must be called:
// type yourSource struct {
// stubs.NoGetBlobAtInitialize
// …
// }
//
// implementation:
// dest := &yourSource{
// …
// NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
// }
//
// type yourSource struct {
// stubs.ImplementsGetBlobAt
// …
// }
//
// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
// means that a constructor must be called:
// type yourSource struct {
// stubs.NoGetBlobAtInitialize
// …
// }
//
// dest := &yourSource{
// …
// NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
// }
package stubs


@@ -228,16 +228,3 @@ func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet,
variants := findCompressionMIMETypeSet(variantTable, mimeType)
return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm.
}
// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer {
layers := make([]types.ImageInspectLayer, len(infos))
for i, info := range infos {
layers[i].MIMEType = info.MediaType
layers[i].Digest = info.Digest
layers[i].Size = info.Size
layers[i].Annotations = info.Annotations
}
return layers
}


@@ -221,16 +221,13 @@ func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageI
if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil {
return nil, err
}
layerInfos := m.LayerInfos()
i := &types.ImageInspectInfo{
Tag: m.Tag,
Created: &s1.Created,
DockerVersion: s1.DockerVersion,
Architecture: s1.Architecture,
Os: s1.OS,
Layers: layerInfosToStrings(layerInfos),
LayersData: imgInspectLayersFromLayerInfos(layerInfos),
Author: s1.Author,
Layers: layerInfosToStrings(m.LayerInfos()),
}
if s1.Config != nil {
i.Labels = s1.Config.Labels


@@ -271,7 +271,6 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
if err := json.Unmarshal(config, s2); err != nil {
return nil, err
}
layerInfos := m.LayerInfos()
i := &types.ImageInspectInfo{
Tag: "",
Created: &s2.Created,
@@ -279,9 +278,7 @@ func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*t
Architecture: s2.Architecture,
Variant: s2.Variant,
Os: s2.OS,
Layers: layerInfosToStrings(layerInfos),
LayersData: imgInspectLayersFromLayerInfos(layerInfos),
Author: s2.Author,
Layers: layerInfosToStrings(m.LayerInfos()),
}
if s2.Config != nil {
i.Labels = s2.Config.Labels


@@ -212,7 +212,6 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
if err := json.Unmarshal(config, d1); err != nil {
return nil, err
}
layerInfos := m.LayerInfos()
i := &types.ImageInspectInfo{
Tag: "",
Created: v1.Created,
@@ -220,10 +219,8 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
Labels: v1.Config.Labels,
Architecture: v1.Architecture,
Os: v1.OS,
Layers: layerInfosToStrings(layerInfos),
LayersData: imgInspectLayersFromLayerInfos(layerInfos),
Layers: layerInfosToStrings(m.LayerInfos()),
Env: v1.Config.Env,
Author: v1.Author,
}
return i, nil
}


@@ -332,7 +332,7 @@ var (
errEmptyCluster = errors.New("cluster has no server defined")
)
// helper for checking certificate/key/CA
//helper for checking certificate/key/CA
func validateFileIsReadable(name string) error {
answer, err := os.Open(name)
defer func() {
@@ -545,10 +545,8 @@ type clientConfigLoadingRules struct {
// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
// Load starts by running the MigrationRules and then
// takes the loading rules and returns a Config object based on following rules.
//
// - if the ExplicitPath, return the unmerged explicit file
// - Otherwise, return a merged config based on the Precedence slice
//
// if the ExplicitPath, return the unmerged explicit file
// Otherwise, return a merged config based on the Precedence slice
// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
// Read errors or files with non-deserializable content produce errors.
// The first file to set a particular map key wins and map key's value is never changed.


@@ -683,7 +683,7 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
// keys we prefer exact matches as well.
for _, key := range keys {
if val, exists := auths.AuthConfigs[key]; exists {
return decodeDockerAuth(path, key, val)
return decodeDockerAuth(val)
}
}
@@ -698,7 +698,7 @@ func findCredentialsInFile(key, registry, path string, legacyFormat bool) (types
registry = normalizeRegistry(registry)
for k, v := range auths.AuthConfigs {
if normalizeAuthFileKey(k, legacyFormat) == registry {
return decodeDockerAuth(path, k, v)
return decodeDockerAuth(v)
}
}
@@ -729,9 +729,9 @@ func authKeysForKey(key string) (res []string) {
return res
}
// decodeDockerAuth decodes the username and password from conf,
// which is entry key in path.
func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) {
// decodeDockerAuth decodes the username and password, which is
// encoded in base64.
func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) {
decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
if err != nil {
return types.DockerAuthConfig{}, err
@@ -740,11 +740,6 @@ func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuth
parts := strings.SplitN(string(decoded), ":", 2)
if len(parts) != 2 {
// if it's invalid just skip, as docker does
if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, dont warn about those
logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing semicolon`, key, path) // Dont include the text of decoded, because that might put secrets into a log.
} else {
logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path)
}
return types.DockerAuthConfig{}, nil
}
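decodeDockerAuth handles the classic config.json layout in which each auths entry stores base64("username:password"). A self-contained sketch of that decoding, including why SplitN with n=2 matters:

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Hypothetical entry value; passwords may themselves contain colons.
	auth := base64.StdEncoding.EncodeToString([]byte("user:pa:ss"))

	decoded, err := base64.StdEncoding.DecodeString(auth)
	if err != nil {
		panic(err)
	}
	// n=2 splits only on the first colon, keeping the rest in the password.
	parts := strings.SplitN(string(decoded), ":", 2)
	fmt.Println(parts[0]) // user
	fmt.Println(parts[1]) // pa:ss
}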


@@ -172,10 +172,10 @@ func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) Polic
// but it does not necessarily mean that the contents of the signature are
// consistent with local policy.
// For example:
// - Do not use the existence of an accepted signature to determine whether to run
// a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
// signature are authorized to run code as root, or to affect system or cluster configuration.
// - Do not use the existence of an accepted signature to determine whether to run
// a container based on this image; use IsRunningImageAllowed instead.
// - Just because a signature is accepted does not automatically mean the contents of the
// signature are authorized to run code as root, or to affect system or cluster configuration.
func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(ctx context.Context, publicImage types.UnparsedImage) (sigs []*Signature, finalErr error) {
if err := pc.changeState(pcReady, pcInUse); err != nil {
return nil, err


@@ -2,7 +2,6 @@
// tarballs and an optional template configuration.
//
// An example:
//
// package main
//
// import (


@@ -177,25 +177,24 @@ type BICReplacementCandidate struct {
// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
//
// It records two kinds of data:
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
//
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
//
// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
// compress/decompress blobs for their own purposes.
//
// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
// compress/decompress blobs for their own purposes.
// - Known blob locations, managed by individual transports:
// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
// recording transport-specific information that allows the transport to reuse the blob in the future;
// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
//
// - Known blob locations, managed by individual transports:
// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
// recording transport-specific information that allows the transport to reuse the blob in the future;
// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
//
// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
// can be directly reused within a registry, or mounted across registries within a registry server.)
// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
// can be directly reused within a registry, or mounted across registries within a registry server.)
//
// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
// users of the cache should just fall back to copying the blobs the usual way.
@@ -466,17 +465,7 @@ type ImageInspectInfo struct {
Variant string
Os string
Layers []string
LayersData []ImageInspectLayer
Env []string
Author string
}
// ImageInspectLayer is a set of metadata describing an image layers' detail
type ImageInspectLayer struct {
MIMEType string // "" if unknown.
Digest digest.Digest
Size int64 // -1 if unknown.
Annotations map[string]string
}
// DockerAuthConfig contains authorization information for connecting to a registry.


@@ -6,9 +6,9 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 5
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 23
VersionMinor = 22
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""


@@ -132,7 +132,7 @@ lint_task:
meta_task:
container:
image: "quay.io/libpod/imgts:latest"
image: "quay.io/libpod/imgts:${IMAGE_SUFFIX}"
cpu: 1
memory: 1


@@ -1 +1 @@
1.43.0
1.42.0


@@ -3,6 +3,7 @@ package storage
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sync"
@@ -143,26 +144,26 @@ func copyContainer(c *Container) *Container {
}
func (c *Container) MountLabel() string {
if label, ok := c.Flags[mountLabelFlag].(string); ok {
if label, ok := c.Flags["MountLabel"].(string); ok {
return label
}
return ""
}
func (c *Container) ProcessLabel() string {
if label, ok := c.Flags[processLabelFlag].(string); ok {
if label, ok := c.Flags["ProcessLabel"].(string); ok {
return label
}
return ""
}
func (c *Container) MountOpts() []string {
switch c.Flags[mountOptsFlag].(type) {
switch c.Flags["MountOpts"].(type) {
case []string:
return c.Flags[mountOptsFlag].([]string)
return c.Flags["MountOpts"].([]string)
case []interface{}:
var mountOpts []string
for _, v := range c.Flags[mountOptsFlag].([]interface{}) {
for _, v := range c.Flags["MountOpts"].([]interface{}) {
if flag, ok := v.(string); ok {
mountOpts = append(mountOpts, flag)
}
@@ -196,7 +197,7 @@ func (r *containerStore) datapath(id, key string) string {
func (r *containerStore) Load() error {
needSave := false
rpath := r.containerspath()
data, err := os.ReadFile(rpath)
data, err := ioutil.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -320,10 +321,10 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
return nil, ErrDuplicateID
}
if options.MountOpts != nil {
options.Flags[mountOptsFlag] = append([]string{}, options.MountOpts...)
options.Flags["MountOpts"] = append([]string{}, options.MountOpts...)
}
if options.Volatile {
options.Flags[volatileFlag] = true
options.Flags["Volatile"] = true
}
names = dedupeNames(names)
for _, name := range names {
@@ -483,7 +484,7 @@ func (r *containerStore) BigData(id, key string) ([]byte, error) {
if !ok {
return nil, ErrContainerUnknown
}
return os.ReadFile(r.datapath(c.ID, key))
return ioutil.ReadFile(r.datapath(c.ID, key))
}
func (r *containerStore) BigDataSize(id, key string) (int64, error) {


@@ -29,6 +29,7 @@ import (
"fmt"
"io"
"io/fs"
"io/ioutil"
"os"
"os/exec"
"path"
@@ -46,7 +47,7 @@ import (
mountpk "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
"github.com/vbatts/tar-split/tar/storage"
@@ -169,7 +170,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
for _, path := range []string{"mnt", "diff"} {
p := filepath.Join(home, path)
entries, err := os.ReadDir(p)
entries, err := ioutil.ReadDir(p)
if err != nil {
logger.WithError(err).WithField("dir", p).Error("error reading dir entries")
continue
@@ -199,7 +200,7 @@ func supportsAufs() error {
// proc/filesystems for when aufs is supported
exec.Command("modprobe", "aufs").Run()
if unshare.IsRootless() {
if userns.RunningInUserNS() {
return ErrAufsNested
}
@@ -729,14 +730,14 @@ func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.M
// version of aufs.
func useDirperm() bool {
enableDirpermLock.Do(func() {
base, err := os.MkdirTemp("", "storage-aufs-base")
base, err := ioutil.TempDir("", "storage-aufs-base")
if err != nil {
logrus.Errorf("Checking dirperm1: %v", err)
return
}
defer os.RemoveAll(base)
union, err := os.MkdirTemp("", "storage-aufs-union")
union, err := ioutil.TempDir("", "storage-aufs-union")
if err != nil {
logrus.Errorf("Checking dirperm1: %v", err)
return


@@ -1,17 +1,17 @@
//go:build linux
// +build linux
package aufs
import (
"bufio"
"io/ioutil"
"os"
"path"
)
// Return all the directories
func loadIds(root string) ([]string, error) {
dirs, err := os.ReadDir(root)
dirs, err := ioutil.ReadDir(root)
if err != nil {
return nil, err
}


@@ -0,0 +1,12 @@
// +build !linux
package aufs
import "errors"
// MsRemount declared to specify a non-linux system mount.
const MsRemount = 0
func mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
return errors.New("mount is not implemented on this platform")
}


@@ -18,6 +18,7 @@ import "C"
import (
"fmt"
"io/fs"
"io/ioutil"
"math"
"os"
"path"
@@ -523,7 +524,7 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil {
return err
}
if err := os.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil {
return err
}
}
@@ -642,7 +643,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
return "", fmt.Errorf("%s: not a directory", dir)
}
if quota, err := os.ReadFile(d.quotasDirID(id)); err == nil {
if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
if err := d.enableQuota(); err != nil {
return "", err


@@ -1,5 +1,4 @@
//go:build linux && btrfs_noversion && cgo
// +build linux,btrfs_noversion,cgo
// +build !linux btrfs_noversion !cgo
package btrfs


@@ -2,7 +2,6 @@ package graphdriver
import (
"bytes"
"errors"
"fmt"
"os"
@@ -94,7 +93,7 @@ func ChownPathByMaps(path string, toContainer, toHost *idtools.IDMappings) error
return err
}
if len(output) > 0 {
return errors.New(string(output))
return fmt.Errorf(string(output))
}
return nil


@@ -27,6 +27,7 @@ import (
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runc/libcontainer/userns"
"golang.org/x/sys/unix"
)
@@ -206,7 +207,7 @@ func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
s.Close()
case mode&os.ModeDevice != 0:
if unshare.IsRootless() {
if userns.RunningInUserNS() {
// cannot create a device if running in user namespace
return nil
}


@@ -8,6 +8,7 @@ import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -153,7 +154,7 @@ func readLVMConfig(root string) (directLVMConfig, error) {
var cfg directLVMConfig
p := filepath.Join(root, "setup-config.json")
b, err := os.ReadFile(p)
b, err := ioutil.ReadFile(p)
if err != nil {
if os.IsNotExist(err) {
return cfg, nil
@@ -165,10 +166,9 @@ func readLVMConfig(root string) (directLVMConfig, error) {
if len(b) == 0 {
return cfg, nil
}
if err := json.Unmarshal(b, &cfg); err != nil {
return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err)
}
return cfg, nil
err = json.Unmarshal(b, &cfg)
return cfg, fmt.Errorf("unmarshaling previous device setup config: %w", err)
}
func writeLVMConfig(root string, cfg directLVMConfig) error {
@@ -177,7 +177,7 @@ func writeLVMConfig(root string, cfg directLVMConfig) error {
if err != nil {
return fmt.Errorf("marshalling direct lvm config: %w", err)
}
if err := os.WriteFile(p, b, 0600); err != nil {
if err := ioutil.WriteFile(p, b, 0600); err != nil {
return fmt.Errorf("writing direct lvm config to file: %w", err)
}
return nil
@@ -241,7 +241,7 @@ func setupDirectLVM(cfg directLVMConfig) error {
}
profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent)
err = os.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600)
if err != nil {
return fmt.Errorf("writing storage thinp autoextend profile: %w", err)
}


@@ -9,6 +9,7 @@ import (
"fmt"
"io"
"io/fs"
"io/ioutil"
"os"
"os/exec"
"path"
@@ -330,7 +331,7 @@ func (devices *DeviceSet) removeMetadata(info *devInfo) error {
// Given json data and file path, write it to disk
func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error {
tmpFile, err := os.CreateTemp(devices.metadataDir(), ".tmp")
tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp")
if err != nil {
return fmt.Errorf("devmapper: Error creating metadata file: %s", err)
}
@@ -629,7 +630,7 @@ func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
func (devices *DeviceSet) migrateOldMetaData() error {
// Migrate old metadata file
jsonData, err := os.ReadFile(devices.oldMetadataFile())
jsonData, err := ioutil.ReadFile(devices.oldMetadataFile())
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -954,7 +955,7 @@ func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInf
func (devices *DeviceSet) loadMetadata(hash string) *devInfo {
info := &devInfo{Hash: hash, devices: devices}
jsonData, err := os.ReadFile(devices.metadataFile(info))
jsonData, err := ioutil.ReadFile(devices.metadataFile(info))
if err != nil {
logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err)
return nil
@@ -1275,11 +1276,11 @@ func (devices *DeviceSet) setupBaseImage() error {
}
func setCloseOnExec(name string) {
fileEntries, _ := os.ReadDir("/proc/self/fd")
for _, e := range fileEntries {
link, _ := os.Readlink(filepath.Join("/proc/self/fd", e.Name()))
fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
for _, i := range fileInfos {
link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
if link == name {
fd, err := strconv.Atoi(e.Name())
fd, err := strconv.Atoi(i.Name())
if err == nil {
unix.CloseOnExec(fd)
}
@@ -1369,7 +1370,7 @@ func (devices *DeviceSet) ResizePool(size int64) error {
}
func (devices *DeviceSet) loadTransactionMetaData() error {
jsonData, err := os.ReadFile(devices.transactionMetaFile())
jsonData, err := ioutil.ReadFile(devices.transactionMetaFile())
if err != nil {
// There is no active transaction. This will be the case
// during upgrade.
@@ -1450,7 +1451,7 @@ func (devices *DeviceSet) processPendingTransaction() error {
}
func (devices *DeviceSet) loadDeviceSetMetaData() error {
jsonData, err := os.ReadFile(devices.deviceSetMetaFile())
jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile())
if err != nil {
// For backward compatibility return success if file does
// not exist.
@@ -2257,7 +2258,7 @@ func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
}
func (devices *DeviceSet) unmountAndDeactivateAll(dir string) {
files, err := os.ReadDir(dir)
files, err := ioutil.ReadDir(dir)
if err != nil {
logrus.Warnf("devmapper: unmountAndDeactivate: %s", err)
return

View File

@@ -1,10 +1,10 @@
//go:build linux && cgo
// +build linux,cgo
package devmapper
import (
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
@@ -227,7 +227,7 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) {
// Create an "id" file with the container/image id in it to help reconstruct this in case
// of later problems
if err := os.WriteFile(idFile, []byte(id), 0600); err != nil {
if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
d.ctr.Decrement(mp)
d.DeviceSet.UnmountDevice(id, mp)
return "", err


@@ -1,6 +1,3 @@
//go:build linux && cgo
// +build linux,cgo
package devmapper
import jsoniter "github.com/json-iterator/go"


@@ -48,7 +48,7 @@ type CreateOpts struct {
ignoreChownErrors bool
}
// MountOpts contains optional arguments for Driver.Get() methods.
// MountOpts contains optional arguments for LayerStope.Mount() methods.
type MountOpts struct {
// Mount label is the MAC Labels to assign to mount point (SELINUX)
MountLabel string


@@ -7,7 +7,6 @@ import (
"path/filepath"
"github.com/containers/storage/pkg/mount"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
@@ -128,14 +127,9 @@ var (
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
var buf unix.Statfs_t
path := filepath.Dir(rootpath)
if err := unix.Statfs(path, &buf); err != nil {
if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
return 0, err
}
if _, ok := FsNames[FsMagic(buf.Type)]; !ok {
logrus.Debugf("Unknown filesystem type %#x reported for %s", buf.Type, path)
}
return FsMagic(buf.Type), nil
}


@@ -10,7 +10,7 @@ import (
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/sirupsen/logrus"
)
@@ -33,11 +33,10 @@ type NaiveDiffDriver struct {
// NewNaiveDiffDriver returns a fully functional driver that wraps the
// given ProtoDriver and adds the capability of the following methods which
// it may or may not support on its own:
//
// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
// Diff(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) (io.ReadCloser, error)
// Changes(id string, idMappings *idtools.IDMappings, parent string, parentMappings *idtools.IDMappings, mountLabel string) ([]archive.Change, error)
// ApplyDiff(id, parent string, options ApplyDiffOpts) (size int64, err error)
// DiffSize(id string, idMappings *idtools.IDMappings, parent, parentMappings *idtools.IDMappings, mountLabel string) (size int64, err error)
func NewNaiveDiffDriver(driver ProtoDriver, updater LayerIDMapUpdater) Driver {
return &NaiveDiffDriver{ProtoDriver: driver, LayerIDMapUpdater: updater}
}
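
Several hunks like the one above change only comment layout: gofmt in Go 1.19 reflows doc comments, indenting list-like lines so godoc renders them verbatim, while the older vendored release keeps the flat pre-1.19 layout. An illustrative side-by-side:

```go
package main

// NewThing does X. Flat pre-Go 1.19 doc-comment layout, as kept on this branch:
//
// Diff(id string) (io.ReadCloser, error)
// Changes(id string) ([]Change, error)
func NewThing() {}

// NewThingFormatted shows the same comment after Go 1.19 gofmt,
// which indents the list so godoc renders it as a code block:
//
//	Diff(id string) (io.ReadCloser, error)
//	Changes(id string) ([]Change, error)
func NewThingFormatted() {}

func main() { NewThing(); NewThingFormatted() }
```
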
@@ -110,7 +109,7 @@ func (gdw *NaiveDiffDriver) Diff(id string, idMappings *idtools.IDMappings, pare
// are extracted from tar's with full second precision on modified time.
// We need this hack here to make sure calls within same second receive
// correct result.
time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
return err
}), nil
}
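
`time.Until(t)` is defined as `t.Sub(time.Now())`, so this hunk is purely stylistic; the sleep targets the next whole-second boundary because tar stores modified times with only second precision. A runnable sketch:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	// Round down to the current second, then add one: the next boundary.
	boundary := start.Truncate(time.Second).Add(time.Second)

	// Equivalent expressions; time.Until is just the readable shorthand.
	fmt.Println(time.Until(boundary))
	fmt.Println(boundary.Sub(time.Now()))

	time.Sleep(time.Until(boundary)) // wakes just past the second boundary
}
```
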
@@ -180,7 +179,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts)
}
tarOptions := &archive.TarOptions{
InUserNS: unshare.IsRootless(),
InUserNS: userns.RunningInUserNS(),
IgnoreChownErrors: options.IgnoreChownErrors,
ForceMask: forceMask,
}
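
The swap from `unshare.IsRootless()` back to `userns.RunningInUserNS()` is another versioning artifact. Broadly, the former (from containers/storage/pkg/unshare) also treats a non-root euid as rootless, while the latter (from runc) only asks whether the process sits inside a user namespace, typically by inspecting /proc/self/uid_map. A simplified sketch of that heuristic (not the library code):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// inUserNS: in the initial user namespace, /proc/self/uid_map is exactly
// "0 0 4294967295"; any other mapping means we are inside a user namespace.
func inUserNS() bool {
	data, err := os.ReadFile("/proc/self/uid_map")
	if err != nil {
		return false // be conservative if the file is unreadable
	}
	f := strings.Fields(string(data))
	return !(len(f) == 3 && f[0] == "0" && f[1] == "0" && f[2] == "4294967295")
}

func main() {
	fmt.Println("in user namespace:", inUserNS())
}
```
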


@@ -6,6 +6,7 @@ package overlay
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -26,7 +27,7 @@ import (
// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR.
// When these exist naive diff should be used.
func doesSupportNativeDiff(d, mountOpts string) error {
td, err := os.MkdirTemp(d, "opaque-bug-check")
td, err := ioutil.TempDir(d, "opaque-bug-check")
if err != nil {
return err
}
@@ -81,7 +82,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
}()
// Touch file in d to force copy up of opaque directory "d" from "l2" to "l3"
if err := os.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil {
return fmt.Errorf("failed to write to merged directory: %w", err)
}
@@ -120,7 +121,7 @@ func doesSupportNativeDiff(d, mountOpts string) error {
// copying up a file from a lower layer unless/until its contents are being
// modified
func doesMetacopy(d, mountOpts string) (bool, error) {
td, err := os.MkdirTemp(d, "metacopy-check")
td, err := ioutil.TempDir(d, "metacopy-check")
if err != nil {
return false, err
}
@@ -157,7 +158,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
if errors.Is(err, unix.EINVAL) {
logrus.Infof("overlay: metacopy option not supported on this kernel, checked using options %q", mountOpts)
logrus.Info("metacopy option not supported on this kernel", mountOpts)
return false, nil
}
return false, fmt.Errorf("failed to mount overlay for metacopy check with %q options: %w", mountOpts, err)
@@ -185,7 +186,7 @@ func doesMetacopy(d, mountOpts string) (bool, error) {
// doesVolatile checks if the filesystem supports the "volatile" mount option
func doesVolatile(d string) (bool, error) {
td, err := os.MkdirTemp(d, "volatile-check")
td, err := ioutil.TempDir(d, "volatile-check")
if err != nil {
return false, err
}
@@ -223,7 +224,7 @@ func doesVolatile(d string) (bool, error) {
// supportsIdmappedLowerLayers checks if the kernel supports mounting overlay on top of
// a idmapped lower layer.
func supportsIdmappedLowerLayers(home string) (bool, error) {
layerDir, err := os.MkdirTemp(home, "compat")
layerDir, err := ioutil.TempDir(home, "compat")
if err != nil {
return false, err
}
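
These probes (`doesSupportNativeDiff`, `doesMetacopy`, `doesVolatile`, `supportsIdmappedLowerLayers`) share one pattern: build a throwaway lower/upper/work layout under the driver home, attempt an overlay mount with the feature in question, and treat EINVAL as "kernel does not support it". A stripped-down sketch of that pattern (hypothetical helper; the real checks also populate the layers and apply SELinux labels, and mounting needs CAP_SYS_ADMIN):

```go
//go:build linux

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

// probeOverlayOption trial-mounts overlay with one extra option and reports
// whether the kernel accepts it.
func probeOverlayOption(home, option string) (bool, error) {
	td, err := os.MkdirTemp(home, "overlay-probe")
	if err != nil {
		return false, err
	}
	defer os.RemoveAll(td)

	for _, d := range []string{"lower", "upper", "work", "merged"} {
		if err := os.Mkdir(filepath.Join(td, d), 0o700); err != nil {
			return false, err
		}
	}
	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,%s",
		filepath.Join(td, "lower"), filepath.Join(td, "upper"),
		filepath.Join(td, "work"), option)

	err = unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts)
	if errors.Is(err, unix.EINVAL) {
		return false, nil // kernel rejected the option: unsupported
	}
	if err != nil {
		return false, err // some other failure; not a clean answer
	}
	defer unix.Unmount(filepath.Join(td, "merged"), 0)
	return true, nil
}

func main() {
	ok, err := probeOverlayOption(os.TempDir(), "volatile")
	fmt.Println("volatile supported:", ok, err)
}
```
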


@@ -1,17 +1,14 @@
//go:build linux
// +build linux
// +build go1.16
package overlay
import (
"errors"
"io/fs"
"path/filepath"
"strings"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/system"
"golang.org/x/sys/unix"
)
func scanForMountProgramIndicators(home string) (detected bool, err error) {
@@ -29,7 +26,7 @@ func scanForMountProgramIndicators(home string) (detected bool, err error) {
}
if d.IsDir() {
xattrs, err := system.Llistxattr(path)
if err != nil && !errors.Is(err, unix.EOPNOTSUPP) {
if err != nil {
return err
}
for _, xattr := range xattrs {


@@ -5,6 +5,7 @@ package overlay
import (
"fmt"
"io/ioutil"
"os"
"syscall"
"unsafe"
@@ -20,6 +21,17 @@ type attr struct {
userNs uint64
}
const (
// _MOUNT_ATTR_IDMAP - Idmap mount to @userns_fd in struct mount_attr
_MOUNT_ATTR_IDMAP = 0x00100000 //nolint:golint
// _OPEN_TREE_CLONE - Clone the source path mount
_OPEN_TREE_CLONE = 0x00000001 //nolint:golint
// _MOVE_MOUNT_F_EMPTY_PATH - Move the path referenced by the fd
_MOVE_MOUNT_F_EMPTY_PATH = 0x00000004 //nolint:golint
)
// openTree is a wrapper for the open_tree syscall
func openTree(path string, flags int) (fd int, err error) {
var _p0 *byte
@@ -49,7 +61,7 @@ func moveMount(fdTree int, target string) (err error) {
return err
}
flags := unix.MOVE_MOUNT_F_EMPTY_PATH
flags := _MOVE_MOUNT_F_EMPTY_PATH
_, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT),
uintptr(fdTree), uintptr(unsafe.Pointer(_p1)),
@@ -86,14 +98,14 @@ func createIDMappedMount(source, target string, pid int) error {
}
var attr attr
attr.attrSet = unix.MOUNT_ATTR_IDMAP
attr.attrSet = _MOUNT_ATTR_IDMAP
attr.attrClr = 0
attr.propagation = 0
attr.userNs = uint64(userNsFile.Fd())
defer userNsFile.Close()
targetDirFd, err := openTree(source, unix.OPEN_TREE_CLONE)
targetDirFd, err := openTree(source, _OPEN_TREE_CLONE|unix.AT_RECURSIVE)
if err != nil {
return err
}
@@ -132,7 +144,7 @@ func createUsernsProcess(uidMaps []idtools.IDMap, gidMaps []idtools.IDMap) (int,
for _, m := range idmap {
mappings = mappings + fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
}
return os.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
return ioutil.WriteFile(fmt.Sprintf("/proc/%d/%s", pid, fname), []byte(mappings), 0600)
}
if err := writeMappings("uid_map", uidMaps); err != nil {
cleanupFunc()
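
For context, this file implements id-mapped mounts: clone the source with open_tree(), attach a user namespace via mount_setattr(MOUNT_ATTR_IDMAP), and move the clone into place with move_mount(). The hardcoded `_MOUNT_ATTR_IDMAP`-style constants in the older code presumably predate their addition to x/sys/unix. The mapping files written by `writeMappings` use the kernel's "containerID hostID size" triple syntax; a self-contained sketch of just that formatting (field names assumed to mirror idtools.IDMap):

```go
package main

import "fmt"

// IDMap mirrors the shape of idtools.IDMap used above (assumed field names).
type IDMap struct {
	ContainerID int
	HostID      int
	Size        int
}

// formatMappings renders mappings in the syntax /proc/<pid>/uid_map and
// /proc/<pid>/gid_map expect: one "containerID hostID size" triple per line.
func formatMappings(idmap []IDMap) string {
	s := ""
	for _, m := range idmap {
		s += fmt.Sprintf("%d %d %d\n", m.ContainerID, m.HostID, m.Size)
	}
	return s
}

func main() {
	// Hypothetical rootless layout: container uids 0-65535 backed by
	// host uids 100000-165535.
	fmt.Print(formatMappings([]IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}))
}
```
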


@@ -1,6 +1,3 @@
//go:build linux
// +build linux
package overlay
import jsoniter "github.com/json-iterator/go"


@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
@@ -33,6 +34,7 @@ import (
units "github.com/docker/go-units"
"github.com/hashicorp/go-multierror"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/opencontainers/selinux/go-selinux"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
@@ -345,7 +347,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
logrus.Warnf("Network file system detected as backing store. Enforcing overlay option `force_mask=\"%o\"`. Add it to storage.conf to silence this warning", m)
}
if err := os.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte("true"), 0600); err != nil {
return nil, err
}
} else {
@@ -578,11 +580,11 @@ func cachedFeatureSet(feature string, set bool) string {
}
func cachedFeatureCheck(runhome, feature string) (supported bool, text string, err error) {
content, err := os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
content, err := ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, true)))
if err == nil {
return true, string(content), nil
}
content, err = os.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
content, err = ioutil.ReadFile(filepath.Join(runhome, cachedFeatureSet(feature, false)))
if err == nil {
return false, string(content), nil
}
@@ -606,7 +608,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
}
var contents string
flagContent, err := os.ReadFile(getMountProgramFlagFile(home))
flagContent, err := ioutil.ReadFile(getMountProgramFlagFile(home))
if err == nil {
contents = strings.TrimSpace(string(flagContent))
}
@@ -619,7 +621,7 @@ func SupportsNativeOverlay(home, runhome string) (bool, error) {
if err != nil && !os.IsNotExist(err) {
return false, err
}
if err := os.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
if err := ioutil.WriteFile(getMountProgramFlagFile(home), []byte(fmt.Sprintf("%t", needsMountProgram)), 0600); err != nil && !os.IsNotExist(err) {
return false, err
}
if needsMountProgram {
@@ -655,7 +657,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
logLevel = logrus.DebugLevel
}
layerDir, err := os.MkdirTemp(home, "compat")
layerDir, err := ioutil.TempDir(home, "compat")
if err != nil {
patherr, ok := err.(*os.PathError)
if ok && patherr.Err == syscall.ENOSPC {
@@ -714,7 +716,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
logrus.Debugf("overlay: test mount with multiple lowers succeeded")
return supportsDType, nil
}
logrus.Debugf("overlay: test mount with multiple lowers failed: %v", err)
logrus.Debugf("overlay: test mount with multiple lowers failed %v", err)
}
flags = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lower1Dir, upperDir, workDir)
if selinux.GetEnabled() {
@@ -726,7 +728,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
logrus.StandardLogger().Logf(logLevel, "overlay: test mount with multiple lowers failed, but succeeded with a single lower")
return supportsDType, fmt.Errorf("kernel too old to provide multiple lowers feature for overlay: %w", graphdriver.ErrNotSupported)
}
logrus.Debugf("overlay: test mount with a single lower failed: %v", err)
logrus.Debugf("overlay: test mount with a single lower failed %v", err)
}
logrus.StandardLogger().Logf(logLevel, "'overlay' is not supported over %s at %q", backingFs, home)
return supportsDType, fmt.Errorf("'overlay' is not supported over %s at %q: %w", backingFs, home, graphdriver.ErrIncompatibleFS)
@@ -1007,7 +1009,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
}
// Write link id to link file
if err := os.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
return err
}
@@ -1028,7 +1030,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable
return err
}
if lower != "" {
if err := os.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
return err
}
}
@@ -1071,7 +1073,7 @@ func (d *Driver) getLower(parent string) (string, error) {
}
// Read Parent link fileA
parentLink, err := os.ReadFile(path.Join(parentDir, "link"))
parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
if err != nil {
if !os.IsNotExist(err) {
return "", err
@@ -1080,14 +1082,14 @@ func (d *Driver) getLower(parent string) (string, error) {
if err := d.recreateSymlinks(); err != nil {
return "", fmt.Errorf("recreating the links: %w", err)
}
parentLink, err = os.ReadFile(path.Join(parentDir, "link"))
parentLink, err = ioutil.ReadFile(path.Join(parentDir, "link"))
if err != nil {
return "", err
}
}
lowers := []string{path.Join(linkDir, string(parentLink))}
parentLower, err := os.ReadFile(path.Join(parentDir, lowerFile))
parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
if err == nil {
parentLowers := strings.Split(string(parentLower), ":")
lowers = append(lowers, parentLowers...)
@@ -1116,7 +1118,7 @@ func (d *Driver) dir2(id string) (string, bool) {
func (d *Driver) getLowerDirs(id string) ([]string, error) {
var lowersArray []string
lowers, err := os.ReadFile(path.Join(d.dir(id), lowerFile))
lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
if err == nil {
for _, s := range strings.Split(string(lowers), ":") {
lower := d.dir(s)
@@ -1185,7 +1187,7 @@ func (d *Driver) optsAppendMappings(opts string, uidMaps, gidMaps []idtools.IDMa
// Remove cleans the directories that are created for this id.
func (d *Driver) Remove(id string) error {
dir := d.dir(id)
lid, err := os.ReadFile(path.Join(dir, "link"))
lid, err := ioutil.ReadFile(path.Join(dir, "link"))
if err == nil {
if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
logrus.Debugf("Failed to remove link: %v", err)
@@ -1208,7 +1210,7 @@ func (d *Driver) recreateSymlinks() error {
const maxIterations = 10
// List all the directories under the home directory
dirs, err := os.ReadDir(d.home)
dirs, err := ioutil.ReadDir(d.home)
if err != nil {
return fmt.Errorf("reading driver home directory %q: %w", d.home, err)
}
@@ -1227,11 +1229,11 @@ func (d *Driver) recreateSymlinks() error {
// the layer's "link" file that points to the layer's "diff" directory.
for _, dir := range dirs {
// Skip over the linkDir and anything that is not a directory
if dir.Name() == linkDir || !dir.IsDir() {
if dir.Name() == linkDir || !dir.Mode().IsDir() {
continue
}
// Read the "link" file under each layer to get the name of the symlink
data, err := os.ReadFile(path.Join(d.dir(dir.Name()), "link"))
data, err := ioutil.ReadFile(path.Join(d.dir(dir.Name()), "link"))
if err != nil {
errs = multierror.Append(errs, fmt.Errorf("reading name of symlink for %q: %w", dir.Name(), err))
continue
@@ -1256,7 +1258,7 @@ func (d *Driver) recreateSymlinks() error {
linkDirFullPath := filepath.Join(d.home, "l")
// Now check if we somehow lost a "link" file, by making sure
// that each symlink we have corresponds to one.
links, err := os.ReadDir(linkDirFullPath)
links, err := ioutil.ReadDir(linkDirFullPath)
if err != nil {
errs = multierror.Append(errs, err)
continue
@@ -1286,11 +1288,11 @@ func (d *Driver) recreateSymlinks() error {
// it has the basename of our symlink in it.
targetID := targetComponents[1]
linkFile := filepath.Join(d.dir(targetID), "link")
data, err := os.ReadFile(linkFile)
data, err := ioutil.ReadFile(linkFile)
if err != nil || string(data) != link.Name() {
// NOTE: If two or more links point to the same target, we will update linkFile
// with every value of link.Name(), and set madeProgress = true every time.
if err := os.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
if err := ioutil.WriteFile(linkFile, []byte(link.Name()), 0644); err != nil {
errs = multierror.Append(errs, fmt.Errorf("correcting link for layer %s: %w", targetID, err))
continue
}
@@ -1310,7 +1312,7 @@ func (d *Driver) recreateSymlinks() error {
}
// Get creates and mounts the required file system for the given id and returns the mount path.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {
return d.get(id, false, options)
}
@@ -1344,16 +1346,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
if !d.usingMetacopy {
if hasMetacopyOption(optsList) {
if d.options.mountProgram == "" {
release := ""
var uts unix.Utsname
if err := unix.Uname(&uts); err == nil {
release = " " + string(uts.Release[:]) + " " + string(uts.Version[:])
}
logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel"+release)
} else {
logrus.Debugf("Ignoring global metacopy option, the mount program doesn't support it")
}
logrus.StandardLogger().Logf(logLevel, "Ignoring global metacopy option, not supported with booted kernel")
}
optsList = stripOption(optsList, "metacopy=on")
}
@@ -1365,7 +1358,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
}
}
lowers, err := os.ReadFile(path.Join(dir, lowerFile))
lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
if err != nil && !os.IsNotExist(err) {
return "", err
}
@@ -1381,7 +1374,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
// Check if $link/../diff{1-*} exist. If they do, add them, in order, as the front of the lowers
// lists that we're building. "diff" itself is the upper, so it won't be in the lists.
link, err := os.ReadFile(path.Join(dir, "link"))
link, err := ioutil.ReadFile(path.Join(dir, "link"))
if err != nil {
if !os.IsNotExist(err) {
return "", err
@@ -1390,7 +1383,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
if err := d.recreateSymlinks(); err != nil {
return "", fmt.Errorf("recreating the links: %w", err)
}
link, err = os.ReadFile(path.Join(dir, "link"))
link, err = ioutil.ReadFile(path.Join(dir, "link"))
if err != nil {
return "", err
}
@@ -1615,7 +1608,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO
diffDir := path.Join(id, "diff")
opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", strings.Join(relLowers, ":"), diffDir, workdir)
} else {
opts = fmt.Sprintf("lowerdir=%s:%s", diffDir, strings.Join(relLowers, ":"))
opts = fmt.Sprintf("lowerdir=%s", strings.Join(relLowers, ":"))
}
if len(optsList) > 0 {
opts = fmt.Sprintf("%s,%s", opts, strings.Join(optsList, ","))
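
This hunk carries a real functional difference, not just a style one: for read-only mounts the newer code prepends the layer's own diff directory to lowerdir, while the older vendored code mounts only the inherited lower chain. The option string itself is assembled the same way on both sides; a sketch of that assembly:

```go
package main

import (
	"fmt"
	"strings"
)

// buildOverlayOpts assembles overlayfs mount options: writable mounts need
// lowerdir+upperdir+workdir, read-only mounts use lowerdir alone.
func buildOverlayOpts(lowers []string, upperDir, workDir string, extra []string) string {
	var opts string
	if upperDir != "" {
		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s",
			strings.Join(lowers, ":"), upperDir, workDir)
	} else {
		opts = fmt.Sprintf("lowerdir=%s", strings.Join(lowers, ":"))
	}
	if len(extra) > 0 {
		opts = fmt.Sprintf("%s,%s", opts, strings.Join(extra, ","))
	}
	return opts
}

func main() {
	fmt.Println(buildOverlayOpts([]string{"l/AAA", "l/BBB"}, "42/diff", "42/work", []string{"volatile"}))
	fmt.Println(buildOverlayOpts([]string{"l/AAA", "l/BBB"}, "", "", nil)) // read-only form
}
```
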
@@ -1656,7 +1649,7 @@ func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
if _, err := os.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) {
return err
}
@@ -1665,7 +1658,7 @@ func (d *Driver) Put(id string) error {
mappedRoot := filepath.Join(d.home, id, "mapped")
// It should not happen, but cleanup any mapped mount if it was leaked.
if _, err := os.Stat(mappedRoot); err == nil {
mounts, err := os.ReadDir(mappedRoot)
mounts, err := ioutil.ReadDir(mappedRoot)
if err == nil {
// Go through all of the mapped mounts.
for _, m := range mounts {
@@ -1813,7 +1806,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
if err != nil && !os.IsExist(err) {
return graphdriver.DriverWithDifferOutput{}, err
}
applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
applyDir, err = ioutil.TempDir(d.getStagingDir(), "")
if err != nil {
return graphdriver.DriverWithDifferOutput{}, err
}
@@ -1833,7 +1826,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
GIDMaps: idMappings.GIDs(),
IgnoreChownErrors: d.options.ignoreChownErrors,
WhiteoutFormat: d.getWhiteoutFormat(),
InUserNS: unshare.IsRootless(),
InUserNS: userns.RunningInUserNS(),
})
out.Target = applyDir
return out, err
@@ -1891,7 +1884,7 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts)
IgnoreChownErrors: d.options.ignoreChownErrors,
ForceMask: d.options.forceMask,
WhiteoutFormat: d.getWhiteoutFormat(),
InUserNS: unshare.IsRootless(),
InUserNS: userns.RunningInUserNS(),
}); err != nil {
return 0, err
}
@@ -2174,7 +2167,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
}
// tell the additional layer store that we use this layer.
// mark this layer as "additional layer"
if err := os.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
if err := ioutil.WriteFile(path.Join(dir, "additionallayer"), []byte(al.path), 0644); err != nil {
return err
}
notifyUseAdditionalLayer(al.path)
@@ -2182,7 +2175,7 @@ func (al *additionalLayer) CreateAs(id, parent string) error {
}
func (d *Driver) getAdditionalLayerPathByID(id string) (string, error) {
al, err := os.ReadFile(path.Join(d.dir(id), "additionallayer"))
al, err := ioutil.ReadFile(path.Join(d.dir(id), "additionallayer"))
if err != nil {
return "", err
}


@@ -52,6 +52,7 @@ struct fsxattr {
import "C"
import (
"fmt"
"io/ioutil"
"math"
"os"
"path"
@@ -122,9 +123,11 @@ func generateUniqueProjectID(path string) (uint32, error) {
// This is a way to prevent xfs_quota management from conflicting with
// containers/storage.
//
// Then try to create a test directory with the next project id and set a quota
// on it. If that works, continue to scan existing containers to map allocated
// project ids.
//
func NewControl(basePath string) (*Control, error) {
//
// Get project id of parent dir as minimal id to be used by driver
@@ -333,7 +336,7 @@ func setProjectID(targetPath string, projectID uint32) error {
// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID(home string) error {
files, err := os.ReadDir(home)
files, err := ioutil.ReadDir(home)
if err != nil {
return fmt.Errorf("read directory failed : %s", home)
}


@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -23,7 +24,7 @@ import (
"github.com/Microsoft/go-winio"
"github.com/Microsoft/go-winio/backuptar"
"github.com/Microsoft/hcsshim"
graphdriver "github.com/containers/storage/drivers"
"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/idtools"
@@ -474,7 +475,7 @@ func (d *Driver) Put(id string) error {
// We use this opportunity to cleanup any -removing folders which may be
// still left if the daemon was killed while it was removing a layer.
func (d *Driver) Cleanup() error {
items, err := os.ReadDir(d.info.HomeDir)
items, err := ioutil.ReadDir(d.info.HomeDir)
if err != nil {
if os.IsNotExist(err) {
return nil
@@ -869,7 +870,7 @@ func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ..
// resolveID computes the layerID information based on the given id.
func (d *Driver) resolveID(id string) (string, error) {
content, err := os.ReadFile(filepath.Join(d.dir(id), "layerID"))
content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
if os.IsNotExist(err) {
return id, nil
} else if err != nil {
@@ -880,13 +881,13 @@ func (d *Driver) resolveID(id string) (string, error) {
// setID stores the layerId in disk.
func (d *Driver) setID(id, altID string) error {
return os.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
}
// getLayerChain returns the layer chain information.
func (d *Driver) getLayerChain(id string) ([]string, error) {
jPath := filepath.Join(d.dir(id), "layerchain.json")
content, err := os.ReadFile(jPath)
content, err := ioutil.ReadFile(jPath)
if os.IsNotExist(err) {
return nil, nil
} else if err != nil {
@@ -910,7 +911,7 @@ func (d *Driver) setLayerChain(id string, chain []string) error {
}
jPath := filepath.Join(d.dir(id), "layerchain.json")
err = os.WriteFile(jPath, content, 0600)
err = ioutil.WriteFile(jPath, content, 0600)
if err != nil {
return fmt.Errorf("unable to write layerchain file - %s", err)
}


@@ -18,7 +18,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
zfs "github.com/mistifyio/go-zfs/v3"
"github.com/mistifyio/go-zfs"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"


@@ -3,6 +3,7 @@ package storage
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -241,8 +242,8 @@ func (i *Image) recomputeDigests() error {
if !bigDataNameIsManifest(name) {
continue
}
if err := digest.Validate(); err != nil {
return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, err)
if digest.Validate() != nil {
return fmt.Errorf("validating digest %q for big data item %q: %w", string(digest), name, digest.Validate())
}
// Deduplicate the digest values.
if _, known := digests[digest]; !known {
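
Both sides validate manifest digests through go-digest; the newer code simply calls Validate once and reuses the error instead of calling it twice. For reference, a small validation example using the library's public API:

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// sha256 of the empty string, in the "<algorithm>:<hex>" form. Validate
	// checks for a registered algorithm and well-formed hex of the right length.
	d := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	if err := d.Validate(); err != nil {
		fmt.Println("invalid digest:", err)
		return
	}
	fmt.Println("algorithm:", d.Algorithm(), "hex prefix:", d.Encoded()[:8])
}
```
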
@@ -260,7 +261,7 @@ func (i *Image) recomputeDigests() error {
func (r *imageStore) Load() error {
shouldSave := false
rpath := r.imagespath()
data, err := os.ReadFile(rpath)
data, err := ioutil.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -635,7 +636,7 @@ func (r *imageStore) BigData(id, key string) ([]byte, error) {
if !ok {
return nil, fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
}
return os.ReadFile(r.datapath(image.ID, key))
return ioutil.ReadFile(r.datapath(image.ID, key))
}
func (r *imageStore) BigDataSize(id, key string) (int64, error) {


@@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@@ -343,15 +344,7 @@ func (r *layerStore) layerspath() string {
func (r *layerStore) Load() error {
shouldSave := false
rpath := r.layerspath()
info, err := os.Stat(rpath)
if err != nil {
if !os.IsNotExist(err) {
return err
}
} else {
r.layerspathModified = info.ModTime()
}
data, err := os.ReadFile(rpath)
data, err := ioutil.ReadFile(rpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -442,7 +435,7 @@ func (r *layerStore) LoadLocked() error {
func (r *layerStore) loadMounts() error {
mounts := make(map[string]*Layer)
mpath := r.mountspath()
data, err := os.ReadFile(mpath)
data, err := ioutil.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
return err
}
@@ -563,8 +556,6 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
uidMap: copyIDMap(s.uidMap),
gidMap: copyIDMap(s.gidMap),
}
rlstore.Lock()
defer rlstore.Unlock()
if err := rlstore.Load(); err != nil {
return nil, err
}
@@ -586,8 +577,6 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
}
rlstore.RLock()
defer rlstore.Unlock()
if err := rlstore.Load(); err != nil {
return nil, err
}
@@ -757,7 +746,7 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
templateCompressionType = templateLayer.CompressionType
templateUIDs, templateGIDs = append([]uint32{}, templateLayer.UIDs...), append([]uint32{}, templateLayer.GIDs...)
templateTSdata, tserr = os.ReadFile(r.tspath(templateLayer.ID))
templateTSdata, tserr = ioutil.ReadFile(r.tspath(templateLayer.ID))
if tserr != nil && !os.IsNotExist(tserr) {
return nil, -1, tserr
}
@@ -1392,9 +1381,6 @@ func (r *layerStore) Wipe() error {
for id := range r.byid {
ids = append(ids, id)
}
sort.Slice(ids, func(i, j int) bool {
return r.byid[ids[i]].Created.After(r.byid[ids[j]].Created)
})
for _, id := range ids {
if err := r.Delete(id); err != nil {
return err
@@ -1674,7 +1660,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
if compressedDigester != nil {
compressedWriter = compressedDigester.Hash()
} else {
compressedWriter = io.Discard
compressedWriter = ioutil.Discard
}
compressedCounter := ioutils.NewWriteCounter(compressedWriter)
defragmented = io.TeeReader(defragmented, compressedCounter)
@@ -1938,6 +1924,7 @@ func (r *layerStore) Modified() (bool, error) {
}
if info != nil {
tmodified = info.ModTime() != r.layerspathModified
r.layerspathModified = info.ModTime()
}
return tmodified, nil


@@ -9,6 +9,7 @@ import (
"fmt"
"io"
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -24,6 +25,7 @@ import (
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
gzip "github.com/klauspost/pgzip"
"github.com/opencontainers/runc/libcontainer/userns"
"github.com/sirupsen/logrus"
"github.com/ulikunitz/xz"
)
@@ -75,7 +77,6 @@ const (
solaris = "solaris"
windows = "windows"
darwin = "darwin"
freebsd = "freebsd"
)
var xattrsToIgnore = map[string]interface{}{
@@ -483,7 +484,7 @@ func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *
}
// canonicalTarName provides a platform-independent and consistent posix-style
// path for files and directories to be archived regardless of the platform.
//path for files and directories to be archived regardless of the platform.
func canonicalTarName(name string, isDir bool) (string, error) {
name, err := CanonicalTarNameForPath(name)
if err != nil {
@@ -672,7 +673,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
if !strings.HasPrefix(targetPath, extractDir) {
return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname))
}
if err := handleLLink(targetPath, path); err != nil {
if err := os.Link(targetPath, path); err != nil {
return err
}
@@ -1106,9 +1107,7 @@ loop:
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
//
// identity (uncompressed), gzip, bzip2, xz.
//
// identity (uncompressed), gzip, bzip2, xz.
// FIXME: specify behavior when target path exists vs. doesn't exist.
func Untar(tarArchive io.Reader, dest string, options *TarOptions) error {
return untarHandler(tarArchive, dest, options, true)
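
The doc-comment hunks around Untar list the accepted algorithms: identity (uncompressed), gzip, bzip2, and xz. The stream type is detected from leading magic bytes before a decompressor is chosen; a simplified sketch of that sniffing (the real DecompressStream also handles zstd and buffered readers):

```go
package main

import (
	"bytes"
	"fmt"
)

// detectCompression matches the leading magic bytes of a stream.
func detectCompression(source []byte) string {
	for name, magic := range map[string][]byte{
		"bzip2": {0x42, 0x5A, 0x68},                   // "BZh"
		"gzip":  {0x1F, 0x8B, 0x08},                   // gzip with deflate method
		"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // "\xFD7zXZ\x00"
	} {
		if bytes.HasPrefix(source, magic) {
			return name
		}
	}
	return "identity" // no magic matched: treat as uncompressed
}

func main() {
	fmt.Println(detectCompression([]byte{0x1F, 0x8B, 0x08, 0x00})) // gzip
}
```
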
@@ -1160,7 +1159,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
GIDMaps: tarMappings.GIDs(),
Compression: Uncompressed,
CopyPass: true,
InUserNS: unshare.IsRootless(),
InUserNS: userns.RunningInUserNS(),
}
archive, err := TarWithOptions(src, options)
if err != nil {
@@ -1175,7 +1174,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
InUserNS: unshare.IsRootless(),
InUserNS: userns.RunningInUserNS(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1195,7 +1194,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
InUserNS: unshare.IsRootless(),
InUserNS: userns.RunningInUserNS(),
}
return archiver.Untar(archive, dst, options)
}
@@ -1295,7 +1294,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
UIDMaps: archiver.UntarIDMappings.UIDs(),
GIDMaps: archiver.UntarIDMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
InUserNS: unshare.IsRootless(),
InUserNS: userns.RunningInUserNS(),
NoOverwriteDirNonDir: true,
}
err = archiver.Untar(r, filepath.Dir(dst), options)
@@ -1349,7 +1348,7 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id
// of that file as an archive. The archive can only be read once - as soon as reading completes,
// the file will be deleted.
func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) {
f, err := os.CreateTemp(dir, "")
f, err := ioutil.TempFile(dir, "")
if err != nil {
return nil, err
}
@@ -1473,7 +1472,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
}
hashWorker.Wait()
if err == nil && hashError != nil {
if err == nil {
err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
}
return err


@@ -1,4 +1,3 @@
//go:build freebsd
// +build freebsd
package archive
@@ -9,11 +8,10 @@ import (
"os"
"path/filepath"
"syscall"
"unsafe"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/system"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runc/libcontainer/userns"
"golang.org/x/sys/unix"
)
@@ -89,7 +87,7 @@ func minor(device uint64) uint64 {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
if unshare.IsRootless() {
if userns.RunningInUserNS() {
// cannot create a device if running in user namespace
return nil
}
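
Device-node handling explains the userns guard above: mknod(2) is not permitted inside a user namespace, so both versions simply skip device creation in that case. The device number passed to mknod is packed from the tar header's major/minor, which is what `system.Mkdev` wraps; the older `system.Mknod` took the packed number as an int, hence the `int(...)` cast further down in this diff. A sketch of the packing:

```go
//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Pack major/minor into the kernel's dev_t encoding and back.
	dev := unix.Mkdev(1, 3) // /dev/null is major 1, minor 3 on Linux
	fmt.Printf("dev=%#x major=%d minor=%d\n", dev, unix.Major(dev), unix.Minor(dev))
}
```
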
@@ -112,18 +110,16 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *
if forceMask != nil {
permissionsMask = *forceMask
}
p, err := unix.BytePtrFromString(path)
if err != nil {
return err
}
_, _, e1 := unix.Syscall(unix.SYS_LCHMOD, uintptr(unsafe.Pointer(p)), uintptr(permissionsMask), 0)
if e1 != 0 {
return e1
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, permissionsMask); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, permissionsMask); err != nil {
return err
}
}
return nil
}
// Hardlink without following symlinks
func handleLLink(targetPath string, path string) error {
return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0)
}


@@ -1,4 +1,3 @@
//go:build !windows && !freebsd
// +build !windows,!freebsd
package archive
@@ -98,7 +97,7 @@ func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
mode |= unix.S_IFIFO
}
return system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor))
return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *os.FileMode) error {
@@ -119,13 +118,3 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo, forceMask *
}
return nil
}
// Hardlink without symlinks
func handleLLink(targetPath, path string) error {
// Note: on Linux, the link syscall will not follow symlinks.
// This behavior is implementation-dependent since
// POSIX.1-2008 so to make it clear that we need non-symlink
// following here we use the linkat syscall which has a flags
// field to select symlink following or not.
return unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0)
}
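
The helper removed here carried its rationale with it: whether plain link(2) follows a symlink source is implementation-defined since POSIX.1-2008, so the code uses linkat(2) with flags set to zero, which never follows, to hard-link the symlink itself. A runnable demonstration:

```go
//go:build linux

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	_ = os.WriteFile("target.txt", []byte("data"), 0o644)
	_ = os.Symlink("target.txt", "sym")
	defer os.Remove("target.txt")
	defer os.Remove("sym")
	defer os.Remove("link-to-sym")

	// flags == 0: link the symlink itself; passing unix.AT_SYMLINK_FOLLOW
	// would dereference it and link the target instead.
	if err := unix.Linkat(unix.AT_FDCWD, "sym", unix.AT_FDCWD, "link-to-sym", 0); err != nil {
		fmt.Println("linkat:", err)
		return
	}
	fi, _ := os.Lstat("link-to-sym")
	fmt.Println("new entry is a symlink:", fi.Mode()&os.ModeSymlink != 0) // true
}
```
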


@@ -78,8 +78,3 @@ func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
// no notion of file ownership mapping yet on Windows
return idtools.IDPair{0, 0}, nil
}
// Hardlink without following symlinks
func handleLLink(targetPath string, path string) error {
return os.Link(targetPath, path)
}


@@ -5,6 +5,7 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -402,7 +403,7 @@ func ChangesDirs(newDir string, newMappings *idtools.IDMappings, oldDir string,
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
emptyDir, err := os.MkdirTemp("", "empty")
emptyDir, err := ioutil.TempDir("", "empty")
if err != nil {
return nil, err
}


@@ -4,6 +4,7 @@ import (
"archive/tar"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -254,7 +255,7 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, io.NopCloser(srcContent), nil
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since


@@ -5,6 +5,7 @@ import (
"fmt"
"io"
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -101,7 +102,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = os.MkdirTemp("", "storageplnk"); err != nil {
if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)


@@ -4,13 +4,14 @@ import (
stdtar "archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runc/libcontainer/userns"
)
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
@@ -30,8 +31,7 @@ func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
//
// identity (uncompressed), gzip, bzip2, xz.
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return untarHandler(tarArchive, dest, options, true, dest)
}
@@ -66,7 +66,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
}
if options == nil {
options = &archive.TarOptions{}
options.InUserNS = unshare.IsRootless()
options.InUserNS = userns.RunningInUserNS()
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
@@ -82,7 +82,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
}
}
r := io.NopCloser(tarArchive)
r := ioutil.NopCloser(tarArchive)
if decompress {
decompressedArchive, err := archive.DecompressStream(tarArchive)
if err != nil {
@@ -136,7 +136,7 @@ func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap
err = fmt.Errorf("extracting data to %q while copying: %w", dest, err)
}
hashWorker.Wait()
if err == nil && hashError != nil {
if err == nil {
err = fmt.Errorf("calculating digest of data for %q while copying: %w", dest, hashError)
}
return err

Some files were not shown because too many files have changed in this diff Show More