Mirror of https://github.com/containers/skopeo.git, synced 2026-01-30 22:08:44 +00:00

Compare commits: v1.9.0 ... release-1. (68 commits)
| SHA1 |
|---|
| 929763f010 |
| 9f83d9cabe |
| 5eace4078f |
| ee60474d5a |
| ff2a361a0a |
| 7ebff0f533 |
| 787e10873c |
| a2f29acc7d |
| ee84302b60 |
| a169ccf8f3 |
| 89ae387d7b |
| 66fe7af769 |
| feabfac2a7 |
| cf354b7abd |
| 18a95f947e |
| 07da29fd37 |
| 9b40f0be2f |
| 869d496f18 |
| 166b587a77 |
| 0a42c33af9 |
| 90c5033886 |
| d3ff6e2635 |
| 06cf25fb53 |
| 3a05dca94e |
| 7bbaffc4f4 |
| 2b948c177a |
| d9dfc44888 |
| ba23a9162f |
| 1eada90813 |
| 4b9ffac0cc |
| a81460437a |
| f36752a279 |
| 4e2dee4362 |
| d58e59a57d |
| 3450c11a0d |
| b2e7139331 |
| 3a808c2ed5 |
| 04169cac6e |
| a99bd0c9e3 |
| 97c3eabacf |
| fa2b15ff76 |
| 9e79da5e33 |
| 97cb423b52 |
| 32f24e8870 |
| a863a0dccb |
| 67a4e04471 |
| 14b05e8064 |
| e95123a2d4 |
| ca1b0f34d1 |
| 28a5365945 |
| 73a668e99d |
| 61c28f5d47 |
| eafd7e5518 |
| 2cfbeb2db8 |
| b9cf626ea3 |
| 263d3264ba |
| 63dabfcf8b |
| 2eac0f463a |
| c10b63dc71 |
| b7e7374e71 |
| 08846d18cc |
| 049163fcec |
| 3039cd5a77 |
| b42e664854 |
| ad12a292a3 |
| ee477d8877 |
| dbe47d765a |
| f1485781be |
.cirrus.yml (15 changed lines)

```diff
@@ -24,19 +24,13 @@ env:
     #### Cache-image names to test with (double-quotes around names are critical)
     ####
     FEDORA_NAME: "fedora-36"
-    PRIOR_FEDORA_NAME: "fedora-35"
     UBUNTU_NAME: "ubuntu-2204"
 
     # Google-cloud VM Images
-    IMAGE_SUFFIX: "c6340043416535040"
+    IMAGE_SUFFIX: "c5495735033528320"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
-    PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
     UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}"
 
     # Container FQIN's
     FEDORA_CONTAINER_FQIN: "quay.io/libpod/fedora_podman:${IMAGE_SUFFIX}"
-    PRIOR_FEDORA_CONTAINER_FQIN: "quay.io/libpod/prior-fedora_podman:${IMAGE_SUFFIX}"
     UBUNTU_CONTAINER_FQIN: "quay.io/libpod/ubuntu_podman:${IMAGE_SUFFIX}"
 
     # Built along with the standard PR-based workflow in c/automation_images
     SKOPEO_CIDEV_CONTAINER_FQIN: "quay.io/libpod/skopeo_cidev:${IMAGE_SUFFIX}"
@@ -221,13 +215,12 @@ meta_task:
     container: &smallcontainer
         cpu: 2
         memory: 2
-        image: quay.io/libpod/imgts:$IMAGE_SUFFIX
+        image: quay.io/libpod/imgts:latest
     env:
         # Space-separated list of images used by this repository state
-        IMGNAMES: >-
+        IMGNAMES: |
             ${FEDORA_CACHE_IMAGE_NAME}
-            ${PRIOR_FEDORA_CACHE_IMAGE_NAME}
             ${UBUNTU_CACHE_IMAGE_NAME}
+            build-push-${IMAGE_SUFFIX}
         BUILDID: "${CIRRUS_BUILD_ID}"
         REPOREF: "${CIRRUS_REPO_NAME}"
         GCPJSON: ENCRYPTED[6867b5a83e960e7c159a98fe6c8360064567a071c6f4b5e7d532283ecd870aa65c94ccd74bdaa9bf7aadac9d42e20a67]
```
.github/workflows/check_cirrus_cron.yml (112 changed lines)

```diff
@@ -1,105 +1,17 @@
 ---
 
 # See also:
 # https://github.com/containers/podman/blob/main/.github/workflows/check_cirrus_cron.yml
 
-# Format Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions
-
-# Required to un-FUBAR default ${{github.workflow}} value
-name: check_cirrus_cron
-
 on:
     # Note: This only applies to the default branch.
     schedule:
         # N/B: This should correspond to a period slightly after
         # the last job finishes running. See job defs. at:
         # https://cirrus-ci.com/settings/repository/6706677464432640
         - cron: '59 23 * * 1-5'
     # Debug: Allow triggering job manually in github-actions WebUI
     workflow_dispatch: {}
 
-permissions:
-    contents: read
-
-env:
-    # Debug-mode can reveal secrets, only enable by a secret value.
-    # Ref: https://help.github.com/en/actions/configuring-and-managing-workflows/managing-a-workflow-run#enabling-step-debug-logging
-    ACTIONS_STEP_DEBUG: '${{ secrets.ACTIONS_STEP_DEBUG }}'
-    # CSV listing of e-mail addresses for delivery failure or error notices
-    RCPTCSV: rh.container.bot@gmail.com,podman-monitor@lists.podman.io
-    # Filename for table of cron-name to build-id data
-    # (must be in $GITHUB_WORKSPACE/artifacts/)
-    NAME_ID_FILEPATH: './artifacts/name_id.txt'
-
 jobs:
-    cron_failures:
-        runs-on: ubuntu-latest
-        steps:
-            - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
-              with:
-                  persist-credentials: false
-
-            # Avoid duplicating cron_failures.sh in skopeo repo.
-            - uses: actions/checkout@629c2de402a417ea7690ca6ce3f33229e27606a5 # v2
-              with:
-                  repository: containers/podman
-                  path: '_podman'
-                  persist-credentials: false
-
-            - name: Get failed cron names and Build IDs
-              id: cron
-              run: './_podman/.github/actions/${{ github.workflow }}/${{ github.job }}.sh'
-
-            - if: steps.cron.outputs.failures > 0
-              shell: bash
-              # Must be inline, since context expressions are used.
-              # Ref: https://docs.github.com/en/free-pro-team@latest/actions/reference/context-and-expression-syntax-for-github-actions
-              run: |
-                  set -eo pipefail
-                  (
-                      echo "Detected one or more Cirrus-CI cron-triggered jobs have failed recently:"
-                      echo ""
-
-                      while read -r NAME BID; do
-                          echo "Cron build '$NAME' Failed: https://cirrus-ci.com/build/$BID"
-                      done < "$NAME_ID_FILEPATH"
-
-                      echo ""
-                      echo "# Source: ${{ github.workflow }} workflow on ${{ github.repository }}."
-                      # Separate content from sendgrid.com automatic footer.
-                      echo ""
-                      echo ""
-                  ) > ./artifacts/email_body.txt
-
-            - if: steps.cron.outputs.failures > 0
-              name: Send failure notification e-mail
-              # Ref: https://github.com/dawidd6/action-send-mail
-              uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
-              with:
-                  server_address: ${{secrets.ACTION_MAIL_SERVER}}
-                  server_port: 465
-                  username: ${{secrets.ACTION_MAIL_USERNAME}}
-                  password: ${{secrets.ACTION_MAIL_PASSWORD}}
-                  subject: Cirrus-CI cron build failures on ${{github.repository}}
-                  to: ${{env.RCPTCSV}}
-                  from: ${{secrets.ACTION_MAIL_SENDER}}
-                  body: file://./artifacts/email_body.txt
-
-            - if: always()
-              uses: actions/upload-artifact@82c141cc518b40d92cc801eee768e7aafc9c2fa2 # v2
-              with:
-                  name: ${{ github.job }}_artifacts
-                  path: artifacts/*
-
-            - if: failure()
-              name: Send error notification e-mail
-              uses: dawidd6/action-send-mail@a80d851dc950256421f1d1d735a2dc1ef314ac8f # v2.2.2
-              with:
-                  server_address: ${{secrets.ACTION_MAIL_SERVER}}
-                  server_port: 465
-                  username: ${{secrets.ACTION_MAIL_USERNAME}}
-                  password: ${{secrets.ACTION_MAIL_PASSWORD}}
-                  subject: Github workflow error on ${{github.repository}}
-                  to: ${{env.RCPTCSV}}
-                  from: ${{secrets.ACTION_MAIL_SENDER}}
-                  body: "Job failed: https://github.com/${{github.repository}}/actions/runs/${{github.run_id}}"
+    # Ref: https://docs.github.com/en/actions/using-workflows/reusing-workflows
+    call_cron_failures:
+        uses: containers/buildah/.github/workflows/check_cirrus_cron.yml@main
+        secrets: inherit
```
Makefile (19 changed lines)

```diff
@@ -28,10 +28,15 @@ ifeq ($(GOBIN),)
 GOBIN := $(GOPATH)/bin
 endif
 
-# Multiple scripts are sensitive to this value, make sure it's exported/available
-# N/B: Need to use 'command -v' here for compatibility with MacOS.
-export CONTAINER_RUNTIME ?= $(if $(shell command -v podman),podman,docker)
-GOMD2MAN ?= $(if $(shell command -v go-md2man),go-md2man,$(GOBIN)/go-md2man)
+# Scripts may also use CONTAINER_RUNTIME, so we need to export it.
+# Note possibly non-obvious aspects of this:
+# - We need to use 'command -v' here, not 'which', for compatibility with MacOS.
+# - GNU Make 4.2.1 (included in Ubuntu 20.04) incorrectly tries to avoid invoking
+#   a shell, and fails because there is no /usr/bin/command. The trailing ';' in
+#   $(shell … ;) defeats that heuristic (recommended in
+#   https://savannah.gnu.org/bugs/index.php?57625 ).
+export CONTAINER_RUNTIME ?= $(if $(shell command -v podman ;),podman,docker)
+GOMD2MAN ?= $(if $(shell command -v go-md2man ;),go-md2man,$(GOBIN)/go-md2man)
 
 # Go module support: set `-mod=vendor` to use the vendored sources.
 # See also hack/make.sh.
@@ -50,8 +55,6 @@ ifeq ($(GOOS), linux)
 endif
 endif
 
-GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-
 # If $TESTFLAGS is set, it is passed as extra arguments to 'go test'.
 # You can increase test output verbosity with the option '-test.vv'.
 # You can select certain tests to run, with `-test.run <regex>` for example:
@@ -87,7 +90,7 @@ endif
 CONTAINER_GOSRC = /src/github.com/containers/skopeo
 CONTAINER_RUN ?= $(CONTAINER_CMD) --security-opt label=disable -v $(CURDIR):$(CONTAINER_GOSRC) -w $(CONTAINER_GOSRC) $(SKOPEO_CIDEV_CONTAINER_FQIN)
 
-GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
+GIT_COMMIT := $(shell GIT_CEILING_DIRECTORIES=$$(cd ..; pwd) git rev-parse HEAD 2> /dev/null || true)
 
 EXTRA_LDFLAGS ?=
 SKOPEO_LDFLAGS := -ldflags '-X main.gitCommit=${GIT_COMMIT} $(EXTRA_LDFLAGS)'
@@ -247,7 +250,7 @@ vendor:
 	$(GO) mod verify
 
 vendor-in-container:
-	podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src quay.io/libpod/golang:1.16 $(MAKE) vendor
+	podman run --privileged --rm --env HOME=/root -v $(CURDIR):/src -w /src golang $(MAKE) vendor
 
 # CAUTION: This is not a replacement for RPMs provided by your distro.
 # Only intended to build and test the latest unreleased changes.
```
```diff
@@ -207,7 +207,7 @@ Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate
 | [skopeo-manifest-digest(1)](/docs/skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
 | [skopeo-standalone-sign(1)](/docs/skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
 | [skopeo-standalone-verify(1)](/docs/skopeo-standalone-verify.1.md)| Verify an image signature. |
-| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between container registries and local directories. |
+| [skopeo-sync(1)](/docs/skopeo-sync.1.md) | Synchronize images between registry repositories and local directories. |
 
 License
 -------
```
```diff
@@ -25,7 +25,7 @@ type copyOptions struct {
 	deprecatedTLSVerify    *deprecatedTLSVerifyOption
 	srcImage               *imageOptions
 	destImage              *imageDestOptions
-	retryOpts              *retry.RetryOptions
+	retryOpts              *retry.Options
 	additionalTags         []string // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
 	removeSignatures       bool     // Do not copy signatures from the source image
 	signByFingerprint      string   // Sign the image using a GPG key with the specified fingerprint
@@ -260,7 +260,9 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) (retErr error) {
 		}
 	}
 
-	return retry.RetryIfNecessary(ctx, func() error {
+	opts.destImage.warnAboutIneffectiveOptions(destRef.Transport())
+
+	return retry.IfNecessary(ctx, func() error {
 		manifestBytes, err := copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
 			RemoveSignatures: opts.removeSignatures,
 			SignBy:           opts.signByFingerprint,
```
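Every `retryOpts` hunk in this compare tracks the same rename in `github.com/containers/common/pkg/retry` (v0.50+): the `RetryOptions` type is now `Options`, and `RetryIfNecessary` is now `IfNecessary`. A minimal sketch of the new spelling, with an invented operation standing in for skopeo's registry calls:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containers/common/pkg/retry"
)

func main() {
	opts := retry.Options{MaxRetry: 3} // was retry.RetryOptions before common v0.50
	err := retry.IfNecessary(context.Background(), func() error { // was retry.RetryIfNecessary
		// In skopeo this closure wraps a registry operation such as copy.Image;
		// only errors the retry package classifies as retryable (network
		// timeouts, HTTP 5xx, ...) trigger further attempts, so this invented
		// error returns immediately.
		return fmt.Errorf("hypothetical transient failure")
	}, &opts)
	fmt.Println(err)
}
```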
```diff
@@ -15,7 +15,7 @@ import (
 type deleteOptions struct {
 	global    *globalOptions
 	image     *imageOptions
-	retryOpts *retry.RetryOptions
+	retryOpts *retry.Options
 }
 
 func deleteCmd(global *globalOptions) *cobra.Command {
@@ -70,7 +70,7 @@ func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
 	ctx, cancel := opts.global.commandTimeoutContext()
 	defer cancel()
 
-	return retry.RetryIfNecessary(ctx, func() error {
+	return retry.IfNecessary(ctx, func() error {
 		return ref.DeleteImage(ctx, sys)
 	}, opts.retryOpts)
 }
```
```diff
@@ -26,7 +26,7 @@ import (
 type inspectOptions struct {
 	global    *globalOptions
 	image     *imageOptions
-	retryOpts *retry.RetryOptions
+	retryOpts *retry.Options
 	format    string
 	raw       bool // Output the raw manifest instead of parsing information about the image
 	config    bool // Output the raw config blob instead of parsing information about the image
@@ -96,7 +96,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 		return err
 	}
 
-	if err := retry.RetryIfNecessary(ctx, func() error {
+	if err := retry.IfNecessary(ctx, func() error {
 		src, err = parseImageSource(ctx, opts.image, imageName)
 		return err
 	}, opts.retryOpts); err != nil {
@@ -109,7 +109,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 		}
 	}()
 
-	if err := retry.RetryIfNecessary(ctx, func() error {
+	if err := retry.IfNecessary(ctx, func() error {
 		rawManifest, _, err = src.GetManifest(ctx, nil)
 		return err
 	}, opts.retryOpts); err != nil {
@@ -132,7 +132,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 
 	if opts.config && opts.raw {
 		var configBlob []byte
-		if err := retry.RetryIfNecessary(ctx, func() error {
+		if err := retry.IfNecessary(ctx, func() error {
 			configBlob, err = img.ConfigBlob(ctx)
 			return err
 		}, opts.retryOpts); err != nil {
@@ -145,7 +145,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 		return nil
 	} else if opts.config {
 		var config *v1.Image
-		if err := retry.RetryIfNecessary(ctx, func() error {
+		if err := retry.IfNecessary(ctx, func() error {
 			config, err = img.OCIConfig(ctx)
 			return err
 		}, opts.retryOpts); err != nil {
@@ -168,7 +168,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 		return nil
 	}
 
-	if err := retry.RetryIfNecessary(ctx, func() error {
+	if err := retry.IfNecessary(ctx, func() error {
 		imgInspect, err = img.Inspect(ctx)
 		return err
 	}, opts.retryOpts); err != nil {
@@ -186,6 +186,7 @@ func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error)
 		Architecture: imgInspect.Architecture,
 		Os:           imgInspect.Os,
 		Layers:       imgInspect.Layers,
+		LayersData:   imgInspect.LayersData,
 		Env:          imgInspect.Env,
 	}
 	outputData.Digest, err = manifest.Digest(rawManifest)
```
```diff
@@ -3,6 +3,7 @@ package inspect
 import (
 	"time"
 
+	"github.com/containers/image/v5/types"
 	digest "github.com/opencontainers/go-digest"
 )
@@ -19,5 +20,6 @@ type Output struct {
 	Architecture string
 	Os           string
 	Layers       []string
+	LayersData   []types.ImageInspectLayer
 	Env          []string
 }
```
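The new `LayersData` field comes from containers/image's `ImageInspectInfo` and carries per-layer metadata alongside the bare digest list in `Layers`. A rough sketch of how the enlarged `Output` serializes; the `types.ImageInspectLayer` field names (`MIMEType`, `Digest`, `Size`) follow containers/image v5.23, and the sample values are invented:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/image/v5/types"
	digest "github.com/opencontainers/go-digest"
)

// Trimmed-down version of the inspect Output struct from the diff above.
type Output struct {
	Layers     []string
	LayersData []types.ImageInspectLayer
	Env        []string
}

func main() {
	d := digest.Digest("sha256:0000000000000000000000000000000000000000000000000000000000000000")
	out := Output{
		Layers: []string{d.String()},
		LayersData: []types.ImageInspectLayer{{
			MIMEType: "application/vnd.oci.image.layer.v1.tar+gzip",
			Digest:   d,
			Size:     123456, // invented size
		}},
		Env: []string{"PATH=/usr/bin"},
	}
	b, _ := json.MarshalIndent(out, "", "  ")
	fmt.Println(string(b))
}
```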
```diff
@@ -19,7 +19,7 @@ import (
 type layersOptions struct {
 	global    *globalOptions
 	image     *imageOptions
-	retryOpts *retry.RetryOptions
+	retryOpts *retry.Options
 }
 
 func layersCmd(global *globalOptions) *cobra.Command {
@@ -68,13 +68,13 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
 		rawSource types.ImageSource
 		src       types.ImageCloser
 	)
-	if err = retry.RetryIfNecessary(ctx, func() error {
+	if err = retry.IfNecessary(ctx, func() error {
 		rawSource, err = parseImageSource(ctx, opts.image, imageName)
 		return err
 	}, opts.retryOpts); err != nil {
 		return err
 	}
-	if err = retry.RetryIfNecessary(ctx, func() error {
+	if err = retry.IfNecessary(ctx, func() error {
 		src, err = image.FromSource(ctx, sys, rawSource)
 		return err
 	}, opts.retryOpts); err != nil {
@@ -145,7 +145,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
 			r        io.ReadCloser
 			blobSize int64
 		)
-		if err = retry.RetryIfNecessary(ctx, func() error {
+		if err = retry.IfNecessary(ctx, func() error {
 			r, blobSize, err = rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
 			return err
 		}, opts.retryOpts); err != nil {
@@ -160,7 +160,7 @@ func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
 	}
 
 	var manifest []byte
-	if err = retry.RetryIfNecessary(ctx, func() error {
+	if err = retry.IfNecessary(ctx, func() error {
 		manifest, _, err = src.Manifest(ctx)
 		return err
 	}, opts.retryOpts); err != nil {
```
```diff
@@ -27,7 +27,7 @@ type tagListOutput struct {
 type tagsOptions struct {
 	global    *globalOptions
 	image     *imageOptions
-	retryOpts *retry.RetryOptions
+	retryOpts *retry.Options
 }
 
 var transportHandlers = map[string]func(ctx context.Context, sys *types.SystemContext, opts *tagsOptions, userInput string) (repositoryName string, tagListing []string, err error){
@@ -120,7 +120,7 @@ func listDockerRepoTags(ctx context.Context, sys *types.SystemContext, opts *tag
 	if err != nil {
 		return
 	}
-	if err = retry.RetryIfNecessary(ctx, func() error {
+	if err = retry.IfNecessary(ctx, func() error {
 		repositoryName, tagListing, err = listDockerTags(ctx, sys, imgRef)
 		return err
 	}, opts.retryOpts); err != nil {
```
```diff
@@ -33,7 +33,7 @@ type syncOptions struct {
 	deprecatedTLSVerify      *deprecatedTLSVerifyOption
 	srcImage                 *imageOptions     // Source image options
 	destImage                *imageDestOptions // Destination image options
-	retryOpts                *retry.RetryOptions
+	retryOpts                *retry.Options
 	removeSignatures         bool   // Do not copy signatures from the source image
 	signByFingerprint        string // Sign the image using a GPG key with the specified fingerprint
 	signBySigstorePrivateKey string // Sign the image using a sigstore private key
@@ -244,7 +244,11 @@ func imagesToCopyFromRepo(sys *types.SystemContext, repoRef reference.Named) ([]
 	for _, tag := range tags {
 		taggedRef, err := reference.WithTag(repoRef, tag)
 		if err != nil {
-			return nil, fmt.Errorf("Error creating a reference for repository %s and tag %q: %w", repoRef.Name(), tag, err)
+			logrus.WithFields(logrus.Fields{
+				"repo": repoRef.Name(),
+				"tag":  tag,
+			}).Errorf("Error creating a tagged reference from registry tag list: %v", err)
+			continue
 		}
 		ref, err := docker.NewReference(taggedRef)
 		if err != nil {
@@ -548,6 +552,8 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
 		return errors.New("sync from 'dir' to 'dir' not implemented, consider using rsync instead")
 	}
 
+	opts.destImage.warnAboutIneffectiveOptions(transports.Get(opts.destination))
+
 	imageListSelection := copy.CopySystemImage
 	if opts.all {
 		imageListSelection = copy.CopyAllImages
@@ -571,7 +577,7 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
 
 	sourceArg := args[0]
 	var srcRepoList []repoDescriptor
-	if err = retry.RetryIfNecessary(ctx, func() error {
+	if err = retry.IfNecessary(ctx, func() error {
 		srcRepoList, err = imagesToCopy(sourceArg, opts.source, sourceCtx)
 		return err
 	}, opts.retryOpts); err != nil {
@@ -657,7 +663,7 @@ func (opts *syncOptions) run(args []string, stdout io.Writer) (retErr error) {
 			logrus.WithFields(fromToFields).Infof("Would have copied image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
 		} else {
 			logrus.WithFields(fromToFields).Infof("Copying image ref %d/%d", counter+1, len(srcRepo.ImageRefs))
-			if err = retry.RetryIfNecessary(ctx, func() error {
+			if err = retry.IfNecessary(ctx, func() error {
 				_, err = copy.Image(ctx, policyContext, destRef, ref, &options)
 				return err
 			}, opts.retryOpts); err != nil {
```
```diff
@@ -10,6 +10,7 @@ import (
 
 	commonFlag "github.com/containers/common/pkg/flag"
 	"github.com/containers/common/pkg/retry"
+	"github.com/containers/image/v5/directory"
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/pkg/compression"
 	"github.com/containers/image/v5/transports/alltransports"
@@ -30,11 +31,11 @@ type errorShouldDisplayUsage struct {
 // The error for closeErr is annotated with description (which is not a format string)
 // Typical usage:
 //
-// defer func() {
-//     if err := something.Close(); err != nil {
-//         returnedErr = noteCloseFailure(returnedErr, "closing something", err)
-//     }
-// }
+//	defer func() {
+//		if err := something.Close(); err != nil {
+//			returnedErr = noteCloseFailure(returnedErr, "closing something", err)
+//		}
+//	}
 func noteCloseFailure(err error, description string, closeErr error) error {
 	// We don’t accept a Closer() and close it ourselves because signature.PolicyContext has .Destroy(), not .Close().
 	// This also makes it harder for a caller to do
@@ -174,8 +175,8 @@ func imageFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLS
 	return fs, opts
 }
 
-func retryFlags() (pflag.FlagSet, *retry.RetryOptions) {
-	opts := retry.RetryOptions{}
+func retryFlags() (pflag.FlagSet, *retry.Options) {
+	opts := retry.Options{}
 	fs := pflag.FlagSet{}
 	fs.IntVar(&opts.MaxRetry, "retry-times", 0, "the number of times to possibly retry")
 	return fs, &opts
@@ -244,6 +245,7 @@ func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
 }
 
 // imageDestOptions is a superset of imageOptions specialized for image destinations.
+// Every user should call imageDestOptions.warnAboutIneffectiveOptions() as part of handling the CLI
 type imageDestOptions struct {
 	*imageOptions
 	dirForceCompression bool // Compress layers when saving to the dir: transport
@@ -252,12 +254,13 @@ type imageDestOptions struct {
 	compressionFormat   string                 // Format to use for the compression
 	compressionLevel    commonFlag.OptionalInt // Level to use for the compression
 	precomputeDigests   bool                   // Precompute digests to dedup layers when saving to the docker: transport
+	imageDestFlagPrefix string
 }
 
 // imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
 func imageDestFlags(global *globalOptions, shared *sharedImageOptions, deprecatedTLSVerify *deprecatedTLSVerifyOption, flagPrefix, credsOptionAlias string) (pflag.FlagSet, *imageDestOptions) {
 	genericFlags, genericOptions := imageFlags(global, shared, deprecatedTLSVerify, flagPrefix, credsOptionAlias)
-	opts := imageDestOptions{imageOptions: genericOptions}
+	opts := imageDestOptions{imageOptions: genericOptions, imageDestFlagPrefix: flagPrefix}
 	fs := pflag.FlagSet{}
 	fs.AddFlagSet(&genericFlags)
 	fs.BoolVar(&opts.dirForceCompression, flagPrefix+"compress", false, "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)")
@@ -295,6 +298,19 @@ func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
 	return ctx, err
 }
 
+// warnAboutIneffectiveOptions warns if any ineffective option was set by the user
+// Every user should call this as part of handling the CLI
+func (opts *imageDestOptions) warnAboutIneffectiveOptions(destTransport types.ImageTransport) {
+	if destTransport.Name() != directory.Transport.Name() {
+		if opts.dirForceCompression {
+			logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"compress")
+		}
+		if opts.dirForceDecompression {
+			logrus.Warnf("--%s can only be used if the destination transport is 'dir'", opts.imageDestFlagPrefix+"decompress")
+		}
+	}
+}
+
 func parseCreds(creds string) (string, string, error) {
 	if creds == "" {
 		return "", "", errors.New("credentials can't be empty")
```
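The corrected doc comment above describes skopeo's deferred-close convention: a failure from `Close()` must be reported even when the function body already failed. A self-contained sketch of the pattern with a stand-in `noteCloseFailure`; the real helper lives in cmd/skopeo/utils.go and may combine the errors differently:

```go
package main

import (
	"fmt"
	"os"
)

// noteCloseFailure combines a primary error with an error from a Close(),
// losing neither; this stand-in mirrors the helper described in the diff.
func noteCloseFailure(err error, description string, closeErr error) error {
	if err == nil {
		return fmt.Errorf("%s: %w", description, closeErr)
	}
	return fmt.Errorf("%w (%s: %v)", err, description, closeErr)
}

func readFirstByte(path string) (retErr error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// The deferred closure writes into the named return value, so a Close()
	// failure surfaces even after the body has already set an error.
	defer func() {
		if err := f.Close(); err != nil {
			retErr = noteCloseFailure(retErr, "closing input file", err)
		}
	}()
	buf := make([]byte, 1)
	_, err = f.Read(buf)
	return err
}

func main() {
	fmt.Println(readFirstByte("/etc/hostname"))
}
```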
```diff
@@ -1,3 +1,16 @@
+[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
+[comment]: <> ()
+[comment]: <> (ANY changes made to this file, once commited/merged must)
+[comment]: <> (be manually copy/pasted -in markdown- into the description)
+[comment]: <> (field on Quay at the following locations:)
+[comment]: <> ()
+[comment]: <> (https://quay.io/repository/containers/skopeo)
+[comment]: <> (https://quay.io/repository/skopeo/stable)
+[comment]: <> (https://quay.io/repository/skopeo/testing)
+[comment]: <> (https://quay.io/repository/skopeo/upstream)
+[comment]: <> ()
+[comment]: <> (***ATTENTION*** ***WARNING*** ***ALERT*** ***CAUTION*** ***DANGER***)
+
 <img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
 
 ----
```
```diff
@@ -16,27 +16,11 @@ FROM registry.fedoraproject.org/fedora:latest
 # https://bugzilla.redhat.com/show_bug.cgi?id=1995337#c3
 RUN dnf -y update && \
     rpm --setcaps shadow-utils 2>/dev/null && \
-    dnf -y --enablerepo updates-testing --exclude container-selinux install \
-        make \
-        golang \
-        git \
-        go-md2man \
-        fuse-overlayfs \
-        fuse3 \
-        containers-common \
-        gpgme-devel \
-        libassuan-devel \
-        btrfs-progs-devel \
-        device-mapper-devel && \
-    mkdir /root/skopeo && \
-    git clone https://github.com/containers/skopeo \
-        /root/skopeo/src/github.com/containers/skopeo && \
-    export GOPATH=/root/skopeo && \
-    cd /root/skopeo/src/github.com/containers/skopeo && \
-    make bin/skopeo && \
-    make PREFIX=/usr install && \
-    rm -rf /root/skopeo/* && \
-    dnf -y remove git golang go-md2man make && \
+    dnf -y install 'dnf-command(copr)' --enablerepo=updates-testing && \
+    dnf -y copr enable rhcontainerbot/podman-next && \
+    dnf -y install skopeo \
+        --exclude container-selinux \
+        --enablerepo=updates-testing && \
     dnf clean all && \
     rm -rf /var/cache /var/log/dnf* /var/log/yum.*
```
default.yaml (14 changed lines)

```diff
@@ -1,8 +1,8 @@
 # This is a default registries.d configuration file. You may
 # add to this file or create additional files in registries.d/.
 #
-# lookaside: indicates a location that is read and write
-# lookaside-staging: indicates a location that is only for write
+# lookaside: for reading/writing simple signing signatures
+# lookaside-staging: for writing simple signing signatures, preferred over lookaside
 #
 # lookaside and lookaside-staging take a value of the following:
 #   lookaside: {schema}://location
@@ -10,10 +10,12 @@
 # For reading signatures, schema may be http, https, or file.
 # For writing signatures, schema may only be file.
 
-# This is the default signature write location for docker registries.
+# The default locations are built-in, for both reading and writing:
+#   /var/lib/containers/sigstore for root, or
+#   ~/.local/share/containers/sigstore for non-root users.
 default-docker:
-#   lookaside: file:///var/lib/containers/sigstore
-  lookaside-staging: file:///var/lib/containers/sigstore
+#   lookaside: https://…
+#   lookaside-staging: file:///…
 
 # The 'docker' indicator here is the start of the configuration
 # for docker registries.
@@ -21,6 +23,6 @@ default-docker:
 #   docker:
 #
 #     privateregistry.com:
-#       lookaside: http://privateregistry.com/sigstore/
+#       lookaside: https://privateregistry.com/sigstore/
 #       lookaside-staging: /mnt/nfs/privateregistry/sigstore
```
```diff
@@ -56,7 +56,7 @@ After copying the image, write the digest of the resulting image to the file.
 
 **--preserve-digests**
 
-Preserve the digests during copying. Fail if the digest cannot be preserved.
+Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
 
 **--encrypt-layer** _ints_
```
```diff
@@ -1,14 +1,14 @@
 % skopeo-sync(1)
 
 ## NAME
-skopeo\-sync - Synchronize images between container registries and local directories.
+skopeo\-sync - Synchronize images between registry repositories and local directories.
 
 ## SYNOPSIS
 **skopeo sync** [*options*] --src _transport_ --dest _transport_ _source_ _destination_
 
 ## DESCRIPTION
-Synchronize images between container registries and local directories.
+Synchronize images between registry repositories and local directories.
 The synchronization is achieved by copying all the images found at _source_ to _destination_.
 
 Useful to synchronize a local container registry mirror, and to populate registries running inside of air-gapped environments.
@@ -66,7 +66,7 @@ Print usage statement.
 
 **--scoped** Prefix images with the source image path, so that multiple images with the same name can be stored at _destination_.
 
-**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved.
+**--preserve-digests** Preserve the digests during copying. Fail if the digest cannot be preserved. Consider using `--all` at the same time.
 
 **--remove-signatures** Do not copy signatures, if any, from _source-image_. This is necessary when copying a signed image to a destination which does not support signatures.
@@ -108,7 +108,7 @@ Print the version number
 | [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest for a manifest-file and write it to standard output. |
 | [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Debugging tool - Publish and sign an image in one step. |
 | [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md)| Verify an image signature. |
-| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between container registries and local directories. |
+| [skopeo-sync(1)](skopeo-sync.1.md)| Synchronize images between registry repositories and local directories. |
 
 ## FILES
 **/etc/containers/policy.json**
```
go.mod (75 changed lines)

```diff
@@ -3,105 +3,98 @@ module github.com/containers/skopeo
 go 1.17
 
 require (
-	github.com/containers/common v0.48.0
-	github.com/containers/image/v5 v5.21.2-0.20220712113758-29aec5f7bbbf
+	github.com/containers/common v0.50.1
+	github.com/containers/image/v5 v5.23.1-0.20221019201342-d92bac8cb807
 	github.com/containers/ocicrypt v1.1.5
-	github.com/containers/storage v1.41.0
-	github.com/docker/docker v20.10.17+incompatible
+	github.com/containers/storage v1.43.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198
+	github.com/opencontainers/image-spec v1.1.0-rc1
 	github.com/opencontainers/image-tools v1.0.0-rc3
-	github.com/sirupsen/logrus v1.8.1
+	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.5.0
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.8.0
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
+	golang.org/x/term v0.0.0-20220526004731-065cf7ba2467
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
 	gopkg.in/yaml.v2 v2.4.0
 )
 
 require (
-	github.com/BurntSushi/toml v1.1.0 // indirect
+	github.com/BurntSushi/toml v1.2.0 // indirect
 	github.com/Microsoft/go-winio v0.5.2 // indirect
-	github.com/Microsoft/hcsshim v0.9.2 // indirect
+	github.com/Microsoft/hcsshim v0.9.4 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/containerd/cgroups v1.0.3 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect
+	github.com/containerd/cgroups v1.0.4 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.12.0 // indirect
 	github.com/containers/libtrust v0.0.0-20200511145503-9c3a6c22cd9a // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/docker/distribution v2.8.1+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.6.4 // indirect
+	github.com/docker/docker v20.10.18+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.7.0 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-metrics v0.0.1 // indirect
-	github.com/docker/go-units v0.4.0 // indirect
+	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
 	github.com/ghodss/yaml v1.0.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/go-containerregistry v0.10.0 // indirect
+	github.com/google/go-containerregistry v0.11.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/honeycombio/libhoney-go v1.15.8 // indirect
 	github.com/imdario/mergo v0.3.13 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.15.7 // indirect
-	github.com/klauspost/pgzip v1.2.5 // indirect
+	github.com/klauspost/compress v1.15.11 // indirect
+	github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
 	github.com/kr/pretty v0.2.1 // indirect
 	github.com/kr/text v0.2.0 // indirect
-	github.com/letsencrypt/boulder v0.0.0-20220331220046-b23ab962616e // indirect
+	github.com/letsencrypt/boulder v0.0.0-20220723181115-27de4befb95e // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
-	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
-	github.com/moby/sys/mountinfo v0.6.1 // indirect
+	github.com/mistifyio/go-zfs/v3 v3.0.0 // indirect
+	github.com/moby/sys/mountinfo v0.6.2 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/opencontainers/runc v1.1.2 // indirect
+	github.com/opencontainers/runc v1.1.4 // indirect
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 // indirect
-	github.com/opencontainers/selinux v1.10.1 // indirect
+	github.com/opencontainers/selinux v1.10.2 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
 	github.com/prometheus/client_golang v1.12.1 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
-	github.com/sigstore/sigstore v1.3.1-0.20220629021053-b95fc0d626c1 // indirect
+	github.com/sigstore/sigstore v1.4.2 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.7.1 // indirect
+	github.com/sylabs/sif/v2 v2.8.0 // indirect
 	github.com/tchap/go-patricia v2.3.0+incompatible // indirect
-	github.com/theupdateframework/go-tuf v0.3.0 // indirect
+	github.com/theupdateframework/go-tuf v0.5.1 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.10 // indirect
 	github.com/vbatts/tar-split v0.11.2 // indirect
-	github.com/vbauerster/mpb/v7 v7.4.2 // indirect
+	github.com/vbauerster/mpb/v7 v7.5.3 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	go.etcd.io/bbolt v1.3.6 // indirect
-	go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 // indirect
+	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.23.0 // indirect
-	golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 // indirect
-	golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e // indirect
-	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
-	golang.org/x/sys v0.0.0-20220624220833-87e55d714810 // indirect
+	golang.org/x/crypto v0.0.0-20220919173607-35f4265a4bc0 // indirect
+	golang.org/x/net v0.0.0-20220909164309-bea034e7d591 // indirect
+	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
+	golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect
 	golang.org/x/text v0.3.7 // indirect
-	google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
-	google.golang.org/grpc v1.47.0 // indirect
-	google.golang.org/protobuf v1.28.0 // indirect
+	google.golang.org/genproto v0.0.0-20220720214146-176da50484ac // indirect
+	google.golang.org/grpc v1.48.0 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
```
```diff
@@ -34,7 +34,7 @@ if [[ "$SKOPEO_CONTAINER_TESTS" == "0" ]] && [[ "$CI" != "true" ]]; then
       echo "    the Makefile targets WITHOUT the '-local' suffix."
       echo "***************************************************************"
     ) > /dev/stderr
-    sleep 5s
+    sleep 5
 fi
 
 echo
```
```diff
@@ -11,7 +11,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/docker/docker/pkg/homedir"
+	"github.com/containers/storage/pkg/homedir"
 	"gopkg.in/check.v1"
 )
```
```diff
@@ -15,11 +15,15 @@ TestRunShell is not really a test; it is a convenient way to use the registry setup
 in openshift.go and CopySuite to get an interactive environment for experimentation.
 
 To use it, run:
 
+	sudo make shell
+
+to start a container, then within the container:
+
 	SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'
 
 An example of what can be done within the container:
 
 	cd ..; make bin/skopeo PREFIX=/usr install
 	./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://quay.io/libpod/busybox:latest atomic:localhost:5000/myns/personal:personal
 	oc get istag personal:personal -o json
```
```diff
@@ -50,7 +50,7 @@ function setup() {
 
     local dir=$TESTDIR/dir
 
-    run_skopeo copy --dest-compress --dest-compress-format=zstd $remote_image oci:$dir:latest
+    run_skopeo copy --dest-compress-format=zstd $remote_image oci:$dir:latest
 
     # zstd magic number
     local magic=$(printf "\x28\xb5\x2f\xfd")
```
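The magic-number check works because every zstd frame begins with the 4-byte magic 0xFD2FB528, serialized little-endian as 28 B5 2F FD; the test stores those bytes in `$magic` to compare against the written layer blob. The same check sketched in Go:

```go
package main

import (
	"bytes"
	"fmt"
)

// isZstd reports whether data begins with the zstd frame magic number
// (0xFD2FB528, little-endian on disk), the same bytes the bats test uses.
func isZstd(data []byte) bool {
	return bytes.HasPrefix(data, []byte{0x28, 0xb5, 0x2f, 0xfd})
}

func main() {
	fmt.Println(isZstd([]byte{0x28, 0xb5, 0x2f, 0xfd, 0x01})) // true
	fmt.Println(isZstd([]byte{0x1f, 0x8b}))                   // false: gzip magic
}
```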
vendor/github.com/BurntSushi/toml/.gitignore (2 changed lines, generated, vendored)

```diff
@@ -1,2 +1,2 @@
-toml.test
+/toml.test
 /toml-test
```
vendor/github.com/BurntSushi/toml/COMPATIBLE (1 changed line, generated, vendored)

```diff
@@ -1 +0,0 @@
-Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
```
vendor/github.com/BurntSushi/toml/README.md (185 changed lines, generated, vendored)

````diff
@@ -1,6 +1,5 @@
 TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages.
+reflection interface similar to Go's standard library `json` and `xml` packages.
 
 Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
 
@@ -10,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a
 changelog; this information is also in the git tag annotations (e.g. `git show
 v0.4.0`).
 
-This library requires Go 1.13 or newer; install it with:
+This library requires Go 1.13 or newer; add it to your go.mod with:
 
 % go get github.com/BurntSushi/toml@latest
 
@@ -19,16 +18,7 @@ It also comes with a TOML validator CLI tool:
 % go install github.com/BurntSushi/toml/cmd/tomlv@latest
 % tomlv some-toml-file.toml
 
-### Testing
-This package passes all tests in [toml-test] for both the decoder and the
-encoder.
-
-[toml-test]: https://github.com/BurntSushi/toml-test
-
 ### Examples
-This package works similar to how the Go standard library handles XML and JSON.
-Namely, data is loaded into Go values via reflection.
-
 For the simplest example, consider some TOML file as just a list of keys and
 values:
 
@@ -40,7 +30,7 @@ Perfection = [ 6, 28, 496, 8128 ]
 DOB = 1987-07-05T05:45:00Z
 ```
 
-Which could be defined in Go as:
+Which can be decoded with:
 
 ```go
 type Config struct {
@@ -48,20 +38,15 @@ type Config struct {
 	Cats       []string
 	Pi         float64
 	Perfection []int
-	DOB        time.Time // requires `import time`
+	DOB        time.Time
 }
-```
-
-And then decoded with:
-
-```go
+
 var conf Config
 _, err := toml.Decode(tomlData, &conf)
 // handle error
 ```
 
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
+You can also use struct tags if your struct field name doesn't map to a TOML key
+value directly:
 
 ```toml
 some_key_NAME = "wat"
@@ -73,139 +58,63 @@ type TOML struct {
 }
 ```
 
-Beware that like other most other decoders **only exported fields** are
-considered when encoding and decoding; private fields are silently ignored.
+Beware that like other decoders **only exported fields** are considered when
+encoding and decoding; private fields are silently ignored.
 
 ### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
+Here's an example that automatically parses values in a `mail.Address`:
 
 ```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
+contacts = [
+    "Donald Duck <donald@duckburg.com>",
+    "Scrooge McDuck <scrooge@duckburg.com>",
+]
 ```
 
-Which can be decoded with:
+Can be decoded with:
 
 ```go
-type song struct {
-	Name     string
-	Duration duration
-}
-type songs struct {
-	Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
-	log.Fatal(err)
+// Create address type which satisfies the encoding.TextUnmarshaler interface.
+type address struct {
+	*mail.Address
 }
 
-for _, s := range favorites.Song {
-	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
-```
-
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
-
-```go
-type duration struct {
-	time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
+func (a *address) UnmarshalText(text []byte) error {
 	var err error
-	d.Duration, err = time.ParseDuration(string(text))
+	a.Address, err = mail.ParseAddress(string(text))
 	return err
 }
 
+// Decode it.
+func decode() {
+	blob := `
+		contacts = [
+			"Donald Duck <donald@duckburg.com>",
+			"Scrooge McDuck <scrooge@duckburg.com>",
+		]
+	`
+
+	var contacts struct {
+		Contacts []address
+	}
+
+	_, err := toml.Decode(blob, &contacts)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	for _, c := range contacts.Contacts {
+		fmt.Printf("%#v\n", c.Address)
+	}
+
+	// Output:
+	// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
+	// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
+}
+```
+
 To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
 a similar way.
 
 ### More complex usage
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
-  # You can indent as you please. Tabs or spaces. TOML don't care.
-  [servers.alpha]
-  ip = "10.0.0.1"
-  dc = "eqdc10"
-
-  [servers.beta]
-  ip = "10.0.0.2"
-  dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
-  "alpha",
-  "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
-	Title   string
-	Owner   ownerInfo
-	DB      database `toml:"database"`
-	Servers map[string]server
-	Clients clients
-}
-
-type ownerInfo struct {
-	Name string
-	Org  string `toml:"organization"`
-	Bio  string
-	DOB  time.Time
-}
-
-type database struct {
-	Server  string
-	Ports   []int
-	ConnMax int `toml:"connection_max"`
-	Enabled bool
-}
-
-type server struct {
-	IP string
-	DC string
-}
-
-type clients struct {
-	Data  [][]interface{}
-	Hosts []string
-}
-```
-
-Note that a case insensitive match will be tried if an exact match can't be
-found.
-
-A working example of the above can be found in `_example/example.{go,toml}`.
+See the [`_example/`](/_example) directory for a more complex example.
````
227
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
227
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
@@ -3,13 +3,16 @@ package toml
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
@@ -18,7 +21,7 @@ type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
||||
// Unmarshal decodes the contents of `data` in TOML format into a pointer `v`.
|
||||
func Unmarshal(data []byte, v interface{}) error {
|
||||
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
|
||||
return err
|
||||
@@ -75,6 +78,9 @@ const (
|
||||
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
|
||||
// in the local timezone.
|
||||
//
|
||||
// time.Duration types are treated as nanoseconds if the TOML value is an
|
||||
// integer, or they're parsed with time.ParseDuration() if they're strings.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond to the
|
||||
// obvious Go types.
|
||||
//
|
||||
@@ -82,7 +88,7 @@ const (
|
||||
// interface, in which case any primitive TOML value (floats, strings, integers,
|
||||
// booleans, datetimes) will be converted to a []byte and given to the value's
|
||||
// UnmarshalText method. See the Unmarshaler example for a demonstration with
|
||||
// time duration strings.
|
||||
// email addresses.
|
||||
//
|
||||
// Key mapping
|
||||
//
|
||||
@@ -111,6 +117,7 @@ func NewDecoder(r io.Reader) *Decoder {
|
||||
var (
|
||||
unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
|
||||
primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Decode TOML data in to the pointer `v`.
|
||||
@@ -122,10 +129,10 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||
s = "%v"
|
||||
}
|
||||
|
||||
return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v))
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v))
|
||||
}
|
||||
if rv.IsNil() {
|
||||
return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v))
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
|
||||
}
|
||||
|
||||
// Check if this is a supported type: struct, map, interface{}, or something
|
||||
@@ -135,7 +142,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
|
||||
!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
|
||||
!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
|
||||
return MetaData{}, e("cannot decode to type %s", rt)
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt)
|
||||
}
|
||||
|
||||
// TODO: parser should read from io.Reader? Or at the very least, make it

@@ -152,10 +159,11 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {

	md := MetaData{
		mapping: p.mapping,
		types:   p.types,
		keyInfo: p.keyInfo,
		keys:    p.ordered,
		decoded: make(map[string]struct{}, len(p.ordered)),
		context: nil,
		data:    data,
	}
	return md, md.unify(p.mapping, rv)
}

@@ -185,7 +193,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {

func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
	// Special case. Look for a `Primitive` value.
	// TODO: #76 would make this superfluous after implemented.
	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
	if rv.Type() == primitiveType {
		// Save the undecoded data and the key context into the primitive
		// value.
		context := make(Key, len(md.context))

@@ -197,17 +205,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
		return nil
	}

	// Special case. Unmarshaler Interface support.
	if rv.CanAddr() {
		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
			return v.UnmarshalTOML(data)
		}
	rvi := rv.Interface()
	if v, ok := rvi.(Unmarshaler); ok {
		return v.UnmarshalTOML(data)
	}

	// Special case. Look for a value satisfying the TextUnmarshaler interface.
	if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
	if v, ok := rvi.(encoding.TextUnmarshaler); ok {
		return md.unifyText(data, v)
	}

	// TODO:
	// The behavior here is incorrect whenever a Go type satisfies the
	// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or

@@ -218,7 +223,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {

	k := rv.Kind()

	// laziness
	if k >= reflect.Int && k <= reflect.Uint64 {
		return md.unifyInt(data, rv)
	}

@@ -244,15 +248,14 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
	case reflect.Bool:
		return md.unifyBool(data, rv)
	case reflect.Interface:
		// we only support empty interfaces.
		if rv.NumMethod() > 0 {
			return e("unsupported type %s", rv.Type())
		if rv.NumMethod() > 0 { // Only support empty interfaces are supported.
			return md.e("unsupported type %s", rv.Type())
		}
		return md.unifyAnything(data, rv)
	case reflect.Float32, reflect.Float64:
		return md.unifyFloat64(data, rv)
	}
	return e("unsupported type %s", rv.Kind())
	return md.e("unsupported type %s", rv.Kind())
}

func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {

@@ -261,7 +264,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
		if mapping == nil {
			return nil
		}
		return e("type mismatch for %s: expected table but found %T",
		return md.e("type mismatch for %s: expected table but found %T",
			rv.Type().String(), mapping)
	}

@@ -287,13 +290,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
			if isUnifiable(subv) {
				md.decoded[md.context.add(key).String()] = struct{}{}
				md.context = append(md.context, key)

				err := md.unify(datum, subv)
				if err != nil {
					return err
				}
				md.context = md.context[0 : len(md.context)-1]
			} else if f.name != "" {
				return e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
				return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
			}
		}
	}

@@ -301,10 +305,10 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
}

func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
	if k := rv.Type().Key().Kind(); k != reflect.String {
		return fmt.Errorf(
			"toml: cannot decode to a map with non-string key type (%s in %q)",
			k, rv.Type())
	keyType := rv.Type().Key().Kind()
	if keyType != reflect.String && keyType != reflect.Interface {
		return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
			keyType, rv.Type())
	}

	tmap, ok := mapping.(map[string]interface{})

@@ -322,13 +326,22 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
		md.context = append(md.context, k)

		rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
		if err := md.unify(v, rvval); err != nil {

		err := md.unify(v, indirect(rvval))
		if err != nil {
			return err
		}
		md.context = md.context[0 : len(md.context)-1]

		rvkey := indirect(reflect.New(rv.Type().Key()))
		rvkey.SetString(k)

		switch keyType {
		case reflect.Interface:
			rvkey.Set(reflect.ValueOf(k))
		case reflect.String:
			rvkey.SetString(k)
		}

		rv.SetMapIndex(rvkey, rvval)
	}
	return nil
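The unifyMap hunk above widens the accepted key kinds, so decoding into a map keyed by interface{} now works in addition to map[string]T. A minimal sketch, assuming the public github.com/BurntSushi/toml API at this version; since TOML keys are always strings, the key is stored as a string inside the interface:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var m map[interface{}]interface{}
	doc := "a = 1\nb = \"x\""
	if _, err := toml.Decode(doc, &m); err != nil {
		panic(err)
	}
	fmt.Println(m["a"], m["b"]) // 1 x
}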
@@ -343,7 +356,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
		return md.badtype("slice", data)
	}
	if l := datav.Len(); l != rv.Len() {
		return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
		return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l)
	}
	return md.unifySliceArray(datav, rv)
}

@@ -376,6 +389,18 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
}

func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
	_, ok := rv.Interface().(json.Number)
	if ok {
		if i, ok := data.(int64); ok {
			rv.SetString(strconv.FormatInt(i, 10))
		} else if f, ok := data.(float64); ok {
			rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
		} else {
			return md.badtype("string", data)
		}
		return nil
	}

	if s, ok := data.(string); ok {
		rv.SetString(s)
		return nil

@@ -384,11 +409,13 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
}

func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
	rvk := rv.Kind()

	if num, ok := data.(float64); ok {
		switch rv.Kind() {
		switch rvk {
		case reflect.Float32:
			if num < -math.MaxFloat32 || num > math.MaxFloat32 {
				return e("value %f is out of range for float32", num)
				return md.parseErr(errParseRange{i: num, size: rvk.String()})
			}
			fallthrough
		case reflect.Float64:

@@ -400,20 +427,11 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
	}

	if num, ok := data.(int64); ok {
		switch rv.Kind() {
		case reflect.Float32:
			if num < -maxSafeFloat32Int || num > maxSafeFloat32Int {
				return e("value %d is out of range for float32", num)
			}
			fallthrough
		case reflect.Float64:
			if num < -maxSafeFloat64Int || num > maxSafeFloat64Int {
				return e("value %d is out of range for float64", num)
			}
			rv.SetFloat(float64(num))
		default:
			panic("bug")
		if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
			(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
			return md.parseErr(errParseRange{i: num, size: rvk.String()})
		}
		rv.SetFloat(float64(num))
		return nil
	}

@@ -421,50 +439,46 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
}

func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
	if num, ok := data.(int64); ok {
		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
			switch rv.Kind() {
			case reflect.Int, reflect.Int64:
				// No bounds checking necessary.
			case reflect.Int8:
				if num < math.MinInt8 || num > math.MaxInt8 {
					return e("value %d is out of range for int8", num)
				}
			case reflect.Int16:
				if num < math.MinInt16 || num > math.MaxInt16 {
					return e("value %d is out of range for int16", num)
				}
			case reflect.Int32:
				if num < math.MinInt32 || num > math.MaxInt32 {
					return e("value %d is out of range for int32", num)
				}
	_, ok := rv.Interface().(time.Duration)
	if ok {
		// Parse as string duration, and fall back to regular integer parsing
		// (as nanosecond) if this is not a string.
		if s, ok := data.(string); ok {
			dur, err := time.ParseDuration(s)
			if err != nil {
				return md.parseErr(errParseDuration{s})
			}
			rv.SetInt(num)
		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
			unum := uint64(num)
			switch rv.Kind() {
			case reflect.Uint, reflect.Uint64:
				// No bounds checking necessary.
			case reflect.Uint8:
				if num < 0 || unum > math.MaxUint8 {
					return e("value %d is out of range for uint8", num)
				}
			case reflect.Uint16:
				if num < 0 || unum > math.MaxUint16 {
					return e("value %d is out of range for uint16", num)
				}
			case reflect.Uint32:
				if num < 0 || unum > math.MaxUint32 {
					return e("value %d is out of range for uint32", num)
				}
			}
			rv.SetUint(unum)
		} else {
			panic("unreachable")
			rv.SetInt(int64(dur))
			return nil
		}
		return nil
	}
	return md.badtype("integer", data)

	num, ok := data.(int64)
	if !ok {
		return md.badtype("integer", data)
	}

	rvk := rv.Kind()
	switch {
	case rvk >= reflect.Int && rvk <= reflect.Int64:
		if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) ||
			(rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) ||
			(rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) {
			return md.parseErr(errParseRange{i: num, size: rvk.String()})
		}
		rv.SetInt(num)
	case rvk >= reflect.Uint && rvk <= reflect.Uint64:
		unum := uint64(num)
		if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) ||
			rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) ||
			rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) {
			return md.parseErr(errParseRange{i: num, size: rvk.String()})
		}
		rv.SetUint(unum)
	default:
		panic("unreachable")
	}
	return nil
}

func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {

@@ -489,7 +503,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
			return err
		}
		s = string(text)
	case TextMarshaler:
	case encoding.TextMarshaler:
		text, err := sdata.MarshalText()
		if err != nil {
			return err

@@ -515,7 +529,30 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
}

func (md *MetaData) badtype(dst string, data interface{}) error {
	return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst)
	return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
}

func (md *MetaData) parseErr(err error) error {
	k := md.context.String()
	return ParseError{
		LastKey:  k,
		Position: md.keyInfo[k].pos,
		Line:     md.keyInfo[k].pos.Line,
		err:      err,
		input:    string(md.data),
	}
}

func (md *MetaData) e(format string, args ...interface{}) error {
	f := "toml: "
	if len(md.context) > 0 {
		f = fmt.Sprintf("toml: (last key %q): ", md.context)
		p := md.keyInfo[md.context.String()].pos
		if p.Line > 0 {
			f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context)
		}
	}
	return fmt.Errorf(f+format, args...)
}

// rvalue returns a reflect.Value of `v`. All pointers are resolved.

@@ -534,7 +571,11 @@ func indirect(v reflect.Value) reflect.Value {
	if v.Kind() != reflect.Ptr {
		if v.CanSet() {
			pv := v.Addr()
			if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
			pvi := pv.Interface()
			if _, ok := pvi.(encoding.TextUnmarshaler); ok {
				return pv
			}
			if _, ok := pvi.(Unmarshaler); ok {
				return pv
			}
		}

@@ -550,12 +591,12 @@ func isUnifiable(rv reflect.Value) bool {
	if rv.CanSet() {
		return true
	}
	if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
	rvi := rv.Interface()
	if _, ok := rvi.(encoding.TextUnmarshaler); ok {
		return true
	}
	if _, ok := rvi.(Unmarshaler); ok {
		return true
	}
	return false
}

func e(format string, args ...interface{}) error {
	return fmt.Errorf("toml: "+format, args...)
}
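The rewritten unifyInt gives time.Duration targets first-class handling: a TOML string is parsed with time.ParseDuration, and a bare integer still decodes as nanoseconds. A minimal sketch using the public toml.Decode API:

package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

type config struct {
	Short time.Duration `toml:"short"` // from a string, via time.ParseDuration
	Nanos time.Duration `toml:"nanos"` // from an integer, taken as nanoseconds
}

func main() {
	var c config
	doc := "short = \"5m10s\"\nnanos = 1500000000"
	if _, err := toml.Decode(doc, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Short, c.Nanos) // 5m10s 1.5s
}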
208  vendor/github.com/BurntSushi/toml/encode.go  (generated, vendored)

@@ -3,6 +3,7 @@ package toml

import (
	"bufio"
	"encoding"
	"encoding/json"
	"errors"
	"fmt"
	"io"

@@ -63,6 +64,12 @@ var dblQuotedReplacer = strings.NewReplacer(
	"\x7f", `\u007f`,
)

var (
	marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
	marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
	timeType    = reflect.TypeOf((*time.Time)(nil)).Elem()
)

// Marshaler is the interface implemented by types that can marshal themselves
// into valid TOML.
type Marshaler interface {

@@ -74,6 +81,9 @@ type Marshaler interface {
// The mapping between Go values and TOML values should be precisely the same as
// for the Decode* functions.
//
// time.Time is encoded as a RFC 3339 string, and time.Duration as its string
// representation.
//
// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to
// encoding the value as custom TOML.
//

@@ -85,6 +95,17 @@ type Marshaler interface {
//
// Go maps will be sorted alphabetically by key for deterministic output.
//
// The toml struct tag can be used to provide the key name; if omitted the
// struct field name will be used. If the "omitempty" option is present the
// following value will be skipped:
//
//   - arrays, slices, maps, and string with len of 0
//   - struct with all zero values
//   - bool false
//
// If omitzero is given all int and float types with a value of 0 will be
// skipped.
//
// Encoding Go values without a corresponding TOML representation will return an
// error. Examples of this includes maps with non-string keys, slices with nil
// elements, embedded non-struct types, and nested slices containing maps or

@@ -136,18 +157,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
}

func (enc *Encoder) encode(key Key, rv reflect.Value) {
	// Special case: time needs to be in ISO8601 format.
	//
	// Special case: if we can marshal the type to text, then we used that. This
	// prevents the encoder for handling these types as generic structs (or
	// whatever the underlying type of a TextMarshaler is).
	switch t := rv.Interface().(type) {
	case time.Time, encoding.TextMarshaler, Marshaler:
	// If we can marshal the type to text, then we use that. This prevents the
	// encoder for handling these types as generic structs (or whatever the
	// underlying type of a TextMarshaler is).
	switch {
	case isMarshaler(rv):
		enc.writeKeyValue(key, rv, false)
		return
	// TODO: #76 would make this superfluous after implemented.
	case Primitive:
		enc.encode(key, reflect.ValueOf(t.undecoded))
	case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented.
		enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
		return
	}

@@ -212,6 +230,9 @@ func (enc *Encoder) eElement(rv reflect.Value) {
		if err != nil {
			encPanic(err)
		}
		if s == nil {
			encPanic(errors.New("MarshalTOML returned nil and no error"))
		}
		enc.w.Write(s)
		return
	case encoding.TextMarshaler:

@@ -219,11 +240,34 @@ func (enc *Encoder) eElement(rv reflect.Value) {
		if err != nil {
			encPanic(err)
		}
		if s == nil {
			encPanic(errors.New("MarshalText returned nil and no error"))
		}
		enc.writeQuoted(string(s))
		return
	case time.Duration:
		enc.writeQuoted(v.String())
		return
	case json.Number:
		n, _ := rv.Interface().(json.Number)

		if n == "" { /// Useful zero value.
			enc.w.WriteByte('0')
			return
		} else if v, err := n.Int64(); err == nil {
			enc.eElement(reflect.ValueOf(v))
			return
		} else if v, err := n.Float64(); err == nil {
			enc.eElement(reflect.ValueOf(v))
			return
		}
		encPanic(errors.New(fmt.Sprintf("Unable to convert \"%s\" to neither int64 nor float64", n)))
	}

	switch rv.Kind() {
	case reflect.Ptr:
		enc.eElement(rv.Elem())
		return
	case reflect.String:
		enc.writeQuoted(rv.String())
	case reflect.Bool:

@@ -259,7 +303,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
	case reflect.Interface:
		enc.eElement(rv.Elem())
	default:
		encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
		encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
	}
}

@@ -280,7 +324,7 @@ func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
	length := rv.Len()
	enc.wf("[")
	for i := 0; i < length; i++ {
		elem := rv.Index(i)
		elem := eindirect(rv.Index(i))
		enc.eElement(elem)
		if i != length-1 {
			enc.wf(", ")

@@ -294,7 +338,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
		encPanic(errNoKey)
	}
	for i := 0; i < rv.Len(); i++ {
		trv := rv.Index(i)
		trv := eindirect(rv.Index(i))
		if isNil(trv) {
			continue
		}

@@ -319,7 +363,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
}

func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
	switch rv := eindirect(rv); rv.Kind() {
	switch rv.Kind() {
	case reflect.Map:
		enc.eMap(key, rv, inline)
	case reflect.Struct:

@@ -341,7 +385,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
	var mapKeysDirect, mapKeysSub []string
	for _, mapKey := range rv.MapKeys() {
		k := mapKey.String()
		if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) {
		if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
			mapKeysSub = append(mapKeysSub, k)
		} else {
			mapKeysDirect = append(mapKeysDirect, k)

@@ -351,7 +395,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
	var writeMapKeys = func(mapKeys []string, trailC bool) {
		sort.Strings(mapKeys)
		for i, mapKey := range mapKeys {
			val := rv.MapIndex(reflect.ValueOf(mapKey))
			val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
			if isNil(val) {
				continue
			}

@@ -379,6 +423,13 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {

const is32Bit = (32 << (^uint(0) >> 63)) == 32

func pointerTo(t reflect.Type) reflect.Type {
	if t.Kind() == reflect.Ptr {
		return pointerTo(t.Elem())
	}
	return t
}

func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
	// Write keys for fields directly under this key first, because if we write
	// a field that creates a new table then all keys under it will be in that

@@ -395,7 +446,8 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
		for i := 0; i < rt.NumField(); i++ {
			f := rt.Field(i)
			if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
			isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct
			if f.PkgPath != "" && !isEmbed { /// Skip unexported fields.
				continue
			}
			opts := getOptions(f.Tag)

@@ -403,27 +455,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
				continue
			}

			frv := rv.Field(i)
			frv := eindirect(rv.Field(i))

			// Treat anonymous struct fields with tag names as though they are
			// not anonymous, like encoding/json does.
			//
			// Non-struct anonymous fields use the normal encoding logic.
			if f.Anonymous {
				t := f.Type
				switch t.Kind() {
				case reflect.Struct:
					if getOptions(f.Tag).name == "" {
						addFields(t, frv, append(start, f.Index...))
						continue
					}
				case reflect.Ptr:
					if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
						if !frv.IsNil() {
							addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
						}
						continue
					}
			if isEmbed {
				if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct {
					addFields(frv.Type(), frv, append(start, f.Index...))
					continue
				}
			}

@@ -449,7 +490,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
	writeFields := func(fields [][]int) {
		for _, fieldIndex := range fields {
			fieldType := rt.FieldByIndex(fieldIndex)
			fieldVal := rv.FieldByIndex(fieldIndex)
			fieldVal := eindirect(rv.FieldByIndex(fieldIndex))

			if isNil(fieldVal) { /// Don't write anything for nil fields.
				continue

@@ -502,6 +543,21 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() {
		return nil
	}

	if rv.Kind() == reflect.Struct {
		if rv.Type() == timeType {
			return tomlDatetime
		}
		if isMarshaler(rv) {
			return tomlString
		}
		return tomlHash
	}

	if isMarshaler(rv) {
		return tomlString
	}

	switch rv.Kind() {
	case reflect.Bool:
		return tomlBool

@@ -513,7 +569,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
	case reflect.Float32, reflect.Float64:
		return tomlFloat
	case reflect.Array, reflect.Slice:
		if typeEqual(tomlHash, tomlArrayType(rv)) {
		if isTableArray(rv) {
			return tomlArrayHash
		}
		return tomlArray

@@ -523,67 +579,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
		return tomlString
	case reflect.Map:
		return tomlHash
	case reflect.Struct:
		if _, ok := rv.Interface().(time.Time); ok {
			return tomlDatetime
		}
		if isMarshaler(rv) {
			return tomlString
		}
		return tomlHash
	default:
		if isMarshaler(rv) {
			return tomlString
		}

		encPanic(errors.New("unsupported type: " + rv.Kind().String()))
		panic("unreachable")
	}
}

func isMarshaler(rv reflect.Value) bool {
	switch rv.Interface().(type) {
	case encoding.TextMarshaler:
		return true
	case Marshaler:
		return true
	}

	// Someone used a pointer receiver: we can make it work for pointer values.
	if rv.CanAddr() {
		if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok {
			return true
		}
		if _, ok := rv.Addr().Interface().(Marshaler); ok {
			return true
		}
	}
	return false
	return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
}

// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slize). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
		return nil
// isTableArray reports if all entries in the array or slice are a table.
func isTableArray(arr reflect.Value) bool {
	if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
		return false
	}

	/// Don't allow nil.
	rvlen := rv.Len()
	for i := 1; i < rvlen; i++ {
		if tomlTypeOfGo(rv.Index(i)) == nil {
	ret := true
	for i := 0; i < arr.Len(); i++ {
		tt := tomlTypeOfGo(eindirect(arr.Index(i)))
		// Don't allow nil.
		if tt == nil {
			encPanic(errArrayNilElement)
		}
	}

	firstType := tomlTypeOfGo(rv.Index(0))
	if firstType == nil {
		encPanic(errArrayNilElement)
		if ret && !typeEqual(tomlHash, tt) {
			ret = false
		}
	}
	return firstType
	return ret
}

type tagOptions struct {

@@ -628,6 +652,8 @@ func isEmpty(rv reflect.Value) bool {
	switch rv.Kind() {
	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
		return rv.Len() == 0
	case reflect.Struct:
		return reflect.Zero(rv.Type()).Interface() == rv.Interface()
	case reflect.Bool:
		return !rv.Bool()
	}

@@ -679,13 +705,25 @@ func encPanic(err error) {
	panic(tomlEncodeError{err})
}

// Resolve any level of pointers to the actual value (e.g. **string → string).
func eindirect(v reflect.Value) reflect.Value {
	switch v.Kind() {
	case reflect.Ptr, reflect.Interface:
		return eindirect(v.Elem())
	default:
	if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
		if isMarshaler(v) {
			return v
		}
		if v.CanAddr() { /// Special case for marshalers; see #358.
			if pv := v.Addr(); isMarshaler(pv) {
				return pv
			}
		}
		return v
	}

	if v.IsNil() {
		return v
	}

	return eindirect(v.Elem())
}

func isNil(rv reflect.Value) bool {
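On the encode side, the changes above mean time.Duration values are written in their string form and any type implementing toml.Marshaler controls its own output. A minimal sketch:

package main

import (
	"fmt"
	"os"
	"time"

	"github.com/BurntSushi/toml"
)

type level int

// MarshalTOML implements toml.Marshaler; the returned bytes are written as the
// TOML value verbatim (returning nil with no error now panics, per the diff).
func (l level) MarshalTOML() ([]byte, error) {
	names := []string{"debug", "info", "warn"}
	return []byte(fmt.Sprintf("%q", names[l])), nil
}

type config struct {
	Level   level         `toml:"level"`
	Timeout time.Duration `toml:"timeout"`
}

func main() {
	err := toml.NewEncoder(os.Stdout).Encode(config{Level: 1, Timeout: 90 * time.Second})
	if err != nil {
		panic(err)
	}
	// Output:
	// level = "info"
	// timeout = "1m30s"
}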
53  vendor/github.com/BurntSushi/toml/error.go  (generated, vendored)

@@ -128,9 +128,13 @@ func (pe ParseError) ErrorWithPosition() string {
func (pe ParseError) ErrorWithUsage() string {
	m := pe.ErrorWithPosition()
	if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
		return m + "Error help:\n\n " +
			strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") +
			"\n"
		lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
		for i := range lines {
			if lines[i] != "" {
				lines[i] = " " + lines[i]
			}
		}
		return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
	}
	return m
}

@@ -160,6 +164,11 @@ type (
	errLexInvalidDate   struct{ v string }
	errLexInlineTableNL struct{}
	errLexStringNL      struct{}
	errParseRange       struct {
		i    interface{} // int or float
		size string      // "int64", "uint16", etc.
	}
	errParseDuration struct{ d string }
)

func (e errLexControl) Error() string {

@@ -179,6 +188,10 @@ func (e errLexInlineTableNL) Error() string { return "newlines not allowed withi
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
func (e errLexStringNL) Error() string      { return "strings cannot contain newlines" }
func (e errLexStringNL) Usage() string      { return usageStringNewline }
func (e errParseRange) Error() string       { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
func (e errParseRange) Usage() string       { return usageIntOverflow }
func (e errParseDuration) Error() string    { return fmt.Sprintf("invalid duration: %q", e.d) }
func (e errParseDuration) Usage() string    { return usageDuration }

const usageEscape = `
A '\' inside a "-delimited string is interpreted as an escape character.

@@ -227,3 +240,37 @@ Instead use """ or ''' to split strings over multiple lines:
	string = """Hello,
	world!"""
`

const usageIntOverflow = `
This number is too large; this may be an error in the TOML, but it can also be a
bug in the program that uses too small of an integer.

The maximum and minimum values are:

	size   │ lowest         │ highest
	───────┼────────────────┼──────────
	int8   │ -128           │ 127
	int16  │ -32,768        │ 32,767
	int32  │ -2,147,483,648 │ 2,147,483,647
	int64  │ -9.2 × 10¹⁷    │ 9.2 × 10¹⁷
	uint8  │ 0              │ 255
	uint16 │ 0              │ 65535
	uint32 │ 0              │ 4294967295
	uint64 │ 0              │ 1.8 × 10¹⁸

int refers to int32 on 32-bit systems and int64 on 64-bit systems.
`

const usageDuration = `
A duration must be as "number<unit>", without any spaces. Valid units are:

	ns         nanoseconds (billionth of a second)
	us, µs     microseconds (millionth of a second)
	ms         milliseconds (thousands of a second)
	s          seconds
	m          minutes
	h          hours

You can combine multiple units; for example "5m10s" for 5 minutes and 10
seconds.
`
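The new errParseRange/errParseDuration types feed these usage strings through ParseError.ErrorWithUsage. A minimal sketch of surfacing the help text, assuming the public toml.ParseError type and that range errors come back wrapped as a ParseError:

package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ Age int8 }
	_, err := toml.Decode("age = 500", &v) // out of range for int8

	var pe toml.ParseError
	if errors.As(err, &pe) {
		// Prints the positioned error followed by the indented
		// usageIntOverflow table defined above.
		fmt.Println(pe.ErrorWithUsage())
	}
}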
17  vendor/github.com/BurntSushi/toml/lex.go  (generated, vendored)

@@ -82,7 +82,7 @@ func (lx *lexer) nextItem() item {
			return item
		default:
			lx.state = lx.state(lx)
			//fmt.Printf("  STATE %-24s  current: %-10q  stack: %s\n", lx.state, lx.current(), lx.stack)
			//fmt.Printf("  STATE %-24s  current: %-10s  stack: %s\n", lx.state, lx.current(), lx.stack)
		}
	}
}

@@ -716,7 +716,17 @@ func lexMultilineString(lx *lexer) stateFn {
		if lx.peek() == '"' {
			/// Check if we already lexed 5 's; if so we have 6 now, and
			/// that's just too many man!
			if strings.HasSuffix(lx.current(), `"""""`) {
			///
			/// Second check is for the edge case:
			///
			///            two quotes allowed.
			///            vv
			///   """lol \""""""
			///          ^^  ^^^---- closing three
			///          escaped
			///
			/// But ugly, but it works
			if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) {
				return lx.errorf(`unexpected '""""""'`)
			}
			lx.backup()

@@ -807,8 +817,7 @@ func lexMultilineRawString(lx *lexer) stateFn {
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
	// Handle the special case first:
	if isNL(lx.next()) {
	if isNL(lx.next()) { /// \ escaping newline.
		return lexMultilineString
	}
	lx.backup()
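The lexMultilineString hunk above is about how many quotes may pile up at the end of a multiline basic string: six unescaped quotes are rejected, but an escaped quote ahead of the closing run is now accepted. A minimal sketch, assuming the public toml.Decode API:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Edge string `toml:"edge"`
	}
	// The edge case drawn in the lexer comment: an escaped quote, two literal
	// quotes, then the closing """. Six unescaped quotes would instead produce
	// the `unexpected '""""""'` error.
	doc := `edge = """lol \""""""`
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.Edge) // "lol \"\"\""
}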
7  vendor/github.com/BurntSushi/toml/meta.go  (generated, vendored)

@@ -12,10 +12,11 @@ import (
type MetaData struct {
	context Key // Used only during decoding.

	keyInfo map[string]keyInfo
	mapping map[string]interface{}
	types   map[string]tomlType
	keys    []Key
	decoded map[string]struct{}
	data    []byte // Input file; for errors.
}

// IsDefined reports if the key exists in the TOML data.

@@ -50,8 +51,8 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type will return the empty string if given an empty key or a key that does
// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
	if typ, ok := md.types[Key(key).String()]; ok {
		return typ.typeString()
	if ki, ok := md.keyInfo[Key(key).String()]; ok {
		return ki.tomlType.typeString()
	}
	return ""
}
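With types folded into keyInfo, MetaData still answers the same questions through its public accessors. A minimal sketch:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	md, err := toml.Decode("[server]\nport = 8080", &v)
	if err != nil {
		panic(err)
	}
	fmt.Println(md.IsDefined("server", "port")) // true
	fmt.Println(md.Type("server", "port"))      // Integer
}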
54  vendor/github.com/BurntSushi/toml/parse.go  (generated, vendored)

@@ -16,12 +16,18 @@ type parser struct {
	currentKey string   // Base key name for everything except hashes.
	pos        Position // Current position in the TOML file.

	ordered []Key // List of keys in the order that they appear in the TOML data.
	ordered []Key // List of keys in the order that they appear in the TOML data.

	keyInfo   map[string]keyInfo     // Map keyname → info about the TOML key.
	mapping   map[string]interface{} // Map keyname → key value.
	types     map[string]tomlType    // Map keyname → TOML type.
	implicits map[string]struct{}    // Record implicit keys (e.g. "key.group.names").
}

type keyInfo struct {
	pos      Position
	tomlType tomlType
}

func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {

@@ -57,8 +63,8 @@ func parse(data string) (p *parser, err error) {
	}

	p = &parser{
		keyInfo:   make(map[string]keyInfo),
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]struct{}),

@@ -74,6 +80,15 @@ func parse(data string) (p *parser, err error) {
	return p, nil
}

func (p *parser) panicErr(it item, err error) {
	panic(ParseError{
		err:      err,
		Position: it.pos,
		Line:     it.pos.Len,
		LastKey:  p.current(),
	})
}

func (p *parser) panicItemf(it item, format string, v ...interface{}) {
	panic(ParseError{
		Message: fmt.Sprintf(format, v...),

@@ -94,7 +109,7 @@ func (p *parser) panicf(format string, v ...interface{}) {

func (p *parser) next() item {
	it := p.lx.nextItem()
	//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
	//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val)
	if it.typ == itemError {
		if it.err != nil {
			panic(ParseError{

@@ -146,7 +161,7 @@ func (p *parser) topLevel(item item) {
		p.assertEqual(itemTableEnd, name.typ)

		p.addContext(key, false)
		p.setType("", tomlHash)
		p.setType("", tomlHash, item.pos)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart: // [[ .. ]]
		name := p.nextPos()

@@ -158,7 +173,7 @@ func (p *parser) topLevel(item item) {
		p.assertEqual(itemArrayTableEnd, name.typ)

		p.addContext(key, true)
		p.setType("", tomlArrayHash)
		p.setType("", tomlArrayHash, item.pos)
		p.ordered = append(p.ordered, key)
	case itemKeyStart: // key = ..
		outerContext := p.context

@@ -181,8 +196,9 @@ func (p *parser) topLevel(item item) {
		}

		/// Set value.
		val, typ := p.value(p.next(), false)
		p.set(p.currentKey, val, typ)
		vItem := p.next()
		val, typ := p.value(vItem, false)
		p.set(p.currentKey, val, typ, vItem.pos)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))

		/// Remove the context we added (preserving any context from [tbl] lines).

@@ -266,7 +282,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
		// So mark the former as a bug but the latter as a legitimate user
		// error.
		if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
			p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val)
			p.panicErr(it, errParseRange{i: it.val, size: "int64"})
		} else {
			p.bug("Expected integer value, but got '%s'.", it.val)
		}

@@ -304,7 +320,7 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
	num, err := strconv.ParseFloat(val, 64)
	if err != nil {
		if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
			p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
			p.panicErr(it, errParseRange{i: it.val, size: "float64"})
		} else {
			p.panicItemf(it, "Invalid float value: %q", it.val)
		}

@@ -343,9 +359,8 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
}

func (p *parser) valueArray(it item) (interface{}, tomlType) {
	p.setType(p.currentKey, tomlArray)
	p.setType(p.currentKey, tomlArray, it.pos)

	// p.setType(p.currentKey, typ)
	var (
		types []tomlType

@@ -414,7 +429,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom

		/// Set the value.
		val, typ := p.value(p.next(), false)
		p.set(p.currentKey, val, typ)
		p.set(p.currentKey, val, typ, it.pos)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		hash[p.currentKey] = val

@@ -533,9 +548,10 @@ func (p *parser) addContext(key Key, array bool) {
}

// set calls setValue and setType.
func (p *parser) set(key string, val interface{}, typ tomlType) {
func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
	p.setValue(key, val)
	p.setType(key, typ)
	p.setType(key, typ, pos)

}

// setValue sets the given key to the given value in the current context.

@@ -599,7 +615,7 @@ func (p *parser) setValue(key string, value interface{}) {
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
func (p *parser) setType(key string, typ tomlType, pos Position) {
	keyContext := make(Key, 0, len(p.context)+1)
	keyContext = append(keyContext, p.context...)
	if len(key) > 0 { // allow type setting for hashes

@@ -611,7 +627,7 @@ func (p *parser) setType(key string, typ tomlType) {
	if len(keyContext) == 0 {
		keyContext = Key{""}
	}
	p.types[keyContext.String()] = typ
	p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
}

// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and

@@ -619,7 +635,7 @@ func (p *parser) setType(key string, typ tomlType) {
func (p *parser) addImplicit(key Key)     { p.implicits[key.String()] = struct{}{} }
func (p *parser) removeImplicit(key Key)  { delete(p.implicits, key.String()) }
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
func (p *parser) isArray(key Key) bool    { return p.types[key.String()] == tomlArray }
func (p *parser) isArray(key Key) bool    { return p.keyInfo[key.String()].tomlType == tomlArray }
func (p *parser) addImplicitContext(key Key) {
	p.addImplicit(key)
	p.addContext(key, false)

@@ -710,10 +726,8 @@ func (p *parser) replaceEscapes(it item, str string) string {
		switch s[r] {
		default:
			p.bug("Expected valid escape code after \\, but got %q.", s[r])
			return ""
		case ' ', '\t':
			p.panicItemf(it, "invalid escape: '\\%c'", s[r])
			return ""
		case 'b':
			replaced = append(replaced, rune(0x0008))
			r += 1
188  vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go  (generated, vendored)

@@ -4,17 +4,22 @@ import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim/internal/cow"
	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
	"github.com/Microsoft/hcsshim/internal/jobobject"
	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/Microsoft/hcsshim/internal/logfields"
	"github.com/Microsoft/hcsshim/internal/oc"
	"github.com/Microsoft/hcsshim/internal/timeout"
	"github.com/Microsoft/hcsshim/internal/vmcompute"
	"github.com/sirupsen/logrus"
	"go.opencensus.io/trace"
)

@@ -28,7 +33,8 @@ type System struct {
	waitBlock chan struct{}
	waitError error
	exitError error
	os, typ string
	os, typ, owner string
	startTime      time.Time
}

func newSystem(id string) *System {

@@ -38,6 +44,11 @@ func newSystem(id string) *System {
	}
}

// Implementation detail for silo naming, this should NOT be relied upon very heavily.
func siloNameFmt(containerID string) string {
	return fmt.Sprintf(`\Container_%s`, containerID)
}

// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
	operation := "hcs::CreateComputeSystem"

@@ -127,6 +138,7 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error {
	}
	computeSystem.typ = strings.ToLower(props.SystemType)
	computeSystem.os = strings.ToLower(props.RuntimeOSType)
	computeSystem.owner = strings.ToLower(props.Owner)
	if computeSystem.os == "" && computeSystem.typ == "container" {
		// Pre-RS5 HCS did not return the OS, but it only supported containers
		// that ran Windows.

@@ -195,7 +207,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
	if err != nil {
		return makeSystemError(computeSystem, operation, err, events)
	}

	computeSystem.startTime = time.Now()
	return nil
}

@@ -324,11 +336,115 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr
	return properties, nil
}

// PropertiesV2 returns the requested container properties targeting a V2 schema container.
func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()
// queryInProc handles querying for container properties without reaching out to HCS. `props`
// will be updated to contain any data returned from the queries present in `types`. If any properties
// failed to be queried they will be tallied up and returned in as the first return value. Failures on
// query are NOT considered errors; the only failure case for this method is if the containers job object
// cannot be opened.
func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) {
	// In the future we can make use of some new functionality in the HCS that allows you
	// to pass a job object for HCS to use for the container. Currently, the only way we'll
	// be able to open the job/silo is if we're running as SYSTEM.
	jobOptions := &jobobject.Options{
		UseNTVariant: true,
		Name:         siloNameFmt(computeSystem.id),
	}
	job, err := jobobject.Open(ctx, jobOptions)
	if err != nil {
		return nil, err
	}
	defer job.Close()

	var fallbackQueryTypes []hcsschema.PropertyType
	for _, propType := range types {
		switch propType {
		case hcsschema.PTStatistics:
			// Handle a bad caller asking for the same type twice. No use in re-querying if this is
			// filled in already.
			if props.Statistics == nil {
				props.Statistics, err = computeSystem.statisticsInProc(job)
				if err != nil {
					log.G(ctx).WithError(err).Warn("failed to get statistics in-proc")

					fallbackQueryTypes = append(fallbackQueryTypes, propType)
				}
			}
		default:
			fallbackQueryTypes = append(fallbackQueryTypes, propType)
		}
	}

	return fallbackQueryTypes, nil
}

// statisticsInProc emulates what HCS does to grab statistics for a given container with a small
// change to make grabbing the private working set total much more efficient.
func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) {
	// Start timestamp for these stats before we grab them to match HCS
	timestamp := time.Now()

	memInfo, err := job.QueryMemoryStats()
	if err != nil {
		return nil, err
	}

	processorInfo, err := job.QueryProcessorStats()
	if err != nil {
		return nil, err
	}

	storageInfo, err := job.QueryStorageStats()
	if err != nil {
		return nil, err
	}

	// This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation
	// with the class SystemProcessInformation which returns an array containing system information for *every*
	// process running on the machine. They then grab the pids that are running in the container and filter down
	// the entries in the array to only what's running in that silo and start tallying up the total. This doesn't
	// work well as performance should get worse if more processess are running on the machine in general and not
	// just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored
	// as well which isn't great and is wasted work to fetch.
	//
	// HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private
	// working set ourselves and ask for everything else seperately. The optimization we can make here is
	// to open the silo ourselves and do the same queries for the rest of the info, as well as calculating
	// the private working set in a more efficient manner by:
	//
	// 1. Find the pids running in the silo
	// 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access)
	// 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters
	// 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2.
	privateWorkingSet, err := job.QueryPrivateWorkingSet()
	if err != nil {
		return nil, err
	}

	return &hcsschema.Statistics{
		Timestamp:          timestamp,
		ContainerStartTime: computeSystem.startTime,
		Uptime100ns:        uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100,
		Memory: &hcsschema.MemoryStats{
			MemoryUsageCommitBytes:            memInfo.JobMemory,
			MemoryUsageCommitPeakBytes:        memInfo.PeakJobMemoryUsed,
			MemoryUsagePrivateWorkingSetBytes: privateWorkingSet,
		},
		Processor: &hcsschema.ProcessorStats{
			RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime),
			RuntimeUser100ns:   uint64(processorInfo.TotalUserTime),
			TotalRuntime100ns:  uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime),
		},
		Storage: &hcsschema.StorageStats{
			ReadCountNormalized:  uint64(storageInfo.ReadStats.IoCount),
			ReadSizeBytes:        storageInfo.ReadStats.TotalSize,
			WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount),
			WriteSizeBytes:       storageInfo.WriteStats.TotalSize,
		},
	}, nil
}

// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types.
func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) {
	operation := "hcs::System::PropertiesV2"

	queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})

@@ -345,12 +461,66 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem
	if propertiesJSON == "" {
		return nil, ErrUnexpectedValue
	}
	properties := &hcsschema.Properties{}
	if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
	props := &hcsschema.Properties{}
	if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil {
		return nil, makeSystemError(computeSystem, operation, err, nil)
	}

	return properties, nil
	return props, nil
}

// PropertiesV2 returns the requested compute systems properties targeting a V2 schema compute system.
func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) {
	computeSystem.handleLock.RLock()
	defer computeSystem.handleLock.RUnlock()

	// Let HCS tally up the total for VM based queries instead of querying ourselves.
	if computeSystem.typ != "container" {
		return computeSystem.hcsPropertiesV2Query(ctx, types)
	}

	// Define a starter Properties struct with the default fields returned from every
	// query. Owner is only returned from Statistics but it's harmless to include.
	properties := &hcsschema.Properties{
		Id:            computeSystem.id,
		SystemType:    computeSystem.typ,
		RuntimeOsType: computeSystem.os,
		Owner:         computeSystem.owner,
	}

	logEntry := log.G(ctx)
	// First lets try and query ourselves without reaching to HCS. If any of the queries fail
	// we'll take note and fallback to querying HCS for any of the failed types.
	fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types)
	if err == nil && len(fallbackTypes) == 0 {
		return properties, nil
	} else if err != nil {
		logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
		fallbackTypes = types
	}

	logEntry.WithFields(logrus.Fields{
		logfields.ContainerID: computeSystem.id,
		"propertyTypes":       fallbackTypes,
	}).Info("falling back to HCS for property type queries")

	hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes)
	if err != nil {
		return nil, err
	}

	// Now add in anything that we might have successfully queried in process.
	if properties.Statistics != nil {
		hcsProperties.Statistics = properties.Statistics
		hcsProperties.Owner = properties.Owner
	}

	// For future support for querying processlist in-proc as well.
	if properties.ProcessList != nil {
		hcsProperties.ProcessList = properties.ProcessList
	}

	return hcsProperties, nil
}

// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
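A minimal sketch of how a caller exercises the fast path above. These packages live under internal/ in hcsshim, so this only compiles from within the hcsshim module itself, and only on Windows; the function name is hypothetical:

func printPrivateWorkingSet(ctx context.Context, system *hcs.System) error {
	// For a "container" system this first tries the in-proc job-object query
	// and silently falls back to HCS for anything that failed.
	props, err := system.PropertiesV2(ctx, hcsschema.PTStatistics)
	if err != nil {
		return err
	}
	fmt.Println(props.Statistics.Memory.MemoryUsagePrivateWorkingSetBytes)
	return nil
}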
9  vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go  (generated, vendored)

@@ -21,10 +21,11 @@ const (
)

type NatPolicy struct {
	Type PolicyType `json:"Type"`
	Protocol string `json:",omitempty"`
	InternalPort uint16 `json:",omitempty"`
	ExternalPort uint16 `json:",omitempty"`
	Type                 PolicyType `json:"Type"`
	Protocol             string     `json:",omitempty"`
	InternalPort         uint16     `json:",omitempty"`
	ExternalPort         uint16     `json:",omitempty"`
	ExternalPortReserved bool       `json:",omitempty"`
}

type QosPolicy struct {
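A minimal sketch of the new field in use; the type definitions below are stand-ins mirroring the hns structs above (PolicyType is assumed to be a string type), included only so the example is self-contained:

package main

import (
	"encoding/json"
	"fmt"
)

type PolicyType string

type NatPolicy struct {
	Type                 PolicyType `json:"Type"`
	Protocol             string     `json:",omitempty"`
	InternalPort         uint16     `json:",omitempty"`
	ExternalPort         uint16     `json:",omitempty"`
	ExternalPortReserved bool       `json:",omitempty"`
}

func main() {
	p := NatPolicy{
		Type:                 "NAT",
		Protocol:             "TCP",
		InternalPort:         80,
		ExternalPort:         8080,
		ExternalPortReserved: true, // the new field; omitted from JSON when false
	}
	b, _ := json.Marshal(p)
	fmt.Println(string(b))
}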
111  vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go  (generated, vendored, normal file)

@@ -0,0 +1,111 @@
package jobobject

import (
	"context"
	"fmt"
	"sync"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/Microsoft/hcsshim/internal/queue"
	"github.com/Microsoft/hcsshim/internal/winapi"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
)

var (
	ioInitOnce sync.Once
	initIOErr  error
	// Global iocp handle that will be re-used for every job object
	ioCompletionPort windows.Handle
	// Mapping of job handle to queue to place notifications in.
	jobMap sync.Map
)

// MsgAllProcessesExited is a type representing a message that every process in a job has exited.
type MsgAllProcessesExited struct{}

// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently.
// This should not be treated as an error.
type MsgUnimplemented struct{}

// pollIOCP polls the io completion port forever.
func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
	var (
		overlapped uintptr
		code       uint32
		key        uintptr
	)

	for {
		err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE)
		if err != nil {
			log.G(ctx).WithError(err).Error("failed to poll for job object message")
			continue
		}
		if val, ok := jobMap.Load(key); ok {
			msq, ok := val.(*queue.MessageQueue)
			if !ok {
				log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map")
				continue
			}
			notification, err := parseMessage(code, overlapped)
			if err != nil {
				log.G(ctx).WithFields(logrus.Fields{
					"code":       code,
					"overlapped": overlapped,
				}).Warn("failed to parse job object message")
				continue
			}
			if err := msq.Enqueue(notification); err == queue.ErrQueueClosed {
				// Write will only return an error when the queue is closed.
				// The only time a queue would ever be closed is when we call `Close` on
				// the job it belongs to which also removes it from the jobMap, so something
				// went wrong here. We can't return as this is reading messages for all jobs
				// so just log it and move on.
				log.G(ctx).WithFields(logrus.Fields{
					"code":       code,
					"overlapped": overlapped,
				}).Warn("tried to write to a closed queue")
				continue
			}
		} else {
			log.G(ctx).Warn("received a message for a job not present in the mapping")
		}
	}
}

func parseMessage(code uint32, overlapped uintptr) (interface{}, error) {
	// Check code and parse out relevant information related to that notification
	// that we care about. For now all we handle is the message that all processes
	// in the job have exited.
	switch code {
	case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
		return MsgAllProcessesExited{}, nil
	// Other messages for completeness and a check to make sure that if we fall
	// into the default case that this is a code we don't know how to handle.
	case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME:
	case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME:
	case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT:
	case winapi.JOB_OBJECT_MSG_NEW_PROCESS:
	case winapi.JOB_OBJECT_MSG_EXIT_PROCESS:
	case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS:
	case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT:
	case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT:
	case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT:
	default:
		return nil, fmt.Errorf("unknown job notification type: %d", code)
	}
	return MsgUnimplemented{}, nil
}

// Assigns an IO completion port to get notified of events for the registered job
// object.
func attachIOCP(job windows.Handle, iocp windows.Handle) error {
	info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{
		CompletionKey:  job,
		CompletionPort: iocp,
	}
	_, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
	return err
}
538
vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
generated
vendored
Normal file
538
vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go
generated
vendored
Normal file
@@ -0,0 +1,538 @@
package jobobject

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/queue"
	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

// This file provides higher level constructs for the win32 job object API.
// Most of the core creation and management functions are already present in "golang.org/x/sys/windows"
// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information
// structs and associated limit flags. Whatever is not present from the job object API
// in golang.org/x/sys/windows is located in /internal/winapi.
//
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects

// JobObject is a high level wrapper around a Windows job object. Holds a handle to
// the job, a queue to receive iocp notifications about the lifecycle
// of the job and a mutex for synchronized handle access.
type JobObject struct {
	handle     windows.Handle
	mq         *queue.MessageQueue
	handleLock sync.RWMutex
}

// JobLimits represents the resource constraints that can be applied to a job object.
type JobLimits struct {
	CPULimit           uint32
	CPUWeight          uint32
	MemoryLimitInBytes uint64
	MaxIOPS            int64
	MaxBandwidth       int64
}

type CPURateControlType uint32

const (
	WeightBased CPURateControlType = iota
	RateBased
)

// Processor resource controls
const (
	cpuLimitMin  = 1
	cpuLimitMax  = 10000
	cpuWeightMin = 1
	cpuWeightMax = 9
)

var (
	ErrAlreadyClosed = errors.New("the handle has already been closed")
	ErrNotRegistered = errors.New("job is not registered to receive notifications")
)

// Options represents the set of configurable options when making or opening a job object.
type Options struct {
	// `Name` specifies the name of the job object if a named job object is desired.
	Name string
	// `Notifications` specifies if the job will be registered to receive notifications.
	// Defaults to false.
	Notifications bool
	// `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
	// Defaults to false.
	UseNTVariant bool
	// `EnableIOTracking` enables tracking I/O statistics on the job object. More specifically this
	// calls SetInformationJobObject with the JobObjectIoAttribution class.
	EnableIOTracking bool
}

// Create creates a job object.
//
// If options.Name is an empty string, the job will not be assigned a name.
//
// If options.Notifications is not enabled, `PollNotification` will return immediately with error `ErrNotRegistered`.
//
// If `options` is nil, default option values are used.
//
// Returns a JobObject structure and an error if there is one.
func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
	if options == nil {
		options = &Options{}
	}

	var jobName *winapi.UnicodeString
	if options.Name != "" {
		jobName, err = winapi.NewUnicodeString(options.Name)
		if err != nil {
			return nil, err
		}
	}

	var jobHandle windows.Handle
	if options.UseNTVariant {
		oa := winapi.ObjectAttributes{
			Length:     unsafe.Sizeof(winapi.ObjectAttributes{}),
			ObjectName: jobName,
			Attributes: 0,
		}
		status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
		if status != 0 {
			return nil, winapi.RtlNtStatusToDosError(status)
		}
	} else {
		var jobNameBuf *uint16
		if jobName != nil && jobName.Buffer != nil {
			jobNameBuf = jobName.Buffer
		}
		jobHandle, err = windows.CreateJobObject(nil, jobNameBuf)
		if err != nil {
			return nil, err
		}
	}

	defer func() {
		if err != nil {
			windows.Close(jobHandle)
		}
	}()

	job := &JobObject{
		handle: jobHandle,
	}

	// If the IOCP we'll be using to receive messages for all jobs hasn't been
	// created, create it and start polling.
	if options.Notifications {
		mq, err := setupNotifications(ctx, job)
		if err != nil {
			return nil, err
		}
		job.mq = mq
	}

	if options.EnableIOTracking {
		if err := enableIOTracking(jobHandle); err != nil {
			return nil, err
		}
	}

	return job, nil
}
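
// Usage sketch (illustrative only; ctx and pid are hypothetical caller
// values, not part of this package):
//
//	job, err := Create(ctx, &Options{Name: "demo", Notifications: true})
//	if err != nil {
//		return err
//	}
//	defer job.Close()
//	_ = job.Assign(pid) // place an already-running process in the job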

// Open opens an existing job object with the name provided in `options`. If no
// name is provided, an error is returned, since we need to know which job
// object to open.
//
// If options.Notifications is false, `PollNotification` will return immediately with error `ErrNotRegistered`.
//
// Returns a JobObject structure and an error if there is one.
func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
	if options == nil || options.Name == "" {
		return nil, errors.New("no job object name specified to open")
	}

	unicodeJobName, err := winapi.NewUnicodeString(options.Name)
	if err != nil {
		return nil, err
	}

	var jobHandle windows.Handle
	if options.UseNTVariant {
		oa := winapi.ObjectAttributes{
			Length:     unsafe.Sizeof(winapi.ObjectAttributes{}),
			ObjectName: unicodeJobName,
			Attributes: 0,
		}
		status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
		if status != 0 {
			return nil, winapi.RtlNtStatusToDosError(status)
		}
	} else {
		jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
		if err != nil {
			return nil, err
		}
	}

	defer func() {
		if err != nil {
			windows.Close(jobHandle)
		}
	}()

	job := &JobObject{
		handle: jobHandle,
	}

	// If the IOCP we'll be using to receive messages for all jobs hasn't been
	// created, create it and start polling.
	if options.Notifications {
		mq, err := setupNotifications(ctx, job)
		if err != nil {
			return nil, err
		}
		job.mq = mq
	}

	return job, nil
}

// helper function to set up notifications for creating/opening a job object
func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	ioInitOnce.Do(func() {
		h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
		if err != nil {
			initIOErr = err
			return
		}
		ioCompletionPort = h
		go pollIOCP(ctx, h)
	})

	if initIOErr != nil {
		return nil, initIOErr
	}

	mq := queue.NewMessageQueue()
	jobMap.Store(uintptr(job.handle), mq)
	if err := attachIOCP(job.handle, ioCompletionPort); err != nil {
		jobMap.Delete(uintptr(job.handle))
		return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err)
	}
	return mq, nil
}

// PollNotification will poll for a job object notification. This call should only be called once
// per job (ideally in a goroutine loop) and will block if there is not a notification ready.
// This call will return immediately with error `ErrNotRegistered` if the job was not registered
// to receive notifications during `Create`. Internally, messages will be queued and there
// is no worry of messages being dropped.
func (job *JobObject) PollNotification() (interface{}, error) {
	if job.mq == nil {
		return nil, ErrNotRegistered
	}
	return job.mq.Dequeue()
}
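
// Usage sketch (illustrative only): drain notifications in a single
// dedicated goroutine, as the doc comment above recommends. handleMsg is a
// hypothetical callback.
//
//	go func() {
//		for {
//			msg, err := job.PollNotification()
//			if err != nil { // ErrNotRegistered, or the queue was closed
//				return
//			}
//			handleMsg(msg)
//		}
//	}()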

// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to
// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process
// has already started running.
func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	if err := attrList.Update(
		winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST,
		unsafe.Pointer(&job.handle),
		unsafe.Sizeof(job.handle),
	); err != nil {
		return fmt.Errorf("failed to update proc thread attributes for job object: %w", err)
	}

	return nil
}
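
// Usage sketch (illustrative only, error handling elided): build an
// attribute list so a child process joins the job at creation time instead
// of being Assign()ed afterwards. The CreateProcess wiring (StartupInfoEx
// plus EXTENDED_STARTUPINFO_PRESENT) is assumed, not shown.
//
//	attrs, _ := windows.NewProcThreadAttributeList(1)
//	defer attrs.Delete()
//	_ = job.UpdateProcThreadAttribute(attrs)
//	// pass attrs to CreateProcess via StartupInfoEx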

// Close closes the job object handle.
func (job *JobObject) Close() error {
	job.handleLock.Lock()
	defer job.handleLock.Unlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	if err := windows.Close(job.handle); err != nil {
		return err
	}

	if job.mq != nil {
		job.mq.Close()
	}
	// The handle is now invalid, so if the map entry to receive notifications for this job still
	// exists, remove it so we can stop receiving notifications.
	if _, ok := jobMap.Load(uintptr(job.handle)); ok {
		jobMap.Delete(uintptr(job.handle))
	}

	job.handle = 0
	return nil
}

// Assign assigns a process to the job object.
func (job *JobObject) Assign(pid uint32) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	if pid == 0 {
		return errors.New("invalid pid: 0")
	}
	hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid)
	if err != nil {
		return err
	}
	defer windows.Close(hProc)
	return windows.AssignProcessToJobObject(job.handle, hProc)
}

// Terminate terminates the job, essentially calling TerminateProcess on every process in the
// job.
func (job *JobObject) Terminate(exitCode uint32) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()
	if job.handle == 0 {
		return ErrAlreadyClosed
	}
	return windows.TerminateJobObject(job.handle, exitCode)
}

// Pids returns all of the process IDs in the job object.
func (job *JobObject) Pids() ([]uint32, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{}
	err := winapi.QueryInformationJobObject(
		job.handle,
		winapi.JobObjectBasicProcessIdList,
		unsafe.Pointer(&info),
		uint32(unsafe.Sizeof(info)),
		nil,
	)

	// A nil error is the case where there is only one process, or no processes, in
	// the job. Any other case will result in ERROR_MORE_DATA. If info.NumberOfProcessIdsInList
	// is 1, return just that entry; otherwise return an empty slice.
	if err == nil {
		if info.NumberOfProcessIdsInList == 1 {
			return []uint32{uint32(info.ProcessIdList[0])}, nil
		}
		// Return an empty slice instead of nil to play well with the caller of this.
		// Do not return an error if no processes are running inside the job.
		return []uint32{}, nil
	}

	if err != winapi.ERROR_MORE_DATA {
		return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err)
	}

	jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1))
	buf := make([]byte, jobBasicProcessIDListSize)
	if err = winapi.QueryInformationJobObject(
		job.handle,
		winapi.JobObjectBasicProcessIdList,
		unsafe.Pointer(&buf[0]),
		uint32(len(buf)),
		nil,
	); err != nil {
		return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err)
	}

	bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0]))
	pids := make([]uint32, bufInfo.NumberOfProcessIdsInList)
	for i, bufPid := range bufInfo.AllPids() {
		pids[i] = uint32(bufPid)
	}
	return pids, nil
}

// QueryMemoryStats gets the memory stats for the job object.
func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{}
	if err := winapi.QueryInformationJobObject(
		job.handle,
		winapi.JobObjectMemoryUsageInformation,
		unsafe.Pointer(&info),
		uint32(unsafe.Sizeof(info)),
		nil,
	); err != nil {
		return nil, fmt.Errorf("failed to query for job object memory stats: %w", err)
	}
	return &info, nil
}

// QueryProcessorStats gets the processor stats for the job object.
func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{}
	if err := winapi.QueryInformationJobObject(
		job.handle,
		winapi.JobObjectBasicAccountingInformation,
		unsafe.Pointer(&info),
		uint32(unsafe.Sizeof(info)),
		nil,
	); err != nil {
		return nil, fmt.Errorf("failed to query for job object process stats: %w", err)
	}
	return &info, nil
}

// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error
// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking()
// hasn't been called since creation of the job.
func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
		ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
	}
	if err := winapi.QueryInformationJobObject(
		job.handle,
		winapi.JobObjectIoAttribution,
		unsafe.Pointer(&info),
		uint32(unsafe.Sizeof(info)),
		nil,
	); err != nil {
		return nil, fmt.Errorf("failed to query for job object storage stats: %w", err)
	}
	return &info, nil
}

// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
// private working set for every process running in the job.
func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
	pids, err := job.Pids()
	if err != nil {
		return 0, err
	}

	openAndQueryWorkingSet := func(pid uint32) (uint64, error) {
		h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
		if err != nil {
			// Continue to the next if OpenProcess doesn't return a valid handle (fails). Handles a
			// case where one of the pids in the job exited before we open.
			return 0, nil
		}
		defer func() {
			_ = windows.Close(h)
		}()
		// Check if the process is actually still running in the job. There's a small chance
		// that the process could have exited and had its pid re-used between grabbing the pids
		// in the job and opening the handle to it above.
		var inJob int32
		if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil {
			// This shouldn't fail unless we have incorrect access rights, which we control
			// here, so it's probably best to error out if this failed.
			return 0, err
		}
		// Don't report stats for this process as it's not running in the job. This shouldn't be
		// an error condition though.
		if inJob == 0 {
			return 0, nil
		}

		var vmCounters winapi.VM_COUNTERS_EX2
		status := winapi.NtQueryInformationProcess(
			h,
			winapi.ProcessVmCounters,
			unsafe.Pointer(&vmCounters),
			uint32(unsafe.Sizeof(vmCounters)),
			nil,
		)
		if !winapi.NTSuccess(status) {
			return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status))
		}
		return uint64(vmCounters.PrivateWorkingSetSize), nil
	}

	var jobWorkingSetSize uint64
	for _, pid := range pids {
		workingSet, err := openAndQueryWorkingSet(pid)
		if err != nil {
			return 0, err
		}
		jobWorkingSetSize += workingSet
	}

	return jobWorkingSetSize, nil
}

// SetIOTracking enables IO tracking for processes in the job object.
// This enables use of the QueryStorageStats method.
func (job *JobObject) SetIOTracking() error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	return enableIOTracking(job.handle)
}

func enableIOTracking(job windows.Handle) error {
	info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
		ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
	}
	if _, err := windows.SetInformationJobObject(
		job,
		winapi.JobObjectIoAttribution,
		uintptr(unsafe.Pointer(&info)),
		uint32(unsafe.Sizeof(info)),
	); err != nil {
		return fmt.Errorf("failed to enable IO tracking on job object: %w", err)
	}
	return nil
}
315 vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go generated vendored Normal file
@@ -0,0 +1,315 @@
package jobobject

import (
	"errors"
	"fmt"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

const (
	memoryLimitMax uint64 = 0xffffffffffffffff
)

func isFlagSet(flag, controlFlags uint32) bool {
	return (flag & controlFlags) == flag
}

// SetResourceLimits sets resource limits on the job object (cpu, memory, storage).
func (job *JobObject) SetResourceLimits(limits *JobLimits) error {
	// Go through and check what limits were specified and apply them to the job.
	if limits.MemoryLimitInBytes != 0 {
		if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil {
			return fmt.Errorf("failed to set job object memory limit: %w", err)
		}
	}

	if limits.CPULimit != 0 {
		if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil {
			return fmt.Errorf("failed to set job object cpu limit: %w", err)
		}
	} else if limits.CPUWeight != 0 {
		if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil {
			return fmt.Errorf("failed to set job object cpu limit: %w", err)
		}
	}

	if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 {
		if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil {
			return fmt.Errorf("failed to set io limit on job object: %w", err)
		}
	}
	return nil
}
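
// Usage sketch (illustrative only): cap the job at half the machine's CPU
// cycles and 1 GiB of memory. CPULimit is rate-based and, per the Windows
// CPU rate control convention reflected in the constants above, expressed
// in 1/100ths of a percent (1-10000).
//
//	err := job.SetResourceLimits(&JobLimits{
//		CPULimit:           5000, // 50%
//		MemoryLimitInBytes: 1 << 30,
//	})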

// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate
// all processes in the job on the last open handle being closed.
func (job *JobObject) SetTerminateOnLastHandleClose() error {
	info, err := job.getExtendedInformation()
	if err != nil {
		return err
	}
	info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
	return job.setExtendedInformation(info)
}

// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`.
func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error {
	if memoryLimitInBytes >= memoryLimitMax {
		return errors.New("memory limit specified exceeds the max size")
	}

	info, err := job.getExtendedInformation()
	if err != nil {
		return err
	}

	info.JobMemoryLimit = uintptr(memoryLimitInBytes)
	info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY
	return job.setExtendedInformation(info)
}

// GetMemoryLimit gets the memory limit in bytes of the job object.
func (job *JobObject) GetMemoryLimit() (uint64, error) {
	info, err := job.getExtendedInformation()
	if err != nil {
		return 0, err
	}
	return uint64(info.JobMemoryLimit), nil
}

// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to
// `rateControlValue` for the job object.
func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error {
	cpuInfo, err := job.getCPURateControlInformation()
	if err != nil {
		return err
	}
	switch rateControlType {
	case WeightBased:
		if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax {
			return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue)
		}
		cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED
		cpuInfo.Value = rateControlValue
	case RateBased:
		if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax {
			return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue)
		}
		cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP
		cpuInfo.Value = rateControlValue
	default:
		return errors.New("invalid job object cpu rate control type")
	}
	return job.setCPURateControlInfo(cpuInfo)
}

// GetCPULimit gets the cpu limits for the job object.
// `rateControlType` is used to indicate what type of cpu limit to query for.
func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) {
	info, err := job.getCPURateControlInformation()
	if err != nil {
		return 0, err
	}

	if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) {
		return 0, errors.New("the job does not have cpu rate control enabled")
	}

	switch rateControlType {
	case WeightBased:
		if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) {
			return 0, errors.New("cannot get cpu weight for job object without cpu weight option set")
		}
	case RateBased:
		if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) {
			return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set")
		}
	default:
		return 0, errors.New("invalid job object cpu rate control type")
	}
	return info.Value, nil
}

// SetCPUAffinity sets the processor affinity for the job object.
// The affinity is passed in as a bitmask.
func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error {
	info, err := job.getExtendedInformation()
	if err != nil {
		return err
	}
	info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY)
	info.BasicLimitInformation.Affinity = uintptr(affinityBitMask)
	return job.setExtendedInformation(info)
}

// GetCPUAffinity gets the processor affinity for the job object.
// The returned affinity is a bitmask.
func (job *JobObject) GetCPUAffinity() (uint64, error) {
	info, err := job.getExtendedInformation()
	if err != nil {
		return 0, err
	}
	return uint64(info.BasicLimitInformation.Affinity), nil
}

// SetIOLimit sets the IO limits specified on the job object.
func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error {
	ioInfo, err := job.getIOLimit()
	if err != nil {
		return err
	}
	ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE
	if maxBandwidth != 0 {
		ioInfo.MaxBandwidth = maxBandwidth
	}
	if maxIOPS != 0 {
		ioInfo.MaxIops = maxIOPS
	}
	return job.setIORateControlInfo(ioInfo)
}

// GetIOMaxBandwidthLimit gets the max bandwidth for the job object.
func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) {
	info, err := job.getIOLimit()
	if err != nil {
		return 0, err
	}
	return info.MaxBandwidth, nil
}

// GetIOMaxIopsLimit gets the max iops for the job object.
func (job *JobObject) GetIOMaxIopsLimit() (int64, error) {
	info, err := job.getIOLimit()
	if err != nil {
		return 0, err
	}
	return info.MaxIops, nil
}

// Helper function for getting a job object's extended information.
func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{}
	if err := winapi.QueryInformationJobObject(
		job.handle,
		windows.JobObjectExtendedLimitInformation,
		unsafe.Pointer(&info),
		uint32(unsafe.Sizeof(info)),
		nil,
	); err != nil {
		return nil, fmt.Errorf("query %v returned error: %w", info, err)
	}
	return &info, nil
}

// Helper function for getting a job object's CPU rate control information.
func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{}
	if err := winapi.QueryInformationJobObject(
		job.handle,
		windows.JobObjectCpuRateControlInformation,
		unsafe.Pointer(&info),
		uint32(unsafe.Sizeof(info)),
		nil,
	); err != nil {
		return nil, fmt.Errorf("query %v returned error: %w", info, err)
	}
	return &info, nil
}

// Helper function for setting a job object's extended information.
func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	if _, err := windows.SetInformationJobObject(
		job.handle,
		windows.JobObjectExtendedLimitInformation,
		uintptr(unsafe.Pointer(info)),
		uint32(unsafe.Sizeof(*info)),
	); err != nil {
		return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err)
	}
	return nil
}

// Helper function for querying the job handle for IO limit information.
func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{}
	var blockCount uint32 = 1

	if _, err := winapi.QueryIoRateControlInformationJobObject(
		job.handle,
		nil,
		&ioInfo,
		&blockCount,
	); err != nil {
		return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err)
	}

	if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) {
		return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo)
	}
	return ioInfo, nil
}

// Helper function for setting a job object's IO rate control information.
func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil {
		return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err)
	}
	return nil
}

// Helper function for setting a job object's CPU rate control information.
func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}
	if _, err := windows.SetInformationJobObject(
		job.handle,
		windows.JobObjectCpuRateControlInformation,
		uintptr(unsafe.Pointer(cpuInfo)),
		// Pass the size of the struct itself, not of the pointer to it.
		uint32(unsafe.Sizeof(*cpuInfo)),
	); err != nil {
		return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err)
	}
	return nil
}
92 vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go generated vendored Normal file
@@ -0,0 +1,92 @@
package queue

import (
	"errors"
	"sync"
)

var ErrQueueClosed = errors.New("the queue is closed for reading and writing")

// MessageQueue represents a threadsafe message queue to be used to retrieve or
// write messages to.
type MessageQueue struct {
	m        *sync.RWMutex
	c        *sync.Cond
	messages []interface{}
	closed   bool
}

// NewMessageQueue returns a new MessageQueue.
func NewMessageQueue() *MessageQueue {
	m := &sync.RWMutex{}
	return &MessageQueue{
		m:        m,
		c:        sync.NewCond(m),
		messages: []interface{}{},
	}
}

// Enqueue writes `msg` to the queue.
func (mq *MessageQueue) Enqueue(msg interface{}) error {
	mq.m.Lock()
	defer mq.m.Unlock()

	if mq.closed {
		return ErrQueueClosed
	}
	mq.messages = append(mq.messages, msg)
	// Signal a waiter that there is now a value available in the queue.
	mq.c.Signal()
	return nil
}

// Dequeue will read a value from the queue and remove it. If the queue
// is empty, this will block until the queue is closed or a value gets enqueued.
func (mq *MessageQueue) Dequeue() (interface{}, error) {
	mq.m.Lock()
	defer mq.m.Unlock()

	for !mq.closed && mq.size() == 0 {
		mq.c.Wait()
	}

	// We got woken up; check if it's because the queue got closed.
	if mq.closed {
		return nil, ErrQueueClosed
	}

	val := mq.messages[0]
	mq.messages[0] = nil
	mq.messages = mq.messages[1:]
	return val, nil
}
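
// Usage sketch (illustrative only): one producer, one blocking consumer.
//
//	mq := NewMessageQueue()
//	go func() {
//		_ = mq.Enqueue("hello")
//		mq.Close() // wakes the blocked Dequeue with ErrQueueClosed
//	}()
//	for {
//		v, err := mq.Dequeue()
//		if err != nil { // ErrQueueClosed
//			break
//		}
//		_ = v // process the message
//	}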

// Size returns the size of the queue.
func (mq *MessageQueue) Size() int {
	mq.m.RLock()
	defer mq.m.RUnlock()
	return mq.size()
}

// Non-exported size check, used to check if the queue is empty inside already locked functions.
func (mq *MessageQueue) size() int {
	return len(mq.messages)
}

// Close closes the queue for future writes or reads. Any attempts to read or write from the
// queue after close will return ErrQueueClosed. This is safe to call multiple times.
func (mq *MessageQueue) Close() {
	mq.m.Lock()
	defer mq.m.Unlock()

	// Already closed, no-op
	if mq.closed {
		return
	}

	mq.messages = nil
	mq.closed = true
	// If there's anybody currently waiting on a value from Dequeue, we need to
	// broadcast so the read(s) can return ErrQueueClosed.
	mq.c.Broadcast()
}
3 vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go generated vendored
@@ -1,3 +0,0 @@
package winapi

//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error)
11 vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go generated vendored
@@ -24,7 +24,10 @@ const (
 // Access rights for creating or opening job objects.
 //
 // https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
-const JOB_OBJECT_ALL_ACCESS = 0x1F001F
+const (
+	JOB_OBJECT_QUERY      = 0x0004
+	JOB_OBJECT_ALL_ACCESS = 0x1F001F
+)

 // IO limit flags
 //
@@ -93,7 +96,7 @@ type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {

 // AllPids returns all the process Ids in the job object.
 func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr {
-	return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList]
+	return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList]
 }
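
The added third index is a full slice expression: it caps the returned slice's capacity at its length, so a caller's append reallocates instead of writing past NumberOfProcessIdsInList into adjacent struct memory. A minimal sketch of the effect (hypothetical caller):

	pids := list.AllPids()
	pids = append(pids, 0) // forces a copy; cannot clobber the struct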

 // https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information
@@ -162,7 +165,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
 //   PBOOL  Result
 // );
 //
-//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob
+//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob
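
Win32 BOOL is a 4-byte integer, so letting the generated stub write the result through a Go *bool (1 byte) can clobber adjacent memory; *int32 matches the ABI. Call-side sketch (hProc and hJob are hypothetical handles):

	var inJob int32
	if err := winapi.IsProcessInJob(hProc, hJob, &inJob); err == nil && inJob != 0 {
		// the process is in the job
	}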

 // BOOL QueryInformationJobObject(
 //   HANDLE hJob,
@@ -172,7 +175,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
 //   LPDWORD lpReturnLength
 // );
 //
-//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
+//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject

 // HANDLE OpenJobObjectW(
 //   DWORD dwDesiredAccess,
57 vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go generated vendored
@@ -6,3 +6,60 @@ const (
 	PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
 	PROC_THREAD_ATTRIBUTE_JOB_LIST      = 0x2000D
 )
+
+// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures.
+const ProcessVmCounters = 3
+
+// __kernel_entry NTSTATUS NtQueryInformationProcess(
+//	[in]            HANDLE           ProcessHandle,
+//	[in]            PROCESSINFOCLASS ProcessInformationClass,
+//	[out]           PVOID            ProcessInformation,
+//	[in]            ULONG            ProcessInformationLength,
+//	[out, optional] PULONG           ReturnLength
+// );
+//
+//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
+
+// typedef struct _VM_COUNTERS_EX
+// {
+//	SIZE_T PeakVirtualSize;
+//	SIZE_T VirtualSize;
+//	ULONG  PageFaultCount;
+//	SIZE_T PeakWorkingSetSize;
+//	SIZE_T WorkingSetSize;
+//	SIZE_T QuotaPeakPagedPoolUsage;
+//	SIZE_T QuotaPagedPoolUsage;
+//	SIZE_T QuotaPeakNonPagedPoolUsage;
+//	SIZE_T QuotaNonPagedPoolUsage;
+//	SIZE_T PagefileUsage;
+//	SIZE_T PeakPagefileUsage;
+//	SIZE_T PrivateUsage;
+// } VM_COUNTERS_EX, *PVM_COUNTERS_EX;
+//
+type VM_COUNTERS_EX struct {
+	PeakVirtualSize            uintptr
+	VirtualSize                uintptr
+	PageFaultCount             uint32
+	PeakWorkingSetSize         uintptr
+	WorkingSetSize             uintptr
+	QuotaPeakPagedPoolUsage    uintptr
+	QuotaPagedPoolUsage        uintptr
+	QuotaPeakNonPagedPoolUsage uintptr
+	QuotaNonPagedPoolUsage     uintptr
+	PagefileUsage              uintptr
+	PeakPagefileUsage          uintptr
+	PrivateUsage               uintptr
+}
+
+// typedef struct _VM_COUNTERS_EX2
+// {
+//	VM_COUNTERS_EX CountersEx;
+//	SIZE_T         PrivateWorkingSetSize;
+//	SIZE_T         SharedCommitUsage;
+// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2;
+//
+type VM_COUNTERS_EX2 struct {
+	CountersEx            VM_COUNTERS_EX
+	PrivateWorkingSetSize uintptr
+	SharedCommitUsage     uintptr
+}
3 vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go generated vendored
@@ -12,7 +12,8 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
 //	ULONG SystemInformationLength,
 //	PULONG ReturnLength
 // );
-//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
+//
+//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation

 type SYSTEM_PROCESS_INFORMATION struct {
 	NextEntryOffset uint32 // ULONG
2 vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go generated vendored
@@ -2,4 +2,4 @@
 // be thought of as an extension to golang.org/x/sys/windows.
 package winapi

-//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
+//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
26 vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go generated vendored
@@ -50,7 +50,6 @@ var (
 	procSetJobCompartmentId       = modiphlpapi.NewProc("SetJobCompartmentId")
 	procSearchPathW               = modkernel32.NewProc("SearchPathW")
 	procCreateRemoteThread        = modkernel32.NewProc("CreateRemoteThread")
-	procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
 	procIsProcessInJob            = modkernel32.NewProc("IsProcessInJob")
 	procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
 	procOpenJobObjectW            = modkernel32.NewProc("OpenJobObjectW")
@@ -61,6 +60,7 @@ var (
 	procLogonUserW                  = modadvapi32.NewProc("LogonUserW")
 	procLocalAlloc                  = modkernel32.NewProc("LocalAlloc")
 	procLocalFree                   = modkernel32.NewProc("LocalFree")
+	procNtQueryInformationProcess   = modntdll.NewProc("NtQueryInformationProcess")
 	procGetActiveProcessorCount     = modkernel32.NewProc("GetActiveProcessorCount")
 	procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
 	procCM_Get_Device_ID_ListA      = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
@@ -100,7 +100,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
 	return
 }

-func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
+func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) {
 	r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
 	status = uint32(r0)
 	return
@@ -140,19 +140,7 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes,
 	return
 }

-func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) {
+func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) {
 	r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
 	if r1 == 0 {
 		if e1 != 0 {
@@ -164,7 +152,7 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result
 	return
 }

-func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
+func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
 	r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
 	if r1 == 0 {
 		if e1 != 0 {
@@ -256,6 +244,12 @@ func LocalFree(ptr uintptr) {
 	return
 }

+func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) {
+	r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0)
+	status = uint32(r0)
+	return
+}
+
 func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
 	r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
 	amount = uint32(r0)
20 vendor/github.com/beorn7/perks/LICENSE generated vendored
@@ -1,20 +0,0 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
2388 vendor/github.com/beorn7/perks/quantile/exampledata.txt generated vendored
File diff suppressed because it is too large
316 vendor/github.com/beorn7/perks/quantile/stream.go generated vendored
@@ -1,316 +0,0 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
	// Convert map to slice to avoid slow iterations on a map.
	// ƒ is called on the hot path, so converting the map to a slice
	// beforehand results in significant CPU savings.
	targets := targetMapToSlice(targetMap)

	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for _, t := range targets {
			if t.quantile*s.n <= r {
				f = (2 * t.epsilon * r) / t.quantile
			} else {
				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}
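
// Usage sketch (illustrative only): track p50/p90/p99 with per-quantile
// absolute error bounds.
//
//	s := quantile.NewTargeted(map[float64]float64{
//		0.50: 0.05,
//		0.90: 0.01,
//		0.99: 0.001,
//	})
//	for _, v := range observations { // observations is hypothetical input
//		s.Insert(v)
//	}
//	p99 := s.Query(0.99)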

type target struct {
	quantile float64
	epsilon  float64
}

func targetMapToSlice(targetMap map[float64]float64) []target {
	targets := make([]target, 0, len(targetMap))

	for quantile, epsilon := range targetMap {
		t := target{
			quantile: quantile,
			epsilon:  epsilon,
		}
		targets = append(targets, t)
	}

	return targets
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
22 vendor/github.com/cespare/xxhash/v2/LICENSE.txt generated vendored
@@ -1,22 +0,0 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
69 vendor/github.com/cespare/xxhash/v2/README.md generated vendored
@@ -1,69 +0,0 @@
# xxhash

[Go Reference](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[Test](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
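
A minimal usage sketch (names exactly as in the API above):

```
h := xxhash.Sum64String("hello")

d := xxhash.New()
d.WriteString("hello")
fmt.Println(h == d.Sum64()) // true: streaming and one-shot agree
```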
|
||||
|
||||
This implementation provides a fast pure-Go implementation and an even faster
|
||||
assembly implementation for amd64.
|
||||
|
||||
## Compatibility
|
||||
|
||||
This package is in a module and the latest code is in version 2 of the module.
|
||||
You need a version of Go with at least "minimal module compatibility" to use
|
||||
github.com/cespare/xxhash/v2:
|
||||
|
||||
* 1.9.7+ for Go 1.9
|
||||
* 1.10.3+ for Go 1.10
|
||||
* Go 1.11 or later
|
||||
|
||||
I recommend using the latest release of Go.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
||||
implementations of Sum64.
|
||||
|
||||
| input size | purego | asm |
|
||||
| --- | --- | --- |
|
||||
| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
||||
|
||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
||||
the following commands under Go 1.11.2:
|
||||
|
||||
```
|
||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
||||
```
|
||||
|
||||
## Projects using this package
|
||||
|
||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
||||
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||
- [FreeCache](https://github.com/coocood/freecache)
|
||||
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
||||
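For reference, a minimal usage sketch of the two entry points the removed README documents (one-shot `Sum64` and the streaming `Digest`); this is illustration only, not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))

	// Streaming: Digest implements hash.Hash64, so input can arrive in chunks.
	d := xxhash.New()
	d.WriteString("hel") // always returns len(s), nil
	d.WriteString("lo")
	fmt.Printf("%016x\n", d.Sum64()) // same value as the one-shot call
}
```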
235 vendor/github.com/cespare/xxhash/v2/xxhash.go generated vendored
@@ -1,235 +0,0 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash

import (
	"encoding/binary"
	"errors"
	"math/bits"
)

const (
	prime1 uint64 = 11400714785074694791
	prime2 uint64 = 14029467366897019727
	prime3 uint64 = 1609587929392839161
	prime4 uint64 = 9650029242287828579
	prime5 uint64 = 2870177450012600261
)

// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
	prime1v = prime1
	prime2v = prime2
	prime3v = prime3
	prime4v = prime4
	prime5v = prime5
)

// Digest implements hash.Hash64.
type Digest struct {
	v1    uint64
	v2    uint64
	v3    uint64
	v4    uint64
	total uint64
	mem   [32]byte
	n     int // how much of mem is used
}

// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
	var d Digest
	d.Reset()
	return &d
}

// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
	d.v1 = prime1v + prime2
	d.v2 = prime2
	d.v3 = 0
	d.v4 = -prime1v
	d.total = 0
	d.n = 0
}

// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }

// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }

// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
	n = len(b)
	d.total += uint64(n)

	if d.n+n < 32 {
		// This new data doesn't even fill the current block.
		copy(d.mem[d.n:], b)
		d.n += n
		return
	}

	if d.n > 0 {
		// Finish off the partial block.
		copy(d.mem[d.n:], b)
		d.v1 = round(d.v1, u64(d.mem[0:8]))
		d.v2 = round(d.v2, u64(d.mem[8:16]))
		d.v3 = round(d.v3, u64(d.mem[16:24]))
		d.v4 = round(d.v4, u64(d.mem[24:32]))
		b = b[32-d.n:]
		d.n = 0
	}

	if len(b) >= 32 {
		// One or more full blocks left.
		nw := writeBlocks(d, b)
		b = b[nw:]
	}

	// Store any remaining partial block.
	copy(d.mem[:], b)
	d.n = len(b)

	return
}

// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
	s := d.Sum64()
	return append(
		b,
		byte(s>>56),
		byte(s>>48),
		byte(s>>40),
		byte(s>>32),
		byte(s>>24),
		byte(s>>16),
		byte(s>>8),
		byte(s),
	)
}

// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
	var h uint64

	if d.total >= 32 {
		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = d.v3 + prime5
	}

	h += d.total

	i, end := 0, d.n
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(d.mem[i:i+8]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(d.mem[i:i+4])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for i < end {
		h ^= uint64(d.mem[i]) * prime5
		h = rol11(h) * prime1
		i++
	}

	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}

const (
	magic         = "xxh\x06"
	marshaledSize = len(magic) + 8*5 + 32
)

// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
	b := make([]byte, 0, marshaledSize)
	b = append(b, magic...)
	b = appendUint64(b, d.v1)
	b = appendUint64(b, d.v2)
	b = appendUint64(b, d.v3)
	b = appendUint64(b, d.v4)
	b = appendUint64(b, d.total)
	b = append(b, d.mem[:d.n]...)
	b = b[:len(b)+len(d.mem)-d.n]
	return b, nil
}

// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("xxhash: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("xxhash: invalid hash state size")
	}
	b = b[len(magic):]
	b, d.v1 = consumeUint64(b)
	b, d.v2 = consumeUint64(b)
	b, d.v3 = consumeUint64(b)
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}

func appendUint64(b []byte, x uint64) []byte {
	var a [8]byte
	binary.LittleEndian.PutUint64(a[:], x)
	return append(b, a[:]...)
}

func consumeUint64(b []byte) ([]byte, uint64) {
	x := u64(b)
	return b[8:], x
}

func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }

func round(acc, input uint64) uint64 {
	acc += input * prime2
	acc = rol31(acc)
	acc *= prime1
	return acc
}

func mergeRound(acc, val uint64) uint64 {
	val = round(0, val)
	acc ^= val
	acc = acc*prime1 + prime4
	return acc
}

func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
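The MarshalBinary/UnmarshalBinary pair above serializes the complete streaming state (the four accumulators, the byte count, and the partial block), so a digest can be checkpointed mid-stream and resumed later; a small sketch, assuming the standard xxhash/v2 API:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.New()
	d.WriteString("hello ")

	// Checkpoint the partially-fed digest.
	state, err := d.MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Restore into a fresh digest and keep writing.
	d2 := xxhash.New()
	if err := d2.UnmarshalBinary(state); err != nil {
		panic(err)
	}
	d2.WriteString("world")

	fmt.Println(d2.Sum64() == xxhash.Sum64String("hello world")) // true
}
```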
13 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go generated vendored
@@ -1,13 +0,0 @@
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64

//go:noescape
func writeBlocks(d *Digest, b []byte) int
215 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s generated vendored
@@ -1,215 +0,0 @@
// +build !appengine
// +build gc
// +build !purego

#include "textflag.h"

// Register allocation:
// AX	h
// SI	pointer to advance through b
// DX	n
// BX	loop end
// R8	v1, k1
// R9	v2
// R10	v3
// R11	v4
// R12	tmp
// R13	prime1v
// R14	prime2v
// DI	prime4v

// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
	MOVQ  (SI), R12 \
	ADDQ  $8, SI    \
	IMULQ R14, R12  \
	ADDQ  R12, r    \
	ROLQ  $31, r    \
	IMULQ R13, r

// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
	IMULQ R14, val \
	ROLQ  $31, val \
	IMULQ R13, val \
	XORQ  val, acc \
	IMULQ R13, acc \
	ADDQ  DI, acc

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
	// Load fixed primes.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14
	MOVQ ·prime4v(SB), DI

	// Load slice.
	MOVQ b_base+0(FP), SI
	MOVQ b_len+8(FP), DX
	LEAQ (SI)(DX*1), BX

	// The first loop limit will be len(b)-32.
	SUBQ $32, BX

	// Check whether we have at least one block.
	CMPQ DX, $32
	JLT  noBlocks

	// Set up initial state (v1, v2, v3, v4).
	MOVQ R13, R8
	ADDQ R14, R8
	MOVQ R14, R9
	XORQ R10, R10
	XORQ R11, R11
	SUBQ R13, R11

	// Loop until SI > BX.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ SI, BX
	JLE  blockLoop

	MOVQ R8, AX
	ROLQ $1, AX
	MOVQ R9, R12
	ROLQ $7, R12
	ADDQ R12, AX
	MOVQ R10, R12
	ROLQ $12, R12
	ADDQ R12, AX
	MOVQ R11, R12
	ROLQ $18, R12
	ADDQ R12, AX

	mergeRound(AX, R8)
	mergeRound(AX, R9)
	mergeRound(AX, R10)
	mergeRound(AX, R11)

	JMP afterBlocks

noBlocks:
	MOVQ ·prime5v(SB), AX

afterBlocks:
	ADDQ DX, AX

	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
	ADDQ $24, BX

	CMPQ SI, BX
	JG   fourByte

wordLoop:
	// Calculate k1.
	MOVQ  (SI), R8
	ADDQ  $8, SI
	IMULQ R14, R8
	ROLQ  $31, R8
	IMULQ R13, R8

	XORQ  R8, AX
	ROLQ  $27, AX
	IMULQ R13, AX
	ADDQ  DI, AX

	CMPQ SI, BX
	JLE  wordLoop

fourByte:
	ADDQ $4, BX
	CMPQ SI, BX
	JG   singles

	MOVL  (SI), R8
	ADDQ  $4, SI
	IMULQ R13, R8
	XORQ  R8, AX

	ROLQ  $23, AX
	IMULQ R14, AX
	ADDQ  ·prime3v(SB), AX

singles:
	ADDQ $4, BX
	CMPQ SI, BX
	JGE  finalize

singlesLoop:
	MOVBQZX (SI), R12
	ADDQ    $1, SI
	IMULQ   ·prime5v(SB), R12
	XORQ    R12, AX

	ROLQ  $11, AX
	IMULQ R13, AX

	CMPQ SI, BX
	JL   singlesLoop

finalize:
	MOVQ  AX, R12
	SHRQ  $33, R12
	XORQ  R12, AX
	IMULQ R14, AX
	MOVQ  AX, R12
	SHRQ  $29, R12
	XORQ  R12, AX
	IMULQ ·prime3v(SB), AX
	MOVQ  AX, R12
	SHRQ  $32, R12
	XORQ  R12, AX

	MOVQ AX, ret+24(FP)
	RET

// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.

// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
	// Load fixed primes needed for round.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14

	// Load slice.
	MOVQ b_base+8(FP), SI
	MOVQ b_len+16(FP), DX
	LEAQ (SI)(DX*1), BX
	SUBQ $32, BX

	// Load vN from d.
	MOVQ d+0(FP), AX
	MOVQ 0(AX), R8   // v1
	MOVQ 8(AX), R9   // v2
	MOVQ 16(AX), R10 // v3
	MOVQ 24(AX), R11 // v4

	// We don't need to check the loop condition here; this function is
	// always called with at least one block of data to process.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ SI, BX
	JLE  blockLoop

	// Copy vN back to d.
	MOVQ R8, 0(AX)
	MOVQ R9, 8(AX)
	MOVQ R10, 16(AX)
	MOVQ R11, 24(AX)

	// The number of bytes written is SI minus the old base pointer.
	SUBQ b_base+8(FP), SI
	MOVQ SI, ret+32(FP)

	RET
76 vendor/github.com/cespare/xxhash/v2/xxhash_other.go generated vendored
@@ -1,76 +0,0 @@
// +build !amd64 appengine !gc purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
	// A simpler version would be
	//   d := New()
	//   d.Write(b)
	//   return d.Sum64()
	// but this is faster, particularly for small inputs.

	n := len(b)
	var h uint64

	if n >= 32 {
		v1 := prime1v + prime2
		v2 := prime2
		v3 := uint64(0)
		v4 := -prime1v
		for len(b) >= 32 {
			v1 = round(v1, u64(b[0:8:len(b)]))
			v2 = round(v2, u64(b[8:16:len(b)]))
			v3 = round(v3, u64(b[16:24:len(b)]))
			v4 = round(v4, u64(b[24:32:len(b)]))
			b = b[32:len(b):len(b)]
		}
		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
		h = mergeRound(h, v1)
		h = mergeRound(h, v2)
		h = mergeRound(h, v3)
		h = mergeRound(h, v4)
	} else {
		h = prime5
	}

	h += uint64(n)

	i, end := 0, len(b)
	for ; i+8 <= end; i += 8 {
		k1 := round(0, u64(b[i:i+8:len(b)]))
		h ^= k1
		h = rol27(h)*prime1 + prime4
	}
	if i+4 <= end {
		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
		h = rol23(h)*prime2 + prime3
		i += 4
	}
	for ; i < end; i++ {
		h ^= uint64(b[i]) * prime5
		h = rol11(h) * prime1
	}

	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32

	return h
}

func writeBlocks(d *Digest, b []byte) int {
	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
	n := len(b)
	for len(b) >= 32 {
		v1 = round(v1, u64(b[0:8:len(b)]))
		v2 = round(v2, u64(b[8:16:len(b)]))
		v3 = round(v3, u64(b[16:24:len(b)]))
		v4 = round(v4, u64(b[24:32:len(b)]))
		b = b[32:len(b):len(b)]
	}
	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
	return n - len(b)
}
15 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go generated vendored
@@ -1,15 +0,0 @@
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
	return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
	return d.Write([]byte(s))
}
57 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go generated vendored
@@ -1,57 +0,0 @@
// +build !appengine

// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.

package xxhash

import (
	"unsafe"
)

// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.

// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
//   var b []byte
//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
//   bh.Len = len(s)
//   bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.

// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
	return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
	// d.Write always returns len(s), nil.
	// Ignoring the return output and returning these fixed values buys a
	// savings of 6 in the inliner's cost model.
	return len(s), nil
}

// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
	s   string
	cap int
}
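The unsafe fast path above must be observationally identical to the safe appengine path; a quick standalone sketch of that invariant (illustration only, not part of the vendored code):

```go
package main

import (
	"github.com/cespare/xxhash/v2"
)

func main() {
	s := "The quick brown fox jumps over the lazy dog"
	// Sum64String must agree with hashing an explicit copy; the unsafe
	// variant only skips the []byte(s) allocation.
	if xxhash.Sum64String(s) != xxhash.Sum64([]byte(s)) {
		panic("Sum64String diverged from Sum64")
	}
}
```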
67 vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go generated vendored
@@ -32,6 +32,7 @@ import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"sort"
	"strings"
@@ -1313,6 +1314,18 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
			),
			wantFailOnLossLess: true,
		},
		{
			name: "hardlink should be replaced to the destination entry",
			in: tarOf(
				dir("foo/"),
				file("foo/foo1", "test"),
				link("foolink", "foo/foo1"),
			),
			wantNumGz: 4, // dir, foo1 + link, TOC, footer
			want: checks(
				mustSameEntry("foo/foo1", "foolink"),
			),
		},
	}

	for _, tt := range tests {
@@ -1730,6 +1743,60 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
	})
}

func mustSameEntry(files ...string) stargzCheck {
	return stargzCheckFn(func(t *testing.T, r *Reader) {
		var first *TOCEntry
		for _, f := range files {
			if first == nil {
				var ok bool
				first, ok = r.Lookup(f)
				if !ok {
					t.Errorf("unknown first file on Lookup: %q", f)
					return
				}
			}

			// Test Lookup
			e, ok := r.Lookup(f)
			if !ok {
				t.Errorf("unknown file on Lookup: %q", f)
				return
			}
			if e != first {
				t.Errorf("Lookup: %+v(%p) != %+v(%p)", e, e, first, first)
				return
			}

			// Test LookupChild
			pe, ok := r.Lookup(filepath.Dir(filepath.Clean(f)))
			if !ok {
				t.Errorf("failed to get parent of %q", f)
				return
			}
			e, ok = pe.LookupChild(filepath.Base(filepath.Clean(f)))
			if !ok {
				t.Errorf("failed to get %q as the child of %+v", f, pe)
				return
			}
			if e != first {
				t.Errorf("LookupChild: %+v(%p) != %+v(%p)", e, e, first, first)
				return
			}

			// Test ForeachChild
			pe.ForeachChild(func(baseName string, e *TOCEntry) bool {
				if baseName == filepath.Base(filepath.Clean(f)) {
					if e != first {
						t.Errorf("ForeachChild: %+v(%p) != %+v(%p)", e, e, first, first)
						return false
					}
				}
				return true
			})
		}
	})
}

func tarOf(s ...tarEntry) []tarEntry { return s }

type tarEntry interface {
3 vendor/github.com/containerd/stargz-snapshotter/estargz/types.go generated vendored
@@ -159,7 +159,8 @@ type TOCEntry struct {

	// NumLink is the number of entry names pointing to this entry.
	// Zero means one name references this entry.
	NumLink int
	// This field is calculated during runtime and not recorded in TOC JSON.
	NumLink int `json:"-"`

	// Xattrs are the extended attribute for the entry.
	Xattrs map[string][]byte `json:"xattrs,omitempty"`
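The `json:"-"` tag added above is what keeps the runtime-only NumLink field out of the serialized TOC; a minimal illustration of the tag's effect (the entry type here is a stand-in, not the real TOCEntry):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type entry struct {
	Name    string `json:"name"`
	NumLink int    `json:"-"` // runtime-only, never serialized
}

func main() {
	b, _ := json.Marshal(entry{Name: "foo/foo1", NumLink: 2})
	fmt.Println(string(b)) // {"name":"foo/foo1"}, NumLink is omitted
}
```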
43 vendor/github.com/containers/common/pkg/auth/auth.go generated vendored
@@ -3,6 +3,7 @@ package auth
import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"net/url"
	"os"
@@ -14,7 +15,6 @@ import (
	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/pkg/sysregistriesv2"
	"github.com/containers/image/v5/types"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	terminal "golang.org/x/term"
)
@@ -26,8 +26,8 @@ func GetDefaultAuthFile() string {
	if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" {
		return authfile
	}
	if auth_env := os.Getenv("DOCKER_CONFIG"); auth_env != "" {
		return filepath.Join(auth_env, "config.json")
	if authEnv := os.Getenv("DOCKER_CONFIG"); authEnv != "" {
		return filepath.Join(authEnv, "config.json")
	}
	return ""
}
@@ -39,7 +39,7 @@ func CheckAuthFile(authfile string) error {
		return nil
	}
	if _, err := os.Stat(authfile); err != nil {
		return errors.Wrap(err, "checking authfile")
		return fmt.Errorf("checking authfile: %w", err)
	}
	return nil
}
@@ -97,12 +97,12 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO

	authConfig, err := config.GetCredentials(systemContext, key)
	if err != nil {
		return errors.Wrap(err, "get credentials")
		return fmt.Errorf("get credentials: %w", err)
	}

	if opts.GetLoginSet {
		if authConfig.Username == "" {
			return errors.Errorf("not logged into %s", key)
			return fmt.Errorf("not logged into %s", key)
		}
		fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username)
		return nil
@@ -139,7 +139,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO

	username, password, err := getUserAndPass(opts, password, authConfig.Username)
	if err != nil {
		return errors.Wrap(err, "getting username and password")
		return fmt.Errorf("getting username and password: %w", err)
	}

	if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil {
@@ -158,9 +158,9 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO
	}
	if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
		logrus.Debugf("error logging into %q: %v", key, unauthorized)
		return errors.Errorf("error logging into %q: invalid username/password", key)
		return fmt.Errorf("logging into %q: invalid username/password", key)
	}
	return errors.Wrapf(err, "authenticating creds for %q", key)
	return fmt.Errorf("authenticating creds for %q: %w", key, err)
}

// parseCredentialsKey turns the provided argument into a valid credential key
@@ -191,10 +191,10 @@ func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry str
	// Ideally c/image should provide dedicated validation functionality.
	ref, err := reference.ParseNormalizedNamed(key)
	if err != nil {
		return "", "", errors.Wrapf(err, "parse reference from %q", key)
		return "", "", fmt.Errorf("parse reference from %q: %w", key, err)
	}
	if !reference.IsNameOnly(ref) {
		return "", "", errors.Errorf("reference %q contains tag or digest", ref.String())
		return "", "", fmt.Errorf("reference %q contains tag or digest", ref.String())
	}
	refRegistry := reference.Domain(ref)
	if refRegistry != registry { // This should never happen, check just to make sure
@@ -232,7 +232,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user
	}
	username, err = reader.ReadString('\n')
	if err != nil {
		return "", "", errors.Wrap(err, "reading username")
		return "", "", fmt.Errorf("reading username: %w", err)
	}
	// If the user just hit enter, use the displayed user from the
	// the authentication file. This allows to do a lazy
@@ -246,7 +246,7 @@ func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user
	fmt.Fprint(opts.Stdout, "Password: ")
	pass, err := terminal.ReadPassword(int(os.Stdin.Fd()))
	if err != nil {
		return "", "", errors.Wrap(err, "reading password")
		return "", "", fmt.Errorf("reading password: %w", err)
	}
	password = string(pass)
	fmt.Fprintln(opts.Stdout)
@@ -298,14 +298,15 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
	}

	err = config.RemoveAuthentication(systemContext, key)
	switch errors.Cause(err) {
	case nil:
	if err == nil {
		fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", key)
		return nil
	case config.ErrNotLoggedIn:
	}

	if errors.Is(err, config.ErrNotLoggedIn) {
		authConfig, err := config.GetCredentials(systemContext, key)
		if err != nil {
			return errors.Wrap(err, "get credentials")
			return fmt.Errorf("get credentials: %w", err)
		}

		authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry)
@@ -313,10 +314,10 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
			fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login. Please use docker logout instead.\n", key)
			return nil
		}
		return errors.Errorf("Not logged into %s\n", key)
	default:
		return errors.Wrapf(err, "logging out of %q", key)
		return fmt.Errorf("not logged into %s", key)
	}

	return fmt.Errorf("logging out of %q: %w", key, err)
}

// defaultRegistryWhenUnspecified returns first registry from search list of registry.conf
@@ -324,7 +325,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) {
	registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext)
	if err != nil {
		return "", errors.Wrap(err, "getting registry from registry.conf, please specify a registry")
		return "", fmt.Errorf("getting registry from registry.conf, please specify a registry: %w", err)
	}
	if len(registriesFromFile) == 0 {
		return "", errors.New("no registries found in registries.conf, a registry must be provided")
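The Logout change above swaps a `switch errors.Cause(err)` (github.com/pkg/errors) for stdlib `errors.Is`, which also matches sentinels through `fmt.Errorf("...: %w", err)` chains. A minimal illustration of why the migration works (the sentinel here is a stand-in, not the real c/image value):

```go
package main

import (
	"errors"
	"fmt"
)

var ErrNotLoggedIn = errors.New("not logged in") // stand-in sentinel

func main() {
	err := fmt.Errorf("removing credentials for quay.io: %w", ErrNotLoggedIn)

	// A direct comparison fails once the error is wrapped:
	fmt.Println(err == ErrNotLoggedIn) // false

	// errors.Is walks the %w chain and still matches:
	fmt.Println(errors.Is(err, ErrNotLoggedIn)) // true
}
```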
20 vendor/github.com/containers/common/pkg/capabilities/capabilities.go generated vendored
@@ -6,11 +6,12 @@ package capabilities
// changed significantly to fit the needs of libpod.

import (
	"errors"
	"fmt"
	"sort"
	"strings"
	"sync"

	"github.com/pkg/errors"
	"github.com/syndtr/gocapability/capability"
)

@@ -104,8 +105,8 @@ func AllCapabilities() []string {
// NormalizeCapabilities normalizes caps by adding a "CAP_" prefix (if not yet
// present).
func NormalizeCapabilities(caps []string) ([]string, error) {
	normalized := make([]string, len(caps))
	for i, c := range caps {
	normalized := make([]string, 0, len(caps))
	for _, c := range caps {
		c = strings.ToUpper(c)
		if c == All {
			normalized = append(normalized, c)
@@ -115,9 +116,9 @@ func NormalizeCapabilities(caps []string) ([]string, error) {
			c = "CAP_" + c
		}
		if !stringInSlice(c, capabilityList) {
			return nil, errors.Wrapf(ErrUnknownCapability, "%q", c)
			return nil, fmt.Errorf("%q: %w", c, ErrUnknownCapability)
		}
		normalized[i] = c
		normalized = append(normalized, c)
	}
	sort.Strings(normalized)
	return normalized, nil
@@ -127,7 +128,7 @@ func NormalizeCapabilities(caps []string) ([]string, error) {
func ValidateCapabilities(caps []string) error {
	for _, c := range caps {
		if !stringInSlice(c, capabilityList) {
			return errors.Wrapf(ErrUnknownCapability, "%q", c)
			return fmt.Errorf("%q: %w", c, ErrUnknownCapability)
		}
	}
	return nil
@@ -140,8 +141,6 @@ func ValidateCapabilities(caps []string) error {
// "ALL" in capAdd adds returns known capabilities
// "All" in capDrop returns only the capabilities specified in capAdd
func MergeCapabilities(base, adds, drops []string) ([]string, error) {
	var caps []string

	// Normalize the base capabilities
	base, err := NormalizeCapabilities(base)
	if err != nil {
@@ -178,17 +177,18 @@ func MergeCapabilities(base, adds, drops []string) ([]string, error) {
	} else {
		for _, add := range capAdd {
			if stringInSlice(add, capDrop) {
				return nil, errors.Errorf("capability %q cannot be dropped and added", add)
				return nil, fmt.Errorf("capability %q cannot be dropped and added", add)
			}
		}
	}

	for _, drop := range capDrop {
		if stringInSlice(drop, capAdd) {
			return nil, errors.Errorf("capability %q cannot be dropped and added", drop)
			return nil, fmt.Errorf("capability %q cannot be dropped and added", drop)
		}
	}

	caps := make([]string, 0, len(base)+len(capAdd))
	// Drop any capabilities in capDrop that are in base
	for _, cap := range base {
		if stringInSlice(cap, capDrop) {
2 vendor/github.com/containers/common/pkg/completion/completion.go generated vendored
@@ -51,7 +51,7 @@ func AutocompleteCapabilities(cmd *cobra.Command, args []string, toComplete stri
		offset = 4
	}

	var completions []string
	completions := make([]string, 0, len(caps))
	for _, cap := range caps {
		completions = append(completions, convertCase(cap)[offset:])
	}
20 vendor/github.com/containers/common/pkg/report/formatter.go generated vendored
@@ -59,16 +59,22 @@ type Formatter struct {
func (f *Formatter) Parse(origin Origin, text string) (*Formatter, error) {
	f.Origin = origin

	// docker tries to be smart and replaces \n with the actual newline character.
	// For compat we do the same but this will break formats such as '{{printf "\n"}}'
	// To be backwards compatible with the previous behavior we try to replace and
	// parse the template. If it fails use the original text and parse again.
	var normText string
	switch {
	case strings.HasPrefix(text, "table "):
		f.RenderTable = true
		text = "{{range .}}" + NormalizeFormat(text) + "{{end -}}"
		normText = "{{range .}}" + NormalizeFormat(text) + "{{end -}}"
		text = "{{range .}}" + text + "{{end -}}"
	case OriginUser == origin:
		text = EnforceRange(NormalizeFormat(text))
		normText = EnforceRange(NormalizeFormat(text))
		text = EnforceRange(text)
	default:
		text = NormalizeFormat(text)
		normText = NormalizeFormat(text)
	}
	f.text = text

	if f.RenderTable || origin == OriginPodman {
		tw := tabwriter.NewWriter(f.writer, 12, 2, 2, ' ', tabwriter.StripEscape)
@@ -77,10 +83,14 @@ func (f *Formatter) Parse(origin Origin, text string) (*Formatter, error) {
		f.RenderHeaders = true
	}

	tmpl, err := f.template.Funcs(template.FuncMap(DefaultFuncs)).Parse(text)
	tmpl, err := f.template.Funcs(template.FuncMap(DefaultFuncs)).Parse(normText)
	if err != nil {
		tmpl, err = f.template.Funcs(template.FuncMap(DefaultFuncs)).Parse(text)
		f.template = tmpl
		f.text = text
		return f, err
	}
	f.text = normText
	f.template = tmpl
	return f, nil
}
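The Parse change above first tries the normalized text and only falls back to the raw text when normalization breaks the template. A sketch of that fallback shape using only the standard library (`normalize` here is a stand-in for the real NormalizeFormat helper, assumed to replace a literal `\n` with a newline):

```go
package main

import (
	"fmt"
	"strings"
	"text/template"
)

// Stand-in for report.NormalizeFormat: replace the two-character
// sequence \n with an actual newline, as docker does.
func normalize(text string) string {
	return strings.ReplaceAll(text, `\n`, "\n")
}

func main() {
	text := `{{printf "\n"}}` // normalization corrupts this format string
	tmpl, err := template.New("fmt").Parse(normalize(text))
	if err != nil {
		// Fall back to the original, un-normalized text.
		tmpl, err = template.New("fmt").Parse(text)
	}
	fmt.Println(tmpl != nil, err) // true <nil>: the fallback parse succeeds
}
```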
39 vendor/github.com/containers/common/pkg/retry/retry.go generated vendored
@@ -12,25 +12,32 @@ import (
	"github.com/docker/distribution/registry/api/errcode"
	errcodev2 "github.com/docker/distribution/registry/api/v2"
	"github.com/hashicorp/go-multierror"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// RetryOptions defines the option to retry
type RetryOptions struct {
	MaxRetry int           // The number of times to possibly retry
	Delay    time.Duration // The delay to use between retries, if set
// Options defines the option to retry.
type Options struct {
	MaxRetry int           // The number of times to possibly retry.
	Delay    time.Duration // The delay to use between retries, if set.
}

// RetryIfNecessary retries the operation in exponential backoff with the retryOptions
func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions *RetryOptions) error {
// RetryOptions is deprecated, use Options.
type RetryOptions = Options // nolint:revive

// RetryIfNecessary deprecated function use IfNecessary.
func RetryIfNecessary(ctx context.Context, operation func() error, options *Options) error { // nolint:revive
	return IfNecessary(ctx, operation, options)
}

// IfNecessary retries the operation in exponential backoff with the retry Options.
func IfNecessary(ctx context.Context, operation func() error, options *Options) error {
	err := operation()
	for attempt := 0; err != nil && isRetryable(err) && attempt < retryOptions.MaxRetry; attempt++ {
	for attempt := 0; err != nil && isRetryable(err) && attempt < options.MaxRetry; attempt++ {
		delay := time.Duration(int(math.Pow(2, float64(attempt)))) * time.Second
		if retryOptions.Delay != 0 {
			delay = retryOptions.Delay
		if options.Delay != 0 {
			delay = options.Delay
		}
		logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, retryOptions.MaxRetry, err)
		logrus.Warnf("Failed, retrying in %s ... (%d/%d). Error: %v", delay, attempt+1, options.MaxRetry, err)
		select {
		case <-time.After(delay):
			break
@@ -43,8 +50,6 @@ func RetryIfNecessary(ctx context.Context, operation func() error, retryOptions
}

func isRetryable(err error) bool {
	err = errors.Cause(err)

	switch err {
	case nil:
		return false
@@ -91,6 +96,14 @@ func isRetryable(err error) bool {
		}
	}
	return true
	case net.Error:
		if e.Timeout() {
			return true
		}
		if unwrappable, ok := e.(unwrapper); ok {
			err = unwrappable.Unwrap()
			return isRetryable(err)
		}
	case unwrapper: // Test this last, because various error types might implement .Unwrap()
		err = e.Unwrap()
		return isRetryable(err)
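A sketch of calling the renamed API above (the deprecated RetryIfNecessary alias keeps old callers building); the failing operation here is a placeholder for something like a registry pull that can hit transient network errors:

```go
package main

import (
	"context"
	"errors"
	"time"

	"github.com/containers/common/pkg/retry"
)

func main() {
	opts := retry.Options{
		MaxRetry: 3,                      // retry up to 3 times
		Delay:    500 * time.Millisecond, // fixed delay instead of exponential backoff
	}
	err := retry.IfNecessary(context.Background(), func() error {
		// Placeholder operation; only errors isRetryable accepts are retried.
		return errors.New("transient failure")
	}, &opts)
	_ = err
}
```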
11 vendor/github.com/containers/image/v5/copy/blob.go generated vendored
@@ -9,7 +9,6 @@ import (
	"github.com/containers/image/v5/internal/private"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	"github.com/containers/image/v5/types"
	perrors "github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

@@ -36,7 +35,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
	// read stream to the end, and validation does not happen.
	digestingReader, err := newDigestingReader(stream.reader, srcInfo.Digest)
	if err != nil {
		return types.BlobInfo{}, perrors.Wrapf(err, "preparing to verify blob %s", srcInfo.Digest)
		return types.BlobInfo{}, fmt.Errorf("preparing to verify blob %s: %w", srcInfo.Digest, err)
	}
	stream.reader = digestingReader

@@ -107,7 +106,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
	}
	uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
	if err != nil {
		return types.BlobInfo{}, perrors.Wrap(err, "writing blob")
		return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err)
	}

	uploadedInfo.Annotations = stream.info.Annotations
@@ -126,7 +125,7 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
		_, err := io.Copy(io.Discard, originalLayerReader)
		if err != nil {
			return types.BlobInfo{}, perrors.Wrapf(err, "reading input blob %s", srcInfo.Digest)
			return types.BlobInfo{}, fmt.Errorf("reading input blob %s: %w", srcInfo.Digest, err)
		}
	}

@@ -165,8 +164,8 @@ type errorAnnotationReader struct {
// Read annotates the error happened during read
func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
	n, err = r.reader.Read(b)
	if err != io.EOF {
		return n, perrors.Wrapf(err, "happened during read")
	if err != nil && err != io.EOF {
		return n, fmt.Errorf("happened during read: %w", err)
	}
	return n, err
}
3 vendor/github.com/containers/image/v5/copy/compression.go generated vendored
@@ -9,7 +9,6 @@ import (
	"github.com/containers/image/v5/pkg/compression"
	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
	"github.com/containers/image/v5/types"
	perrors "github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

@@ -28,7 +27,7 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
	format, decompressor, reader, err := compression.DetectCompressionFormat(stream.reader) // We could skip this in some cases, but let's keep the code path uniform
	if err != nil {
		return bpDetectCompressionStepData{}, perrors.Wrapf(err, "reading blob %s", srcInfo.Digest)
		return bpDetectCompressionStepData{}, fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
	}
	stream.reader = reader

136 vendor/github.com/containers/image/v5/copy/copy.go generated vendored
@@ -19,7 +19,6 @@ import (
	"github.com/containers/image/v5/internal/imagesource"
	"github.com/containers/image/v5/internal/pkg/platform"
	"github.com/containers/image/v5/internal/private"
	internalsig "github.com/containers/image/v5/internal/signature"
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/pkg/blobinfocache"
	"github.com/containers/image/v5/pkg/compression"
@@ -30,7 +29,6 @@ import (
	encconfig "github.com/containers/ocicrypt/config"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	perrors "github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/vbauerster/mpb/v7"
	"golang.org/x/sync/semaphore"
@@ -208,29 +206,37 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,

	publicDest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
	if err != nil {
		return nil, perrors.Wrapf(err, "initializing destination %s", transports.ImageName(destRef))
		return nil, fmt.Errorf("initializing destination %s: %w", transports.ImageName(destRef), err)
	}
	dest := imagedestination.FromPublic(publicDest)
	defer func() {
		if err := dest.Close(); err != nil {
			retErr = perrors.Wrapf(retErr, " (dest: %v)", err)
			if retErr != nil {
				retErr = fmt.Errorf(" (dest: %v): %w", err, retErr)
			} else {
				retErr = fmt.Errorf(" (dest: %v)", err)
			}
		}
	}()

	publicRawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
	if err != nil {
		return nil, perrors.Wrapf(err, "initializing source %s", transports.ImageName(srcRef))
		return nil, fmt.Errorf("initializing source %s: %w", transports.ImageName(srcRef), err)
	}
	rawSource := imagesource.FromPublic(publicRawSource)
	defer func() {
		if err := rawSource.Close(); err != nil {
			retErr = perrors.Wrapf(retErr, " (src: %v)", err)
			if retErr != nil {
				retErr = fmt.Errorf(" (src: %v): %w", err, retErr)
			} else {
				retErr = fmt.Errorf(" (src: %v)", err)
			}
		}
	}()

	// If reportWriter is not a TTY (e.g., when piping to a file), do not
	// print the progress bars to avoid long and hard to parse output.
	// createProgressBar() will print a single line instead.
	// Instead use printCopyInfo() to print single line "Copying ..." messages.
	progressOutput := reportWriter
	if !isTTY(reportWriter) {
		progressOutput = io.Discard
@@ -281,7 +287,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
	unparsedToplevel := image.UnparsedInstance(rawSource, nil)
	multiImage, err := isMultiImage(ctx, unparsedToplevel)
	if err != nil {
		return nil, perrors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(srcRef))
		return nil, fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(srcRef), err)
	}

	if !multiImage {
@@ -294,21 +300,21 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
		// matches the current system to copy, and copy it.
		mfest, manifestType, err := unparsedToplevel.Manifest(ctx)
		if err != nil {
			return nil, perrors.Wrapf(err, "reading manifest for %s", transports.ImageName(srcRef))
			return nil, fmt.Errorf("reading manifest for %s: %w", transports.ImageName(srcRef), err)
		}
		manifestList, err := manifest.ListFromBlob(mfest, manifestType)
		if err != nil {
			return nil, perrors.Wrapf(err, "parsing primary manifest as list for %s", transports.ImageName(srcRef))
			return nil, fmt.Errorf("parsing primary manifest as list for %s: %w", transports.ImageName(srcRef), err)
		}
		instanceDigest, err := manifestList.ChooseInstance(options.SourceCtx) // try to pick one that matches options.SourceCtx
		if err != nil {
			return nil, perrors.Wrapf(err, "choosing an image from manifest list %s", transports.ImageName(srcRef))
			return nil, fmt.Errorf("choosing an image from manifest list %s: %w", transports.ImageName(srcRef), err)
		}
		logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest)
		unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)

		if copiedManifest, _, _, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, nil); err != nil {
			return nil, perrors.Wrap(err, "copying system image from manifest list")
			return nil, fmt.Errorf("copying system image from manifest list: %w", err)
		}
	} else { /* options.ImageListSelection == CopyAllImages or options.ImageListSelection == CopySpecificImages, */
		// If we were asked to copy multiple images and can't, that's an error.
@@ -328,7 +334,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
	}

	if err := c.dest.Commit(ctx, unparsedToplevel); err != nil {
		return nil, perrors.Wrap(err, "committing the finished image")
		return nil, fmt.Errorf("committing the finished image: %w", err)
	}

	return copiedManifest, nil
@@ -355,7 +361,7 @@ func supportsMultipleImages(dest types.ImageDestination) bool {
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
	srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
	if err != nil {
		return false, nil, "", "", perrors.Wrapf(err, "calculating manifest digest")
		return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
	}

	destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
@@ -372,7 +378,7 @@ func compareImageDestinationManifestEqual(ctx context.Context, options *Options,

	destManifestDigest, err := manifest.Digest(destManifest)
	if err != nil {
		return false, nil, "", "", perrors.Wrapf(err, "calculating manifest digest")
		return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
	}

	logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
@@ -390,31 +396,19 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
	// Parse the list and get a copy of the original value after it's re-encoded.
	manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
	if err != nil {
		return nil, perrors.Wrapf(err, "reading manifest list")
		return nil, fmt.Errorf("reading manifest list: %w", err)
	}
	originalList, err := manifest.ListFromBlob(manifestList, manifestType)
	if err != nil {
		return nil, perrors.Wrapf(err, "parsing manifest list %q", string(manifestList))
		return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err)
	}
	updatedList := originalList.Clone()

	// Read and/or clear the set of signatures for this list.
	var sigs []internalsig.Signature
	if options.RemoveSignatures {
		sigs = []internalsig.Signature{}
	} else {
		c.Printf("Getting image list signatures\n")
		s, err := c.rawSource.GetSignaturesWithFormat(ctx, nil)
		if err != nil {
			return nil, perrors.Wrap(err, "reading signatures")
		}
		sigs = s
	}
	if len(sigs) != 0 {
		c.Printf("Checking if image list destination supports signatures\n")
		if err := c.dest.SupportsSignatures(ctx); err != nil {
			return nil, perrors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
		}
	sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options,
		"Getting image list signatures",
		"Checking if image list destination supports signatures")
	if err != nil {
		return nil, err
	}

	// If the destination is a digested reference, make a note of that, determine what digest value we're
@@ -425,7 +419,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
		destIsDigestedReference = true
		matches, err := manifest.MatchesDigest(manifestList, digested.Digest())
		if err != nil {
			return nil, perrors.Wrapf(err, "computing digest of source image's manifest")
			return nil, fmt.Errorf("computing digest of source image's manifest: %w", err)
		}
		if !matches {
			return nil, errors.New("Digest of source image's manifest would not match destination reference")
@@ -457,7 +451,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
	}
	selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
	if err != nil {
		return nil, perrors.Wrapf(err, "determining manifest list type to write to destination")
		return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err)
	}
	if selectedListType != originalList.MIMEType() {
		if cannotModifyManifestListReason != "" {
@@ -499,7 +493,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
		unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
		updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
		if err != nil {
			return nil, perrors.Wrapf(err, "copying image %d/%d from manifest list", instancesCopied+1, imagesToCopy)
			return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err)
		}
		instancesCopied++
		// Record the result of a possible conversion here.
@@ -513,7 +507,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur

	// Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
	if err = updatedList.UpdateInstances(updates); err != nil {
		return nil, perrors.Wrapf(err, "updating manifest list")
		return nil, fmt.Errorf("updating manifest list: %w", err)
	}

	// Iterate through supported list types, preferred format first.
@@ -528,7 +522,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
		if thisListType != updatedList.MIMEType() {
			attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
			if err != nil {
				return nil, perrors.Wrapf(err, "converting manifest list to list with MIME type %q", thisListType)
				return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err)
			}
		}

@@ -536,11 +530,11 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur
		// by serializing them both so that we can compare them.
		attemptedManifestList, err := attemptedList.Serialize()
		if err != nil {
			return nil, perrors.Wrapf(err, "encoding updated manifest list (%q: %#v)", updatedList.MIMEType(), updatedList.Instances())
			return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err)
		}
		originalManifestList, err := originalList.Serialize()
		if err != nil {
			return nil, perrors.Wrapf(err, "encoding original manifest list for comparison (%q: %#v)", originalList.MIMEType(), originalList.Instances())
			return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err)
		}

		// If we can't just use the original value, but we have to change it, flag an error.
@@ -587,7 +581,7 @@ func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signatur

	c.Printf("Storing list signatures\n")
	if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
		return nil, perrors.Wrap(err, "writing signatures")
		return nil, fmt.Errorf("writing signatures: %w", err)
	}

	return manifestList, nil
@@ -601,7 +595,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
	multiImage, err := isMultiImage(ctx, unparsedImage)
	if err != nil {
		// FIXME FIXME: How to name a reference for the sub-image?
		return nil, "", "", perrors.Wrapf(err, "determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
		return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
	}
	if multiImage {
		return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
@@ -611,11 +605,11 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
	// (The multiImage check above only matches the MIME type, which we have received anyway.
	// Actual parsing of anything should be deferred.)
	if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
		return nil, "", "", perrors.Wrap(err, "Source image rejected")
		return nil, "", "", fmt.Errorf("Source image rejected: %w", err)
	}
	src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
	if err != nil {
		return nil, "", "", perrors.Wrapf(err, "initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
		return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
	}

	// If the destination is a digested reference, make a note of that, determine what digest value we're
@@ -627,16 +621,16 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
		destIsDigestedReference = true
		matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
		if err != nil {
			return nil, "", "", perrors.Wrapf(err, "computing digest of source image's manifest")
			return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
		}
		if !matches {
			manifestList, _, err := unparsedToplevel.Manifest(ctx)
			if err != nil {
				return nil, "", "", perrors.Wrapf(err, "reading manifest from source image")
				return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err)
			}
			matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
			if err != nil {
				return nil, "", "", perrors.Wrapf(err, "computing digest of source image's manifest")
				return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
			}
			if !matches {
				return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
@@ -649,22 +643,11 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
		return nil, "", "", err
	}

	var sigs []internalsig.Signature
	if options.RemoveSignatures {
		sigs = []internalsig.Signature{}
	} else {
		c.Printf("Getting image source signatures\n")
		s, err := src.UntrustedSignatures(ctx)
		if err != nil {
			return nil, "", "", perrors.Wrap(err, "reading signatures")
		}
		sigs = s
	}
	if len(sigs) != 0 {
		c.Printf("Checking if image destination supports signatures\n")
		if err := c.dest.SupportsSignatures(ctx); err != nil {
			return nil, "", "", perrors.Wrapf(err, "Can not copy signatures to %s", transports.ImageName(c.dest.Reference()))
		}
	sigs, err := c.sourceSignatures(ctx, src, options,
		"Getting image source signatures",
		"Checking if image destination supports signatures")
	if err != nil {
		return nil, "", "", err
	}

	// Determine if we're allowed to modify the manifest.
@@ -778,7 +761,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
	// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
	// so let’s bail out early and with a better error message.
	if ic.cannotModifyManifestReason != "" {
		return nil, "", "", perrors.Wrapf(err, "Writing manifest failed and we cannot try conversions: %q", cannotModifyManifestReason)
		return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
	}

	// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
@@ -825,7 +808,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli

	c.Printf("Storing signatures\n")
	if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
		return nil, "", "", perrors.Wrap(err, "writing signatures")
		return nil, "", "", fmt.Errorf("writing signatures: %w", err)
	}

	return manifestBytes, retManifestType, retManifestDigest, nil
@@ -845,11 +828,11 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
	if dest.MustMatchRuntimeOS() {
		c, err := src.OCIConfig(ctx)
		if err != nil {
			return perrors.Wrapf(err, "parsing image configuration")
			return fmt.Errorf("parsing image configuration: %w", err)
		}
		wantedPlatforms, err := platform.WantedPlatforms(sys)
		if err != nil {
			return perrors.Wrapf(err, "getting current platform information %#v", sys)
			return fmt.Errorf("getting current platform information %#v: %w", sys, err)
		}

		options := newOrderedSet()
@@ -1057,13 +1040,13 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
		}
		pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
		if err != nil {
			return nil, "", perrors.Wrap(err, "creating an updated image manifest")
			return nil, "", fmt.Errorf("creating an updated image manifest: %w", err)
		}
		pendingImage = pi
	}
	man, _, err := pendingImage.Manifest(ctx)
	if err != nil {
		return nil, "", perrors.Wrap(err, "reading manifest")
		return nil, "", fmt.Errorf("reading manifest: %w", err)
	}

	if err := ic.copyConfig(ctx, pendingImage); err != nil {
@@ -1080,7 +1063,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanc
|
||||
}
|
||||
if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
|
||||
logrus.Debugf("Error %v while writing manifest %q", err, string(man))
|
||||
return nil, "", perrors.Wrapf(err, "writing manifest")
|
||||
return nil, "", fmt.Errorf("writing manifest: %w", err)
|
||||
}
|
||||
return man, manifestDigest, nil
|
||||
}
|
||||
@@ -1100,10 +1083,11 @@ func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
|
||||
defer progressPool.Wait()
|
||||
bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
|
||||
defer bar.Abort(false)
|
||||
ic.c.printCopyInfo("config", srcInfo)
|
||||
|
||||
configBlob, err := src.ConfigBlob(ctx)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, perrors.Wrapf(err, "reading config blob %s", srcInfo.Digest)
|
||||
return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err)
|
||||
}
|
||||
|
||||
destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
|
||||
@@ -1155,6 +1139,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
}
|
||||
}
|
||||
|
||||
ic.c.printCopyInfo("blob", srcInfo)
|
||||
|
||||
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
|
||||
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
|
||||
// When encrypting to decrypting, only use the simple code path. We might be able to optimize more
|
||||
@@ -1183,7 +1169,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
SrcRef: srcRef,
|
||||
})
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", perrors.Wrapf(err, "trying to reuse blob %s at destination", srcInfo.Digest)
|
||||
return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
|
||||
}
|
||||
if reused {
|
||||
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
|
||||
@@ -1257,7 +1243,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
|
||||
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", perrors.Wrapf(err, "reading blob %s", srcInfo.Digest)
|
||||
return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
|
||||
}
|
||||
defer srcStream.Close()
|
||||
|
||||
@@ -1273,7 +1259,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
return types.BlobInfo{}, "", ctx.Err()
|
||||
case diffIDResult := <-diffIDChan:
|
||||
if diffIDResult.err != nil {
|
||||
return types.BlobInfo{}, "", perrors.Wrap(diffIDResult.err, "computing layer DiffID")
|
||||
return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err)
|
||||
}
|
||||
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
|
||||
// Don’t record any associations that involve encrypted data. This is a bit crude,
|
||||
|
||||
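A pattern repeated throughout these vendored hunks is the migration from `github.com/pkg/errors` wrapping to Go 1.13-style `%w` wrapping. A minimal, stdlib-only sketch of the before/after equivalence (the sentinel error and function names here are illustrative, not from the diff):

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for any sentinel error a callee might return.
var errNotFound = errors.New("not found")

func fetch() error { return errNotFound }

// Old style: perrors.Wrap(err, "reading manifest").
// New style: fmt.Errorf with the %w verb, which keeps the cause
// inspectable by errors.Is and errors.As without the extra dependency.
func readManifest() error {
	if err := fetch(); err != nil {
		return fmt.Errorf("reading manifest: %w", err)
	}
	return nil
}

func main() {
	err := readManifest()
	fmt.Println(err)                         // reading manifest: not found
	fmt.Println(errors.Is(err, errNotFound)) // true: %w preserves the chain
}
```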

3 changes: vendor/github.com/containers/image/v5/copy/digesting_reader.go (generated, vendored)
@@ -6,7 +6,6 @@ import (
"io"
digest "github.com/opencontainers/go-digest"
perrors "github.com/pkg/errors"
)
type digestingReader struct {

@@ -48,7 +47,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
// Coverage: This should not happen, the hash.Hash interface requires
// d.digest.Write to never return an error, and the io.Writer interface
// requires n2 == len(input) if no error is returned.
return 0, perrors.Wrapf(err, "updating digest during verification: %d vs. %d", n2, n)
return 0, fmt.Errorf("updating digest during verification: %d vs. %d: %w", n2, n, err)
}
}
if err == io.EOF {

8 changes: vendor/github.com/containers/image/v5/copy/encryption.go (generated, vendored)
@@ -1,12 +1,12 @@
package copy
import (
"fmt"
"strings"
"github.com/containers/image/v5/types"
"github.com/containers/ocicrypt"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
perrors "github.com/pkg/errors"
)
// isOciEncrypted returns a bool indicating if a mediatype is encrypted

@@ -41,7 +41,7 @@ func (c *copier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo types.
}
reader, decryptedDigest, err := ocicrypt.DecryptLayer(c.ociDecryptConfig, stream.reader, desc, false)
if err != nil {
return nil, perrors.Wrapf(err, "decrypting layer %s", srcInfo.Digest)
return nil, fmt.Errorf("decrypting layer %s: %w", srcInfo.Digest, err)
}
stream.reader = reader

@@ -92,7 +92,7 @@ func (c *copier) blobPipelineEncryptionStep(stream *sourceStream, toEncrypt bool
}
reader, finalizer, err := ocicrypt.EncryptLayer(c.ociEncryptConfig, stream.reader, desc)
if err != nil {
return nil, perrors.Wrapf(err, "encrypting blob %s", srcInfo.Digest)
return nil, fmt.Errorf("encrypting blob %s: %w", srcInfo.Digest, err)
}
stream.reader = reader

@@ -116,7 +116,7 @@ func (d *bpEncryptionStepData) updateCryptoOperationAndAnnotations(operation *ty
encryptAnnotations, err := d.finalizer()
if err != nil {
return perrors.Wrap(err, "Unable to finalize encryption")
return fmt.Errorf("Unable to finalize encryption: %w", err)
}
*operation = types.Encrypt
if *annotations == nil {
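Both the decryption and encryption steps above share the same shape: wrap `stream.reader` in a transforming reader and put the wrapper back into the stream, so later pipeline steps only ever see the transformed bytes. A stdlib-only sketch of that reader-wrapping pattern (`sourceStream` and the upper-casing transform are stand-ins, not the vendored types):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// sourceStream mimics the pipeline's mutable stream holder.
type sourceStream struct {
	reader io.Reader
}

// upperCaseStep wraps stream.reader, exactly like the decrypt/encrypt
// steps do: the old reader becomes the input of the new one.
func upperCaseStep(stream *sourceStream) {
	pr, pw := io.Pipe()
	orig := stream.reader
	go func() {
		var buf bytes.Buffer
		if _, err := io.Copy(&buf, orig); err != nil {
			pw.CloseWithError(err)
			return
		}
		_, _ = pw.Write([]byte(strings.ToUpper(buf.String())))
		pw.Close()
	}()
	stream.reader = pr // later steps see only the transformed bytes
}

func main() {
	s := &sourceStream{reader: strings.NewReader("layer data")}
	upperCaseStep(s)
	out, _ := io.ReadAll(s.reader)
	fmt.Println(string(out)) // LAYER DATA
}
```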

15 changes: vendor/github.com/containers/image/v5/copy/progress_bars.go (generated, vendored)
@@ -38,7 +38,8 @@ type progressBar struct {
}
// createProgressBar creates a progressBar in pool. Note that if the copier's reportWriter
// is io.Discard, the progress bar's output will be discarded
// is io.Discard, the progress bar's output will be discarded. Callers may call printCopyInfo()
// to print a single line instead.
//
// NOTE: Every progress bar created within a progress pool must either successfully
// complete or be aborted, or pool.Wait() will hang. That is typically done

@@ -95,15 +96,21 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.
),
)
}
if c.progressOutput == io.Discard {
c.Printf("Copying %s %s\n", kind, info.Digest)
}
return &progressBar{
Bar: bar,
originalSize: info.Size,
}
}
// printCopyInfo prints a "Copying ..." message on the copier if the output is
// set to `io.Discard`. In that case, the progress bars won't be rendered but
// we still want to indicate when blobs and configs are copied.
func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
if c.progressOutput == io.Discard {
c.Printf("Copying %s %s\n", kind, info.Digest)
}
}
// mark100PercentComplete marks the progress bar as 100% complete;
// it may do so by possibly advancing the current state if it is below the known total.
func (bar *progressBar) mark100PercentComplete() {
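The change factors the "print one line when progress bars are disabled" behavior out of createProgressBar into the new printCopyInfo helper. The idea, reduced to a stdlib-only sketch (this `copier` type is a stand-in for the vendored one):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

type copier struct {
	progressOutput io.Writer // where bars would render
	reportWriter   io.Writer // where plain messages go
}

// printCopyInfo emits a single "Copying ..." line only when bar
// rendering is suppressed, so the user still sees activity.
func (c *copier) printCopyInfo(kind, digest string) {
	if c.progressOutput == io.Discard {
		fmt.Fprintf(c.reportWriter, "Copying %s %s\n", kind, digest)
	}
}

func main() {
	quiet := &copier{progressOutput: io.Discard, reportWriter: os.Stdout}
	quiet.printCopyInfo("blob", "sha256:1234") // printed
	loud := &copier{progressOutput: os.Stderr, reportWriter: os.Stdout}
	loud.printCopyInfo("blob", "sha256:1234") // suppressed; a bar renders instead
}
```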

34 changes: vendor/github.com/containers/image/v5/copy/sign.go (generated, vendored)
@@ -1,25 +1,51 @@
package copy
import (
"context"
"fmt"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/private"
internalsig "github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/signature/sigstore"
"github.com/containers/image/v5/transports"
perrors "github.com/pkg/errors"
)
// sourceSignatures returns signatures from unparsedSource based on options,
// and verifies that they can be used (to avoid copying a large image when we
// can tell in advance that it would ultimately fail)
func (c *copier) sourceSignatures(ctx context.Context, unparsed private.UnparsedImage, options *Options,
gettingSignaturesMessage, checkingDestMessage string) ([]internalsig.Signature, error) {
var sigs []internalsig.Signature
if options.RemoveSignatures {
sigs = []internalsig.Signature{}
} else {
c.Printf("%s\n", gettingSignaturesMessage)
s, err := unparsed.UntrustedSignatures(ctx)
if err != nil {
return nil, fmt.Errorf("reading signatures: %w", err)
}
sigs = s
}
if len(sigs) != 0 {
c.Printf("%s\n", checkingDestMessage)
if err := c.dest.SupportsSignatures(ctx); err != nil {
return nil, fmt.Errorf("Can not copy signatures to %s: %w", transports.ImageName(c.dest.Reference()), err)
}
}
return sigs, nil
}
// createSignature creates a new signature of manifest using keyIdentity.
func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase string, identity reference.Named) (internalsig.Signature, error) {
mech, err := signature.NewGPGSigningMechanism()
if err != nil {
return nil, perrors.Wrap(err, "initializing GPG")
return nil, fmt.Errorf("initializing GPG: %w", err)
}
defer mech.Close()
if err := mech.SupportsSigning(); err != nil {
return nil, perrors.Wrap(err, "Signing not supported")
return nil, fmt.Errorf("Signing not supported: %w", err)
}
if identity != nil {

@@ -36,7 +62,7 @@ func (c *copier) createSignature(manifest []byte, keyIdentity string, passphrase
c.Printf("Signing manifest using simple signing\n")
newSig, err := signature.SignDockerManifestWithOptions(manifest, identity.String(), mech, keyIdentity, &signature.SignOptions{Passphrase: passphrase})
if err != nil {
return nil, perrors.Wrap(err, "creating signature")
return nil, fmt.Errorf("creating signature: %w", err)
}
return internalsig.SimpleSigningFromBlob(newSig), nil
}
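The new sourceSignatures helper replaces the fetch-then-check block that the copy.go hunk above deletes; the two message parameters are what let the single-image and manifest-list call sites share one code path. A self-contained sketch of that consolidation pattern (`sourceItems` and its callbacks are stand-ins, not the vendored API):

```go
package main

import "fmt"

// sourceItems is a stand-in for the consolidated helper: fetch items unless
// removal was requested, then verify the destination accepts them. Injecting
// the two progress messages removes the duplicated block from both callers.
func sourceItems(remove bool, gettingMsg, checkingMsg string,
	fetch func() ([]string, error), destSupports func() error) ([]string, error) {
	var items []string
	if !remove {
		fmt.Println(gettingMsg)
		s, err := fetch()
		if err != nil {
			return nil, fmt.Errorf("reading signatures: %w", err)
		}
		items = s
	}
	if len(items) != 0 {
		fmt.Println(checkingMsg)
		if err := destSupports(); err != nil {
			return nil, fmt.Errorf("cannot copy signatures: %w", err)
		}
	}
	return items, nil
}

func main() {
	sigs, err := sourceItems(false,
		"Getting image source signatures",
		"Checking if image destination supports signatures",
		func() ([]string, error) { return []string{"simple-signing blob"}, nil },
		func() error { return nil })
	if err != nil {
		panic(err)
	}
	fmt.Println("got", len(sigs), "signature(s)")
}
```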

11 changes: vendor/github.com/containers/image/v5/directory/directory_dest.go (generated, vendored)
@@ -16,7 +16,6 @@ import (
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
perrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -56,7 +55,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (private.Im
// if the contents don't match throw an error
dirExists, err := pathExists(ref.resolvedPath)
if err != nil {
return nil, perrors.Wrapf(err, "checking for path %q", ref.resolvedPath)
return nil, fmt.Errorf("checking for path %q: %w", ref.resolvedPath, err)
}
if dirExists {
isEmpty, err := isDirEmpty(ref.resolvedPath)

@@ -67,7 +66,7 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (private.Im
if !isEmpty {
versionExists, err := pathExists(ref.versionPath())
if err != nil {
return nil, perrors.Wrapf(err, "checking if path exists %q", ref.versionPath())
return nil, fmt.Errorf("checking if path exists %q: %w", ref.versionPath(), err)
}
if versionExists {
contents, err := os.ReadFile(ref.versionPath())

@@ -83,20 +82,20 @@ func newImageDestination(sys *types.SystemContext, ref dirReference) (private.Im
}
// delete directory contents so that only one image is in the directory at a time
if err = removeDirContents(ref.resolvedPath); err != nil {
return nil, perrors.Wrapf(err, "erasing contents in %q", ref.resolvedPath)
return nil, fmt.Errorf("erasing contents in %q: %w", ref.resolvedPath, err)
}
logrus.Debugf("overwriting existing container image directory %q", ref.resolvedPath)
}
} else {
// create directory if it doesn't exist
if err := os.MkdirAll(ref.resolvedPath, 0755); err != nil {
return nil, perrors.Wrapf(err, "unable to create directory %q", ref.resolvedPath)
return nil, fmt.Errorf("unable to create directory %q: %w", ref.resolvedPath, err)
}
}
// create version file
err = os.WriteFile(ref.versionPath(), []byte(version), 0644)
if err != nil {
return nil, perrors.Wrapf(err, "creating version file %q", ref.versionPath())
return nil, fmt.Errorf("creating version file %q: %w", ref.versionPath(), err)
}
d := &dirImageDestination{
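The flow above checks whether the target directory exists and is empty, optionally wipes a previous image, and always writes a version file so later opens can detect incompatible layouts. A much-simplified, stdlib-only sketch of that initialize-and-version pattern (the version string and the refuse-on-non-empty behavior here are illustrative; the vendored code instead compares the existing version file and wipes matching directories):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

const version = "Directory Transport Version: 1.1\n" // illustrative value, not taken from the diff

// isDirEmpty mirrors the emptiness check the vendored code relies on.
func isDirEmpty(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	return len(entries) == 0, nil
}

// initImageDir creates the directory if needed, refuses a non-empty one
// (simplification), and writes the version marker file.
func initImageDir(dir string) error {
	if err := os.MkdirAll(dir, 0755); err != nil {
		return fmt.Errorf("unable to create directory %q: %w", dir, err)
	}
	empty, err := isDirEmpty(dir)
	if err != nil {
		return fmt.Errorf("checking for path %q: %w", dir, err)
	}
	if !empty {
		return fmt.Errorf("refusing to reuse non-empty directory %q in this sketch", dir)
	}
	versionPath := filepath.Join(dir, "version")
	if err := os.WriteFile(versionPath, []byte(version), 0644); err != nil {
		return fmt.Errorf("creating version file %q: %w", versionPath, err)
	}
	return nil
}

func main() {
	dir, err := os.MkdirTemp("", "dir-dest-*")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	if err := initImageDir(dir); err != nil {
		panic(err)
	}
	fmt.Println("initialized", dir)
}
```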

39 changes: vendor/github.com/containers/image/v5/docker/archive/dest.go (generated, vendored)
@@ -3,7 +3,6 @@ package archive
import (
"context"
"fmt"
"io"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/internal/private"

@@ -13,8 +12,8 @@ import (
type archiveImageDestination struct {
*tarfile.Destination // Implements most of types.ImageDestination
ref archiveReference
archive *tarfile.Writer // Should only be closed if writer != nil
writer io.Closer // May be nil if the archive is shared
writer *Writer // Should be closed if closeWriter
closeWriter bool
}
func newImageDestination(sys *types.SystemContext, ref archiveReference) (private.ImageDestination, error) {

@@ -22,29 +21,28 @@ func newImageDestination(sys *types.SystemContext, ref archiveReference) (privat
return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
}
var archive *tarfile.Writer
var writer io.Closer
if ref.archiveWriter != nil {
archive = ref.archiveWriter
writer = nil
var writer *Writer
var closeWriter bool
if ref.writer != nil {
writer = ref.writer
closeWriter = false
} else {
fh, err := openArchiveForWriting(ref.path)
w, err := NewWriter(sys, ref.path)
if err != nil {
return nil, err
}
archive = tarfile.NewWriter(fh)
writer = fh
writer = w
closeWriter = true
}
tarDest := tarfile.NewDestination(sys, archive, ref.Transport().Name(), ref.ref)
tarDest := tarfile.NewDestination(sys, writer.archive, ref.Transport().Name(), ref.ref)
if sys != nil && sys.DockerArchiveAdditionalTags != nil {
tarDest.AddRepoTags(sys.DockerArchiveAdditionalTags)
}
return &archiveImageDestination{
Destination: tarDest,
ref: ref,
archive: archive,
writer: writer,
closeWriter: closeWriter,
}, nil
}

@@ -56,7 +54,7 @@ func (d *archiveImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *archiveImageDestination) Close() error {
if d.writer != nil {
if d.closeWriter {
return d.writer.Close()
}
return nil

@@ -70,8 +68,15 @@ func (d *archiveImageDestination) Close() error {
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *archiveImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
if d.writer != nil {
return d.archive.Close()
d.writer.imageCommitted()
if d.closeWriter {
// We could do this only in .Close(), but failures in .Close() are much more likely to be
// ignored by callers that use defer. So, in single-image destinations, try to complete
// the archive here.
// But if Commit() is never called, let .Close() clean up.
err := d.writer.Close()
d.closeWriter = false
return err
}
return nil
}
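The comment in the Commit() hunk explains why archive completion moved out of Close(): errors from a Close() called via defer are routinely ignored, so the expensive finalization now happens in Commit() where the caller still checks errors, and Close() becomes a fallback. A stdlib sketch of that pattern (the type and field names are illustrative):

```go
package main

import (
	"fmt"
	"os"
)

type dest struct {
	f         *os.File
	closeFile bool // we own f and must eventually close it
}

// Commit finalizes the output while the caller still checks errors.
// After a successful Commit, Close below becomes a no-op.
func (d *dest) Commit() error {
	if d.closeFile {
		err := d.f.Close()
		d.closeFile = false
		return err
	}
	return nil
}

// Close is safe to run via defer; it only cleans up if Commit never ran.
func (d *dest) Close() error {
	if d.closeFile {
		d.closeFile = false
		return d.f.Close()
	}
	return nil
}

func main() {
	f, err := os.CreateTemp("", "archive-*.tar")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	d := &dest{f: f, closeFile: true}
	defer d.Close() // fallback cleanup; its error is typically dropped
	if err := d.Commit(); err != nil {
		panic(err) // the error surfaces where it can be handled
	}
	fmt.Println("committed", f.Name())
}
```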

7 changes: vendor/github.com/containers/image/v5/docker/archive/reader.go (generated, vendored)
@@ -7,7 +7,6 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
perrors "github.com/pkg/errors"
)
// Reader manages a single Docker archive, allows listing its contents and accessing

@@ -75,7 +74,7 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
for _, tag := range image.RepoTags {
parsedTag, err := reference.ParseNormalizedNamed(tag)
if err != nil {
return nil, perrors.Wrapf(err, "Invalid tag %#v in manifest item @%d", tag, imageIndex)
return nil, fmt.Errorf("Invalid tag %#v in manifest item @%d: %w", tag, imageIndex, err)
}
nt, ok := parsedTag.(reference.NamedTagged)
if !ok {

@@ -83,14 +82,14 @@ func (r *Reader) List() ([][]types.ImageReference, error) {
}
ref, err := newReference(r.path, nt, -1, r.archive, nil)
if err != nil {
return nil, perrors.Wrapf(err, "creating a reference for tag %#v in manifest item @%d", tag, imageIndex)
return nil, fmt.Errorf("creating a reference for tag %#v in manifest item @%d: %w", tag, imageIndex, err)
}
refs = append(refs, ref)
}
if len(refs) == 0 {
ref, err := newReference(r.path, nil, imageIndex, r.archive, nil)
if err != nil {
return nil, perrors.Wrapf(err, "creating a reference for manifest item @%d", imageIndex)
return nil, fmt.Errorf("creating a reference for manifest item @%d: %w", imageIndex, err)
}
refs = append(refs, ref)
}

11 changes: vendor/github.com/containers/image/v5/docker/archive/transport.go (generated, vendored)
@@ -12,7 +12,6 @@ import (
ctrImage "github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
perrors "github.com/pkg/errors"
)
func init() {

@@ -54,7 +53,7 @@ type archiveReference struct {
// file, not necessarily path precisely).
archiveReader *tarfile.Reader
// If not nil, must have been created for path
archiveWriter *tarfile.Writer
writer *Writer
}
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.

@@ -73,7 +72,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
if len(parts[1]) > 0 && parts[1][0] == '@' {
i, err := strconv.Atoi(parts[1][1:])
if err != nil {
return nil, perrors.Wrapf(err, "Invalid source index %s", parts[1])
return nil, fmt.Errorf("Invalid source index %s: %w", parts[1], err)
}
if i < 0 {
return nil, fmt.Errorf("Invalid source index @%d: must not be negative", i)

@@ -82,7 +81,7 @@ func ParseReference(refString string) (types.ImageReference, error) {
} else {
ref, err := reference.ParseNormalizedNamed(parts[1])
if err != nil {
return nil, perrors.Wrapf(err, "docker-archive parsing reference")
return nil, fmt.Errorf("docker-archive parsing reference: %w", err)
}
ref = reference.TagNameOnly(ref)
refTagged, isTagged := ref.(reference.NamedTagged)

@@ -109,7 +108,7 @@ func NewIndexReference(path string, sourceIndex int) (types.ImageReference, erro
// newReference returns a docker archive reference for a path, an optional reference or sourceIndex,
// and optionally a tarfile.Reader and/or a tarfile.Writer matching path.
func newReference(path string, ref reference.NamedTagged, sourceIndex int,
archiveReader *tarfile.Reader, archiveWriter *tarfile.Writer) (types.ImageReference, error) {
archiveReader *tarfile.Reader, writer *Writer) (types.ImageReference, error) {
if strings.Contains(path, ":") {
return nil, fmt.Errorf("Invalid docker-archive: reference: colon in path %q is not supported", path)
}

@@ -127,7 +126,7 @@ func newReference(path string, ref reference.NamedTagged, sourceIndex int,
ref: ref,
sourceIndex: sourceIndex,
archiveReader: archiveReader,
archiveWriter: archiveWriter,
writer: writer,
}, nil
}
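ParseReference above accepts an optional suffix after the archive path: either `@<index>`, selecting a manifest entry by position, or a named reference. A stdlib-only sketch of that branch (simplified: the real code also normalizes the name and applies a default tag):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSuffix mirrors the branch above: "@<index>" selects an entry by
// position; anything else is treated as a named reference.
func parseSuffix(suffix string) (index int, name string, err error) {
	if strings.HasPrefix(suffix, "@") {
		i, err := strconv.Atoi(suffix[1:])
		if err != nil {
			return 0, "", fmt.Errorf("Invalid source index %s: %w", suffix, err)
		}
		if i < 0 {
			return 0, "", fmt.Errorf("Invalid source index @%d: must not be negative", i)
		}
		return i, "", nil
	}
	return -1, suffix, nil
}

func main() {
	for _, s := range []string{"@2", "docker.io/library/busybox:latest"} {
		idx, name, err := parseSuffix(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("suffix %q -> index %d, name %q\n", s, idx, name)
	}
}
```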

102 changes: vendor/github.com/containers/image/v5/docker/archive/writer.go (generated, vendored)
@@ -2,38 +2,75 @@ package archive
import (
"errors"
"fmt"
"io"
"os"
"sync"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/types"
perrors "github.com/pkg/errors"
)
// Writer manages a single in-progress Docker archive and allows adding images to it.
type Writer struct {
path string // The original, user-specified path; not the maintained temporary file, if any
archive *tarfile.Writer
writer io.Closer
path string // The original, user-specified path; not the maintained temporary file, if any
regularFile bool // path refers to a regular file (e.g. not a pipe)
archive *tarfile.Writer
writer io.Closer
// The following state can only be accessed with the mutex held.
mutex sync.Mutex
hadCommit bool // At least one successful commit has happened
}
// NewWriter returns a Writer for path.
// The caller should call .Close() on the returned object.
func NewWriter(sys *types.SystemContext, path string) (*Writer, error) {
fh, err := openArchiveForWriting(path)
// path can be either a pipe or a regular file
// in the case of a pipe, we require that we can open it for write
// in the case of a regular file, we don't want to overwrite any pre-existing file
// so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
// only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return nil, err
return nil, fmt.Errorf("opening file %q: %w", path, err)
}
succeeded := false
defer func() {
if !succeeded {
fh.Close()
}
}()
fhStat, err := fh.Stat()
if err != nil {
return nil, fmt.Errorf("statting file %q: %w", path, err)
}
regularFile := fhStat.Mode().IsRegular()
if regularFile && fhStat.Size() != 0 {
return nil, errors.New("docker-archive doesn't support modifying existing images")
}
archive := tarfile.NewWriter(fh)
succeeded = true
return &Writer{
path: path,
archive: archive,
writer: fh,
path: path,
regularFile: regularFile,
archive: archive,
writer: fh,
hadCommit: false,
}, nil
}
// imageCommitted notifies the Writer that at least one image was successfully committed to the stream.
func (w *Writer) imageCommitted() {
w.mutex.Lock()
defer w.mutex.Unlock()
w.hadCommit = true
}
// Close writes all outstanding data about images to the archive, and
// releases state associated with the Writer, if any.
// No more images can be added after this is called.

@@ -42,42 +79,25 @@ func (w *Writer) Close() error {
if err2 := w.writer.Close(); err2 != nil && err == nil {
err = err2
}
if err == nil && w.regularFile && !w.hadCommit {
// Writing to the destination never had a success; delete the destination if we created it.
// This is done primarily because we don’t implement adding another image to a pre-existing image, so if we
// left a partial archive around (notably because reading from the _source_ has failed), we couldn’t retry without
// the caller manually deleting the partial archive. So, delete it instead.
//
// Archives with at least one successfully created image are left around; they might still be valuable.
//
// Note a corner case: If there _originally_ was an empty file (which is not a valid archive anyway), this deletes it.
// Ideally, if w.regularFile, we should write the full contents to a temporary file and use os.Rename here, only on success.
if err2 := os.Remove(w.path); err2 != nil {
err = err2
}
}
return err
}
// NewReference returns an ImageReference that allows adding an image to Writer,
// with an optional reference.
func (w *Writer) NewReference(destinationRef reference.NamedTagged) (types.ImageReference, error) {
return newReference(w.path, destinationRef, -1, nil, w.archive)
}
// openArchiveForWriting opens path for writing a tar archive,
// making a few sanity checks.
func openArchiveForWriting(path string) (*os.File, error) {
// path can be either a pipe or a regular file
// in the case of a pipe, we require that we can open it for write
// in the case of a regular file, we don't want to overwrite any pre-existing file
// so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
// only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
fh, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
return nil, perrors.Wrapf(err, "opening file %q", path)
}
succeeded := false
defer func() {
if !succeeded {
fh.Close()
}
}()
fhStat, err := fh.Stat()
if err != nil {
return nil, perrors.Wrapf(err, "statting file %q", path)
}
if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
return nil, errors.New("docker-archive doesn't support modifying existing images")
}
succeeded = true
return fh, nil
return newReference(w.path, destinationRef, -1, nil, w)
}
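NewWriter above inlines the old openArchiveForWriting logic: open the path (which may be a pipe), refuse to overwrite a non-empty regular file, and make sure the handle is closed on every failure path via a "succeeded" flag checked in a deferred function. A runnable sketch of that idiom (function and variable names are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// openForArchive opens path for writing, refusing non-empty regular files.
// The "succeeded" flag plus deferred close guarantees no leaked descriptor
// when any later check fails.
func openForArchive(path string) (fh *os.File, regular bool, err error) {
	fh, err = os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, false, fmt.Errorf("opening file %q: %w", path, err)
	}
	succeeded := false
	defer func() {
		if !succeeded {
			fh.Close() // cleanup on any failure path below
		}
	}()

	st, err := fh.Stat()
	if err != nil {
		return nil, false, fmt.Errorf("statting file %q: %w", path, err)
	}
	regular = st.Mode().IsRegular()
	if regular && st.Size() != 0 {
		// Racy by design; see the comment in the hunk above.
		return nil, false, errors.New("docker-archive doesn't support modifying existing images")
	}
	succeeded = true
	return fh, regular, nil
}

func main() {
	dir, err := os.MkdirTemp("", "writer-demo-*")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	fh, regular, err := openForArchive(filepath.Join(dir, "archive.tar"))
	if err != nil {
		panic(err)
	}
	defer fh.Close()
	fmt.Println("opened; regular file:", regular)
}
```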

5 changes: vendor/github.com/containers/image/v5/docker/daemon/daemon_dest.go (generated, vendored)
@@ -11,7 +11,6 @@ import (
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
"github.com/docker/docker/client"
perrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -45,7 +44,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
c, err := newDockerClient(sys)
if err != nil {
return nil, perrors.Wrap(err, "initializing docker engine client")
return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
reader, writer := io.Pipe()

@@ -87,7 +86,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
resp, err := c.ImageLoad(ctx, reader, true)
if err != nil {
err = perrors.Wrap(err, "saving image to docker engine")
err = fmt.Errorf("saving image to docker engine: %w", err)
return
}
defer resp.Body.Close()
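The daemon destination streams the image tarball through an io.Pipe: the destination writes into one end while a goroutine feeds the other end to the engine's load endpoint. A stdlib-only sketch of that producer/consumer shape (`consume` stands in for the ImageLoad call):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// consume stands in for client.ImageLoad: it drains the whole stream.
func consume(r io.Reader) (int64, error) {
	return io.Copy(io.Discard, r)
}

func main() {
	reader, writer := io.Pipe()

	// The load goroutine mirrors imageLoadGoroutine: it drains the read end
	// and reports its outcome back over a channel.
	done := make(chan error, 1)
	go func() {
		n, err := consume(reader)
		if err != nil {
			err = fmt.Errorf("saving image to docker engine: %w", err)
		} else {
			fmt.Println("loaded bytes:", n)
		}
		// Closing the read end with the error propagates it to the writer side.
		reader.CloseWithError(err)
		done <- err
	}()

	// The destination writes the tar stream into the pipe's write end.
	if _, err := io.Copy(writer, strings.NewReader("fake tar stream")); err != nil {
		panic(err)
	}
	writer.Close() // signals EOF to the goroutine
	if err := <-done; err != nil {
		panic(err)
	}
}
```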

6 changes: vendor/github.com/containers/image/v5/docker/daemon/daemon_src.go (generated, vendored)
@@ -2,11 +2,11 @@ package daemon
import (
"context"
"fmt"
"github.com/containers/image/v5/docker/internal/tarfile"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
perrors "github.com/pkg/errors"
)
type daemonImageSource struct {

@@ -26,13 +26,13 @@ type daemonImageSource struct {
func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonReference) (private.ImageSource, error) {
c, err := newDockerClient(sys)
if err != nil {
return nil, perrors.Wrap(err, "initializing docker engine client")
return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
// Either way ImageSave should create a tarball with exactly one image.
inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})
if err != nil {
return nil, perrors.Wrap(err, "loading image from docker engine")
return nil, fmt.Errorf("loading image from docker engine: %w", err)
}
defer inputStream.Close()

2 changes: vendor/github.com/containers/image/v5/docker/daemon/daemon_transport.go (generated, vendored)
@@ -53,7 +53,7 @@ func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
// For daemonImageDestination, it must be a ref, which is NamedTagged.
// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest.
// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.)
type daemonReference struct {
id digest.Digest
ref reference.Named // !reference.IsNameOnly
@@ -1,46 +1,60 @@
package client
// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go
// Copyright 2022 github.com/distribution/distribution authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package docker
import (
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/client/auth/challenge"
dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
)
// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty
// errNoErrorsInBody is returned when an HTTP response body parses to an empty
// errcode.Errors slice.
var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body")
var errNoErrorsInBody = errors.New("no error details found in HTTP response body")
// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is
// unexpectedHTTPStatusError is returned when an unexpected HTTP status is
// returned when making a registry api call.
type UnexpectedHTTPStatusError struct {
type unexpectedHTTPStatusError struct {
Status string
}
func (e *UnexpectedHTTPStatusError) Error() string {
func (e *unexpectedHTTPStatusError) Error() string {
return fmt.Sprintf("received unexpected HTTP status: %s", e.Status)
}
// UnexpectedHTTPResponseError is returned when an expected HTTP status code
// unexpectedHTTPResponseError is returned when an expected HTTP status code
// is returned, but the content was unexpected and failed to be parsed.
type UnexpectedHTTPResponseError struct {
type unexpectedHTTPResponseError struct {
ParseErr error
StatusCode int
Response []byte
}
func (e *UnexpectedHTTPResponseError) Error() string {
func (e *unexpectedHTTPResponseError) Error() string {
return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response))
}
func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
var errors errcode.Errors
body, err := ioutil.ReadAll(r)
body, err := io.ReadAll(r)
if err != nil {
return err
}

@@ -63,7 +77,7 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
}
if err := json.Unmarshal(body, &errors); err != nil {
return &UnexpectedHTTPResponseError{
return &unexpectedHTTPResponseError{
ParseErr: err,
StatusCode: statusCode,
Response: body,

@@ -73,8 +87,8 @@ func parseHTTPErrorResponse(statusCode int, r io.Reader) error {
if len(errors) == 0 {
// If there was no error specified in the body, return
// UnexpectedHTTPResponseError.
return &UnexpectedHTTPResponseError{
ParseErr: ErrNoErrorsInBody,
return &unexpectedHTTPResponseError{
ParseErr: errNoErrorsInBody,
StatusCode: statusCode,
Response: body,
}

@@ -94,15 +108,15 @@ func mergeErrors(err1, err2 error) error {
return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
}
// HandleErrorResponse returns error parsed from HTTP response for an
// handleErrorResponse returns error parsed from HTTP response for an
// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
// UnexpectedHTTPStatusError returned for response code outside of expected
// range.
func HandleErrorResponse(resp *http.Response) error {
func handleErrorResponse(resp *http.Response) error {
if resp.StatusCode >= 400 && resp.StatusCode < 500 {
// Check for OAuth errors within the `WWW-Authenticate` header first
// See https://tools.ietf.org/html/rfc6750#section-3
for _, c := range challenge.ResponseChallenges(resp) {
for _, c := range dockerChallenge.ResponseChallenges(resp) {
if c.Scheme == "bearer" {
var err errcode.Error
// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1

@@ -124,16 +138,10 @@ func HandleErrorResponse(resp *http.Response) error {
}
}
err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
}
return err
}
return &UnexpectedHTTPStatusError{Status: resp.Status}
}
// SuccessStatus returns true if the argument is a successful HTTP response
// code (in the range 200 - 399 inclusive).
func SuccessStatus(status int) bool {
return status >= 200 && status <= 399
return &unexpectedHTTPStatusError{Status: resp.Status}
}
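The file above vendors distribution's error parsing into the docker package, unexporting the types. The wire format it handles is the registry error body, a JSON object with an "errors" array; notably, an empty array is itself treated as a failure. A stdlib-only sketch of that parsing (the local struct stands in for errcode.Error):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// registryError mirrors the wire shape of the distribution spec's error
// body entries; the struct here is a local stand-in for errcode.Error.
type registryError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

var errNoErrorsInBody = errors.New("no error details found in HTTP response body")

// parseErrorBody sketches parseHTTPErrorResponse: decode the body, and treat
// an empty "errors" array as its own failure mode rather than success.
func parseErrorBody(body []byte) error {
	var payload struct {
		Errors []registryError `json:"errors"`
	}
	if err := json.Unmarshal(body, &payload); err != nil {
		return fmt.Errorf("error parsing response body: %w", err)
	}
	if len(payload.Errors) == 0 {
		return errNoErrorsInBody
	}
	e := payload.Errors[0]
	return fmt.Errorf("%s: %s", e.Code, e.Message)
}

func main() {
	fmt.Println(parseErrorBody([]byte(`{"errors":[{"code":"MANIFEST_UNKNOWN","message":"manifest unknown"}]}`)))
	fmt.Println(parseErrorBody([]byte(`{}`))) // no error details found ...
}
```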

128 changes: vendor/github.com/containers/image/v5/docker/docker_client.go (generated, vendored)
@@ -1,6 +1,7 @@
package docker
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"

@@ -27,11 +28,9 @@ import (
"github.com/containers/storage/pkg/homedir"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
clientLib "github.com/docker/distribution/registry/client"
"github.com/docker/go-connections/tlsconfig"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
perrors "github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -127,8 +126,9 @@ type dockerClient struct {
}
type authScope struct {
remoteName string
actions string
resourceType string
remoteName string
actions string
}
// sendAuth determines whether we need authentication for v2 or v1 endpoint.

@@ -218,7 +218,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) {
auth, err := config.GetCredentialsForRef(sys, ref.ref)
if err != nil {
return nil, perrors.Wrapf(err, "getting username and password")
return nil, fmt.Errorf("getting username and password: %w", err)
}
sigBase, err := registryConfig.lookasideStorageBaseURL(ref, write)

@@ -237,6 +237,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, regis
}
client.signatureBase = sigBase
client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref)
client.scope.resourceType = "repository"
client.scope.actions = actions
client.scope.remoteName = reference.Path(ref.ref)
return client, nil

@@ -273,7 +274,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
skipVerify := false
reg, err := sysregistriesv2.FindRegistry(sys, reference)
if err != nil {
return nil, perrors.Wrapf(err, "loading registries")
return nil, fmt.Errorf("loading registries: %w", err)
}
if reg != nil {
if reg.Blocked {

@@ -301,7 +302,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
client, err := newDockerClient(sys, registry, registry)
if err != nil {
return perrors.Wrapf(err, "creating new docker client")
return fmt.Errorf("creating new docker client: %w", err)
}
client.auth = types.DockerAuthConfig{
Username: username,

@@ -350,7 +351,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
// We can't use GetCredentialsForRef here because we want to search the whole registry.
auth, err := config.GetCredentials(sys, registry)
if err != nil {
return nil, perrors.Wrapf(err, "getting username and password")
return nil, fmt.Errorf("getting username and password: %w", err)
}
// The /v2/_catalog endpoint has been disabled for docker.io therefore

@@ -364,7 +365,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
client, err := newDockerClient(sys, hostname, registry)
if err != nil {
return nil, perrors.Wrapf(err, "creating new docker client")
return nil, fmt.Errorf("creating new docker client: %w", err)
}
client.auth = auth
if sys != nil {

@@ -407,13 +408,13 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
if err != nil {
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, perrors.Wrapf(err, "couldn't search registry %q", registry)
return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err := httpResponseToError(resp, "")
logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err)
return nil, perrors.Wrapf(err, "couldn't search registry %q", registry)
return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err)
}
v2Res := &V2Results{}
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {

@@ -475,6 +476,33 @@ func (c *dockerClient) makeRequest(ctx context.Context, method, path string, hea
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
}
// Checks if the auth headers in the response contain an indication of a failed
// authorization because of an "insufficient_scope" error. If that's the case,
// returns the required scope to be used for fetching a new token.
func needsRetryWithUpdatedScope(err error, res *http.Response) (bool, *authScope) {
if err == nil && res.StatusCode == http.StatusUnauthorized {
challenges := parseAuthHeader(res.Header)
for _, challenge := range challenges {
if challenge.Scheme == "bearer" {
if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" {
if scope, ok := challenge.Parameters["scope"]; ok && scope != "" {
if newScope, err := parseAuthScope(scope); err == nil {
return true, newScope
} else {
logrus.WithFields(logrus.Fields{
"error": err,
"scope": scope,
"challenge": challenge,
}).Error("Failed to parse the authentication scope from the given challenge")
}
}
}
}
}
}
return false, nil
}
// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it,
// silently falling back to fallbackDelay if the header is missing or invalid.
func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration {

@@ -514,6 +542,29 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method stri
for {
res, err := c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, extraScope)
attempts++
// By default we use pre-defined scopes per operation. In
// certain cases, this can fail when our authentication is
// insufficient, then we might be getting an error back with a
// Www-Authenticate Header indicating an insufficient scope.
//
// Check for that and update the client challenges to retry after
// requesting a new token
//
// We only try this on the first attempt, to not overload an
// already struggling server.
// We also cannot retry with a body (stream != nil) as stream
// was already read
if attempts == 1 && stream == nil && auth != noAuth {
if retry, newScope := needsRetryWithUpdatedScope(err, res); retry {
logrus.Debug("Detected insufficient_scope error, will retry request with updated scope")
// Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently
// expect the insufficient_scope errors to happen for those callers. If that changes, we can add support
// for more than one extra scope.
res, err = c.makeRequestToResolvedURLOnce(ctx, method, url, headers, stream, streamLen, auth, newScope)
extraScope = newScope
}
}
if res == nil || res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately
stream != nil || // We can't retry with a body (which is not restartable in the general case)
attempts == backoffNumIterations {

@@ -593,8 +644,18 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
cacheKey := ""
scopes := []authScope{c.scope}
if extraScope != nil {
// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
// Using ':' as a separator here is unambiguous because getBearerToken below
// uses the same separator when formatting a remote request (and because
// repository names that we create can't contain colons, and extraScope values
// coming from a server come from `parseAuthScope`, which also splits on colons).
cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions)
if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 {
return fmt.Errorf(
"Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d",
cacheKey,
colonCount,
)
}
scopes = append(scopes, *extraScope)
}
var token bearerToken

@@ -649,9 +710,10 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
if service, ok := challenge.Parameters["service"]; ok && service != "" {
params.Add("service", service)
}
for _, scope := range scopes {
if scope.remoteName != "" && scope.actions != "" {
params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
}
}
params.Add("grant_type", "refresh_token")

@@ -701,8 +763,8 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
}
for _, scope := range scopes {
if scope.remoteName != "" && scope.actions != "" {
params.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" {
params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions))
}
}

@@ -767,7 +829,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
err = ping("http")
}
if err != nil {
err = perrors.Wrapf(err, "pinging container registry %s", c.registry)
err = fmt.Errorf("pinging container registry %s: %w", c.registry, err)
if c.sys != nil && c.sys.DockerDisableV1Ping {
return err
}

@@ -819,7 +881,7 @@ func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, t
logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type"))
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, "", perrors.Wrapf(registryHTTPResponseToError(res), "reading manifest %s in %s", tagOrDigest, ref.ref.Name())
return nil, "", fmt.Errorf("reading manifest %s in %s: %w", tagOrDigest, ref.ref.Name(), registryHTTPResponseToError(res))
}
manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize)

@@ -922,15 +984,25 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error.
func isManifestUnknownError(err error) bool {
var errs errcode.Errors
if !errors.As(err, &errs) || len(errs) == 0 {
return false
if errors.As(err, &errs) && len(errs) != 0 {
firstErr := errs[0]
// docker/distribution, and as defined in the spec
var ec errcode.ErrorCoder
if errors.As(firstErr, &ec) && ec.ErrorCode() == v2.ErrorCodeManifestUnknown {
return true
}
// registry.redhat.io as of October 2022
var e errcode.Error
if errors.As(firstErr, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" {
return true
}
}
err = errs[0]
ec, ok := err.(errcode.ErrorCoder)
if !ok {
return false
// ALSO registry.redhat.io as of October 2022
var unexpected *unexpectedHTTPResponseError
if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound && bytes.Contains(unexpected.Response, []byte("Not found")) {
return true
}
return ec.ErrorCode() == v2.ErrorCodeManifestUnknown
return false
}
// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for

@@ -978,7 +1050,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return nil, perrors.Wrapf(clientLib.HandleErrorResponse(res), "downloading signatures for %s in %s", manifestDigest, ref.ref.Name())
return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), handleErrorResponse(res))
}
body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize)

@@ -988,7 +1060,7 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
var parsedBody extensionSignatureList
if err := json.Unmarshal(body, &parsedBody); err != nil {
return nil, perrors.Wrapf(err, "decoding signature list")
return nil, fmt.Errorf("decoding signature list: %w", err)
}
return &parsedBody, nil
}
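The retry logic added above keys off a 401 whose Bearer challenge carries error="insufficient_scope": the server's own "scope" parameter names the access to request in a fresh token. A stdlib-only sketch of that trigger (real WWW-Authenticate parsing, like the vendored parseAuthHeader, is more involved; this handles only the simple comma-separated key="value" form, and breaks if a parameter value itself contains commas):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// scopeFromChallenge reports whether a response asks for a broader scope,
// and if so, which one.
func scopeFromChallenge(res *http.Response) (string, bool) {
	if res.StatusCode != http.StatusUnauthorized {
		return "", false
	}
	h := res.Header.Get("Www-Authenticate")
	if !strings.HasPrefix(h, "Bearer ") {
		return "", false
	}
	params := map[string]string{}
	for _, kv := range strings.Split(strings.TrimPrefix(h, "Bearer "), ",") {
		if k, v, ok := strings.Cut(strings.TrimSpace(kv), "="); ok {
			params[k] = strings.Trim(v, `"`)
		}
	}
	if params["error"] == "insufficient_scope" && params["scope"] != "" {
		return params["scope"], true
	}
	return "", false
}

func main() {
	res := &http.Response{
		StatusCode: http.StatusUnauthorized,
		Header: http.Header{"Www-Authenticate": []string{
			`Bearer realm="https://auth.example.com/token",error="insufficient_scope",scope="repository:library/busybox:pull"`,
		}},
	}
	if scope, ok := scopeFromChallenge(res); ok {
		fmt.Println("retry with scope:", scope) // repository:library/busybox:pull
	}
}
```

Note how the retry feeds the parsed scope back as the third `resourceType:remoteName:actions` component, which is also why authScope gains a resourceType field and why the token cache key grows from two colon-separated parts to three.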

7 changes: vendor/github.com/containers/image/v5/docker/docker_image.go (generated, vendored)
@@ -14,7 +14,6 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
perrors "github.com/pkg/errors"
)
// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods

@@ -67,7 +66,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
path := fmt.Sprintf(tagsPath, reference.Path(dr.ref))
client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull")
if err != nil {
return nil, perrors.Wrap(err, "failed to create client")
return nil, fmt.Errorf("failed to create client: %w", err)
}
tags := make([]string, 0)

@@ -135,7 +134,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
}
client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull")
if err != nil {
return "", perrors.Wrap(err, "failed to create client")
return "", fmt.Errorf("failed to create client: %w", err)
}
path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest)

@@ -150,7 +149,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
defer res.Body.Close()
if res.StatusCode != http.StatusOK {
return "", perrors.Wrapf(registryHTTPResponseToError(res), "reading digest %s in %s", tagOrDigest, dr.ref.Name())
return "", fmt.Errorf("reading digest %s in %s: %w", tagOrDigest, dr.ref.Name(), registryHTTPResponseToError(res))
}
dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest"))
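GetDigest ends by validating the Docker-Content-Digest response header with digest.Parse from github.com/opencontainers/go-digest. A stdlib-only sketch of roughly what that validation enforces, an "algorithm:hex" pair with a payload of the right length (this stand-in handles sha256 only; the real parser supports more algorithms):

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

// parseDigestHeader approximates digest.Parse for the common sha256 case.
func parseDigestHeader(v string) (string, error) {
	algo, hexPart, ok := strings.Cut(v, ":")
	if !ok || algo != "sha256" {
		return "", fmt.Errorf("unsupported digest %q", v)
	}
	raw, err := hex.DecodeString(hexPart)
	if err != nil || len(raw) != sha256.Size {
		return "", fmt.Errorf("invalid sha256 payload in %q", v)
	}
	return v, nil
}

func main() {
	sum := sha256.Sum256([]byte("example manifest"))
	header := "sha256:" + hex.EncodeToString(sum[:])
	d, err := parseDigestHeader(header)
	if err != nil {
		panic(err)
	}
	fmt.Println("validated digest:", d)
}
```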

33 changes: vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored)
@@ -31,7 +31,6 @@ import (
|
||||
v2 "github.com/docker/distribution/registry/api/v2"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
perrors "github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -167,11 +166,11 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusAccepted {
|
||||
logrus.Debugf("Error initiating layer upload, response %#v", *res)
|
||||
return types.BlobInfo{}, perrors.Wrapf(registryHTTPResponseToError(res), "initiating layer upload to %s in %s", uploadPath, d.c.registry)
|
||||
return types.BlobInfo{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
|
||||
}
|
||||
uploadLocation, err := res.Location()
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, perrors.Wrap(err, "determining upload URL")
|
||||
return types.BlobInfo{}, fmt.Errorf("determining upload URL: %w", err)
|
||||
}
|
||||
|
||||
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
|
||||
@@ -190,11 +189,11 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if !successStatus(res.StatusCode) {
|
||||
return nil, perrors.Wrapf(registryHTTPResponseToError(res), "uploading layer chunked")
|
||||
return nil, fmt.Errorf("uploading layer chunked: %w", registryHTTPResponseToError(res))
|
||||
}
|
||||
uploadLocation, err := res.Location()
|
||||
if err != nil {
|
||||
return nil, perrors.Wrap(err, "determining upload URL")
|
||||
return nil, fmt.Errorf("determining upload URL: %w", err)
|
||||
}
|
||||
return uploadLocation, nil
|
||||
}()
|
||||
@@ -215,7 +214,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusCreated {
|
||||
logrus.Debugf("Error uploading layer, response %#v", *res)
|
||||
return types.BlobInfo{}, perrors.Wrapf(registryHTTPResponseToError(res), "uploading layer to %s", uploadLocation)
|
||||
return types.BlobInfo{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
|
||||
}
|
||||
|
||||
logrus.Debugf("Upload of layer %s complete", blobDigest)
|
||||
@@ -240,7 +239,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
|
||||
return true, getBlobSize(res), nil
|
||||
case http.StatusUnauthorized:
|
||||
logrus.Debugf("... not authorized")
|
||||
return false, -1, perrors.Wrapf(registryHTTPResponseToError(res), "checking whether a blob %s exists in %s", digest, repo.Name())
|
||||
return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res))
|
||||
case http.StatusNotFound:
|
||||
logrus.Debugf("... not present")
|
||||
return false, -1, nil
|
||||
@@ -274,7 +273,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
|
||||
// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
|
||||
uploadLocation, err := res.Location()
|
||||
if err != nil {
|
||||
return perrors.Wrap(err, "determining upload URL after a mount attempt")
|
||||
return fmt.Errorf("determining upload URL after a mount attempt: %w", err)
|
||||
}
|
||||
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted())
|
||||
res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope)
|
||||
@@ -290,7 +289,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
|
||||
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
|
||||
default:
|
||||
logrus.Debugf("Error mounting, response %#v", *res)
|
||||
return perrors.Wrapf(registryHTTPResponseToError(res), "mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
|
||||
return fmt.Errorf("mounting %s from %s to %s: %w", srcDigest, srcRepo.Name(), d.ref.ref.Name(), registryHTTPResponseToError(res))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -359,8 +358,9 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
|
||||
// Checking candidateRepo, and mounting from it, requires an
|
||||
// expanded token scope.
|
||||
extraScope := &authScope{
|
||||
remoteName: reference.Path(candidateRepo),
|
||||
actions: "pull",
|
||||
resourceType: "repository",
|
||||
remoteName: reference.Path(candidateRepo),
|
||||
actions: "pull",
|
||||
}
|
||||
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
|
||||
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
|
||||
@@ -416,12 +416,12 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst
|
||||
// Double-check that the manifest we've been given matches the digest we've been given.
|
||||
matches, err := manifest.MatchesDigest(m, *instanceDigest)
|
||||
if err != nil {
|
||||
return perrors.Wrapf(err, "digesting manifest in PutManifest")
|
||||
return fmt.Errorf("digesting manifest in PutManifest: %w", err)
|
||||
}
|
||||
if !matches {
|
||||
manifestDigest, merr := manifest.Digest(m)
|
||||
if merr != nil {
|
||||
return perrors.Wrapf(err, "Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%v attempting to compute it)", instanceDigest.String(), merr)
|
||||
return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest: %w", instanceDigest.String(), merr)
|
||||
}
|
||||
return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String())
|
||||
}
|
||||
@@ -460,7 +460,7 @@ func (d *dockerImageDestination) uploadManifest(ctx context.Context, m []byte, t
|
||||
defer res.Body.Close()
|
||||
if !successStatus(res.StatusCode) {
|
||||
rawErr := registryHTTPResponseToError(res)
|
||||
err := perrors.Wrapf(rawErr, "uploading manifest %s to %s", tagOrDigest, d.ref.ref.Name())
|
||||
err := fmt.Errorf("uploading manifest %s to %s: %w", tagOrDigest, d.ref.ref.Name(), rawErr)
|
||||
if isManifestInvalidError(rawErr) {
|
||||
err = types.ManifestTypeRejectedError{Err: err}
|
||||
}
|
||||
@@ -653,6 +653,7 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
|
||||
Digest: "", // We will fill this in later.
|
||||
Size: 0,
|
||||
}, nil)
|
||||
ociConfig.RootFS.Type = "layers"
|
||||
} else {
|
||||
logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String())
|
||||
// We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs.
|
||||
@@ -832,7 +833,7 @@ sigExists:
|
||||
randBytes := make([]byte, 16)
|
||||
n, err := rand.Read(randBytes)
|
||||
if err != nil || n != 16 {
|
||||
return perrors.Wrapf(err, "generating random signature len %d", n)
|
||||
return fmt.Errorf("generating random signature len %d: %w", n, err)
|
||||
}
|
||||
signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes)
|
||||
if _, ok := existingSigNames[signatureName]; !ok {
|
||||
@@ -858,7 +859,7 @@ sigExists:
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusCreated {
|
||||
logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
|
||||
return perrors.Wrapf(registryHTTPResponseToError(res), "uploading signature to %s in %s", path, d.c.registry)
|
||||
return fmt.Errorf("uploading signature to %s in %s: %w", path, d.c.registry, registryHTTPResponseToError(res))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
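One detail worth noting in the uploadManifest hunk: the %w-wrapped error is further wrapped in types.ManifestTypeRejectedError, which callers can pick out with errors.As and react to (for instance by retrying with a different manifest format). A hedged sketch of that round trip, with upload standing in for the real upload path:

package main

import (
	"errors"
	"fmt"

	"github.com/containers/image/v5/types"
)

// upload simulates the failure path of uploadManifest: the raw registry error
// is wrapped with %w, then tagged with the typed ManifestTypeRejectedError.
func upload() error {
	rawErr := errors.New("manifest invalid") // pretend registry response
	err := fmt.Errorf("uploading manifest latest to example.com/repo: %w", rawErr)
	return types.ManifestTypeRejectedError{Err: err}
}

func main() {
	var rejected types.ManifestTypeRejectedError
	if err := upload(); errors.As(err, &rejected) {
		fmt.Println("manifest type rejected; a caller could retry with another format:", rejected.Err)
	}
}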
5 vendor/github.com/containers/image/v5/docker/docker_image_src.go generated vendored
@@ -25,7 +25,6 @@ import (
 	"github.com/containers/image/v5/pkg/sysregistriesv2"
 	"github.com/containers/image/v5/types"
 	digest "github.com/opencontainers/go-digest"
-	perrors "github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

@@ -52,7 +51,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 	}
 	registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name())
 	if err != nil {
-		return nil, perrors.Wrapf(err, "loading registries configuration")
+		return nil, fmt.Errorf("loading registries configuration: %w", err)
 	}
 	if registry == nil {
 		// No configuration was found for the provided reference, so use the
@@ -109,7 +108,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerRef
 			// The paired [] at least have some chance of being unambiguous.
 			extras = append(extras, fmt.Sprintf("[%s: %v]", attempts[i].ref.String(), attempts[i].err))
 		}
-		return nil, perrors.Wrapf(primary.err, "(Mirrors also failed: %s): %s", strings.Join(extras, "\n"), primary.ref.String())
+		return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err)
 	}
 }
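The mirror-fallback hunk keeps the mirror errors as plain text but wraps only the primary error with %w, so unwrapping targets the authoritative failure. A sketch of the same pattern with made-up mirror hostnames:

package main

import (
	"errors"
	"fmt"
	"strings"
)

func main() {
	primaryErr := errors.New("connection refused")
	mirrorErrs := []string{
		"[mirror1.example.com/busybox: timeout]",
		"[mirror2.example.com/busybox: 503]",
	}
	err := fmt.Errorf("(Mirrors also failed: %s): %s: %w",
		strings.Join(mirrorErrs, "\n"), "docker.io/library/busybox", primaryErr)
	fmt.Println(errors.Is(err, primaryErr)) // true: only the primary error is in the chain
}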
11 vendor/github.com/containers/image/v5/docker/errors.go generated vendored
@@ -4,8 +4,6 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
-
-	"github.com/docker/distribution/registry/client"
 )

 var (
@@ -35,7 +33,7 @@ func httpResponseToError(res *http.Response, context string) error {
 	case http.StatusTooManyRequests:
 		return ErrTooManyRequests
 	case http.StatusUnauthorized:
-		err := client.HandleErrorResponse(res)
+		err := handleErrorResponse(res)
 		return ErrUnauthorizedForCredentials{Err: err}
 	default:
 		if context != "" {
@@ -48,13 +46,14 @@ func httpResponseToError(res *http.Response, context string) error {
 // registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution
 // registry
 func registryHTTPResponseToError(res *http.Response) error {
-	err := client.HandleErrorResponse(res)
-	if e, ok := err.(*client.UnexpectedHTTPResponseError); ok {
+	err := handleErrorResponse(res)
+	if e, ok := err.(*unexpectedHTTPResponseError); ok {
 		response := string(e.Response)
 		if len(response) > 50 {
 			response = response[:50] + "..."
 		}
-		err = fmt.Errorf("StatusCode: %d, %s", e.StatusCode, response)
+		// %.0w makes e visible to error.Unwrap() without including any text
+		err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e)
 	}
 	return err
 }
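The %.0w verb used above is the interesting part: a zero precision suppresses the wrapped error's text entirely while still recording it in the error chain. A small demonstration:

package main

import (
	"errors"
	"fmt"
)

func main() {
	cause := errors.New("underlying registry error")
	// %.0w formats the error with zero precision, so it adds no text,
	// but fmt.Errorf still records it as the wrapped error.
	err := fmt.Errorf("StatusCode: %d, %s%.0w", 404, "Not found", cause)
	fmt.Println(err)                   // StatusCode: 404, Not found
	fmt.Println(errors.Is(err, cause)) // true
	fmt.Println(errors.Unwrap(err))    // underlying registry error
}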
8 vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go generated vendored
@@ -5,6 +5,7 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"io"

 	"github.com/containers/image/v5/docker/reference"
@@ -16,7 +17,6 @@ import (
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
-	perrors "github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

@@ -108,11 +108,11 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
 	if options.IsConfig {
 		buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
 		if err != nil {
-			return types.BlobInfo{}, perrors.Wrap(err, "reading Config file stream")
+			return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err)
 		}
 		d.config = buf
 		if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
-			return types.BlobInfo{}, perrors.Wrap(err, "writing Config file")
+			return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err)
 		}
 	} else {
 		if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
@@ -153,7 +153,7 @@ func (d *Destination) PutManifest(ctx context.Context, m []byte, instanceDigest
 	// so the caller trying a different manifest kind would be pointless.
 	var man manifest.Schema2
 	if err := json.Unmarshal(m, &man); err != nil {
-		return perrors.Wrap(err, "parsing manifest")
+		return fmt.Errorf("parsing manifest: %w", err)
 	}
 	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
 		return errors.New("Unsupported manifest type, need a Docker schema 2 manifest")
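iolimits.ReadAtMost, used above, is internal to containers/image; the idea is a bounded read that fails instead of buffering unbounded input. A rough standalone equivalent of that idea (maxConfigSize is an assumed limit, not the library's constant):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

const maxConfigSize = 4 * 1024 * 1024 // assumed limit, not the library's constant

func readAtMost(r io.Reader, limit int) ([]byte, error) {
	// Read one extra byte so "exactly limit" can be told apart from "too big".
	buf, err := io.ReadAll(io.LimitReader(r, int64(limit)+1))
	if err != nil {
		return nil, fmt.Errorf("reading stream: %w", err)
	}
	if len(buf) > limit {
		return nil, errors.New("input exceeds the configured size limit")
	}
	return buf, nil
}

func main() {
	data, err := readAtMost(strings.NewReader(`{"config":{}}`), maxConfigSize)
	fmt.Println(string(data), err)
}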
17 vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go generated vendored
@@ -14,7 +14,6 @@ import (
 	"github.com/containers/image/v5/internal/tmpdir"
 	"github.com/containers/image/v5/pkg/compression"
 	"github.com/containers/image/v5/types"
-	perrors "github.com/pkg/errors"
 )

 // Reader is a ((docker save)-formatted) tar archive that allows random access to any component.
@@ -31,7 +30,7 @@ type Reader struct {
 func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
 	file, err := os.Open(path)
 	if err != nil {
-		return nil, perrors.Wrapf(err, "opening file %q", path)
+		return nil, fmt.Errorf("opening file %q: %w", path, err)
 	}
 	defer file.Close()

@@ -39,7 +38,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) {
 	// as a source. Otherwise we pass the stream to NewReaderFromStream.
 	stream, isCompressed, err := compression.AutoDecompress(file)
 	if err != nil {
-		return nil, perrors.Wrapf(err, "detecting compression for file %q", path)
+		return nil, fmt.Errorf("detecting compression for file %q: %w", path, err)
 	}
 	defer stream.Close()
 	if !isCompressed {
@@ -56,7 +55,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
 	// Save inputStream to a temporary file
 	tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar")
 	if err != nil {
-		return nil, perrors.Wrap(err, "creating temporary file")
+		return nil, fmt.Errorf("creating temporary file: %w", err)
 	}
 	defer tarCopyFile.Close()

@@ -72,7 +71,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
 	// giving users really confusing "invalid tar header" errors).
 	uncompressedStream, _, err := compression.AutoDecompress(inputStream)
 	if err != nil {
-		return nil, perrors.Wrap(err, "auto-decompressing input")
+		return nil, fmt.Errorf("auto-decompressing input: %w", err)
 	}
 	defer uncompressedStream.Close()

@@ -81,7 +80,7 @@ func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Read
 	// TODO: This can take quite some time, and should ideally be cancellable
 	// using a context.Context.
 	if _, err := io.Copy(tarCopyFile, uncompressedStream); err != nil {
-		return nil, perrors.Wrapf(err, "copying contents to temporary file %q", tarCopyFile.Name())
+		return nil, fmt.Errorf("copying contents to temporary file %q: %w", tarCopyFile.Name(), err)
 	}
 	succeeded = true

@@ -114,7 +113,7 @@ func newReader(path string, removeOnClose bool) (*Reader, error) {
 		return nil, err
 	}
 	if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
-		return nil, perrors.Wrap(err, "decoding tar manifest.json")
+		return nil, fmt.Errorf("decoding tar manifest.json: %w", err)
 	}

 	succeeded = true
@@ -147,7 +146,7 @@ func (r *Reader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int)
 		for tagIndex, tag := range r.Manifest[i].RepoTags {
 			parsedTag, err := reference.ParseNormalizedNamed(tag)
 			if err != nil {
-				return nil, -1, perrors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
+				return nil, -1, fmt.Errorf("Invalid tag %#v in manifest.json item @%d: %w", tag, i, err)
 			}
 			if parsedTag.String() == refString {
 				return &r.Manifest[i], tagIndex, nil
@@ -259,7 +258,7 @@ func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *
 func (r *Reader) readTarComponent(path string, limit int) ([]byte, error) {
 	file, err := r.openTarComponent(path)
 	if err != nil {
-		return nil, perrors.Wrapf(err, "loading tar component %s", path)
+		return nil, fmt.Errorf("loading tar component %s: %w", path, err)
 	}
 	defer file.Close()
 	bytes, err := iolimits.ReadAtMost(file, limit)
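compression.AutoDecompress, called in several hunks above, is a public helper that sniffs a stream and transparently decompresses it when needed. A usage sketch (image.tar.gz is a hypothetical input file); the io.Copy into io.Discard mirrors how the tarfile source measures uncompressed layer sizes:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	file, err := os.Open("image.tar.gz") // hypothetical input path
	if err != nil {
		panic(err)
	}
	defer file.Close()

	stream, isCompressed, err := compression.AutoDecompress(file)
	if err != nil {
		panic(fmt.Errorf("detecting compression: %w", err))
	}
	defer stream.Close()
	fmt.Println("compressed input:", isCompressed)

	// Draining the stream yields the uncompressed size, as in prepareLayerData.
	n, err := io.Copy(io.Discard, stream)
	fmt.Println(n, err)
}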
9 vendor/github.com/containers/image/v5/docker/internal/tarfile/src.go generated vendored
@@ -20,7 +20,6 @@ import (
 	"github.com/containers/image/v5/pkg/compression"
 	"github.com/containers/image/v5/types"
 	digest "github.com/opencontainers/go-digest"
-	perrors "github.com/pkg/errors"
 )

 // Source is a partial implementation of types.ImageSource for reading from tarPath.
@@ -96,7 +95,7 @@ func (s *Source) ensureCachedDataIsPresentPrivate() error {
 	}
 	var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
 	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
-		return perrors.Wrapf(err, "decoding tar config %s", tarManifest.Config)
+		return fmt.Errorf("decoding tar config %s: %w", tarManifest.Config, err)
 	}
 	if parsedConfig.RootFS == nil {
 		return fmt.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
@@ -180,7 +179,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
 			// the slower method of checking if it's compressed.
 			uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
 			if err != nil {
-				return nil, perrors.Wrapf(err, "auto-decompressing %s to determine its size", layerPath)
+				return nil, fmt.Errorf("auto-decompressing %s to determine its size: %w", layerPath, err)
 			}
 			defer uncompressedStream.Close()

@@ -188,7 +187,7 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manif
 			if isCompressed {
 				uncompressedSize, err = io.Copy(io.Discard, uncompressedStream)
 				if err != nil {
-					return nil, perrors.Wrapf(err, "reading %s to find its size", layerPath)
+					return nil, fmt.Errorf("reading %s to find its size: %w", layerPath, err)
 				}
 			}
 			li.size = uncompressedSize
@@ -303,7 +302,7 @@ func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.B

 	uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
 	if err != nil {
-		return nil, 0, perrors.Wrapf(err, "auto-decompressing blob %s", info.Digest)
+		return nil, 0, fmt.Errorf("auto-decompressing blob %s: %w", info.Digest, err)
 	}

 	newStream := uncompressedReadCloser{
17 vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go generated vendored
@@ -16,7 +16,6 @@ import (
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
-	perrors "github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

@@ -95,16 +94,16 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
 		// See also the comment in physicalLayerPath.
 		physicalLayerPath := w.physicalLayerPath(layerDigest)
 		if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
-			return perrors.Wrap(err, "creating layer symbolic link")
+			return fmt.Errorf("creating layer symbolic link: %w", err)
 		}

 		b := []byte("1.0")
 		if err := w.sendBytesLocked(filepath.Join(layerID, legacyVersionFileName), b); err != nil {
-			return perrors.Wrap(err, "writing VERSION file")
+			return fmt.Errorf("writing VERSION file: %w", err)
 		}

 		if err := w.sendBytesLocked(filepath.Join(layerID, legacyConfigFileName), configBytes); err != nil {
-			return perrors.Wrap(err, "writing config json file")
+			return fmt.Errorf("writing config json file: %w", err)
 		}

 		w.legacyLayers[layerID] = struct{}{}
@@ -129,7 +128,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
 		var config map[string]*json.RawMessage
 		err := json.Unmarshal(configBytes, &config)
 		if err != nil {
-			return perrors.Wrap(err, "unmarshaling config")
+			return fmt.Errorf("unmarshaling config: %w", err)
 		}
 		for _, attr := range [7]string{"architecture", "config", "container", "container_config", "created", "docker_version", "os"} {
 			layerConfig[attr] = config[attr]
@@ -153,7 +152,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
 		layerConfig["layer_id"] = chainID
 		b, err := json.Marshal(layerConfig) // Note that layerConfig["id"] is not set yet at this point.
 		if err != nil {
-			return perrors.Wrap(err, "marshaling layer config")
+			return fmt.Errorf("marshaling layer config: %w", err)
 		}
 		delete(layerConfig, "layer_id")
 		layerID := digest.Canonical.FromBytes(b).Hex()
@@ -161,7 +160,7 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De

 		configBytes, err := json.Marshal(layerConfig)
 		if err != nil {
-			return perrors.Wrap(err, "marshaling layer config")
+			return fmt.Errorf("marshaling layer config: %w", err)
 		}

 		if err := w.ensureSingleLegacyLayerLocked(layerID, l.Digest, configBytes); err != nil {
@@ -281,10 +280,10 @@ func (w *Writer) Close() error {

 	b, err = json.Marshal(w.repositories)
 	if err != nil {
-		return perrors.Wrap(err, "marshaling repositories")
+		return fmt.Errorf("marshaling repositories: %w", err)
 	}
 	if err := w.sendBytesLocked(legacyRepositoriesFileName, b); err != nil {
-		return perrors.Wrap(err, "writing config json file")
+		return fmt.Errorf("writing config json file: %w", err)
 	}

 	if err := w.tar.Close(); err != nil {
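The layer-ID derivation visible in writeLegacyMetadataLocked hashes the serialized layer config with the canonical digest algorithm. A toy sketch of just that step (the layerConfig contents here are made up):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	layerConfig := map[string]string{"layer_id": "sha256:0123", "os": "linux"} // made-up contents
	b, err := json.Marshal(layerConfig)
	if err != nil {
		panic(err)
	}
	// digest.Canonical is SHA-256; Hex() yields the 64-character ID string.
	layerID := digest.Canonical.FromBytes(b).Hex()
	fmt.Println(layerID)
}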
4 vendor/github.com/containers/image/v5/docker/reference/reference.go generated vendored
@@ -3,13 +3,13 @@
 //
 // Grammar
 //
-// 	reference := name [ ":" tag ] [ "@" digest ]
+//	reference := name [ ":" tag ] [ "@" digest ]
 //	name := [domain '/'] path-component ['/' path-component]*
 //	domain := domain-component ['.' domain-component]* [':' port-number]
 //	domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
 //	port-number := /[0-9]+/
 //	path-component := alpha-numeric [separator alpha-numeric]*
-// 	alpha-numeric := /[a-z0-9]+/
+//	alpha-numeric := /[a-z0-9]+/
 //	separator := /[_.]|__|[-]*/
 //
 //	tag := /[\w][\w.-]{0,127}/
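The grammar above is what the package's parsing functions enforce. A quick sketch with the public API, showing how a short name is normalized:

package main

import (
	"fmt"

	"github.com/containers/image/v5/docker/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())          // docker.io/library/busybox:latest
	fmt.Println(reference.Domain(named)) // docker.io
	fmt.Println(reference.Path(named))   // library/busybox
}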
5 vendor/github.com/containers/image/v5/docker/registries_d.go generated vendored
@@ -15,7 +15,6 @@ import (
 	"github.com/containers/storage/pkg/homedir"
 	"github.com/ghodss/yaml"
 	"github.com/opencontainers/go-digest"
-	perrors "github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

@@ -135,7 +134,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 		var config registryConfiguration
 		err = yaml.Unmarshal(configBytes, &config)
 		if err != nil {
-			return nil, perrors.Wrapf(err, "parsing %s", configPath)
+			return nil, fmt.Errorf("parsing %s: %w", configPath, err)
 		}

 		if config.DefaultDocker != nil {
@@ -168,7 +167,7 @@ func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference,
 	if topLevel != "" {
 		u, err := url.Parse(topLevel)
 		if err != nil {
-			return nil, perrors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
+			return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err)
 		}
 		url = u
 	} else {
13 vendor/github.com/containers/image/v5/docker/wwwauthenticate.go generated vendored
@@ -3,6 +3,7 @@ package docker
 // Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.

 import (
+	"fmt"
 	"net/http"
 	"strings"
 )
@@ -70,6 +71,18 @@ func parseAuthHeader(header http.Header) []challenge {
 	return challenges
 }

+// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions`
+func parseAuthScope(scopeStr string) (*authScope, error) {
+	if parts := strings.Split(scopeStr, ":"); len(parts) == 3 {
+		return &authScope{
+			resourceType: parts[0],
+			remoteName:   parts[1],
+			actions:      parts[2],
+		}, nil
+	}
+	return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr)
+}
+
 // NOTE: This is not a fully compliant parser per RFC 7235:
 // Most notably it does not support more than one challenge within a single header
 // Some of the whitespace parsing also seems noncompliant.
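parseAuthScope is unexported, so it cannot be called from outside the package; this standalone sketch performs the same three-way split on a typical token scope string:

package main

import (
	"fmt"
	"strings"
)

func main() {
	scope := "repository:library/busybox:pull"
	parts := strings.Split(scope, ":")
	if len(parts) != 3 {
		panic(fmt.Errorf("error parsing auth scope: '%s'", scope))
	}
	fmt.Println("resourceType:", parts[0]) // repository
	fmt.Println("remoteName:", parts[1])   // library/busybox
	fmt.Println("actions:", parts[2])      // pull
}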
9 vendor/github.com/containers/image/v5/internal/image/docker_list.go generated vendored
@@ -6,26 +6,25 @@ import (

 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
-	perrors "github.com/pkg/errors"
 )

 func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
 	list, err := manifest.Schema2ListFromManifest(manblob)
 	if err != nil {
-		return nil, perrors.Wrapf(err, "parsing schema2 manifest list")
+		return nil, fmt.Errorf("parsing schema2 manifest list: %w", err)
 	}
 	targetManifestDigest, err := list.ChooseInstance(sys)
 	if err != nil {
-		return nil, perrors.Wrapf(err, "choosing image instance")
+		return nil, fmt.Errorf("choosing image instance: %w", err)
 	}
 	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
 	if err != nil {
-		return nil, perrors.Wrapf(err, "fetching target platform image selected from manifest list")
+		return nil, fmt.Errorf("fetching target platform image selected from manifest list: %w", err)
 	}

 	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
 	if err != nil {
-		return nil, perrors.Wrap(err, "computing manifest digest")
+		return nil, fmt.Errorf("computing manifest digest: %w", err)
 	}
 	if !matches {
 		return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
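The same flow is available through the public manifest package. A hedged sketch: fetchManifest is a placeholder for a registry fetch, and the empty manifest list only exercises the error path:

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
)

// fetchManifest is a placeholder for fetching the selected per-platform
// manifest bytes from a registry by digest.
func fetchManifest(d digest.Digest) []byte { return nil }

func selectInstance(listBlob []byte, sys *types.SystemContext) error {
	list, err := manifest.Schema2ListFromManifest(listBlob)
	if err != nil {
		return fmt.Errorf("parsing schema2 manifest list: %w", err)
	}
	target, err := list.ChooseInstance(sys)
	if err != nil {
		return fmt.Errorf("choosing image instance: %w", err)
	}
	matches, err := manifest.MatchesDigest(fetchManifest(target), target)
	if err != nil {
		return fmt.Errorf("computing manifest digest: %w", err)
	}
	if !matches {
		return fmt.Errorf("image manifest does not match selected manifest digest %s", target)
	}
	return nil
}

func main() {
	sys := &types.SystemContext{OSChoice: "linux", ArchitectureChoice: "amd64"}
	// An empty manifest list only exercises the error path.
	fmt.Println(selectInstance([]byte(`{"schemaVersion":2,"manifests":[]}`), sys))
}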
3 vendor/github.com/containers/image/v5/internal/image/docker_schema2.go generated vendored
@@ -17,7 +17,6 @@ import (
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-	perrors "github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )

@@ -294,7 +293,7 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options
 	// and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
 	info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false)
 	if err != nil {
-		return nil, perrors.Wrap(err, "uploading empty layer")
+		return nil, fmt.Errorf("uploading empty layer: %w", err)
 	}
 	if info.Digest != emptyLayerBlobInfo.Digest {
 		return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest)
9 vendor/github.com/containers/image/v5/internal/image/oci_index.go generated vendored
@@ -6,26 +6,25 @@ import (

 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
-	perrors "github.com/pkg/errors"
 )

 func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
 	index, err := manifest.OCI1IndexFromManifest(manblob)
 	if err != nil {
-		return nil, perrors.Wrapf(err, "parsing OCI1 index")
+		return nil, fmt.Errorf("parsing OCI1 index: %w", err)
 	}
 	targetManifestDigest, err := index.ChooseInstance(sys)
 	if err != nil {
-		return nil, perrors.Wrapf(err, "choosing image instance")
+		return nil, fmt.Errorf("choosing image instance: %w", err)
 	}
 	manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
 	if err != nil {
-		return nil, perrors.Wrapf(err, "fetching target platform image selected from image index")
+		return nil, fmt.Errorf("fetching target platform image selected from image index: %w", err)
 	}

 	matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
 	if err != nil {
-		return nil, perrors.Wrap(err, "computing manifest digest")
+		return nil, fmt.Errorf("computing manifest digest: %w", err)
 	}
 	if !matches {
 		return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
3 vendor/github.com/containers/image/v5/internal/image/unparsed.go generated vendored
@@ -11,7 +11,6 @@ import (
 	"github.com/containers/image/v5/manifest"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
-	perrors "github.com/pkg/errors"
 )

 // UnparsedImage implements types.UnparsedImage .
@@ -61,7 +60,7 @@ func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
 		if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
 			matches, err := manifest.MatchesDigest(m, digest)
 			if err != nil {
-				return nil, "", perrors.Wrap(err, "computing manifest digest")
+				return nil, "", fmt.Errorf("computing manifest digest: %w", err)
 			}
 			if !matches {
 				return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest)
13 vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go generated vendored
@@ -22,13 +22,14 @@ type Compat struct {
 // for implementations of private.ImageDestination.
 //
 // Use it like this:
-// type yourDestination struct {
-// 	impl.Compat
-// 	…
-// }
-// dest := &yourDestination{…}
-// dest.Compat = impl.AddCompat(dest)
+//
+//	type yourDestination struct {
+//		impl.Compat
+//		…
+//	}
+//
+//	dest := &yourDestination{…}
+//	dest.Compat = impl.AddCompat(dest)
 func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
 	return Compat{dest}
 }
34 vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go generated vendored
@@ -3,23 +3,25 @@
 // Compare with imagedestination/impl, which might require non-trivial implementation work.
 //
 // There are two kinds of stubs:
-// - Pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
-//   implementation:
-//   type yourDestination struct {
-//       stubs.ImplementsPutBlobPartial
-//       …
-//   }
-// - Stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
-//   means that a constructor must be called:
-//   type yourDestination struct {
-//       stubs.NoPutBlobPartialInitialize
-//       …
-//   }
-//   dest := &yourDestination{
-//       …
-//       NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
-//   }
+//
+// First, there are pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination
+// implementation:
+//
+//	type yourDestination struct {
+//		stubs.ImplementsPutBlobPartial
+//		…
+//	}
+//
+// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+//	type yourDestination struct {
+//		stubs.NoPutBlobPartialInitialize
+//		…
+//	}
+//
+//	dest := &yourDestination{
+//		…
+//		NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref),
+//	}
 package stubs
13 vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go generated vendored
@@ -19,13 +19,14 @@ type Compat struct {
 // for implementations of private.ImageSource.
 //
 // Use it like this:
-// type yourSource struct {
-// 	impl.Compat
-// 	…
-// }
-// src := &yourSource{…}
-// src.Compat = impl.AddCompat(src)
+//
+//	type yourSource struct {
+//		impl.Compat
+//		…
+//	}
+//
+//	src := &yourSource{…}
+//	src.Compat = impl.AddCompat(src)
 func AddCompat(src private.ImageSourceInternalOnly) Compat {
 	return Compat{src}
 }
35 vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go generated vendored
@@ -3,23 +3,26 @@
 // Compare with imagesource/impl, which might require non-trivial implementation work.
 //
 // There are two kinds of stubs:
-// - Pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
-//   implementation:
-//   type yourSource struct {
-//       stubs.ImplementsGetBlobAt
-//       …
-//   }
-// - Stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
-//   means that a constructor must be called:
-//   type yourSource struct {
-//       stubs.NoGetBlobAtInitialize
-//       …
-//   }
-//   dest := &yourSource{
-//       …
-//       NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
-//   }
+//
+// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
+// implementation:
+//
+//	type yourSource struct {
+//		stubs.ImplementsGetBlobAt
+//		…
+//	}
+//
+// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
+// means that a constructor must be called:
+//
+//	type yourSource struct {
+//		stubs.NoGetBlobAtInitialize
+//		…
+//	}
+//
+//	dest := &yourSource{
+//		…
+//		NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
+//	}
 package stubs
13 vendor/github.com/containers/image/v5/manifest/common.go generated vendored
@@ -228,3 +228,16 @@ func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet,
 	variants := findCompressionMIMETypeSet(variantTable, mimeType)
 	return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm.
 }
+
+// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos()
+// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure.
+func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer {
+	layers := make([]types.ImageInspectLayer, len(infos))
+	for i, info := range infos {
+		layers[i].MIMEType = info.MediaType
+		layers[i].Digest = info.Digest
+		layers[i].Size = info.Size
+		layers[i].Annotations = info.Annotations
+	}
+	return layers
+}
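A small demonstration of the conversion the new helper performs, written against the public manifest and types packages (the layer info below is fabricated):

package main

import (
	"fmt"

	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
)

// toInspectLayers mirrors the unexported helper added above.
func toInspectLayers(infos []manifest.LayerInfo) []types.ImageInspectLayer {
	layers := make([]types.ImageInspectLayer, len(infos))
	for i, info := range infos {
		layers[i].MIMEType = info.MediaType
		layers[i].Digest = info.Digest
		layers[i].Size = info.Size
		layers[i].Annotations = info.Annotations
	}
	return layers
}

func main() {
	infos := []manifest.LayerInfo{{
		BlobInfo: types.BlobInfo{
			MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
			Digest:    "sha256:0000000000000000000000000000000000000000000000000000000000000000",
			Size:      1234,
		},
	}}
	fmt.Println(toInspectLayers(infos))
}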
Some files were not shown because too many files have changed in this diff.